summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bfdd/bfd.c548
-rw-r--r--bfdd/bfd.h86
-rw-r--r--bfdd/bfd_packet.c823
-rw-r--r--bfdd/bfdd_cli.c677
-rw-r--r--bfdd/bfdd_nb.c453
-rw-r--r--bfdd/bfdd_nb.h28
-rw-r--r--bfdd/bfdd_nb_config.c389
-rw-r--r--bfdd/bfdd_nb_state.c88
-rw-r--r--bfdd/bfdd_vty.c408
-rw-r--r--bfdd/event.c87
-rw-r--r--bfdd/ptm_adapter.c22
-rw-r--r--bgpd/bgp_fsm.c3
-rw-r--r--bgpd/bgp_main.c2
-rw-r--r--bgpd/bgp_nht.c19
-rw-r--r--bgpd/bgp_packet.c3
-rw-r--r--bgpd/bgp_route.c6
-rw-r--r--bgpd/bgp_vty.c65
-rw-r--r--bgpd/bgp_zebra.c73
-rw-r--r--bgpd/bgpd.c27
-rw-r--r--doc/developer/sbfd.rst140
-rw-r--r--doc/developer/subdir.am1
-rw-r--r--doc/user/sbfd.rst304
-rw-r--r--doc/user/subdir.am1
-rw-r--r--isisd/isis_tlvs.c72
-rw-r--r--lib/bfd.c19
-rw-r--r--lib/bfd.h4
-rw-r--r--lib/privs.c9
-rw-r--r--lib/srcdest_table.c10
-rw-r--r--lib/srcdest_table.h2
-rw-r--r--lib/zclient.c177
-rw-r--r--lib/zclient.h27
-rw-r--r--ospfd/ospf_zebra.c21
-rw-r--r--pimd/pim_autorp.c43
-rw-r--r--pimd/pim_cmd_common.c20
-rw-r--r--staticd/static_nb.c90
-rw-r--r--staticd/static_nb.h87
-rw-r--r--staticd/static_nb_config.c353
-rw-r--r--staticd/static_nht.c29
-rw-r--r--staticd/static_nht.h14
-rw-r--r--staticd/static_routes.c71
-rw-r--r--staticd/static_vrf.c7
-rw-r--r--staticd/static_vty.c172
-rw-r--r--staticd/static_zebra.c42
-rwxr-xr-xtests/topotests/bgp_srv6_sid_reachability/test_bgp_srv6_sid_reachability.py30
-rw-r--r--tests/topotests/bgp_table_direct_topo1/__init__.py0
-rw-r--r--tests/topotests/bgp_table_direct_topo1/r1/frr.conf31
-rw-r--r--tests/topotests/bgp_table_direct_topo1/r2/frr.conf10
-rw-r--r--tests/topotests/bgp_table_direct_topo1/r3/frr.conf10
-rw-r--r--tests/topotests/bgp_table_direct_topo1/test_bgp_table_direct_topo1.py201
-rw-r--r--tests/topotests/bgp_table_map/r1/frr.conf22
-rw-r--r--tests/topotests/bgp_table_map/r2/frr.conf18
-rw-r--r--tests/topotests/bgp_table_map/test_bgp_table_map.py129
-rw-r--r--tests/topotests/isis_srv6_topo1/test_isis_srv6_topo1.py16
-rw-r--r--tests/topotests/mgmt_tests/test_yang_mgmt.py20
-rwxr-xr-xtests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py6
-rw-r--r--tests/topotests/ospf_prune_next_hop/r1/frr.conf23
-rw-r--r--tests/topotests/ospf_prune_next_hop/r2/frr.conf23
-rw-r--r--tests/topotests/ospf_prune_next_hop/r3/frr.conf35
-rw-r--r--tests/topotests/ospf_prune_next_hop/r4/frr.conf34
-rw-r--r--tests/topotests/ospf_prune_next_hop/r5/frr.conf34
-rw-r--r--tests/topotests/ospf_prune_next_hop/r6/frr.conf34
-rw-r--r--tests/topotests/ospf_prune_next_hop/r7/frr.conf14
-rw-r--r--tests/topotests/ospf_prune_next_hop/r8/frr.conf14
-rw-r--r--tests/topotests/ospf_prune_next_hop/test_ospf_prune_next_hop.py343
-rw-r--r--tests/topotests/sbfd_topo1/__init__.py0
-rw-r--r--tests/topotests/sbfd_topo1/r1/frr.conf8
-rw-r--r--tests/topotests/sbfd_topo1/r2/frr.conf8
-rw-r--r--tests/topotests/sbfd_topo1/sbfd_topo1.dot45
-rw-r--r--tests/topotests/sbfd_topo1/test_sbfd_topo1.py248
-rw-r--r--tests/topotests/static_simple/test_static_simple.py72
-rw-r--r--tests/topotests/static_srv6_sids/expected_srv6_sids.json35
-rw-r--r--tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_1.json107
-rw-r--r--tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_2.json72
-rw-r--r--tests/topotests/static_srv6_sids/r1/frr.conf1
-rwxr-xr-xtests/topotests/static_srv6_sids/test_static_srv6_sids.py94
-rw-r--r--tests/topotests/v6_nexthop_group_recursive_resolution/r1/frr.conf4
-rw-r--r--tests/topotests/v6_nexthop_group_recursive_resolution/test_v6_nexthop_group_recursive_resolution.py189
-rwxr-xr-xtools/frr-reload.py11
-rw-r--r--vtysh/vtysh.c76
-rw-r--r--yang/frr-bfdd.yang130
-rw-r--r--yang/frr-staticd.yang24
-rw-r--r--zebra/redistribute.c115
-rw-r--r--zebra/rt_netlink.c8
-rw-r--r--zebra/zapi_msg.c28
-rw-r--r--zebra/zapi_msg.h6
-rw-r--r--zebra/zebra_nhg.c6
-rw-r--r--zebra/zebra_srv6.c23
87 files changed, 6717 insertions, 1162 deletions
diff --git a/bfdd/bfd.c b/bfdd/bfd.c
index f32bc2598b..164910556b 100644
--- a/bfdd/bfd.c
+++ b/bfdd/bfd.c
@@ -23,6 +23,7 @@ DEFINE_MTYPE_STATIC(BFDD, BFDD_CONFIG, "long-lived configuration memory");
DEFINE_MTYPE_STATIC(BFDD, BFDD_PROFILE, "long-lived profile memory");
DEFINE_MTYPE_STATIC(BFDD, BFDD_SESSION_OBSERVER, "Session observer");
DEFINE_MTYPE_STATIC(BFDD, BFDD_VRF, "BFD VRF");
+DEFINE_MTYPE_STATIC(BFDD, SBFD_REFLECTOR, "SBFD REFLECTOR");
/*
* Prototypes
@@ -39,6 +40,10 @@ static void bs_down_handler(struct bfd_session *bs, int nstate);
static void bs_init_handler(struct bfd_session *bs, int nstate);
static void bs_up_handler(struct bfd_session *bs, int nstate);
+static void ptm_sbfd_echo_xmt_TO(struct bfd_session *bfd);
+static void sbfd_down_handler(struct bfd_session *bs, int nstate);
+static void sbfd_up_handler(struct bfd_session *bs, int nstate);
+
/**
* Remove BFD profile from all BFD sessions so we don't leave dangling
* pointers.
@@ -192,10 +197,12 @@ void bfd_session_apply(struct bfd_session *bs)
}
/* Toggle 'passive-mode' if default value. */
- if (bs->peer_profile.passive == false)
- bfd_set_passive_mode(bs, bp->passive);
- else
- bfd_set_passive_mode(bs, bs->peer_profile.passive);
+ if (bs->bfd_mode == BFD_MODE_TYPE_BFD) {
+ if (bs->peer_profile.passive == false)
+ bfd_set_passive_mode(bs, bp->passive);
+ else
+ bfd_set_passive_mode(bs, bs->peer_profile.passive);
+ }
/* Toggle 'no shutdown' if default value. */
if (bs->peer_profile.admin_shutdown == false)
@@ -222,10 +229,11 @@ void bfd_profile_remove(struct bfd_session *bs)
bfd_session_apply(bs);
}
-void gen_bfd_key(struct bfd_key *key, struct sockaddr_any *peer,
- struct sockaddr_any *local, bool mhop, const char *ifname,
- const char *vrfname)
+void gen_bfd_key(struct bfd_key *key, struct sockaddr_any *peer, struct sockaddr_any *local,
+ bool mhop, const char *ifname, const char *vrfname, const char *bfdname)
{
+ struct vrf *vrf = NULL;
+
memset(key, 0, sizeof(*key));
switch (peer->sa_sin.sin_family) {
@@ -248,10 +256,20 @@ void gen_bfd_key(struct bfd_key *key, struct sockaddr_any *peer,
key->mhop = mhop;
if (ifname && ifname[0])
strlcpy(key->ifname, ifname, sizeof(key->ifname));
- if (vrfname && vrfname[0])
- strlcpy(key->vrfname, vrfname, sizeof(key->vrfname));
- else
+ if (vrfname && vrfname[0] && strcmp(vrfname, VRF_DEFAULT_NAME) != 0) {
+ vrf = vrf_lookup_by_name(vrfname);
+ if (vrf) {
+ strlcpy(key->vrfname, vrf->name, sizeof(key->vrfname));
+ } else {
+ strlcpy(key->vrfname, vrfname, sizeof(key->vrfname));
+ }
+ } else {
strlcpy(key->vrfname, VRF_DEFAULT_NAME, sizeof(key->vrfname));
+ }
+
+ if (bfdname && bfdname[0]) {
+ strlcpy(key->bfdname, bfdname, sizeof(key->bfdname));
+ }
}
struct bfd_session *bs_peer_find(struct bfd_peer_cfg *bpc)
@@ -259,8 +277,8 @@ struct bfd_session *bs_peer_find(struct bfd_peer_cfg *bpc)
struct bfd_key key;
/* Otherwise fallback to peer/local hash lookup. */
- gen_bfd_key(&key, &bpc->bpc_peer, &bpc->bpc_local, bpc->bpc_mhop,
- bpc->bpc_localif, bpc->bpc_vrfname);
+ gen_bfd_key(&key, &bpc->bpc_peer, &bpc->bpc_local, bpc->bpc_mhop, bpc->bpc_localif,
+ bpc->bpc_vrfname, bpc->bfd_name);
return bfd_key_lookup(key);
}
@@ -333,14 +351,24 @@ int bfd_session_enable(struct bfd_session *bs)
* could use the destination port (3784) for the source
* port we wouldn't need a socket per session.
*/
- if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6) == 0) {
+ if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO || bs->bfd_mode == BFD_MODE_TYPE_SBFD_INIT) {
+ psock = bp_peer_srh_socketv6(bs);
+ if (psock <= 0) {
+ zlog_err("bp_peer_srh_socketv6 error");
+ return 0;
+ }
+ } else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6) == 0) {
psock = bp_peer_socket(bs);
- if (psock == -1)
+ if (psock == -1) {
+ zlog_err("bp_peer_socket error");
return 0;
+ }
} else {
psock = bp_peer_socketv6(bs);
- if (psock == -1)
+ if (psock == -1) {
+ zlog_err("bp_peer_socketv6 error");
return 0;
+ }
}
/*
@@ -351,10 +379,18 @@ int bfd_session_enable(struct bfd_session *bs)
/* Only start timers if we are using active mode. */
if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_PASSIVE) == 0) {
- bfd_recvtimer_update(bs);
- ptm_bfd_start_xmt_timer(bs, false);
- }
+ if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO) {
+ /*enable receive echo response*/
+ bfd_set_echo(bs, true);
+ bs->echo_detect_TO = (bs->remote_detect_mult * bs->echo_xmt_TO);
+ sbfd_echo_recvtimer_update(bs);
+ ptm_bfd_start_xmt_timer(bs, true);
+ } else {
+ bfd_recvtimer_update(bs);
+ ptm_bfd_start_xmt_timer(bs, false);
+ }
+ }
/* initialize RTT */
bfd_rtt_init(bs);
@@ -383,6 +419,8 @@ void bfd_session_disable(struct bfd_session *bs)
bfd_recvtimer_delete(bs);
bfd_xmttimer_delete(bs);
ptm_bfd_echo_stop(bs);
+ bs->vrf = NULL;
+ bs->ifp = NULL;
/* Set session down so it doesn't report UP and disabled. */
ptm_bfd_sess_dn(bs, BD_PATH_DOWN);
@@ -422,10 +460,18 @@ void ptm_bfd_start_xmt_timer(struct bfd_session *bfd, bool is_echo)
jitter = (xmt_TO * (75 + (frr_weak_random() % maxpercent))) / 100;
/* XXX remove that division above */
- if (is_echo)
- bfd_echo_xmttimer_update(bfd, jitter);
- else
- bfd_xmttimer_update(bfd, jitter);
+ if (bfd->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO || bfd->bfd_mode == BFD_MODE_TYPE_SBFD_INIT) {
+ if (is_echo)
+ sbfd_echo_xmttimer_update(bfd, jitter);
+ else
+ sbfd_init_xmttimer_update(bfd, jitter);
+
+ } else {
+ if (is_echo)
+ bfd_echo_xmttimer_update(bfd, jitter);
+ else
+ bfd_xmttimer_update(bfd, jitter);
+ }
}
static void ptm_bfd_echo_xmt_TO(struct bfd_session *bfd)
@@ -456,6 +502,37 @@ void ptm_bfd_xmt_TO(struct bfd_session *bfd, int fbit)
ptm_bfd_start_xmt_timer(bfd, false);
}
+static void ptm_sbfd_echo_xmt_TO(struct bfd_session *bfd)
+{
+ /* Send the scheduled sbfd-echo packet */
+ ptm_sbfd_echo_snd(bfd);
+
+ /* Restart the timer for next time */
+ ptm_bfd_start_xmt_timer(bfd, true);
+}
+
+void ptm_sbfd_init_xmt_TO(struct bfd_session *bfd, int fbit)
+{
+ /* Send the scheduled control packet */
+ ptm_sbfd_initiator_snd(bfd, fbit);
+
+ /* Restart the timer for next time */
+ ptm_bfd_start_xmt_timer(bfd, false);
+}
+
+void ptm_sbfd_init_reset(struct bfd_session *bfd)
+{
+ bfd->xmt_TO = BFD_DEF_SLOWTX;
+ bfd->detect_TO = 0;
+ ptm_sbfd_init_xmt_TO(bfd, 0);
+}
+void ptm_sbfd_echo_reset(struct bfd_session *bfd)
+{
+ bfd->echo_xmt_TO = SBFD_ECHO_DEF_SLOWTX;
+ bfd->echo_detect_TO = 0;
+ ptm_sbfd_echo_xmt_TO(bfd);
+}
+
void ptm_bfd_echo_stop(struct bfd_session *bfd)
{
bfd->echo_xmt_TO = 0;
@@ -550,12 +627,103 @@ void ptm_bfd_sess_dn(struct bfd_session *bfd, uint8_t diag)
UNSET_FLAG(bfd->flags, BFD_SESS_FLAG_MAC_SET);
memset(bfd->peer_hw_addr, 0, sizeof(bfd->peer_hw_addr));
/* reset local address ,it might has been be changed after bfd is up*/
- memset(&bfd->local_address, 0, sizeof(bfd->local_address));
+ if (bfd->bfd_mode == BFD_MODE_TYPE_BFD)
+ memset(&bfd->local_address, 0, sizeof(bfd->local_address));
/* reset RTT */
bfd_rtt_init(bfd);
}
+/* sbfd session up: covers both sbfd init and sbfd echo sessions */
+void ptm_sbfd_sess_up(struct bfd_session *bfd)
+{
+ int old_state = bfd->ses_state;
+
+ bfd->local_diag = 0;
+ bfd->ses_state = PTM_BFD_UP;
+ monotime(&bfd->uptime);
+
+ /*notify session up*/
+ ptm_bfd_notify(bfd, bfd->ses_state);
+
+ if (old_state != bfd->ses_state) {
+ bfd->stats.session_up++;
+ if (bglobal.debug_peer_event)
+ zlog_info("state-change: [%s] %s -> %s", bs_to_string(bfd),
+ state_list[old_state].str, state_list[bfd->ses_state].str);
+ }
+}
+
+/* sbfd init session timeout (TO) */
+void ptm_sbfd_init_sess_dn(struct bfd_session *bfd, uint8_t diag)
+{
+ int old_state = bfd->ses_state;
+
+ bfd->local_diag = diag;
+ bfd->ses_state = PTM_BFD_DOWN;
+ bfd->polling = 0;
+ bfd->demand_mode = 0;
+ monotime(&bfd->downtime);
+
+ /*
+ * Only attempt to send if we have a valid socket:
+ * this function might be called by session disablers and in
+ * this case we won't have a valid socket (i.e. interface was
+ * removed or VRF doesn't exist anymore).
+ */
+ if (bfd->sock != -1)
+ ptm_sbfd_init_reset(bfd);
+
+ /* Slow down the control packets, the connection is down. */
+ bs_set_slow_timers(bfd);
+
+ /* only signal clients when going from up->down state */
+ if (old_state == PTM_BFD_UP)
+ ptm_bfd_notify(bfd, PTM_BFD_DOWN);
+
+ /* Stop attempting to transmit or expect control packets if passive. */
+ if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_PASSIVE)) {
+ sbfd_init_recvtimer_delete(bfd);
+ sbfd_init_xmttimer_delete(bfd);
+ }
+
+ if (old_state != bfd->ses_state) {
+ bfd->stats.session_down++;
+ if (bglobal.debug_peer_event)
+ zlog_debug("state-change: [%s] %s -> %s reason:%s", bs_to_string(bfd),
+ state_list[old_state].str, state_list[bfd->ses_state].str,
+ get_diag_str(bfd->local_diag));
+ }
+	/* reset local address; it might have been changed after bfd came up */
+ //memset(&bfd->local_address, 0, sizeof(bfd->local_address));
+}
+
+/* sbfd echo session timeout (TO) */
+void ptm_sbfd_echo_sess_dn(struct bfd_session *bfd, uint8_t diag)
+{
+ int old_state = bfd->ses_state;
+
+ bfd->local_diag = diag;
+ bfd->discrs.remote_discr = 0;
+ bfd->ses_state = PTM_BFD_DOWN;
+ bfd->polling = 0;
+ bfd->demand_mode = 0;
+ monotime(&bfd->downtime);
+ /* only signal clients when going from up->down state */
+ if (old_state == PTM_BFD_UP)
+ ptm_bfd_notify(bfd, PTM_BFD_DOWN);
+
+ ptm_sbfd_echo_reset(bfd);
+
+ if (old_state != bfd->ses_state) {
+ bfd->stats.session_down++;
+ if (bglobal.debug_peer_event)
+ zlog_warn("state-change: [%s] %s -> %s reason:%s", bs_to_string(bfd),
+ state_list[old_state].str, state_list[bfd->ses_state].str,
+ get_diag_str(bfd->local_diag));
+ }
+}
+
static struct bfd_session *bfd_find_disc(struct sockaddr_any *sa,
uint32_t ldisc)
{
@@ -599,7 +767,7 @@ struct bfd_session *ptm_bfd_sess_find(struct bfd_pkt *cp,
vrf = vrf_lookup_by_id(vrfid);
gen_bfd_key(&key, peer, local, is_mhop, ifp ? ifp->name : NULL,
- vrf ? vrf->name : VRF_DEFAULT_NAME);
+ vrf ? vrf->name : VRF_DEFAULT_NAME, NULL);
/* XXX maybe remoteDiscr should be checked for remoteHeard cases. */
return bfd_key_lookup(key);
@@ -620,6 +788,21 @@ void bfd_echo_xmt_cb(struct event *t)
ptm_bfd_echo_xmt_TO(bs);
}
+void sbfd_init_xmt_cb(struct event *t)
+{
+ struct bfd_session *bs = EVENT_ARG(t);
+
+ ptm_sbfd_init_xmt_TO(bs, 0);
+}
+
+void sbfd_echo_xmt_cb(struct event *t)
+{
+ struct bfd_session *bs = EVENT_ARG(t);
+
+ if (bs->echo_xmt_TO > 0)
+ ptm_sbfd_echo_xmt_TO(bs);
+}
+
/* Was ptm_bfd_detect_TO() */
void bfd_recvtimer_cb(struct event *t)
{
@@ -638,6 +821,11 @@ void bfd_echo_recvtimer_cb(struct event *t)
{
struct bfd_session *bs = EVENT_ARG(t);
+ if (bglobal.debug_peer_event) {
+ zlog_debug("%s: time-out bfd: [%s] bfd'state is %s", __func__, bs_to_string(bs),
+ state_list[bs->ses_state].str);
+ }
+
switch (bs->ses_state) {
case PTM_BFD_INIT:
case PTM_BFD_UP:
@@ -646,11 +834,49 @@ void bfd_echo_recvtimer_cb(struct event *t)
}
}
-struct bfd_session *bfd_session_new(void)
+void sbfd_init_recvtimer_cb(struct event *t)
+{
+ struct bfd_session *bs = EVENT_ARG(t);
+
+ switch (bs->ses_state) {
+ case PTM_BFD_INIT:
+ case PTM_BFD_UP:
+ ptm_sbfd_init_sess_dn(bs, BD_PATH_DOWN);
+ break;
+
+ default:
+ /* Second detect time expiration, zero remote discr (section
+ * 6.5.1)
+ */
+ break;
+ }
+}
+void sbfd_echo_recvtimer_cb(struct event *t)
+{
+ struct bfd_session *bs = EVENT_ARG(t);
+
+ if (bglobal.debug_peer_event) {
+ zlog_debug("%s: time-out bfd: [%s] bfd'state is %s", __func__, bs_to_string(bs),
+ state_list[bs->ses_state].str);
+ }
+
+ switch (bs->ses_state) {
+ case PTM_BFD_INIT:
+ case PTM_BFD_UP:
+ ptm_sbfd_echo_sess_dn(bs, BD_PATH_DOWN);
+ break;
+ case PTM_BFD_DOWN:
+ break;
+ }
+}
+
+struct bfd_session *bfd_session_new(enum bfd_mode_type mode)
{
struct bfd_session *bs;
- bs = XCALLOC(MTYPE_BFDD_CONFIG, sizeof(*bs));
+ bs = XCALLOC(MTYPE_BFDD_CONFIG, sizeof(struct bfd_session));
+ bs->segnum = 0;
+ bs->bfd_mode = mode;
/* Set peer session defaults. */
bfd_profile_set_default(&bs->peer_profile);
@@ -788,7 +1014,7 @@ struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc)
}
/* Get BFD session storage with its defaults. */
- bfd = bfd_session_new();
+ bfd = bfd_session_new(BFD_MODE_TYPE_BFD);
/*
* Store interface/VRF name in case we need to delay session
@@ -970,6 +1196,30 @@ static void bs_down_handler(struct bfd_session *bs, int nstate)
}
}
+static void sbfd_down_handler(struct bfd_session *bs, int nstate)
+{
+ switch (nstate) {
+ case PTM_BFD_ADM_DOWN:
+ /*
+ * Remote peer doesn't want to talk, so lets keep the
+ * connection down.
+ */
+ break;
+ case PTM_BFD_UP:
+ /* down - > up*/
+ ptm_sbfd_sess_up(bs);
+ break;
+
+ case PTM_BFD_DOWN:
+ break;
+
+ default:
+ if (bglobal.debug_peer_event)
+ zlog_err("state-change: unhandled sbfd state: %d", nstate);
+ break;
+ }
+}
+
static void bs_init_handler(struct bfd_session *bs, int nstate)
{
switch (nstate) {
@@ -1021,6 +1271,29 @@ static void bs_up_handler(struct bfd_session *bs, int nstate)
}
}
+static void sbfd_up_handler(struct bfd_session *bs, int nstate)
+{
+ switch (nstate) {
+ case PTM_BFD_ADM_DOWN:
+ case PTM_BFD_DOWN:
+ if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO) {
+ ptm_sbfd_echo_sess_dn(bs, BD_ECHO_FAILED);
+ } else
+ ptm_sbfd_init_sess_dn(bs, BD_ECHO_FAILED);
+
+ break;
+
+ case PTM_BFD_UP:
+ /* Path is up and working. */
+ break;
+
+ default:
+ if (bglobal.debug_peer_event)
+ zlog_debug("state-change: unhandled neighbor state: %d", nstate);
+ break;
+ }
+}
+
void bs_state_handler(struct bfd_session *bs, int nstate)
{
switch (bs->ses_state) {
@@ -1045,6 +1318,58 @@ void bs_state_handler(struct bfd_session *bs, int nstate)
}
}
+void sbfd_echo_state_handler(struct bfd_session *bs, int nstate)
+{
+ if (bglobal.debug_peer_event)
+ zlog_debug("%s: bfd(%u) state: %s , notify state: %s", __func__,
+ bs->discrs.my_discr, state_list[bs->ses_state].str,
+ state_list[nstate].str);
+
+ switch (bs->ses_state) {
+ case PTM_BFD_ADM_DOWN:
+ // bs_admin_down_handler(bs, nstate);
+ break;
+ case PTM_BFD_DOWN:
+ sbfd_down_handler(bs, nstate);
+ break;
+ case PTM_BFD_UP:
+ sbfd_up_handler(bs, nstate);
+ break;
+
+ default:
+ if (bglobal.debug_peer_event)
+ zlog_debug("state-change: [%s] is in invalid state: %d", bs_to_string(bs),
+ nstate);
+ break;
+ }
+}
+
+void sbfd_initiator_state_handler(struct bfd_session *bs, int nstate)
+{
+ if (bglobal.debug_peer_event)
+ zlog_debug("%s: sbfd(%u) state: %s , notify state: %s", __func__,
+ bs->discrs.my_discr, state_list[bs->ses_state].str,
+ state_list[nstate].str);
+
+ switch (bs->ses_state) {
+ case PTM_BFD_ADM_DOWN:
+ // bs_admin_down_handler(bs, nstate);
+ break;
+ case PTM_BFD_DOWN:
+ sbfd_down_handler(bs, nstate);
+ break;
+ case PTM_BFD_UP:
+ sbfd_up_handler(bs, nstate);
+ break;
+
+ default:
+ if (bglobal.debug_peer_event)
+ zlog_debug("state-change: [%s] is in invalid state: %d", bs_to_string(bs),
+ nstate);
+ break;
+ }
+}
+
/*
* Handles echo timer manipulation after updating timer.
*/
@@ -1147,6 +1472,15 @@ void bs_set_slow_timers(struct bfd_session *bs)
/* Set the appropriated timeouts for slow connection. */
bs->detect_TO = (BFD_DEFDETECTMULT * BFD_DEF_SLOWTX);
bs->xmt_TO = BFD_DEF_SLOWTX;
+
+ /* add for sbfd-echo slow connection */
+ if (BFD_MODE_TYPE_SBFD_ECHO == bs->bfd_mode) {
+ bs->echo_xmt_TO = SBFD_ECHO_DEF_SLOWTX;
+ bs->timers.desired_min_echo_tx = BFD_DEFDESIREDMINTX;
+ bs->timers.required_min_echo_rx = BFD_DEFDESIREDMINTX;
+ bs->peer_profile.min_echo_rx = BFD_DEFDESIREDMINTX;
+ bs->peer_profile.min_echo_tx = BFD_DEFDESIREDMINTX;
+ }
}
void bfd_set_echo(struct bfd_session *bs, bool echo)
@@ -1438,6 +1772,8 @@ const char *bs_to_string(const struct bfd_session *bs)
if (bs->key.ifname[0])
pos += snprintf(buf + pos, sizeof(buf) - pos, " ifname:%s",
bs->key.ifname);
+ if (bs->bfd_name[0])
+ pos += snprintf(buf + pos, sizeof(buf) - pos, " bfd_name:%s", bs->bfd_name);
(void)pos;
@@ -1516,6 +1852,10 @@ void bs_to_bpc(struct bfd_session *bs, struct bfd_peer_cfg *bpc)
static struct hash *bfd_id_hash;
static struct hash *bfd_key_hash;
+/*sbfd reflector discr hash*/
+static struct hash *sbfd_rflt_hash;
+static unsigned int sbfd_discr_hash_do(const void *p);
+
static unsigned int bfd_id_hash_do(const void *p);
static unsigned int bfd_key_hash_do(const void *p);
@@ -1567,6 +1907,8 @@ static bool bfd_key_hash_cmp(const void *n1, const void *n2)
if (memcmp(bs1->key.vrfname, bs2->key.vrfname,
sizeof(bs1->key.vrfname)))
return false;
+ if (memcmp(bs1->key.bfdname, bs2->key.bfdname, sizeof(bs1->key.bfdname)))
+ return false;
/*
* Local address is optional and can be empty.
@@ -1591,6 +1933,20 @@ static bool bfd_key_hash_cmp(const void *n1, const void *n2)
return true;
}
+/* SBFD discr hash. */
+static unsigned int sbfd_discr_hash_do(const void *p)
+{
+ const struct sbfd_reflector *sr = p;
+
+ return jhash_1word(sr->discr, 0);
+}
+
+static bool sbfd_discr_hash_cmp(const void *n1, const void *n2)
+{
+ const struct sbfd_reflector *sr1 = n1, *sr2 = n2;
+
+ return sr1->discr == sr2->discr;
+}
/*
* Hash public interface / exported functions.
@@ -1615,6 +1971,15 @@ struct bfd_session *bfd_key_lookup(struct bfd_key key)
return hash_lookup(bfd_key_hash, &bs);
}
+struct sbfd_reflector *sbfd_discr_lookup(uint32_t discr)
+{
+ struct sbfd_reflector sr;
+
+ sr.discr = discr;
+
+ return hash_lookup(sbfd_rflt_hash, &sr);
+}
+
/*
* Delete functions.
*
@@ -1643,6 +2008,15 @@ struct bfd_session *bfd_key_delete(struct bfd_key key)
return hash_release(bfd_key_hash, &bs);
}
+struct sbfd_reflector *sbfd_discr_delete(uint32_t discr)
+{
+ struct sbfd_reflector sr;
+
+ sr.discr = discr;
+
+ return hash_release(sbfd_rflt_hash, &sr);
+}
+
/* Iteration functions. */
void bfd_id_iterate(hash_iter_func hif, void *arg)
{
@@ -1654,6 +2028,11 @@ void bfd_key_iterate(hash_iter_func hif, void *arg)
hash_iterate(bfd_key_hash, hif, arg);
}
+void sbfd_discr_iterate(hash_iter_func hif, void *arg)
+{
+ hash_iterate(sbfd_rflt_hash, hif, arg);
+}
+
/*
* Insert functions.
*
@@ -1670,12 +2049,24 @@ bool bfd_key_insert(struct bfd_session *bs)
return (hash_get(bfd_key_hash, bs, hash_alloc_intern) == bs);
}
+bool sbfd_discr_insert(struct sbfd_reflector *sr)
+{
+ return (hash_get(sbfd_rflt_hash, sr, hash_alloc_intern) == sr);
+}
+
+unsigned long sbfd_discr_get_count(void)
+{
+ return sbfd_rflt_hash->count;
+}
+
void bfd_initialize(void)
{
bfd_id_hash = hash_create(bfd_id_hash_do, bfd_id_hash_cmp,
"BFD session discriminator hash");
bfd_key_hash = hash_create(bfd_key_hash_do, bfd_key_hash_cmp,
"BFD session hash");
+ sbfd_rflt_hash = hash_create(sbfd_discr_hash_do, sbfd_discr_hash_cmp,
+ "SBFD reflector discriminator hash");
TAILQ_INIT(&bplist);
}
@@ -1687,6 +2078,14 @@ static void _bfd_free(struct hash_bucket *hb,
bfd_session_free(bs);
}
+static void _sbfd_reflector_free(struct hash_bucket *hb, void *arg __attribute__((__unused__)))
+{
+ struct sbfd_reflector *sr = hb->data;
+
+
+ sbfd_reflector_free(sr->discr);
+}
+
void bfd_shutdown(void)
{
struct bfd_profile *bp;
@@ -1701,9 +2100,13 @@ void bfd_shutdown(void)
bfd_id_iterate(_bfd_free, NULL);
assert(bfd_key_hash->count == 0);
+ sbfd_discr_iterate(_sbfd_reflector_free, NULL);
+ assert(sbfd_rflt_hash->count == 0);
+
/* Now free the hashes themselves. */
hash_free(bfd_id_hash);
hash_free(bfd_key_hash);
+ hash_free(sbfd_rflt_hash);
/* Free all profile allocations. */
while ((bp = TAILQ_FIRST(&bplist)) != NULL)
@@ -1713,6 +2116,7 @@ void bfd_shutdown(void)
struct bfd_session_iterator {
int bsi_stop;
bool bsi_mhop;
+ uint32_t bsi_bfdmode;
const struct bfd_session *bsi_bs;
};
@@ -1724,7 +2128,7 @@ static int _bfd_session_next(struct hash_bucket *hb, void *arg)
/* Previous entry signaled stop. */
if (bsi->bsi_stop == 1) {
/* Match the single/multi hop sessions. */
- if (bs->key.mhop != bsi->bsi_mhop)
+ if ((bs->key.mhop != bsi->bsi_mhop) || (bs->bfd_mode != bsi->bsi_bfdmode))
return HASHWALK_CONTINUE;
bsi->bsi_bs = bs;
@@ -1736,7 +2140,8 @@ static int _bfd_session_next(struct hash_bucket *hb, void *arg)
bsi->bsi_stop = 1;
/* Set entry to NULL to signal end of list. */
bsi->bsi_bs = NULL;
- } else if (bsi->bsi_bs == NULL && bsi->bsi_mhop == bs->key.mhop) {
+ } else if (bsi->bsi_bs == NULL && bsi->bsi_mhop == bs->key.mhop &&
+ bsi->bsi_bfdmode == bs->bfd_mode) {
/* We want the first list item. */
bsi->bsi_stop = 1;
bsi->bsi_bs = hb->data;
@@ -1751,14 +2156,15 @@ static int _bfd_session_next(struct hash_bucket *hb, void *arg)
*
* `bs` might point to NULL to get the first item of the data structure.
*/
-const struct bfd_session *bfd_session_next(const struct bfd_session *bs,
- bool mhop)
+const struct bfd_session *bfd_session_next(const struct bfd_session *bs, bool mhop,
+ uint32_t bfd_mode)
{
struct bfd_session_iterator bsi;
bsi.bsi_stop = 0;
bsi.bsi_bs = bs;
bsi.bsi_mhop = mhop;
+ bsi.bsi_bfdmode = bfd_mode;
hash_walk(bfd_key_hash, _bfd_session_next, &bsi);
if (bsi.bsi_stop == 0)
return NULL;
@@ -1924,6 +2330,7 @@ static int bfd_vrf_new(struct vrf *vrf)
bvrf->bg_mhop6 = -1;
bvrf->bg_echo = -1;
bvrf->bg_echov6 = -1;
+ bvrf->bg_initv6 = -1;
return 0;
}
@@ -1957,6 +2364,8 @@ static int bfd_vrf_enable(struct vrf *vrf)
bvrf->bg_shop6 = bp_udp6_shop(vrf);
if (bvrf->bg_mhop6 == -1)
bvrf->bg_mhop6 = bp_udp6_mhop(vrf);
+ if (bvrf->bg_initv6 == -1)
+ bvrf->bg_initv6 = bp_initv6_socket(vrf);
if (bvrf->bg_ev[0] == NULL && bvrf->bg_shop != -1)
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
@@ -1970,6 +2379,8 @@ static int bfd_vrf_enable(struct vrf *vrf)
if (bvrf->bg_ev[3] == NULL && bvrf->bg_mhop6 != -1)
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
&bvrf->bg_ev[3]);
+ if (bvrf->bg_ev[6] == NULL && bvrf->bg_initv6 != -1)
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_initv6, &bvrf->bg_ev[6]);
/* Toggle echo if VRF was disabled. */
bfd_vrf_toggle_echo(bvrf);
@@ -2006,6 +2417,7 @@ static int bfd_vrf_disable(struct vrf *vrf)
EVENT_OFF(bvrf->bg_ev[3]);
EVENT_OFF(bvrf->bg_ev[4]);
EVENT_OFF(bvrf->bg_ev[5]);
+ EVENT_OFF(bvrf->bg_ev[6]);
/* Close all descriptors. */
socket_close(&bvrf->bg_echo);
@@ -2014,6 +2426,7 @@ static int bfd_vrf_disable(struct vrf *vrf)
socket_close(&bvrf->bg_shop6);
socket_close(&bvrf->bg_mhop6);
socket_close(&bvrf->bg_echov6);
+ socket_close(&bvrf->bg_initv6);
return 0;
}
@@ -2050,6 +2463,79 @@ unsigned long bfd_get_session_count(void)
return bfd_key_hash->count;
}
+struct sbfd_reflector *sbfd_reflector_new(const uint32_t discr, struct in6_addr *sip)
+{
+ struct sbfd_reflector *sr;
+
+ sr = sbfd_discr_lookup(discr);
+ if (sr)
+ return sr;
+
+ sr = XCALLOC(MTYPE_SBFD_REFLECTOR, sizeof(*sr));
+ sr->discr = discr;
+ memcpy(&sr->local, sip, sizeof(struct in6_addr));
+
+ sbfd_discr_insert(sr);
+
+
+ return sr;
+}
+
+void sbfd_reflector_free(const uint32_t discr)
+{
+ struct sbfd_reflector *sr;
+
+ sr = sbfd_discr_lookup(discr);
+ if (!sr)
+ return;
+
+ sbfd_discr_delete(discr);
+ XFREE(MTYPE_SBFD_REFLECTOR, sr);
+
+ return;
+}
+
+void sbfd_reflector_flush()
+{
+ sbfd_discr_iterate(_sbfd_reflector_free, NULL);
+ return;
+}
+
+struct bfd_session_name_match_unique {
+ const char *bfd_name;
+ struct bfd_session *bfd_found;
+};
+
+static int _bfd_session_name_cmp(struct hash_bucket *hb, void *arg)
+{
+ struct bfd_session *bs = hb->data;
+ struct bfd_session_name_match_unique *match = (struct bfd_session_name_match_unique *)arg;
+
+ if (strlen(bs->bfd_name) != strlen(match->bfd_name)) {
+ return HASHWALK_CONTINUE;
+ }
+
+ if (!strncmp(bs->bfd_name, match->bfd_name, strlen(bs->bfd_name))) {
+ match->bfd_found = bs;
+ return HASHWALK_ABORT;
+ }
+ return HASHWALK_CONTINUE;
+}
+
+struct bfd_session *bfd_session_get_by_name(const char *name)
+{
+ if (!name || name[0] == '\0')
+ return NULL;
+
+ struct bfd_session_name_match_unique match;
+ match.bfd_name = name;
+ match.bfd_found = NULL;
+
+ hash_walk(bfd_key_hash, _bfd_session_name_cmp, &match);
+
+ return match.bfd_found;
+}
+
void bfd_rtt_init(struct bfd_session *bfd)
{
uint8_t i;
diff --git a/bfdd/bfd.h b/bfdd/bfd.h
index d4d14ffce6..d9119d16c2 100644
--- a/bfdd/bfd.h
+++ b/bfdd/bfd.h
@@ -19,6 +19,7 @@
#include "lib/qobj.h"
#include "lib/queue.h"
#include "lib/vrf.h"
+#include "lib/bfd.h"
#ifdef BFD_DEBUG
#define BFDD_JSON_CONV_OPTIONS (JSON_C_TO_STRING_PRETTY)
@@ -86,6 +87,10 @@ struct bfd_peer_cfg {
bool bpc_has_profile;
char bpc_profile[64];
+
+ vrf_id_t vrf_id;
+ char bfd_name[BFD_NAME_SIZE + 1];
+ uint8_t bfd_name_len;
};
/* bfd Authentication Type. */
@@ -147,7 +152,6 @@ struct bfd_echo_pkt {
uint64_t time_sent_usec;
};
-
/* Macros for manipulating control packets */
#define BFD_VERMASK 0x07
#define BFD_DIAGMASK 0x1F
@@ -194,6 +198,8 @@ struct bfd_echo_pkt {
#define BFD_ECHO_VERSION 1
#define BFD_ECHO_PKT_LEN sizeof(struct bfd_echo_pkt)
+#define RTH_BASE_HEADER_LEN 8
+#define GET_RTH_HDR_LEN(size) (((size) >> 3) - 1)
enum bfd_diagnosticis {
BD_OK = 0,
/* Control Detection Time Expired. */
@@ -235,6 +241,12 @@ enum bfd_session_flags {
BFD_SESS_FLAG_MAC_SET = 1 << 11, /* MAC of peer known */
};
+enum bfd_mode_type {
+ BFD_MODE_TYPE_BFD = 0,
+ BFD_MODE_TYPE_SBFD_ECHO = 1,
+ BFD_MODE_TYPE_SBFD_INIT = 2,
+};
+
/*
* BFD session hash key.
*
@@ -254,6 +266,7 @@ struct bfd_key {
struct in6_addr local;
char ifname[IFNAMSIZ];
char vrfname[VRF_NAMSIZ];
+ char bfdname[BFD_NAME_SIZE + 1];
} __attribute__((packed));
struct bfd_session_stats {
@@ -264,6 +277,7 @@ struct bfd_session_stats {
uint64_t session_up;
uint64_t session_down;
uint64_t znotification;
+ uint64_t tx_fail_pkt;
};
/**
@@ -375,6 +389,12 @@ struct bfd_session {
uint8_t rtt_valid; /* number of valid samples */
uint8_t rtt_index; /* last index added */
uint64_t rtt[BFD_RTT_SAMPLE]; /* RRT in usec for echo to be looped */
+ char bfd_name[BFD_NAME_SIZE + 1];
+
+ uint32_t bfd_mode;
+ uint8_t segnum;
+ struct in6_addr out_sip6;
+ struct in6_addr seg_list[SRV6_MAX_SEGS];
};
struct bfd_diag_str_list {
@@ -396,6 +416,11 @@ struct bfd_session_observer {
};
TAILQ_HEAD(obslist, bfd_session_observer);
+/*sbfd reflector struct*/
+struct sbfd_reflector {
+ uint32_t discr;
+ struct in6_addr local;
+};
/* States defined per 4.1 */
#define PTM_BFD_ADM_DOWN 0
@@ -413,6 +438,7 @@ TAILQ_HEAD(obslist, bfd_session_observer);
#define BFD_DEF_DES_MIN_ECHO_TX (50 * 1000) /* microseconds. */
#define BFD_DEF_REQ_MIN_ECHO_RX (50 * 1000) /* microseconds. */
#define BFD_DEF_SLOWTX (1000 * 1000) /* microseconds. */
+#define SBFD_ECHO_DEF_SLOWTX (1000 * 1000) /* microseconds. */
/** Minimum multi hop TTL. */
#define BFD_DEF_MHOP_TTL 254
#define BFD_PKT_LEN 24 /* Length of control packet */
@@ -427,7 +453,9 @@ TAILQ_HEAD(obslist, bfd_session_observer);
#define BFD_DEFDESTPORT 3784
#define BFD_DEF_ECHO_PORT 3785
#define BFD_DEF_MHOP_DEST_PORT 4784
+#define BFD_DEF_SBFD_DEST_PORT 7784
+#define BFD_SBFD_INITIATOR_DEMAND 1
/*
* bfdd.c
@@ -441,9 +469,10 @@ struct bfd_vrf_global {
int bg_mhop6;
int bg_echo;
int bg_echov6;
+ int bg_initv6;
struct vrf *vrf;
- struct event *bg_ev[6];
+ struct event *bg_ev[7];
};
/* Forward declaration of data plane context struct. */
@@ -519,6 +548,7 @@ int bp_set_ttl(int sd, uint8_t value);
int bp_set_tosv6(int sd, uint8_t value);
int bp_set_tos(int sd, uint8_t value);
int bp_bind_dev(int sd, const char *dev);
+void bp_set_prio(int sd, int value);
int bp_udp_shop(const struct vrf *vrf);
int bp_udp_mhop(const struct vrf *vrf);
@@ -528,10 +558,15 @@ int bp_peer_socket(const struct bfd_session *bs);
int bp_peer_socketv6(const struct bfd_session *bs);
int bp_echo_socket(const struct vrf *vrf);
int bp_echov6_socket(const struct vrf *vrf);
+int bp_peer_srh_socketv6(struct bfd_session *bs);
+int bp_sbfd_socket(const struct vrf *vrf);
+int bp_initv6_socket(const struct vrf *vrf);
void ptm_bfd_snd(struct bfd_session *bfd, int fbit);
void ptm_bfd_echo_snd(struct bfd_session *bfd);
void ptm_bfd_echo_fp_snd(struct bfd_session *bfd);
+void ptm_sbfd_echo_snd(struct bfd_session *bfd);
+void ptm_sbfd_initiator_snd(struct bfd_session *bfd, int fbit);
void bfd_recv_cb(struct event *t);
@@ -545,13 +580,21 @@ typedef void (*bfd_ev_cb)(struct event *t);
void bfd_recvtimer_update(struct bfd_session *bs);
void bfd_echo_recvtimer_update(struct bfd_session *bs);
+void sbfd_init_recvtimer_update(struct bfd_session *bs);
+void sbfd_echo_recvtimer_update(struct bfd_session *bs);
void bfd_xmttimer_update(struct bfd_session *bs, uint64_t jitter);
void bfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter);
+void sbfd_init_xmttimer_update(struct bfd_session *bs, uint64_t jitter);
+void sbfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter);
void bfd_xmttimer_delete(struct bfd_session *bs);
void bfd_echo_xmttimer_delete(struct bfd_session *bs);
+void sbfd_init_xmttimer_delete(struct bfd_session *bs);
+void sbfd_echo_xmttimer_delete(struct bfd_session *bs);
void bfd_recvtimer_delete(struct bfd_session *bs);
void bfd_echo_recvtimer_delete(struct bfd_session *bs);
+void sbfd_init_recvtimer_delete(struct bfd_session *bs);
+void sbfd_echo_recvtimer_delete(struct bfd_session *bs);
void bfd_recvtimer_assign(struct bfd_session *bs, bfd_ev_cb cb, int sd);
void bfd_echo_recvtimer_assign(struct bfd_session *bs, bfd_ev_cb cb, int sd);
@@ -574,6 +617,9 @@ void ptm_bfd_echo_stop(struct bfd_session *bfd);
void ptm_bfd_echo_start(struct bfd_session *bfd);
void ptm_bfd_xmt_TO(struct bfd_session *bfd, int fbit);
void ptm_bfd_start_xmt_timer(struct bfd_session *bfd, bool is_echo);
+void ptm_sbfd_init_xmt_TO(struct bfd_session *bfd, int fbit);
+void ptm_sbfd_init_reset(struct bfd_session *bfd);
+void ptm_sbfd_echo_reset(struct bfd_session *bfd);
struct bfd_session *ptm_bfd_sess_find(struct bfd_pkt *cp,
struct sockaddr_any *peer,
struct sockaddr_any *local,
@@ -598,16 +644,16 @@ void bs_observer_del(struct bfd_session_observer *bso);
void bs_to_bpc(struct bfd_session *bs, struct bfd_peer_cfg *bpc);
-void gen_bfd_key(struct bfd_key *key, struct sockaddr_any *peer,
- struct sockaddr_any *local, bool mhop, const char *ifname,
- const char *vrfname);
-struct bfd_session *bfd_session_new(void);
+void gen_bfd_key(struct bfd_key *key, struct sockaddr_any *peer, struct sockaddr_any *local,
+ bool mhop, const char *ifname, const char *vrfname, const char *bfdname);
+struct bfd_session *bfd_session_new(enum bfd_mode_type mode);
struct bfd_session *bs_registrate(struct bfd_session *bs);
void bfd_session_free(struct bfd_session *bs);
-const struct bfd_session *bfd_session_next(const struct bfd_session *bs,
- bool mhop);
+const struct bfd_session *bfd_session_next(const struct bfd_session *bs, bool mhop,
+ uint32_t bfd_mode);
void bfd_sessions_remove_manual(void);
void bfd_profiles_remove(void);
+void bs_sbfd_echo_timer_handler(struct bfd_session *bs);
void bfd_rtt_init(struct bfd_session *bfd);
extern void bfd_vrf_toggle_echo(struct bfd_vrf_global *bfd_vrf);
@@ -653,18 +699,22 @@ void bfd_vrf_terminate(void);
struct bfd_vrf_global *bfd_vrf_look_by_session(struct bfd_session *bfd);
struct bfd_session *bfd_id_lookup(uint32_t id);
struct bfd_session *bfd_key_lookup(struct bfd_key key);
-
+struct sbfd_reflector *sbfd_discr_lookup(uint32_t discr);
struct bfd_session *bfd_id_delete(uint32_t id);
struct bfd_session *bfd_key_delete(struct bfd_key key);
+struct sbfd_reflector *sbfd_discr_delete(uint32_t discr);
bool bfd_id_insert(struct bfd_session *bs);
bool bfd_key_insert(struct bfd_session *bs);
+bool sbfd_discr_insert(struct sbfd_reflector *sr);
typedef void (*hash_iter_func)(struct hash_bucket *hb, void *arg);
void bfd_id_iterate(hash_iter_func hif, void *arg);
void bfd_key_iterate(hash_iter_func hif, void *arg);
+void sbfd_discr_iterate(hash_iter_func hif, void *arg);
unsigned long bfd_get_session_count(void);
+unsigned long sbfd_discr_get_count(void);
/* Export callback functions for `event.c`. */
extern struct event_loop *master;
@@ -674,6 +724,11 @@ void bfd_echo_recvtimer_cb(struct event *t);
void bfd_xmt_cb(struct event *t);
void bfd_echo_xmt_cb(struct event *t);
+void sbfd_init_recvtimer_cb(struct event *t);
+void sbfd_echo_recvtimer_cb(struct event *t);
+void sbfd_init_xmt_cb(struct event *t);
+void sbfd_echo_xmt_cb(struct event *t);
+
extern struct in6_addr zero_addr;
/**
@@ -809,4 +864,17 @@ int bfd_dplane_update_session_counters(struct bfd_session *bs);
void bfd_dplane_show_counters(struct vty *vty);
+/* sbfd reflector */
+struct sbfd_reflector *sbfd_reflector_new(const uint32_t discr, struct in6_addr *sip);
+void sbfd_reflector_free(const uint32_t discr);
+void sbfd_reflector_flush(void);
+
+/*sbfd*/
+void ptm_sbfd_echo_sess_dn(struct bfd_session *bfd, uint8_t diag);
+void ptm_sbfd_init_sess_dn(struct bfd_session *bfd, uint8_t diag);
+void ptm_sbfd_sess_up(struct bfd_session *bfd);
+void sbfd_echo_state_handler(struct bfd_session *bs, int nstate);
+void sbfd_initiator_state_handler(struct bfd_session *bs, int nstate);
+
+struct bfd_session *bfd_session_get_by_name(const char *name);
#endif /* _BFD_H_ */
diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c
index f9397fa128..cecced3ccc 100644
--- a/bfdd/bfd_packet.c
+++ b/bfdd/bfd_packet.c
@@ -16,20 +16,28 @@
#ifdef GNU_LINUX
#include <linux/filter.h>
+#include <linux/seg6.h>
+#include <linux/ipv6.h>
#endif
#ifdef BFD_LINUX
#include <linux/if_packet.h>
+#include <linux/seg6.h>
+#include <linux/ipv6.h>
#endif /* BFD_LINUX */
#include <netinet/if_ether.h>
#include <netinet/udp.h>
+#include <netinet/ip6.h>
+#include <ifaddrs.h>
#include "lib/sockopt.h"
#include "lib/checksum.h"
#include "lib/network.h"
#include "bfd.h"
+#define BUF_SIZ 1024
+#define SOCK_OPT_PRIO_HIGH 6
/*
* Prototypes
@@ -49,12 +57,19 @@ int bp_udp_send(int sd, uint8_t ttl, uint8_t *data, size_t datalen,
struct sockaddr *to, socklen_t tolen);
int bp_bfd_echo_in(struct bfd_vrf_global *bvrf, int sd, uint8_t *ttl,
uint32_t *my_discr, uint64_t *my_rtt);
+static int ptm_bfd_reflector_process_init_packet(struct bfd_vrf_global *bvrf, int s);
+int _ptm_sbfd_init_send(struct bfd_session *bs, const void *data, size_t datalen);
+
#ifdef BFD_LINUX
-ssize_t bfd_recv_ipv4_fp(int sd, uint8_t *msgbuf, size_t msgbuflen,
- uint8_t *ttl, ifindex_t *ifindex,
- struct sockaddr_any *local, struct sockaddr_any *peer);
-void bfd_peer_mac_set(int sd, struct bfd_session *bfd,
- struct sockaddr_any *peer, struct interface *ifp);
+static int bp_raw_sbfd_red_send(int sd, uint8_t *data, size_t datalen, uint16_t family,
+ struct in6_addr *out_sip, struct in6_addr *sip,
+ struct in6_addr *dip, uint16_t src_port, uint16_t dst_port,
+ uint8_t seg_num, struct in6_addr *segment_list);
+static ssize_t bfd_recv_ipv4_fp(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl,
+ ifindex_t *ifindex, struct sockaddr_any *local,
+ struct sockaddr_any *peer);
+static void bfd_peer_mac_set(int sd, struct bfd_session *bfd, struct sockaddr_any *peer,
+ struct interface *ifp);
int bp_udp_send_fp(int sd, uint8_t *data, size_t datalen,
struct bfd_session *bfd);
ssize_t bfd_recv_fp_echo(int sd, uint8_t *msgbuf, size_t msgbuflen,
@@ -374,7 +389,24 @@ static int ptm_bfd_process_echo_pkt(struct bfd_vrf_global *bvrf, int s)
/* Compute detect time */
bfd->echo_detect_TO = bfd->remote_detect_mult * bfd->echo_xmt_TO;
- /* Update echo receive timeout. */
+ /* Update sbfd-echo session state */
+ if (bfd->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO) {
+ sbfd_echo_state_handler(bfd, PTM_BFD_UP);
+
+ if (bfd->echo_xmt_TO != bfd->timers.desired_min_echo_tx) {
+ bfd->echo_xmt_TO = bfd->timers.desired_min_echo_tx;
+ //reset xmt timer TO after UP
+ ptm_bfd_start_xmt_timer(bfd, true);
+ }
+
+ bfd->echo_detect_TO = bfd->detect_mult * bfd->echo_xmt_TO;
+ /* Update sbfd echo receive timeout. */
+ if (bfd->echo_detect_TO > 0)
+ sbfd_echo_recvtimer_update(bfd);
+ return 0;
+ }
+
+ /* Update bfd-echo receive timeout. */
if (bfd->echo_detect_TO > 0)
bfd_echo_recvtimer_update(bfd);
@@ -438,9 +470,9 @@ void ptm_bfd_snd(struct bfd_session *bfd, int fbit)
/*
* receive the ipv4 echo packet that was loopback in the peers forwarding plane
*/
-ssize_t bfd_recv_ipv4_fp(int sd, uint8_t *msgbuf, size_t msgbuflen,
- uint8_t *ttl, ifindex_t *ifindex,
- struct sockaddr_any *local, struct sockaddr_any *peer)
+static ssize_t bfd_recv_ipv4_fp(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl,
+ ifindex_t *ifindex, struct sockaddr_any *local,
+ struct sockaddr_any *peer)
{
ssize_t mlen;
struct sockaddr_ll msgaddr;
@@ -729,6 +761,9 @@ static void bfd_sd_reschedule(struct bfd_vrf_global *bvrf, int sd)
EVENT_OFF(bvrf->bg_ev[5]);
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
&bvrf->bg_ev[5]);
+ } else if (sd == bvrf->bg_initv6) {
+ EVENT_OFF(bvrf->bg_ev[6]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_initv6, &bvrf->bg_ev[6]);
}
}
@@ -822,6 +857,11 @@ void bfd_recv_cb(struct event *t)
/* Schedule next read. */
bfd_sd_reschedule(bvrf, sd);
+ /* The reflector handle SBFD init packets. */
+ if (sd == bvrf->bg_initv6) {
+ ptm_bfd_reflector_process_init_packet(bvrf, sd);
+ return;
+ }
/* Handle echo packets. */
if (sd == bvrf->bg_echo || sd == bvrf->bg_echov6) {
ptm_bfd_process_echo_pkt(bvrf, sd);
@@ -996,6 +1036,29 @@ void bfd_recv_cb(struct event *t)
else
bfd->remote_cbit = 0;
+ /* The initiator handle SBFD reflect packet. */
+ if (bfd->bfd_mode == BFD_MODE_TYPE_SBFD_INIT) {
+ sbfd_initiator_state_handler(bfd, PTM_BFD_UP);
+ if (bfd->xmt_TO != bfd->timers.desired_min_tx) {
+ bfd->xmt_TO = bfd->timers.desired_min_tx;
+ //reset xmt timer TO after UP
+ ptm_bfd_start_xmt_timer(bfd, false);
+ }
+
+ bfd->detect_TO = bfd->detect_mult * bfd->xmt_TO;
+ sbfd_init_recvtimer_update(bfd);
+
+ if (bfd->polling && BFD_GETFBIT(cp->flags)) {
+ /* Disable polling. */
+ bfd->polling = 0;
+ /* Start using our new timers. */
+ bfd->cur_timers.desired_min_tx = bfd->timers.desired_min_tx;
+ bfd->cur_timers.required_min_rx = bfd->timers.required_min_rx;
+ }
+
+ return;
+ }
+
/* State switch from section 6.2. */
bs_state_handler(bfd, BFD_GETSTATE(cp->flags));
@@ -1352,6 +1415,16 @@ static void bp_bind_ip(int sd, uint16_t port)
zlog_fatal("bind-ip: bind: %s", strerror(errno));
}
+/*
+ * Set the socket's SO_PRIORITY (Linux only) so BFD traffic can be
+ * classified ahead of lower-priority traffic by queueing disciplines.
+ * Failure is only logged, not fatal; on non-Linux builds this is a no-op.
+ */
+void bp_set_prio(int sd, int value)
+{
+#if defined(GNU_LINUX)
+	int priority = value;
+
+	if (setsockopt(sd, SOL_SOCKET, SO_PRIORITY, &priority, sizeof(priority)) < 0)
+		zlog_warn("set_prio: setsockopt(SO_PRIORITY, %d): %s", value, strerror(errno));
+#endif
+}
+
int bp_udp_shop(const struct vrf *vrf)
{
int sd;
@@ -1421,6 +1494,8 @@ int bp_peer_socket(const struct bfd_session *bs)
return -1;
}
+ bp_set_prio(sd, SOCK_OPT_PRIO_HIGH);
+
/* Find an available source port in the proper range */
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
@@ -1487,6 +1562,8 @@ int bp_peer_socketv6(const struct bfd_session *bs)
return -1;
}
+ bp_set_prio(sd, SOCK_OPT_PRIO_HIGH);
+
/* Find an available source port in the proper range */
memset(&sin6, 0, sizeof(sin6));
sin6.sin6_family = AF_INET6;
@@ -1731,8 +1808,8 @@ int bp_echov6_socket(const struct vrf *vrf)
/* get peer's mac address to be used with Echo packets when they are looped in
* peers forwarding plane
*/
-void bfd_peer_mac_set(int sd, struct bfd_session *bfd,
- struct sockaddr_any *peer, struct interface *ifp)
+static void bfd_peer_mac_set(int sd, struct bfd_session *bfd, struct sockaddr_any *peer,
+ struct interface *ifp)
{
struct arpreq arpreq_;
@@ -1768,3 +1845,727 @@ void bfd_peer_mac_set(int sd, struct bfd_session *bfd,
}
}
#endif
+
+/*
+ * Transmit one SBFD initiator control packet for session `bfd`.
+ *
+ * The packet is SRv6-encapsulated (reduced mode) by bp_raw_sbfd_red_send()
+ * using the session's segment list when segnum > 0.  This function owns the
+ * transmit statistics: on success it increments tx_ctrl_pkt (and logs/clears
+ * a previous failure streak); on failure it increments tx_fail_pkt, logging
+ * only the first failures to avoid log flooding.
+ *
+ * Returns 0 on success (also on non-Linux builds, where it is a no-op),
+ * -1 on VRF-lookup or send failure.
+ */
+int _ptm_sbfd_init_send(struct bfd_session *bfd, const void *data, size_t datalen)
+{
+#ifdef BFD_LINUX
+	int sd = -1;
+	struct bfd_vrf_global *bvrf = bfd_vrf_look_by_session(bfd);
+
+	int seg_num;
+	struct in6_addr *segment_list = NULL;
+	struct in6_addr peer;
+	struct in6_addr local;
+
+	if (!bvrf)
+		return -1;
+
+	seg_num = bfd->segnum;
+	if (seg_num > 0)
+		segment_list = bfd->seg_list;
+
+	sd = bfd->sock;
+
+	local = bfd->key.local;
+	peer = bfd->key.peer;
+
+	/*SBFD Control pkt dst port should be 7784, src port can be any but NOT 7784 according to RFC7881 */
+	if (bp_raw_sbfd_red_send(sd, (uint8_t *)data, datalen, bfd->key.family, &bfd->out_sip6,
+				 &local, &peer,
+				 CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_MH) ? BFD_DEF_MHOP_DEST_PORT
+									  : BFD_DEFDESTPORT,
+				 BFD_DEF_SBFD_DEST_PORT, seg_num, segment_list) < 0) {
+		/* Log only the first failures of a streak. */
+		if (bfd->stats.tx_fail_pkt <= 1) {
+			char dst[INET6_ADDRSTRLEN] = { 0 };
+
+			inet_ntop(AF_INET6, seg_num > 0 ? segment_list : (&bfd->key.peer), dst,
+				  sizeof(dst));
+			zlog_err("sbfd initiator send failed, dst:%s, errno:%s", dst,
+				 safe_strerror(errno));
+		}
+
+		bfd->stats.tx_fail_pkt++;
+		return -1;
+	}
+
+	/* First success after a failure streak: report recovery. */
+	if (bfd->stats.tx_fail_pkt > 0) {
+		char dst[INET6_ADDRSTRLEN] = { 0 };
+
+		inet_ntop(AF_INET6, seg_num > 0 ? segment_list : (&bfd->key.peer), dst, sizeof(dst));
+		zlog_warn("sbfd initiator send success, dst:%s, previous tx_fail_pkt:%d", dst,
+			  (int)bfd->stats.tx_fail_pkt);
+	}
+	bfd->stats.tx_fail_pkt = 0;
+
+	bfd->stats.tx_ctrl_pkt++;
+#endif
+	return 0;
+}
+
+/*
+ * Transmit one SBFD echo packet for session `bfd`.
+ *
+ * Mirrors _ptm_sbfd_init_send() but uses the BFD echo port for both UDP
+ * ports.  Owns the echo transmit statistics: increments tx_echo_pkt on
+ * success (logging/clearing any previous failure streak) and tx_fail_pkt
+ * on failure (only the first failures are logged).
+ *
+ * Returns 0 on success (also on non-Linux builds, where it is a no-op),
+ * -1 on VRF-lookup or send failure.
+ */
+static int _ptm_sbfd_echo_send(struct bfd_session *bfd, const void *data, size_t datalen)
+{
+#ifdef BFD_LINUX
+	int sd = -1;
+	struct bfd_vrf_global *bvrf = bfd_vrf_look_by_session(bfd);
+
+	int seg_num;
+	struct in6_addr *segment_list = NULL;
+	struct in6_addr peer;
+	struct in6_addr local;
+
+	if (!bvrf)
+		return -1;
+
+	seg_num = bfd->segnum;
+	if (seg_num > 0)
+		segment_list = bfd->seg_list;
+
+	sd = bfd->sock;
+
+	local = bfd->key.local;
+	peer = bfd->key.peer;
+
+	/*SBFD echo pkt dst port should use BFD Echo port 3785, src port can be any according to RFC7881*/
+	if (bp_raw_sbfd_red_send(sd, (uint8_t *)data, datalen, bfd->key.family, &bfd->out_sip6,
+				 &local, &peer, BFD_DEF_ECHO_PORT, BFD_DEF_ECHO_PORT, seg_num,
+				 segment_list) < 0) {
+		/* Log only the first failures of a streak. */
+		if (bfd->stats.tx_fail_pkt <= 1) {
+			char dst[INET6_ADDRSTRLEN] = { 0 };
+
+			inet_ntop(AF_INET6, seg_num > 0 ? segment_list : (&bfd->key.peer), dst,
+				  sizeof(dst));
+			zlog_err("sbfd echo send failed, bfd_name:%s, dst:%s, errno:%s",
+				 bfd->bfd_name, dst, safe_strerror(errno));
+		}
+
+		bfd->stats.tx_fail_pkt++;
+		return -1;
+	}
+
+	/* First success after a failure streak: report recovery. */
+	if (bfd->stats.tx_fail_pkt > 0) {
+		char dst[INET6_ADDRSTRLEN] = { 0 };
+
+		inet_ntop(AF_INET6, seg_num > 0 ? segment_list : (&bfd->key.peer), dst, sizeof(dst));
+		zlog_warn("sbfd echo send success, bfd_name:%s, dst:%s, previous tx_fail_pkt:%d",
+			  bfd->bfd_name, dst, (int)bfd->stats.tx_fail_pkt);
+	}
+	bfd->stats.tx_fail_pkt = 0;
+
+	bfd->stats.tx_echo_pkt++;
+#endif
+	return 0;
+}
+
+/*
+ * Build and transmit one SBFD initiator control packet.  The initiator
+ * always sets the Demand bit (RFC 7880); `fbit` requests the Final bit,
+ * which is mutually exclusive with Poll (RFC 5880, section 6.5).
+ *
+ * Transmit statistics (tx_ctrl_pkt/tx_fail_pkt) are maintained inside
+ * _ptm_sbfd_init_send(), so the packet must not be counted again here.
+ */
+void ptm_sbfd_initiator_snd(struct bfd_session *bfd, int fbit)
+{
+	struct bfd_pkt cp = {};
+
+	/* Set fields according to section 6.5.7 */
+	cp.diag = bfd->local_diag;
+	BFD_SETVER(cp.diag, BFD_VERSION);
+	cp.flags = 0;
+	BFD_SETSTATE(cp.flags, bfd->ses_state);
+
+	if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_CBIT))
+		BFD_SETCBIT(cp.flags, BFD_CBIT);
+
+	BFD_SETDEMANDBIT(cp.flags, BFD_SBFD_INITIATOR_DEMAND);
+
+	/*
+	 * Polling and Final can't be set at the same time.
+	 *
+	 * RFC 5880, Section 6.5.
+	 */
+	BFD_SETFBIT(cp.flags, fbit);
+	if (fbit == 0)
+		BFD_SETPBIT(cp.flags, bfd->polling);
+
+	cp.detect_mult = bfd->detect_mult;
+	cp.len = BFD_PKT_LEN;
+	cp.discrs.my_discr = htonl(bfd->discrs.my_discr);
+	cp.discrs.remote_discr = htonl(bfd->discrs.remote_discr);
+	if (bfd->polling) {
+		cp.timers.desired_min_tx = htonl(bfd->timers.desired_min_tx);
+	} else {
+		/*
+		 * We can only announce current setting on poll, this
+		 * avoids timing mismatch with our peer and give it
+		 * the opportunity to learn. See `bs_final_handler` for
+		 * more information.
+		 */
+		cp.timers.desired_min_tx = htonl(bfd->cur_timers.desired_min_tx);
+	}
+	cp.timers.required_min_rx = 0;
+	cp.timers.required_min_echo = 0;
+
+	/* _ptm_sbfd_init_send() already increments tx_ctrl_pkt on success;
+	 * incrementing it here as well would double-count every packet
+	 * (the echo path counts only in its send helper, too). */
+	_ptm_sbfd_init_send(bfd, &cp, BFD_PKT_LEN);
+}
+/*
+ * Build and transmit one SBFD echo packet for session `bfd`, and mark
+ * the session's echo machinery active on successful transmission.
+ * Statistics are maintained by _ptm_sbfd_echo_send().
+ */
+void ptm_sbfd_echo_snd(struct bfd_session *bfd)
+{
+	struct bfd_echo_pkt bep;
+
+	memset(&bep, 0, sizeof(bep));
+	BFD_SETVER(bep.ver, BFD_ECHO_VERSION);
+	bep.len = BFD_ECHO_PKT_LEN;
+	bep.my_discr = htonl(bfd->discrs.my_discr);
+
+	if (_ptm_sbfd_echo_send(bfd, &bep, BFD_ECHO_PKT_LEN) != 0)
+		return;
+
+	if (!CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE))
+		SET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE);
+}
+
+/*
+ * SBFD reflector: consume one initiator control packet from socket `sd`
+ * and, when a reflector entry matches the packet's remote discriminator,
+ * reflect it back to the sender (RFC 7880/7881):
+ *   - swap my/remote discriminators,
+ *   - clear the Demand bit and report state Up,
+ *   - answer a Poll (P bit) with Final (F bit).
+ *
+ * Returns 0 when the packet was handled or silently ignored, -1 on a
+ * send failure.
+ */
+static int ptm_bfd_reflector_process_init_packet(struct bfd_vrf_global *bvrf, int sd)
+{
+	uint8_t ttl = 0;
+	uint32_t discr_swap;
+	struct sockaddr *sa;
+	struct sbfd_reflector *sr;
+	struct bfd_pkt *cp;
+	ssize_t rlen;
+	struct sockaddr_any local, peer;
+	ifindex_t ifindex = IFINDEX_INTERNAL;
+	uint8_t msgbuf[1516];
+
+	rlen = bfd_recv_ipv6(sd, msgbuf, sizeof(msgbuf), &ttl, &ifindex, &local, &peer);
+
+	/* Short packet, better not risk reading it. */
+	if (rlen < (ssize_t)sizeof(*cp)) {
+		zlog_debug("small bfd packet");
+		return 0;
+	}
+
+	cp = (struct bfd_pkt *)msgbuf;
+	if (!CHECK_FLAG(cp->flags, BFD_DEMANDBIT)) {
+		/* Control packets from an SBFDInitiator must have the Demand
+		 * bit set according to RFC 7880; ignore anything else. */
+		return 0;
+	}
+
+	sr = sbfd_discr_lookup(ntohl(cp->discrs.remote_discr));
+	if (!sr) {
+		/* Convert to host order so the log matches configuration. */
+		zlog_debug("no reflector found in %u", ntohl(cp->discrs.remote_discr));
+		return 0;
+	}
+
+	/* Reflect: swap the discriminators (kept in network byte order). */
+	discr_swap = cp->discrs.my_discr;
+	cp->discrs.my_discr = cp->discrs.remote_discr;
+	cp->discrs.remote_discr = discr_swap;
+	UNSET_FLAG(cp->flags, BFD_DEMANDBIT);
+	BFD_SETSTATE(cp->flags, PTM_BFD_UP);
+	if (CHECK_FLAG(cp->flags, BFD_PBIT)) {
+		/* Answer a Poll with Final (RFC 5880, section 6.5). */
+		UNSET_FLAG(cp->flags, BFD_PBIT);
+		SET_FLAG(cp->flags, BFD_FBIT);
+	}
+
+	sa = (struct sockaddr *)&peer.sa_sin6;
+	if (sendto(sd, msgbuf, rlen, 0, sa, sizeof(peer.sa_sin6)) <= 0) {
+		zlog_debug("packet-send: send failure: %s", strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Create the raw IPv6 socket used to transmit SRv6-encapsulated SBFD
+ * packets for session `bs`.  The socket is bound to the session's
+ * interface (or the VRF device for multihop), transmits with TTL 255
+ * and CS6 TOS, and carries a caller-built IPv6 header (IPV6_HDRINCL).
+ *
+ * Returns the socket descriptor, or -1 on failure.
+ */
+int bp_peer_srh_socketv6(struct bfd_session *bs)
+{
+	int sd;
+	const char *device_to_bind = NULL;
+
+	if (bs->key.ifname[0]) {
+		device_to_bind = (const char *)bs->key.ifname;
+		zlog_debug("device_to_bind to ifname:%s", device_to_bind);
+	} else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) && bs->key.vrfname[0]) {
+		device_to_bind = (const char *)bs->key.vrfname;
+		zlog_debug("device_to_bind to vrf:%s", device_to_bind);
+	} else {
+		zlog_debug("device_to_bind to NULL");
+	}
+
+	frr_with_privs (&bglobal.bfdd_privs) {
+		sd = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_RAW, bs->vrf->vrf_id, device_to_bind);
+	}
+	if (sd == -1) {
+		zlog_err("ipv6-new: failed to create socket: %s", strerror(errno));
+		return -1;
+	}
+
+	/* Set TTL to 255 for all transmitted packets */
+	if (bp_set_ttlv6(sd, BFD_TTL_VAL) != 0) {
+		close(sd);
+		return -1;
+	}
+
+	/* Set TOS to CS6 for all transmitted packets */
+	if (bp_set_tosv6(sd, BFD_TOS_VAL) != 0) {
+		close(sd);
+		return -1;
+	}
+
+#ifdef IPV6_HDRINCL
+	int on = 1;
+
+	/* We build the complete IPv6 header ourselves. */
+	if (setsockopt(sd, IPPROTO_IPV6, IPV6_HDRINCL, &on, sizeof(on))) {
+		zlog_err("setsockopt IPV6_HDRINCL error: %s", strerror(errno));
+		close(sd);
+		return -1;
+	}
+#else
+	/* Without IPV6_HDRINCL we cannot transmit our own IPv6 header;
+	 * do not report a stale errno here, the option simply does not
+	 * exist on this platform. */
+	zlog_err("IPV6_HDRINCL not supported on this platform");
+	close(sd);
+	return -1;
+#endif
+
+	return sd;
+}
+
+/*
+ * Create and bind the UDP socket on which the SBFD reflector receives
+ * initiator control packets (port BFD_DEF_SBFD_DEST_PORT/7784, RFC 7881)
+ * in VRF `vrf`.
+ *
+ * Returns the socket descriptor, or -1 when IPv6 is unsupported.
+ */
+int bp_initv6_socket(const struct vrf *vrf)
+{
+	int sd;
+
+	frr_with_privs (&bglobal.bfdd_privs) {
+		sd = vrf_socket(AF_INET6, SOCK_DGRAM, 0, vrf->vrf_id, vrf->name);
+	}
+	if (sd == -1) {
+		/* Log prefix fixed: this is the SBFD init socket, not the
+		 * echo socket it was copied from. */
+		if (errno != EAFNOSUPPORT)
+			zlog_fatal("initv6-socket: socket: %s", strerror(errno));
+		else
+			zlog_warn("initv6-socket: V6 is not supported, continuing");
+
+		return -1;
+	}
+
+	bp_set_ipv6opts(sd);
+	bp_bind_ipv6(sd, BFD_DEF_SBFD_DEST_PORT);
+
+	return sd;
+}
+
+#ifdef BFD_LINUX
+/*
+ * Standard Internet checksum (RFC 1071) over `len` bytes at `addr`:
+ * one's-complement sum of 16-bit words, folded back to 16 bits.
+ * An odd trailing byte is added as-is (low byte of a 16-bit word).
+ */
+static uint16_t checksum(uint16_t *addr, int len)
+{
+	int count = len;
+	uint32_t sum = 0;
+
+	/* Sum up 2-byte values until none or only one byte left. */
+	while (count > 1) {
+		sum += *(addr++);
+		count -= 2;
+	}
+
+	/* Add left-over byte, if any. */
+	if (count > 0)
+		sum += *(uint8_t *)addr;
+
+	/* Fold 32-bit sum into 16 bits, adding the carries back in. */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+
+	/* Checksum is the one's complement of the sum. */
+	return (uint16_t)~sum;
+}
+
+/*
+ * Compute the UDP checksum for an IPv6 packet: lay out the pseudo-header
+ * (RFC 2460, section 8.1), the UDP header (checksum field zeroed) and the
+ * payload in a scratch buffer and run the Internet checksum over it.
+ *
+ * Note: only the 16-bit UDP length is written where the pseudo-header
+ * defines a 32-bit upper-layer length.  The omitted zero half-word does
+ * not change a one's-complement sum, so the result is still correct.
+ */
+static uint16_t udp6_checksum(struct ip6_hdr iphdr, struct udphdr udp_hdr, uint8_t *payload,
+			      int payloadlen)
+{
+	char buf[IP_MAXPACKET];
+	char *ptr = buf;
+	int chksumlen = 0;
+
+	/* Source IPv6 address (128 bits). */
+	memcpy(ptr, &iphdr.ip6_src.s6_addr, sizeof(iphdr.ip6_src.s6_addr));
+	ptr += sizeof(iphdr.ip6_src.s6_addr);
+	chksumlen += sizeof(iphdr.ip6_src.s6_addr);
+
+	/* Destination IPv6 address (128 bits). */
+	memcpy(ptr, &iphdr.ip6_dst.s6_addr, sizeof(iphdr.ip6_dst.s6_addr));
+	ptr += sizeof(iphdr.ip6_dst.s6_addr);
+	chksumlen += sizeof(iphdr.ip6_dst.s6_addr);
+
+	/* UDP length (16 bits; see note above). */
+	memcpy(ptr, &udp_hdr.len, sizeof(udp_hdr.len));
+	ptr += sizeof(udp_hdr.len);
+	chksumlen += sizeof(udp_hdr.len);
+
+	/* Zero field (24 bits). */
+	memset(ptr, 0, 3);
+	ptr += 3;
+	chksumlen += 3;
+
+	/* Next-header field (8 bits). */
+	memcpy(ptr, &iphdr.ip6_nxt, sizeof(iphdr.ip6_nxt));
+	ptr += sizeof(iphdr.ip6_nxt);
+	chksumlen += sizeof(iphdr.ip6_nxt);
+
+	/* UDP source port (16 bits). */
+	memcpy(ptr, &udp_hdr.source, sizeof(udp_hdr.source));
+	ptr += sizeof(udp_hdr.source);
+	chksumlen += sizeof(udp_hdr.source);
+
+	/* UDP destination port (16 bits). */
+	memcpy(ptr, &udp_hdr.dest, sizeof(udp_hdr.dest));
+	ptr += sizeof(udp_hdr.dest);
+	chksumlen += sizeof(udp_hdr.dest);
+
+	/* UDP length again (16 bits). */
+	memcpy(ptr, &udp_hdr.len, sizeof(udp_hdr.len));
+	ptr += sizeof(udp_hdr.len);
+	chksumlen += sizeof(udp_hdr.len);
+
+	/* UDP checksum: zero, since we don't know it yet (16 bits). */
+	memset(ptr, 0, 2);
+	ptr += 2;
+	chksumlen += 2;
+
+	/* Payload. */
+	memcpy(ptr, payload, payloadlen);
+	ptr += payloadlen;
+	chksumlen += payloadlen;
+
+	/* Pad to the next 16-bit boundary.  (The previous loop advanced
+	 * the write pointer twice per padding byte; harmless only because
+	 * nothing was written after the pad.) */
+	if (payloadlen % 2) {
+		*ptr++ = 0;
+		chksumlen++;
+	}
+
+	return checksum((uint16_t *)buf, chksumlen);
+}
+
+/*
+ * Compute the UDP checksum for an IPv4 packet: lay out the pseudo-header
+ * (RFC 768), the UDP header (checksum field zeroed) and the payload in a
+ * scratch buffer and run the Internet checksum over it.
+ */
+static uint16_t udp4_checksum(struct ip iphdr, struct udphdr udp_hdr, uint8_t *payload,
+			      int payloadlen)
+{
+	char buf[IP_MAXPACKET];
+	char *ptr = buf;
+	int chksumlen = 0;
+
+	/* Source IPv4 address (32 bits). */
+	memcpy(ptr, &iphdr.ip_src.s_addr, sizeof(iphdr.ip_src.s_addr));
+	ptr += sizeof(iphdr.ip_src.s_addr);
+	chksumlen += sizeof(iphdr.ip_src.s_addr);
+
+	/* Destination IPv4 address (32 bits). */
+	memcpy(ptr, &iphdr.ip_dst.s_addr, sizeof(iphdr.ip_dst.s_addr));
+	ptr += sizeof(iphdr.ip_dst.s_addr);
+	chksumlen += sizeof(iphdr.ip_dst.s_addr);
+
+	/* Zero field (8 bits). */
+	*ptr = 0;
+	ptr++;
+	chksumlen += 1;
+
+	/* Transport-layer protocol (8 bits). */
+	memcpy(ptr, &iphdr.ip_p, sizeof(iphdr.ip_p));
+	ptr += sizeof(iphdr.ip_p);
+	chksumlen += sizeof(iphdr.ip_p);
+
+	/* UDP length (16 bits). */
+	memcpy(ptr, &udp_hdr.len, sizeof(udp_hdr.len));
+	ptr += sizeof(udp_hdr.len);
+	chksumlen += sizeof(udp_hdr.len);
+
+	/* UDP source port (16 bits). */
+	memcpy(ptr, &udp_hdr.source, sizeof(udp_hdr.source));
+	ptr += sizeof(udp_hdr.source);
+	chksumlen += sizeof(udp_hdr.source);
+
+	/* UDP destination port (16 bits). */
+	memcpy(ptr, &udp_hdr.dest, sizeof(udp_hdr.dest));
+	ptr += sizeof(udp_hdr.dest);
+	chksumlen += sizeof(udp_hdr.dest);
+
+	/* UDP length again (16 bits). */
+	memcpy(ptr, &udp_hdr.len, sizeof(udp_hdr.len));
+	ptr += sizeof(udp_hdr.len);
+	chksumlen += sizeof(udp_hdr.len);
+
+	/* UDP checksum: zero, since we don't know it yet (16 bits). */
+	memset(ptr, 0, 2);
+	ptr += 2;
+	chksumlen += 2;
+
+	/* Payload. */
+	memcpy(ptr, payload, payloadlen);
+	ptr += payloadlen;
+	chksumlen += payloadlen;
+
+	/* Pad to the next 16-bit boundary.  (The previous loop advanced
+	 * the write pointer twice per padding byte; harmless only because
+	 * nothing was written after the pad.) */
+	if (payloadlen % 2) {
+		*ptr++ = 0;
+		chksumlen++;
+	}
+
+	return checksum((uint16_t *)buf, chksumlen);
+}
+#endif
+
+/*
+ * Create the raw IPv6 socket (IPPROTO_RAW) used to transmit
+ * SRv6-encapsulated SBFD packets in VRF `vrf`; the send path builds
+ * all headers itself.  Returns -1 when IPv6 is unsupported.
+ */
+int bp_sbfd_socket(const struct vrf *vrf)
+{
+	int s;
+
+	frr_with_privs (&bglobal.bfdd_privs) {
+		s = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_RAW, vrf->vrf_id, vrf->name);
+	}
+	if (s == -1) {
+		if (errno != EAFNOSUPPORT)
+			zlog_fatal("sbfdv6-socket: socket: %s", strerror(errno));
+		else
+			zlog_warn("sbfdv6-socket: V6 is not supported, continuing");
+
+		return -1;
+	}
+
+	bp_set_prio(s, SOCK_OPT_PRIO_HIGH);
+
+	return s;
+}
+
+#ifdef BFD_LINUX
+/*
+ * Fill in the outer IPv6 header for SRv6 reduced-mode encapsulation.
+ * With exactly one segment no SRH is inserted, so the next header is the
+ * inner IPv6 (or IPv4-in-IPv6) packet directly; with more segments the
+ * next header is the routing header (IPPROTO_ROUTING) and the payload
+ * length accounts for the SRH carrying seg_num - 1 segments.
+ */
+static void bp_sbfd_encap_srh_ip6h_red(struct ip6_hdr *srh_ip6h, struct in6_addr *sip,
+				       struct in6_addr *dip, uint8_t seg_num, size_t datalen,
+				       uint16_t family)
+{
+	/* SRH IPv6 Header */
+	srh_ip6h->ip6_flow = (BFD_TOS_VAL << 20);
+	srh_ip6h->ip6_vfc = 6 << 4;
+
+	if (seg_num == 1) {
+		if (family == AF_INET6) {
+			srh_ip6h->ip6_plen =
+				htons(sizeof(struct ip6_hdr) + sizeof(struct udphdr) + datalen);
+			srh_ip6h->ip6_nxt = IPPROTO_IPV6;
+		} else {
+			srh_ip6h->ip6_plen =
+				htons(sizeof(struct ip) + sizeof(struct udphdr) + datalen);
+			srh_ip6h->ip6_nxt = IPPROTO_IPIP;
+		}
+
+	} else {
+		srh_ip6h->ip6_plen = htons(sizeof(struct ip6_hdr) + sizeof(struct udphdr) +
+					   sizeof(struct ipv6_sr_hdr) +
+					   sizeof(struct in6_addr) * (seg_num - 1) + datalen);
+		srh_ip6h->ip6_nxt = IPPROTO_ROUTING;
+	}
+	srh_ip6h->ip6_hlim = BFD_TTL_VAL;
+
+	memcpy(&(srh_ip6h->ip6_src), sip, sizeof(struct in6_addr));
+	memcpy(&(srh_ip6h->ip6_dst), dip, sizeof(struct in6_addr));
+}
+
+/*
+ * Fill in the SRv6 routing header (SRH) in reduced mode: the first
+ * segment travels in the outer IPv6 destination and is omitted here, so
+ * only seg_num - 1 segments are written, in reverse order as the SRH
+ * requires (last segment first).
+ */
+static void bp_sbfd_encap_srh_rth_red(struct ipv6_sr_hdr *srv6h, struct in6_addr *segment_list,
+				      uint8_t seg_num)
+{
+	//caller should make sure: seg_num > 1
+	srv6h->nexthdr = IPPROTO_IPV6;
+	srv6h->hdrlen =
+		GET_RTH_HDR_LEN(RTH_BASE_HEADER_LEN + sizeof(struct in6_addr) * (seg_num - 1));
+	srv6h->type = IPV6_SRCRT_TYPE_4;
+	srv6h->segments_left = seg_num - 1; //if encap reduce mode , seg_num-1
+	srv6h->first_segment = seg_num - 2; //if encap reduce mode , seg_num-2
+	srv6h->flags = 0;
+	srv6h->tag = 0;
+
+	for (int i = 0; i < seg_num - 1; i++)
+		memcpy(&srv6h->segments[i], &segment_list[seg_num - 1 - i], sizeof(struct in6_addr));
+}
+
+/*
+ * Fill in the inner IPv6 header of the encapsulated SBFD packet:
+ * UDP next header, TTL 255, CS6 TOS, payload = UDP header + data.
+ */
+static void bp_sbfd_encap_inner_ip6h(struct ip6_hdr *ip6h, struct in6_addr *sip,
+				     struct in6_addr *dip, size_t datalen)
+{
+	/* IPv6 Header */
+	ip6h->ip6_flow = (BFD_TOS_VAL << 20);
+	ip6h->ip6_vfc = 6 << 4;
+	ip6h->ip6_plen = htons(sizeof(struct udphdr) + datalen);
+	ip6h->ip6_nxt = IPPROTO_UDP;
+	ip6h->ip6_hlim = BFD_TTL_VAL;
+
+	memcpy(&(ip6h->ip6_src), sip, sizeof(struct in6_addr));
+	memcpy(&(ip6h->ip6_dst), dip, sizeof(struct in6_addr));
+}
+
+/*
+ * Fill in the inner IPv4 header of the encapsulated SBFD packet.
+ * `sip`/`dip` are passed as in6_addr; only the leading 4 bytes are
+ * copied into the IPv4 address fields.  The IP checksum is left zero
+ * here and computed by the caller once the full header is in place.
+ */
+static void bp_sbfd_encap_inner_iph(struct ip *iph, struct in6_addr *sip, struct in6_addr *dip,
+				    size_t datalen)
+{
+	/* IPv4 Header */
+	iph->ip_v = 4;
+	iph->ip_hl = 5;
+	iph->ip_tos = BFD_TOS_VAL;
+	iph->ip_len = htons(sizeof(struct ip) + sizeof(struct udphdr) + datalen);
+	iph->ip_id = (uint16_t)frr_weak_random();
+	iph->ip_ttl = BFD_TTL_VAL;
+	iph->ip_p = IPPROTO_UDP;
+	iph->ip_sum = 0;
+	memcpy(&iph->ip_src, sip, sizeof(iph->ip_src));
+	memcpy(&iph->ip_dst, dip, sizeof(iph->ip_dst));
+}
+
+/*
+ * Fill in the UDP header for the IPv6-encapsulated packet, including the
+ * checksum over the pseudo-header and payload (mandatory over IPv6).
+ */
+static void bp_sbfd_encap_udp6(struct udphdr *udph, struct ip6_hdr *ip6h, uint16_t src_port,
+			       uint16_t dst_port, uint8_t *payload, int payloadlen)
+{
+	udph->source = htons(src_port);
+	udph->dest = htons(dst_port);
+	udph->len = htons(sizeof(struct udphdr) + payloadlen);
+	udph->check = udp6_checksum(*ip6h, *udph, payload, payloadlen);
+}
+
+/*
+ * Fill in the UDP header for the IPv4-encapsulated packet, including the
+ * checksum over the pseudo-header and payload.
+ */
+static void bp_sbfd_encap_udp4(struct udphdr *udph, struct ip *iph, uint16_t src_port,
+			       uint16_t dst_port, uint8_t *payload, int payloadlen)
+{
+	udph->source = htons(src_port);
+	udph->dest = htons(dst_port);
+	udph->len = htons(sizeof(struct udphdr) + payloadlen);
+	udph->check = udp4_checksum(*iph, *udph, payload, payloadlen);
+}
+
+/**
+ * @brief Encapsulate an SBFD packet in SRv6 reduced mode and transmit it
+ *        on a raw IPv6 socket.
+ *
+ * @param sd raw socket to send on
+ * @param data actual data, e.g. bfd packet or bfd echo packet
+ * @param datalen actual data length
+ * @param family inner address family (AF_INET or AF_INET6)
+ * @param out_sip source address of the outer IPv6 header (used only when seg_num > 0)
+ * @param sip source address of the inner IPv4/IPv6 header
+ * @param dip destination address of the inner IPv4/IPv6 header
+ * @param src_port source port of udp
+ * @param dst_port destination port of udp
+ * @param seg_num number of segments: 0 = no SRv6 encapsulation,
+ *        1 = outer IPv6 only (reduced mode, no SRH), >1 = SRH inserted
+ * @param segment_list segment list; segment_list[0] becomes the outer
+ *        IPv6 destination address
+ * @return 0 on success, the negative sendmsg() result on failure
+ */
+static int bp_raw_sbfd_red_send(int sd, uint8_t *data, size_t datalen, uint16_t family,
+				struct in6_addr *out_sip, struct in6_addr *sip,
+				struct in6_addr *dip, uint16_t src_port, uint16_t dst_port,
+				uint8_t seg_num, struct in6_addr *segment_list)
+{
+	static uint8_t sendbuf[BUF_SIZ];
+	struct msghdr msg = { 0 };
+	struct iovec iov;
+	int flags = 0;
+	int ret = 0;
+
+	struct ip6_hdr *srh_ip6h;
+	struct ipv6_sr_hdr *psrv6h; // srh Routing header
+	struct ip6_hdr *ip6h;
+	struct ip *iph;
+	struct udphdr *udp;
+	uint8_t *payload;
+
+	struct ipaddr out_sip_addr = { 0 };
+	struct sockaddr_in6 dst_sin6 = { 0 };
+	char buf_addr[INET6_ADDRSTRLEN] = { 0 };
+
+	memset(sendbuf, 0, sizeof(sendbuf));
+	int total_len = 0;
+
+	/* SRH IPv6 Header */
+	if (seg_num > 0) {
+		memcpy(&out_sip_addr.ipaddr_v6, out_sip, sizeof(struct in6_addr));
+
+		srh_ip6h = (struct ip6_hdr *)(sendbuf + total_len);
+		bp_sbfd_encap_srh_ip6h_red(srh_ip6h, &out_sip_addr.ipaddr_v6, &segment_list[0],
+					   seg_num, datalen, family);
+		total_len += sizeof(struct ip6_hdr);
+
+		memcpy(&dst_sin6.sin6_addr, &segment_list[0], sizeof(struct in6_addr));
+	}
+
+	//case with srh header
+	if (seg_num > 1) {
+		psrv6h = (struct ipv6_sr_hdr *)(sendbuf + total_len);
+		bp_sbfd_encap_srh_rth_red(psrv6h, segment_list, seg_num);
+		total_len += sizeof(struct ipv6_sr_hdr) + sizeof(struct in6_addr) * (seg_num - 1);
+	}
+
+	if (family == AF_INET6) {
+		if (seg_num == 0)
+			memcpy(&dst_sin6.sin6_addr, dip, sizeof(struct in6_addr));
+
+		/* Inner IPv6 Header */
+		ip6h = (struct ip6_hdr *)(sendbuf + total_len);
+		bp_sbfd_encap_inner_ip6h(ip6h, sip, dip, datalen);
+		total_len += sizeof(struct ip6_hdr);
+
+		/* UDP Header */
+		udp = (struct udphdr *)(sendbuf + total_len);
+		bp_sbfd_encap_udp6(udp, ip6h, src_port, dst_port, data, datalen);
+		total_len += sizeof(struct udphdr);
+	} else {
+		if (seg_num == 0) {
+			//should never come to here, just print a error hint
+			zlog_err("%s error, empty sidlist for ipv4 bfd", __func__);
+		}
+
+		/* Inner IPv4 Header */
+		iph = (struct ip *)(sendbuf + total_len);
+		bp_sbfd_encap_inner_iph(iph, sip, dip, datalen);
+		total_len += sizeof(struct ip);
+
+		/* UDP Header */
+		udp = (struct udphdr *)(sendbuf + total_len);
+		bp_sbfd_encap_udp4(udp, iph, src_port, dst_port, data, datalen);
+		total_len += sizeof(struct udphdr);
+
+		/* IPv4 header checksum, now that the header is complete. */
+		iph->ip_sum = in_cksum((const void *)iph, sizeof(struct ip));
+	}
+
+	/* BFD payload*/
+	payload = (uint8_t *)(sendbuf + total_len);
+	memcpy(payload, data, datalen);
+	total_len += datalen;
+
+	dst_sin6.sin6_family = AF_INET6;
+	dst_sin6.sin6_port = 0; //we don't use sin6_port in raw, but should set to 0!!
+
+	/* message data. */
+	iov.iov_base = (uint8_t *)sendbuf;
+	iov.iov_len = total_len;
+
+	msg.msg_name = &dst_sin6;
+	msg.msg_namelen = sizeof(struct sockaddr_in6);
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+
+	/* sendmsg */
+	ret = sendmsg(sd, &msg, flags);
+	if (ret < 0) {
+		inet_ntop(AF_INET6, &dst_sin6.sin6_addr, buf_addr, INET6_ADDRSTRLEN);
+		zlog_debug("sbfd send to:%s failed , ret:%d, errno:%s", buf_addr, ret,
+			   safe_strerror(errno));
+
+		return ret;
+	}
+
+	return 0;
+}
+
+#endif
diff --git a/bfdd/bfdd_cli.c b/bfdd/bfdd_cli.c
index 6527ec5f41..a1710ec127 100644
--- a/bfdd/bfdd_cli.c
+++ b/bfdd/bfdd_cli.c
@@ -11,8 +11,12 @@
#include "lib/command.h"
#include "lib/log.h"
#include "lib/northbound_cli.h"
+#include "lib/termtable.h"
+#include "lib/ipaddr.h"
+#ifndef VTYSH_EXTRACT_PL
#include "bfdd/bfdd_cli_clippy.c"
+#endif /* VTYSH_EXTRACT_PL */
#include "bfd.h"
#include "bfdd_nb.h"
@@ -31,6 +35,10 @@
#define LOCAL_INTF_STR "Configure local interface name to use\n"
#define VRF_STR "Configure VRF\n"
#define VRF_NAME_STR "Configure VRF name\n"
+#define SESSION_NAME_STR "Specify bfd session name\n"
+#define SET_SESSION_NAME_STR "bfd session name\n"
+#define SESSION_MODE_STR "Specify bfd session mode\n"
+#define APPLY_SESSION_MODE_STR "Enable bfd mode\n"
/*
* Prototypes.
@@ -41,6 +49,12 @@ bfd_cli_is_single_hop(struct vty *vty)
return strstr(VTY_CURR_XPATH, "/single-hop") != NULL;
}
+/* Check whether the current CLI node belongs to an sbfd-echo session. */
+static bool bfd_cli_is_sbfd_echo(struct vty *vty)
+{
+	const char *curr_xpath = VTY_CURR_XPATH;
+
+	return strstr(curr_xpath, "/sbfd-echo") != NULL;
+}
+
+
static bool
bfd_cli_is_profile(struct vty *vty)
{
@@ -215,45 +229,486 @@ DEFPY_YANG(
return nb_cli_apply_changes(vty, NULL);
}
+/*
+ * "peer ... bfd-mode sbfd-echo ..." — create an sbfd-echo session and
+ * enter its configuration node.
+ *
+ * The session list entry is keyed by source-addr, bfd-name and vrf; the
+ * dest-addr, srv6-source-ipv6 and srv6-encap-data leafs are enqueued in
+ * the same northbound transaction and applied immediately.
+ */
+DEFPY_YANG_NOSH(
+ sbfd_echo_peer_enter, sbfd_echo_peer_enter_cmd,
+ "peer <A.B.C.D|X:X::X:X> bfd-mode sbfd-echo bfd-name BFDNAME$bfdname \
+ [multihop$multihop] local-address <A.B.C.D|X:X::X:X> [vrf NAME] \
+ srv6-source-ipv6 X:X::X:X srv6-encap-data X:X::X:X...",
+ PEER_STR
+ PEER_IPV4_STR
+ PEER_IPV6_STR
+ SESSION_MODE_STR
+ "Enable sbfd-echo mode\n"
+ SESSION_NAME_STR
+ SET_SESSION_NAME_STR
+ MHOP_STR
+ LOCAL_STR
+ LOCAL_IPV4_STR
+ LOCAL_IPV6_STR
+ VRF_STR
+ VRF_NAME_STR
+ "Configure source ipv6 address for srv6 encap\n"
+ LOCAL_IPV6_STR
+ "Configure sidlist data for srv6 encap\n"
+ "X:X::X:X IPv6 sid address\n")
+{
+ /*
+ * data_idx is the argv index of the first srv6-encap-data value in
+ * the shortest form of the command; the optional "vrf NAME" pair
+ * adds two tokens and "multihop" adds one (both adjusted below).
+ * NOTE(review): this index arithmetic is tied to the exact token
+ * layout of the command string above — re-check it if the syntax
+ * ever changes.
+ */
+ int ret, slen, data_idx = 11;
+ char xpath[XPATH_MAXLEN], xpath_sl[XPATH_MAXLEN + 32], xpath_mh[XPATH_MAXLEN + 32];
+
+ if (!bfdname) {
+ vty_out(vty, "%% ERROR: bfd name is required\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* sbfd-echo is self-addressed: peer must equal local-address. */
+ if (strcmp(peer_str, local_address_str)) {
+ vty_out(vty,
+ "%% ERROR: peer and local-address must be the same in sbfd-echo mode\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ slen = snprintf(xpath, sizeof(xpath),
+ "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo[source-addr='%s'][bfd-name='%s']",
+ local_address_str, bfdname);
+
+ if (vrf) {
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", vrf);
+ data_idx += 2;
+ } else
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", VRF_DEFAULT_NAME);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+
+ if (multihop) {
+ snprintf(xpath_mh, sizeof(xpath_mh), "%s/multi-hop", xpath);
+ nb_cli_enqueue_change(vty, xpath_mh, NB_OP_MODIFY, "true");
+ data_idx += 1;
+ }
+
+ /* Every remaining argument is one SID of the SRv6 encap list. */
+ for (int i = data_idx; i < argc; i++) {
+ snprintf(xpath_sl, sizeof(xpath_sl), "%s/srv6-encap-data", xpath);
+ nb_cli_enqueue_change(vty, xpath_sl, NB_OP_CREATE, argv[i]->arg);
+ }
+
+ snprintf(xpath_sl, sizeof(xpath_sl), "%s/srv6-source-ipv6", xpath);
+ nb_cli_enqueue_change(vty, xpath_sl, NB_OP_MODIFY, srv6_source_ipv6_str);
+
+ snprintf(xpath_sl, sizeof(xpath_sl), "%s/dest-addr", xpath);
+ nb_cli_enqueue_change(vty, xpath_sl, NB_OP_MODIFY, peer_str);
+
+ /* Apply settings immediately. */
+ ret = nb_cli_apply_changes(vty, NULL);
+ if (ret == CMD_SUCCESS)
+ VTY_PUSH_XPATH(BFD_PEER_NODE, xpath);
+
+ return ret;
+}
+
+/*
+ * "no peer ... bfd-mode sbfd-echo ..." — destroy an sbfd-echo session.
+ *
+ * Only the list keys (source-addr, bfd-name, vrf) select the entry to
+ * delete; the other tokens are accepted for symmetry with the set form
+ * but are otherwise ignored.
+ */
+DEFPY_YANG(
+ sbfd_echo_no_peer, sbfd_echo_no_peer_cmd,
+ "no peer <A.B.C.D|X:X::X:X> bfd-mode sbfd-echo bfd-name BFDNAME$bfdname \
+ [multihop$multihop] local-address <A.B.C.D|X:X::X:X> [vrf NAME] \
+ srv6-source-ipv6 X:X::X:X srv6-encap-data X:X::X:X...",
+ NO_STR
+ PEER_STR
+ PEER_IPV4_STR
+ PEER_IPV6_STR
+ SESSION_MODE_STR
+ "Enable sbfd-echo mode\n"
+ SESSION_NAME_STR
+ SET_SESSION_NAME_STR
+ MHOP_STR
+ LOCAL_STR
+ LOCAL_IPV4_STR
+ LOCAL_IPV6_STR
+ VRF_STR
+ VRF_NAME_STR
+ "Configure source ipv6 address for srv6 encap\n"
+ LOCAL_IPV6_STR
+ "Configure sidlist data for srv6 encap\n"
+ "X:X::X:X IPv6 sid address\n")
+{
+ int slen;
+ char xpath[XPATH_MAXLEN];
+
+ slen = snprintf(xpath, sizeof(xpath),
+ "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo[source-addr='%s'][bfd-name='%s']",
+ local_address_str, bfdname);
+
+ if (vrf)
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", vrf);
+ else
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", VRF_DEFAULT_NAME);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ /* Apply settings immediately. */
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+
+/*
+ * "peer ... bfd-mode sbfd-init ..." — create an sbfd-init session with
+ * SRv6 encapsulation and enter its configuration node.
+ *
+ * The session list entry is keyed by source-addr, dest-addr, bfd-name
+ * and vrf; remote-discr and the SRv6 leafs are enqueued in the same
+ * northbound transaction and applied immediately.
+ */
+DEFPY_YANG_NOSH(
+ sbfd_init_peer_enter, sbfd_init_peer_enter_cmd,
+ "peer <A.B.C.D|X:X::X:X> bfd-mode sbfd-init bfd-name BFDNAME$bfdname \
+ [multihop$multihop] local-address <A.B.C.D|X:X::X:X> [vrf NAME] \
+ remote-discr (1-4294967295)$discr srv6-source-ipv6 X:X::X:X srv6-encap-data X:X::X:X...",
+ PEER_STR
+ PEER_IPV4_STR
+ PEER_IPV6_STR
+ SESSION_MODE_STR
+ "Enable sbfd-init mode\n"
+ SESSION_NAME_STR
+ SET_SESSION_NAME_STR
+ MHOP_STR
+ LOCAL_STR
+ LOCAL_IPV4_STR
+ LOCAL_IPV6_STR
+ VRF_STR
+ VRF_NAME_STR
+ "Configure bfd session remote discriminator\n"
+ "Configure remote discriminator\n"
+ "Configure source ipv6 address for srv6 encap\n"
+ LOCAL_IPV6_STR
+ "Configure sidlist data for srv6 encap\n"
+ "X:X::X:X IPv6 sid address\n")
+{
+ /*
+ * data_idx is the argv index of the first srv6-encap-data value in
+ * the shortest command form; "vrf NAME" adds two tokens and
+ * "multihop" adds one (both adjusted below).
+ * NOTE(review): tied to the exact token layout of the command string
+ * above — re-check if the syntax ever changes.
+ */
+ int ret, slen, data_idx = 13;
+ char xpath[XPATH_MAXLEN], xpath_sl[XPATH_MAXLEN + 32], xpath_rd[XPATH_MAXLEN + 32],
+ xpath_mh[XPATH_MAXLEN + 32];
+ struct ipaddr peer_addr = { 0 };
+ struct ipaddr local_addr = { 0 };
+
+ if (!bfdname) {
+ vty_out(vty, "%% ERROR: bfd name is required\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* Both addresses must parse and share the same address family. */
+ str2ipaddr(peer_str, &peer_addr);
+ if (peer_addr.ipa_type == AF_UNSPEC) {
+ vty_out(vty, "%% ERROR: peer is invalid address\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ str2ipaddr(local_address_str, &local_addr);
+ if (local_addr.ipa_type == AF_UNSPEC) {
+ vty_out(vty, "%% ERROR: local_address is invalid address\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ if (peer_addr.ipa_type != local_addr.ipa_type) {
+ vty_out(vty, "%% ERROR: peer and local_address are not the same ip version\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ slen = snprintf(xpath, sizeof(xpath),
+ "/frr-bfdd:bfdd/bfd/sessions/sbfd-init[source-addr='%s'][dest-addr='%s'][bfd-name='%s']",
+ local_address_str, peer_str, bfdname);
+
+ if (vrf) {
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", vrf);
+ data_idx += 2;
+ } else
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", VRF_DEFAULT_NAME);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+
+ if (multihop) {
+ snprintf(xpath_mh, sizeof(xpath_mh), "%s/multi-hop", xpath);
+ nb_cli_enqueue_change(vty, xpath_mh, NB_OP_MODIFY, "true");
+ data_idx += 1;
+ }
+
+ /*
+ * NOTE(review): srv6-source-ipv6 is a mandatory token in the command
+ * string above, so this NULL check looks redundant — confirm before
+ * relying on it.
+ */
+ if (srv6_source_ipv6_str) {
+ for (int i = data_idx; i < argc; i++) {
+ snprintf(xpath_sl, sizeof(xpath_sl), "%s/srv6-encap-data", xpath);
+ nb_cli_enqueue_change(vty, xpath_sl, NB_OP_CREATE, argv[i]->arg);
+ }
+
+ snprintf(xpath_sl, sizeof(xpath_sl), "%s/srv6-source-ipv6", xpath);
+ nb_cli_enqueue_change(vty, xpath_sl, NB_OP_MODIFY, srv6_source_ipv6_str);
+ }
+
+ snprintf(xpath_rd, sizeof(xpath_rd), "%s/remote-discr", xpath);
+ nb_cli_enqueue_change(vty, xpath_rd, NB_OP_MODIFY, discr_str);
+
+ /* Apply settings immediately. */
+ ret = nb_cli_apply_changes(vty, NULL);
+ if (ret == CMD_SUCCESS)
+ VTY_PUSH_XPATH(BFD_PEER_NODE, xpath);
+
+ return ret;
+}
+
+/*
+ * "no peer ... bfd-mode sbfd-init ..." — destroy an sbfd-init session.
+ *
+ * Only the list keys (source-addr, dest-addr, bfd-name, vrf) select the
+ * entry; remote-discr and the SRv6 tokens are accepted for symmetry
+ * with the set form but are otherwise ignored.
+ * NOTE(review): the remote-discr range here is (0-4294967295) while the
+ * set form uses (1-4294967295) — confirm the mismatch is intentional.
+ */
+DEFPY_YANG(
+ sbfd_init_no_peer, sbfd_init_no_peer_cmd,
+ "no peer <A.B.C.D|X:X::X:X> bfd-mode sbfd-init bfd-name BFDNAME$bfdname \
+ [multihop$multihop] local-address <A.B.C.D|X:X::X:X> [vrf NAME] \
+ remote-discr (0-4294967295)$discr srv6-source-ipv6 X:X::X:X srv6-encap-data X:X::X:X...",
+ NO_STR
+ PEER_STR
+ PEER_IPV4_STR
+ PEER_IPV6_STR
+ SESSION_MODE_STR
+ "Enable sbfd-init mode\n"
+ SESSION_NAME_STR
+ SET_SESSION_NAME_STR
+ MHOP_STR
+ LOCAL_STR
+ LOCAL_IPV4_STR
+ LOCAL_IPV6_STR
+ VRF_STR
+ VRF_NAME_STR
+ "Configure bfd session remote discriminator\n"
+ "Configure remote discriminator\n"
+ "Configure source ipv6 address for srv6 encap\n"
+ LOCAL_IPV6_STR
+ "Configure sidlist data for srv6 encap\n"
+ "X:X::X:X IPv6 sid address\n")
+{
+ int slen;
+ char xpath[XPATH_MAXLEN];
+
+ slen = snprintf(xpath, sizeof(xpath),
+ "/frr-bfdd:bfdd/bfd/sessions/sbfd-init[source-addr='%s'][dest-addr='%s'][bfd-name='%s']",
+ local_address_str, peer_str, bfdname);
+
+ if (vrf)
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", vrf);
+ else
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", VRF_DEFAULT_NAME);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ /* Apply settings immediately. */
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/*
+ * "peer ... bfd-mode sbfd-init ... remote-discr N" (raw form) — create
+ * an sbfd-init session without SRv6 encapsulation and enter its
+ * configuration node.  Same keys as the SRv6 form; only the session
+ * entry, optional multi-hop flag and remote-discr are enqueued.
+ */
+DEFPY_YANG_NOSH(
+ sbfd_init_peer_raw_enter, sbfd_init_peer_raw_enter_cmd,
+ "peer <A.B.C.D|X:X::X:X> bfd-mode sbfd-init bfd-name BFDNAME$bfdname \
+ [multihop$multihop] local-address <A.B.C.D|X:X::X:X> [vrf NAME] \
+ remote-discr (1-4294967295)$discr",
+ PEER_STR
+ PEER_IPV4_STR
+ PEER_IPV6_STR
+ SESSION_MODE_STR
+ "Enable sbfd-init mode\n"
+ SESSION_NAME_STR
+ SET_SESSION_NAME_STR
+ MHOP_STR
+ LOCAL_STR
+ LOCAL_IPV4_STR
+ LOCAL_IPV6_STR
+ VRF_STR
+ VRF_NAME_STR
+ "Configure bfd session remote discriminator\n"
+ "Configure remote discriminator\n")
+{
+ int ret, slen;
+ char xpath[XPATH_MAXLEN], xpath_rd[XPATH_MAXLEN + 32], xpath_mh[XPATH_MAXLEN + 32];
+ struct ipaddr peer_addr = { 0 };
+ struct ipaddr local_addr = { 0 };
+
+ if (!bfdname) {
+ vty_out(vty, "%% ERROR: bfd name is required\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* Both addresses must parse and share the same address family. */
+ str2ipaddr(peer_str, &peer_addr);
+ if (peer_addr.ipa_type == AF_UNSPEC) {
+ vty_out(vty, "%% ERROR: peer is invalid address\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ str2ipaddr(local_address_str, &local_addr);
+ if (local_addr.ipa_type == AF_UNSPEC) {
+ vty_out(vty, "%% ERROR: local_address is invalid address\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ if (peer_addr.ipa_type != local_addr.ipa_type) {
+ vty_out(vty, "%% ERROR: peer and local_address are not the same ip version\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ slen = snprintf(xpath, sizeof(xpath),
+ "/frr-bfdd:bfdd/bfd/sessions/sbfd-init[source-addr='%s'][dest-addr='%s'][bfd-name='%s']",
+ local_address_str, peer_str, bfdname);
+
+ if (vrf)
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", vrf);
+ else
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", VRF_DEFAULT_NAME);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+
+ if (multihop) {
+ snprintf(xpath_mh, sizeof(xpath_mh), "%s/multi-hop", xpath);
+ nb_cli_enqueue_change(vty, xpath_mh, NB_OP_MODIFY, "true");
+ }
+
+ snprintf(xpath_rd, sizeof(xpath_rd), "%s/remote-discr", xpath);
+ nb_cli_enqueue_change(vty, xpath_rd, NB_OP_MODIFY, discr_str);
+
+ /* Apply settings immediately. */
+ ret = nb_cli_apply_changes(vty, NULL);
+ if (ret == CMD_SUCCESS)
+ VTY_PUSH_XPATH(BFD_PEER_NODE, xpath);
+
+ return ret;
+}
+
+/*
+ * "no peer ... bfd-mode sbfd-init ... remote-discr N" (raw form) —
+ * destroy an sbfd-init session created without SRv6 encapsulation.
+ * Only the list keys (source-addr, dest-addr, bfd-name, vrf) select the
+ * entry; remote-discr is accepted for symmetry but otherwise ignored.
+ */
+DEFPY_YANG(
+ sbfd_init_no_peer_raw, sbfd_init_no_peer_raw_cmd,
+ "no peer <A.B.C.D|X:X::X:X> bfd-mode sbfd-init bfd-name BFDNAME$bfdname \
+ [multihop$multihop] local-address <A.B.C.D|X:X::X:X> [vrf NAME] \
+ remote-discr (0-4294967295)$discr",
+ NO_STR
+ PEER_STR
+ PEER_IPV4_STR
+ PEER_IPV6_STR
+ SESSION_MODE_STR
+ "Enable sbfd-init mode\n"
+ SESSION_NAME_STR
+ SET_SESSION_NAME_STR
+ MHOP_STR
+ LOCAL_STR
+ LOCAL_IPV4_STR
+ LOCAL_IPV6_STR
+ VRF_STR
+ VRF_NAME_STR
+ "Configure bfd session remote discriminator\n"
+ "Configure remote discriminator\n")
+{
+ int slen;
+ char xpath[XPATH_MAXLEN];
+
+ slen = snprintf(xpath, sizeof(xpath),
+ "/frr-bfdd:bfdd/bfd/sessions/sbfd-init[source-addr='%s'][dest-addr='%s'][bfd-name='%s']",
+ local_address_str, peer_str, bfdname);
+
+ if (vrf)
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", vrf);
+ else
+ snprintf(xpath + slen, sizeof(xpath) - slen, "[vrf='%s']", VRF_DEFAULT_NAME);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ /* Apply settings immediately. */
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Map a bfd_mode_type value to its CLI keyword. */
+static const char *_bfd_cli_bfd_mode_type_to_string(enum bfd_mode_type mode)
+{
+	if (mode == BFD_MODE_TYPE_BFD)
+		return "bfd";
+	if (mode == BFD_MODE_TYPE_SBFD_ECHO)
+		return "sbfd-echo";
+	if (mode == BFD_MODE_TYPE_SBFD_INIT)
+		return "sbfd-init";
+
+	return "Unknown";
+}
+
+/* Accumulator used to render the srv6-encap-data SID list as one line. */
+struct sidlist_show_iter {
+ char buf[INET6_ADDRSTRLEN * SRV6_MAX_SEGS];
+};
+
+/* yang_dnode_iterate() callback: append one SID plus a trailing space. */
+static int sidlist_show_iter_cb(const struct lyd_node *dnode, void *arg)
+{
+	struct sidlist_show_iter *iter = arg;
+
+	strlcat(iter->buf, yang_dnode_get_string(dnode, NULL), sizeof(iter->buf));
+	strlcat(iter->buf, " ", sizeof(iter->buf));
+
+	return YANG_ITER_CONTINUE;
+}
+
static void _bfd_cli_show_peer(struct vty *vty, const struct lyd_node *dnode,
- bool show_defaults __attribute__((__unused__)),
- bool mhop)
+ bool show_defaults __attribute__((__unused__)), bool mhop,
+ uint32_t bfd_mode)
{
const char *vrf = yang_dnode_get_string(dnode, "vrf");
+ struct sidlist_show_iter iter = { 0 };
- vty_out(vty, " peer %s",
- yang_dnode_get_string(dnode, "dest-addr"));
+ vty_out(vty, " peer %s", yang_dnode_get_string(dnode, "./dest-addr"))
+ if (bfd_mode == BFD_MODE_TYPE_BFD) {
+ /* Classic BFD keeps the historical single/multi-hop rendering. */
+ if (mhop)
+ vty_out(vty, " multihop");
- if (mhop)
- vty_out(vty, " multihop");
+ if (yang_dnode_exists(dnode, "./source-addr"))
+ vty_out(vty, " local-address %s",
+ yang_dnode_get_string(dnode, "./source-addr"));
- if (yang_dnode_exists(dnode, "source-addr"))
- vty_out(vty, " local-address %s",
- yang_dnode_get_string(dnode, "source-addr"));
+ if (strcmp(vrf, VRF_DEFAULT_NAME))
+ vty_out(vty, " vrf %s", vrf);
- if (strcmp(vrf, VRF_DEFAULT_NAME))
- vty_out(vty, " vrf %s", vrf);
+ if (!mhop) {
+ const char *ifname = yang_dnode_get_string(dnode, "./interface");
- if (!mhop) {
- const char *ifname =
- yang_dnode_get_string(dnode, "interface");
- if (strcmp(ifname, "*"))
- vty_out(vty, " interface %s", ifname);
- }
+ if (strcmp(ifname, "*"))
+ vty_out(vty, " interface %s", ifname);
+ }
+ vty_out(vty, "\n");
+ } else if (bfd_mode == BFD_MODE_TYPE_SBFD_ECHO || bfd_mode == BFD_MODE_TYPE_SBFD_INIT) {
+ /* sbfd session: emit bfd-mode/bfd-name plus SRv6 encap leafs. */
+ vty_out(vty, " bfd-mode %s", _bfd_cli_bfd_mode_type_to_string(bfd_mode));
+
+ if (yang_dnode_exists(dnode, "bfd-name"))
+ vty_out(vty, " bfd-name %s", yang_dnode_get_string(dnode, "bfd-name"));
- vty_out(vty, "\n");
+ if (mhop)
+ vty_out(vty, " multihop");
+
+ if (yang_dnode_exists(dnode, "source-addr"))
+ vty_out(vty, " local-address %s",
+ yang_dnode_get_string(dnode, "source-addr"));
+
+ if (strcmp(vrf, VRF_DEFAULT_NAME))
+ vty_out(vty, " vrf %s", vrf);
+
+ if (bfd_mode == BFD_MODE_TYPE_SBFD_INIT) {
+ if (yang_dnode_exists(dnode, "remote-discr"))
+ vty_out(vty, " remote-discr %u",
+ yang_dnode_get_uint32(dnode, "remote-discr"));
+ }
+
+ if (yang_dnode_exists(dnode, "srv6-source-ipv6"))
+ vty_out(vty, " srv6-source-ipv6 %s",
+ yang_dnode_get_string(dnode, "srv6-source-ipv6"));
+
+ if (yang_dnode_exists(dnode, "srv6-encap-data")) {
+ yang_dnode_iterate(sidlist_show_iter_cb, &iter, dnode, "./srv6-encap-data");
+ vty_out(vty, " srv6-encap-data %s", iter.buf);
+ }
+
+ vty_out(vty, "\n");
+ }
}
void bfd_cli_show_single_hop_peer(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults)
{
- _bfd_cli_show_peer(vty, dnode, show_defaults, false);
+ _bfd_cli_show_peer(vty, dnode, show_defaults, false, BFD_MODE_TYPE_BFD);
}
void bfd_cli_show_multi_hop_peer(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults)
{
- _bfd_cli_show_peer(vty, dnode, show_defaults, true);
+ _bfd_cli_show_peer(vty, dnode, show_defaults, true, BFD_MODE_TYPE_BFD);
+}
+
+/* sbfd-echo sessions render with mhop=false. */
+void bfd_cli_show_sbfd_echo_peer(struct vty *vty, const struct lyd_node *dnode, bool show_defaults)
+{
+ _bfd_cli_show_peer(vty, dnode, show_defaults, false, BFD_MODE_TYPE_SBFD_ECHO);
+}
+
+/*
+ * NOTE(review): mhop is hard-coded to true here, so every sbfd-init
+ * session prints " multihop" — confirm this matches the multi-hop leaf
+ * actually configured on the session.
+ */
+void bfd_cli_show_sbfd_init_peer(struct vty *vty, const struct lyd_node *dnode, bool show_defaults)
+{
+ _bfd_cli_show_peer(vty, dnode, show_defaults, true, BFD_MODE_TYPE_SBFD_INIT);
+}
void bfd_cli_show_peer_end(struct vty *vty, const struct lyd_node *dnode
@@ -446,8 +901,9 @@ DEFPY_YANG(
{
char value[32];
- if (!bfd_cli_is_profile(vty) && !bfd_cli_is_single_hop(vty)) {
- vty_out(vty, "%% Echo mode is only available for single hop sessions.\n");
+ if (!bfd_cli_is_profile(vty) && !bfd_cli_is_single_hop(vty) && !bfd_cli_is_sbfd_echo(vty)) {
+ vty_out(vty,
+ "%% Echo mode is only available for single hop or sbfd echo sessions.\n");
return CMD_WARNING_CONFIG_FAILED;
}
@@ -470,8 +926,9 @@ DEFPY_YANG(
{
char value[32];
- if (!bfd_cli_is_profile(vty) && !bfd_cli_is_single_hop(vty)) {
- vty_out(vty, "%% Echo mode is only available for single hop sessions.\n");
+ if (!bfd_cli_is_profile(vty) && !bfd_cli_is_single_hop(vty) && !bfd_cli_is_sbfd_echo(vty)) {
+ vty_out(vty,
+ "%% Echo mode is only available for single hop or sbfd echo sessions.\n");
return CMD_WARNING_CONFIG_FAILED;
}
@@ -501,11 +958,13 @@ DEFPY_YANG(
{
char value[32];
- if (!bfd_cli_is_profile(vty) && !bfd_cli_is_single_hop(vty)) {
- vty_out(vty, "%% Echo mode is only available for single hop sessions.\n");
+ if (!bfd_cli_is_profile(vty) && !bfd_cli_is_single_hop(vty) && !bfd_cli_is_sbfd_echo(vty)) {
+ vty_out(vty,
+ "%% Echo mode is only available for single hop or sbfd echo sessions.\n");
return CMD_WARNING_CONFIG_FAILED;
}
+
if (disabled)
snprintf(value, sizeof(value), "0");
else
@@ -657,6 +1116,160 @@ DEFPY_YANG(bfd_peer_profile, bfd_peer_profile_cmd,
return nb_cli_apply_changes(vty, NULL);
}
+/*
+ * "sbfd reflector source-address X:X::X:X discriminator ..." — create
+ * reflector entries.  Each trailing WORD is either a single
+ * discriminator ("100") or an inclusive range ("100-300").
+ */
+DEFPY(
+ sbfd_reflector, sbfd_reflector_cmd,
+ "sbfd reflector source-address X:X::X:X$srcip discriminator WORD...",
+ "seamless BFD\n"
+ "sbfd reflector\n"
+ "binding source ip address\n"
+ IPV6_STR
+ "discriminator\n"
+ "discriminator value or range (e.g. 100 or 100 200 300 or 100-300)\n")
+{
+ int idx_discr = 5;
+ int i;
+ uint32_t j;
+ uint32_t discr = 0;
+ uint32_t discr_from = 0;
+ uint32_t discr_to = 0;
+
+ for (i = idx_discr; i < argc; i++) {
+ /* check validity*/
+ char *pstr = argv[i]->arg;
+
+ /* single discriminator, e.g. "100" */
+ if (strspn(pstr, "0123456789") == strlen(pstr)) {
+ /* strtoul instead of atol: covers the full uint32 range. */
+ discr = strtoul(pstr, NULL, 10);
+ sbfd_reflector_new(discr, &srcip);
+ }
+ /* discriminator range, e.g. "100-300" */
+ else if (strspn(pstr, "0123456789-") == strlen(pstr)) {
+ char *token = strtok(argv[i]->arg, "-");
+
+ if (token)
+ discr_from = strtoul(token, NULL, 10);
+
+ token = strtok(NULL, "-");
+ if (token)
+ discr_to = strtoul(token, NULL, 10);
+
+ if (discr_from >= discr_to) {
+ vty_out(vty, "input discriminator range %u-%u is illegal\n",
+ discr_from, discr_to);
+ /*
+ * Skip the bogus range: previously execution
+ * fell through and could still create a
+ * reflector when from == to.
+ */
+ continue;
+ }
+
+ for (j = discr_from; j <= discr_to; j++)
+ sbfd_reflector_new(j, &srcip);
+ }
+ /* illegal input */
+ else
+ /*
+ * Pass the argument text, not the cmd_token pointer:
+ * the old "(char *)argv[i]" printed a struct pointer
+ * through %s (undefined behavior / garbage output).
+ */
+ vty_out(vty, "input discriminator %s is illegal\n", argv[i]->arg);
+ }
+
+ return CMD_SUCCESS;
+}
+
+/* "no sbfd reflector [all]" — remove every reflector discriminator. */
+DEFPY(
+ no_sbfd_reflector_all, no_sbfd_reflector_all_cmd,
+ "no sbfd reflector [all]",
+ NO_STR
+ "seamless BFD\n"
+ "sbfd reflector\n"
+ "all\n")
+{
+ sbfd_reflector_flush();
+
+ /* The flush must leave the discriminator table empty. */
+ if (sbfd_discr_get_count() != 0) {
+ vty_out(vty, "delete all reflector discriminator failed.\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ return CMD_SUCCESS;
+}
+
+/*
+ * "no sbfd reflector <discr> [<end>]" — delete a single reflector
+ * discriminator or an inclusive range.
+ */
+DEFPY(
+ no_sbfd_reflector, no_sbfd_reflector_cmd,
+ "no sbfd reflector (0-4294967295)$start_discr [(0-4294967295)$end_discr]",
+ NO_STR
+ "seamless BFD\n"
+ "sbfd reflector\n"
+ "start discriminator\n"
+ "end discriminator\n")
+{
+ struct sbfd_reflector *sr;
+ uint32_t i;
+
+ if (end_discr == 0) {
+ /* Single-discriminator form: "no sbfd reflector <discr>". */
+ if (start_discr == 0) {
+ vty_out(vty, "input reflector discriminator is illegal.\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ sr = sbfd_discr_lookup(start_discr);
+ if (!sr) {
+ vty_out(vty, "input reflector discriminator does not exist.\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* TODO: notify bfdsyncd once the FPM hook is available. */
+ /* bfd_fpm_sbfd_reflector_sendmsg(sr, false); */
+ sbfd_reflector_free(start_discr);
+
+ } else {
+ if (end_discr <= start_discr) {
+ vty_out(vty, "input reflector discriminator is illegal.\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /*
+ * Iterate with an unsigned counter and an explicit terminating
+ * check: the previous int32_t counter overflowed (undefined
+ * behavior) for discriminators above INT32_MAX, and a plain
+ * "i <= end_discr" loop would never terminate when end_discr
+ * is UINT32_MAX.
+ */
+ for (i = start_discr;; i++) {
+ sr = sbfd_discr_lookup(i);
+ if (sr) {
+ /* TODO: notify bfdsyncd once the FPM hook is available. */
+ /* bfd_fpm_sbfd_reflector_sendmsg(sr, false); */
+ sbfd_reflector_free(i);
+ }
+
+ if (i == (uint32_t)end_discr)
+ break;
+ }
+ }
+
+ return CMD_SUCCESS;
+}
+
+/* Hash-walk callback: append one reflector row to the output table. */
+static void _sbfd_reflector_show(struct hash_bucket *hb, void *arg)
+{
+	struct ttable *tt = arg;
+	struct sbfd_reflector *sr = hb->data;
+	char addr[INET6_ADDRSTRLEN];
+
+	inet_ntop(AF_INET6, &sr->local, addr, sizeof(addr));
+	ttable_add_row(tt, "%u|%s|%s|%s", sr->discr, addr, "Active", "Software");
+}
+
+/* "show sbfd reflector" — dump all reflector discriminators as a table. */
+DEFPY(
+ sbfd_reflector_show_info, sbfd_reflector_show_info_cmd,
+ "show sbfd reflector",
+ "show\n"
+ "seamless BFD\n"
+ "sbfd reflector\n")
+{
+ struct ttable *tt;
+ char *dump;
+
+ vty_out(vty, "sbfd reflector discriminator :\n");
+
+ /* Header row plus separator, then one row per reflector entry. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "SBFD-Discr|SourceIP|State|CreateType");
+ ttable_rowseps(tt, 0, BOTTOM, true, '-');
+ sbfd_discr_iterate(_sbfd_reflector_show, tt);
+
+ dump = ttable_dump(tt, "\n");
+ vty_out(vty, "%s", dump);
+ XFREE(MTYPE_TMP, dump);
+ ttable_del(tt);
+
+ return CMD_SUCCESS;
+}
void bfd_cli_peer_profile_show(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults)
{
@@ -694,6 +1307,18 @@ bfdd_cli_init(void)
install_element(BFD_NODE, &bfd_peer_enter_cmd);
install_element(BFD_NODE, &bfd_no_peer_cmd);
+ install_element(BFD_NODE, &sbfd_echo_peer_enter_cmd);
+ install_element(BFD_NODE, &sbfd_echo_no_peer_cmd);
+
+ install_element(BFD_NODE, &sbfd_init_peer_enter_cmd);
+ install_element(BFD_NODE, &sbfd_init_no_peer_cmd);
+ install_element(BFD_NODE, &sbfd_init_peer_raw_enter_cmd);
+ install_element(BFD_NODE, &sbfd_init_no_peer_raw_cmd);
+
+ install_element(BFD_NODE, &sbfd_reflector_cmd);
+ install_element(BFD_NODE, &no_sbfd_reflector_all_cmd);
+ install_element(BFD_NODE, &no_sbfd_reflector_cmd);
+ install_element(VIEW_NODE, &sbfd_reflector_show_info_cmd);
install_element(BFD_PEER_NODE, &bfd_peer_shutdown_cmd);
install_element(BFD_PEER_NODE, &bfd_peer_mult_cmd);
install_element(BFD_PEER_NODE, &bfd_peer_rx_cmd);
diff --git a/bfdd/bfdd_nb.c b/bfdd/bfdd_nb.c
index 114fbc2bdd..f60d8397bb 100644
--- a/bfdd/bfdd_nb.c
+++ b/bfdd/bfdd_nb.c
@@ -484,6 +484,459 @@ const struct frr_yang_module_info frr_bfdd_info = {
}
},
{
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo",
+ .cbs = {
+ .create = bfdd_bfd_sessions_sbfd_echo_create,
+ .destroy = bfdd_bfd_sessions_sbfd_echo_destroy,
+ .get_next = bfdd_bfd_sessions_sbfd_echo_get_next,
+ .get_keys = bfdd_bfd_sessions_sbfd_echo_get_keys,
+ .lookup_entry = bfdd_bfd_sessions_sbfd_echo_lookup_entry,
+ .cli_show = bfd_cli_show_sbfd_echo_peer, /* TODO */
+ .cli_show_end = bfd_cli_show_peer_end, /* TODO */
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/dest-addr",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_sbfd_echo_dest_addr_modify,
+ .destroy = bfdd_bfd_sessions_sbfd_echo_dest_addr_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/srv6-source-ipv6",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_sbfd_srv6_source_ipv6_modify,
+ .destroy = bfdd_bfd_sessions_sbfd_srv6_source_ipv6_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/profile",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_profile_modify,
+ .destroy = bfdd_bfd_sessions_single_hop_profile_destroy,
+ .cli_show = bfd_cli_peer_profile_show,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/detection-multiplier",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_detection_multiplier_modify,
+ .cli_show = bfd_cli_show_mult,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/desired-transmission-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_desired_transmission_interval_modify,
+ .cli_show = bfd_cli_show_tx,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/required-receive-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_required_receive_interval_modify,
+ .cli_show = bfd_cli_show_rx,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/echo-mode",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_sbfd_echo_mode_modify,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/desired-echo-transmission-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_desired_echo_transmission_interval_modify,
+ .cli_show = bfd_cli_show_desired_echo_transmission_interval,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/required-echo-receive-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_required_echo_receive_interval_modify,
+ .cli_show = bfd_cli_show_required_echo_receive_interval,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/administrative-down",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_administrative_down_modify,
+ .cli_show = bfd_cli_show_shutdown,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/passive-mode",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_passive_mode_modify,
+ .cli_show = bfd_cli_show_passive,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/bfd-mode",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_bfd_mode_modify,
+ .destroy = bfdd_bfd_sessions_bfd_mode_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/srv6-encap-data",
+ .cbs = {
+ .create = bfdd_bfd_sessions_segment_list_create,
+ .destroy = bfdd_bfd_sessions_segment_list_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/minimum-ttl",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_multi_hop_minimum_ttl_modify,
+ .cli_show = bfd_cli_show_minimum_ttl,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/multi-hop",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_sbfd_multi_hop_modify,
+ .destroy = bfdd_bfd_sessions_sbfd_multi_hop_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/local-discriminator",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_discriminator_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/local-state",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_state_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/local-diagnostic",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_diagnostic_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/local-multiplier",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_multiplier_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/remote-discriminator",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_discriminator_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/remote-state",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_state_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/remote-diagnostic",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_diagnostic_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/remote-multiplier",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_multiplier_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/negotiated-transmission-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_transmission_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/negotiated-receive-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_receive_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/detection-mode",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_detection_mode_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/last-down-time",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_last_down_time_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/last-up-time",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_last_up_time_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/session-down-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_session_down_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/session-up-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_session_up_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/control-packet-input-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_control_packet_input_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/control-packet-output-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_control_packet_output_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/negotiated-echo-transmission-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_echo_transmission_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/echo-packet-input-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_echo_packet_input_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/stats/echo-packet-output-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_echo_packet_output_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init",
+ .cbs = {
+ .create = bfdd_bfd_sessions_sbfd_init_create,
+ .destroy = bfdd_bfd_sessions_sbfd_init_destroy,
+ .get_next = bfdd_bfd_sessions_sbfd_init_get_next,
+ .get_keys = bfdd_bfd_sessions_sbfd_init_get_keys,
+ .lookup_entry = bfdd_bfd_sessions_sbfd_init_lookup_entry,
+ .cli_show = bfd_cli_show_sbfd_init_peer, /* TODO */
+ .cli_show_end = bfd_cli_show_peer_end, /* TODO */
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/srv6-source-ipv6",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_sbfd_srv6_source_ipv6_modify,
+ .destroy = bfdd_bfd_sessions_sbfd_srv6_source_ipv6_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/remote-discr",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_sbfd_init_remote_discr_modify,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/profile",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_profile_modify,
+ .destroy = bfdd_bfd_sessions_single_hop_profile_destroy,
+ .cli_show = bfd_cli_peer_profile_show,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/detection-multiplier",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_detection_multiplier_modify,
+ .cli_show = bfd_cli_show_mult,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/desired-transmission-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_desired_transmission_interval_modify,
+ .cli_show = bfd_cli_show_tx,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/required-receive-interval",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_required_receive_interval_modify,
+ .cli_show = bfd_cli_show_rx,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/administrative-down",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_administrative_down_modify,
+ .cli_show = bfd_cli_show_shutdown,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/passive-mode",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_single_hop_passive_mode_modify,
+ .cli_show = bfd_cli_show_passive,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/bfd-mode",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_bfd_mode_modify,
+ .destroy = bfdd_bfd_sessions_bfd_mode_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/srv6-encap-data",
+ .cbs = {
+ .create = bfdd_bfd_sessions_segment_list_create,
+ .destroy = bfdd_bfd_sessions_segment_list_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/minimum-ttl",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_multi_hop_minimum_ttl_modify,
+ .cli_show = bfd_cli_show_minimum_ttl,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/multi-hop",
+ .cbs = {
+ .modify = bfdd_bfd_sessions_sbfd_multi_hop_modify,
+ .destroy = bfdd_bfd_sessions_sbfd_multi_hop_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/local-discriminator",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_discriminator_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/local-state",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_state_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/local-diagnostic",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_diagnostic_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/local-multiplier",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_local_multiplier_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/remote-discriminator",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_discriminator_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/remote-state",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_state_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/remote-diagnostic",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_diagnostic_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/remote-multiplier",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_remote_multiplier_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/negotiated-transmission-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_transmission_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/negotiated-receive-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_receive_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/detection-mode",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_detection_mode_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/last-down-time",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_last_down_time_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/last-up-time",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_last_up_time_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/session-down-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_session_down_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/session-up-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_session_up_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/control-packet-input-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_control_packet_input_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/control-packet-output-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_control_packet_output_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/negotiated-echo-transmission-interval",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_negotiated_echo_transmission_interval_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/echo-packet-input-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_echo_packet_input_count_get_elem,
+ }
+ },
+ {
+ .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/stats/echo-packet-output-count",
+ .cbs = {
+ .get_elem = bfdd_bfd_sessions_single_hop_stats_echo_packet_output_count_get_elem,
+ }
+ },
+ {
.xpath = NULL,
},
}
diff --git a/bfdd/bfdd_nb.h b/bfdd/bfdd_nb.h
index b5b00b57e4..6621973ae3 100644
--- a/bfdd/bfdd_nb.h
+++ b/bfdd/bfdd_nb.h
@@ -112,6 +112,26 @@ bfdd_bfd_sessions_single_hop_stats_echo_packet_output_count_get_elem(
struct nb_cb_get_elem_args *args);
int bfdd_bfd_sessions_multi_hop_create(struct nb_cb_create_args *args);
int bfdd_bfd_sessions_multi_hop_destroy(struct nb_cb_destroy_args *args);
+int bfdd_bfd_sessions_sbfd_echo_create(struct nb_cb_create_args *args);
+int bfdd_bfd_sessions_sbfd_echo_destroy(struct nb_cb_destroy_args *args);
+int bfdd_bfd_sessions_sbfd_echo_dest_addr_modify(struct nb_cb_modify_args *args);
+int bfdd_bfd_sessions_sbfd_echo_mode_modify(struct nb_cb_modify_args *args);
+int bfdd_bfd_sessions_sbfd_echo_dest_addr_destroy(struct nb_cb_destroy_args *args);
+int bfdd_bfd_sessions_sbfd_srv6_source_ipv6_modify(struct nb_cb_modify_args *args);
+int bfdd_bfd_sessions_sbfd_srv6_source_ipv6_destroy(struct nb_cb_destroy_args *args);
+int bfdd_bfd_sessions_sbfd_init_remote_discr_modify(struct nb_cb_modify_args *args);
+int bfdd_bfd_sessions_sbfd_multi_hop_modify(struct nb_cb_modify_args *args);
+int bfdd_bfd_sessions_sbfd_multi_hop_destroy(struct nb_cb_destroy_args *args);
+
+int bfdd_bfd_sessions_sbfd_init_create(struct nb_cb_create_args *args);
+int bfdd_bfd_sessions_sbfd_init_destroy(struct nb_cb_destroy_args *args);
+const void *bfdd_bfd_sessions_sbfd_echo_get_next(struct nb_cb_get_next_args *args);
+int bfdd_bfd_sessions_sbfd_echo_get_keys(struct nb_cb_get_keys_args *args);
+const void *bfdd_bfd_sessions_sbfd_echo_lookup_entry(struct nb_cb_lookup_entry_args *args);
+const void *bfdd_bfd_sessions_sbfd_init_get_next(struct nb_cb_get_next_args *args);
+int bfdd_bfd_sessions_sbfd_init_get_keys(struct nb_cb_get_keys_args *args);
+const void *bfdd_bfd_sessions_sbfd_init_lookup_entry(struct nb_cb_lookup_entry_args *args);
+
const void *
bfdd_bfd_sessions_multi_hop_get_next(struct nb_cb_get_next_args *args);
int bfdd_bfd_sessions_multi_hop_get_keys(struct nb_cb_get_keys_args *args);
@@ -185,6 +205,8 @@ void bfd_cli_show_single_hop_peer(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults);
void bfd_cli_show_multi_hop_peer(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults);
+void bfd_cli_show_sbfd_echo_peer(struct vty *vty, const struct lyd_node *dnode, bool show_defaults);
+void bfd_cli_show_sbfd_init_peer(struct vty *vty, const struct lyd_node *dnode, bool show_defaults);
void bfd_cli_show_peer_end(struct vty *vty, const struct lyd_node *dnode);
void bfd_cli_show_mult(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults);
@@ -210,4 +232,10 @@ void bfd_cli_show_passive(struct vty *vty, const struct lyd_node *dnode,
void bfd_cli_show_minimum_ttl(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults);
+int bfdd_bfd_sessions_bfd_mode_modify(struct nb_cb_modify_args *args);
+int bfdd_bfd_sessions_bfd_mode_destroy(struct nb_cb_destroy_args *args);
+
+int bfdd_bfd_sessions_segment_list_create(struct nb_cb_create_args *args);
+int bfdd_bfd_sessions_segment_list_destroy(struct nb_cb_destroy_args *args);
+
#endif /* _FRR_BFDD_NB_H_ */
diff --git a/bfdd/bfdd_nb_config.c b/bfdd/bfdd_nb_config.c
index 48fbe7139c..15da1e2440 100644
--- a/bfdd/bfdd_nb_config.c
+++ b/bfdd/bfdd_nb_config.c
@@ -13,14 +13,50 @@
#include "bfd.h"
#include "bfdd_nb.h"
+#include <ifaddrs.h>
/*
* Helpers.
*/
+/*
+ * Find the first usable address of family @family on interface @ifname in
+ * VRF @vrfname and write its presentation form into @ifip.
+ *
+ * @ifip must be at least INET6_ADDRSTRLEN bytes; it is left untouched when
+ * the VRF/interface does not exist or no suitable address is configured.
+ * IPv6 link-local addresses are skipped.
+ */
+static void get_ip_by_interface(const char *ifname, const char *vrfname, int family, char *ifip)
+{
+	char intfip[INET6_ADDRSTRLEN];
+	const struct interface *interface;
+	const struct connected *connected;
+	struct vrf *vrf;
+
+	vrf = vrf_lookup_by_name(vrfname ? vrfname : VRF_DEFAULT_NAME);
+	if (!vrf)
+		return;
+
+	interface = if_lookup_by_name_vrf(ifname, vrf);
+	if (interface == NULL)
+		return;
+
+	frr_each (if_connected_const, interface->connected, connected) {
+		if (!connected->address)
+			continue;
+
+		if (family != connected->address->family)
+			continue;
+
+		/* Link-local addresses are not usable as a BFD source. */
+		if (family == AF_INET6 && IN6_IS_ADDR_LINKLOCAL(&connected->address->u.prefix6))
+			continue;
+
+		inet_ntop(family,
+			  family == AF_INET ? (void *)(&connected->address->u.prefix4)
+					    : (void *)(&connected->address->u.prefix6),
+			  intfip, sizeof(intfip));
+		/* strlcpy() already reserves room for the terminating NUL, so
+		 * pass the full destination size: the previous "size - 1"
+		 * silently truncated the last character of maximum-length
+		 * IPv6 address strings.
+		 */
+		strlcpy(ifip, intfip, INET6_ADDRSTRLEN);
+		break;
+	}
+}
+
static void bfd_session_get_key(bool mhop, const struct lyd_node *dnode,
struct bfd_key *bk)
{
const char *ifname = NULL, *vrfname = NULL;
+ char ifip[INET6_ADDRSTRLEN] = { 0 };
struct sockaddr_any psa, lsa;
/* Required destination parameter. */
@@ -37,10 +73,36 @@ static void bfd_session_get_key(bool mhop, const struct lyd_node *dnode,
ifname = yang_dnode_get_string(dnode, "interface");
if (strcmp(ifname, "*") == 0)
ifname = NULL;
+
+		if (ifname != NULL && !yang_dnode_exists(dnode, "source-addr") &&
+		    psa.sa_sin.sin_family != 0) {
+			get_ip_by_interface(ifname, vrfname, psa.sa_sin.sin_family, ifip);
+			/* get_ip_by_interface() leaves ifip empty when the
+			 * interface has no usable address of this family;
+			 * don't hand an empty string to strtosa() then.
+			 */
+			if (ifip[0] != '\0')
+				strtosa(ifip, &lsa);
+		}
}
/* Generate the corresponding key. */
- gen_bfd_key(bk, &psa, &lsa, mhop, ifname, vrfname);
+ gen_bfd_key(bk, &psa, &lsa, mhop, ifname, vrfname, NULL);
+}
+
+/*
+ * Build the hash key for an SBFD (echo or initiator) session from its
+ * configuration node.  Unlike plain BFD, both source-addr and dest-addr are
+ * mandatory leafs here, and the optional bfd-name becomes part of the key.
+ * ifname is always NULL for SBFD sessions.
+ */
+static void sbfd_session_get_key(bool mhop, const struct lyd_node *dnode, struct bfd_key *bk)
+{
+	const char *ifname = NULL, *vrfname = NULL, *bfdname = NULL;
+	struct sockaddr_any psa, lsa;
+
+	/* Required source parameter. */
+	strtosa(yang_dnode_get_string(dnode, "source-addr"), &lsa);
+
+	strtosa(yang_dnode_get_string(dnode, "dest-addr"), &psa);
+
+	if (yang_dnode_exists(dnode, "bfd-name"))
+		bfdname = yang_dnode_get_string(dnode, "bfd-name");
+
+	if (yang_dnode_exists(dnode, "vrf"))
+		vrfname = yang_dnode_get_string(dnode, "vrf");
+
+	/* Generate the corresponding key. */
+	gen_bfd_key(bk, &psa, &lsa, mhop, ifname, vrfname, bfdname);
+}
struct session_iter {
@@ -63,7 +125,25 @@ static int session_iter_cb(const struct lyd_node *dnode, void *arg)
return YANG_ITER_CONTINUE;
}
-static int bfd_session_create(struct nb_cb_create_args *args, bool mhop)
+/*
+ * YANG iteration callback collecting the srv6-encap-data leaf-list into the
+ * session's fixed-size segment array.  Stops on the first non-IPv6 entry or
+ * when the array is full.
+ */
+static int segment_list_iter_cb(const struct lyd_node *dnode, void *arg)
+{
+	struct bfd_session *bs = arg;
+	uint8_t segnum = bs->segnum;
+	const char *addr;
+	struct sockaddr_any sa;
+
+	/* Guard against writing past the fixed-size seg_list array
+	 * (presumably sized SRV6_MAX_SEGS -- TODO confirm against bfd.h).
+	 */
+	if (segnum >= SRV6_MAX_SEGS)
+		return YANG_ITER_STOP;
+
+	addr = yang_dnode_get_string(dnode, NULL);
+
+	if (strtosa(addr, &sa) < 0 || sa.sa_sin6.sin6_family != AF_INET6)
+		return YANG_ITER_STOP;
+
+	memcpy(&bs->seg_list[segnum], &sa.sa_sin6.sin6_addr, sizeof(struct in6_addr));
+	bs->segnum = segnum + 1;
+
+	return YANG_ITER_CONTINUE;
+}
+
+static int bfd_session_create(struct nb_cb_create_args *args, bool mhop, uint32_t bfd_mode)
{
const struct lyd_node *sess_dnode;
struct session_iter iter;
@@ -73,10 +153,20 @@ static int bfd_session_create(struct nb_cb_create_args *args, bool mhop)
const char *vrfname;
struct bfd_key bk;
struct prefix p;
+ const char *bfd_name = NULL;
+ struct sockaddr_any out_sip6;
switch (args->event) {
case NB_EV_VALIDATE:
- yang_dnode_get_prefix(&p, args->dnode, "dest-addr");
+ if ((bfd_mode == BFD_MODE_TYPE_SBFD_ECHO) || (bfd_mode == BFD_MODE_TYPE_SBFD_INIT)) {
+ if (bfd_session_get_by_name(yang_dnode_get_string(args->dnode, "bfd-name"))) {
+ snprintf(args->errmsg, args->errmsg_len, "bfd name already exist.");
+ return NB_ERR_VALIDATION;
+ }
+ return NB_OK;
+ }
+
+ yang_dnode_get_prefix(&p, args->dnode, "./dest-addr");
if (mhop) {
/*
@@ -128,34 +218,123 @@ static int bfd_session_create(struct nb_cb_create_args *args, bool mhop)
break;
case NB_EV_PREPARE:
- bfd_session_get_key(mhop, args->dnode, &bk);
- bs = bfd_key_lookup(bk);
+ if (bfd_mode == BFD_MODE_TYPE_BFD) {
+ bfd_session_get_key(mhop, args->dnode, &bk);
+ bs = bfd_key_lookup(bk);
+
+ /* This session was already configured by another daemon. */
+ if (bs != NULL) {
+ /* Now it is configured also by CLI. */
+ SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
+ bs->refcount++;
+
+ args->resource->ptr = bs;
+ break;
+ }
+
+ bs = bfd_session_new(BFD_MODE_TYPE_BFD);
- /* This session was already configured by another daemon. */
- if (bs != NULL) {
- /* Now it is configured also by CLI. */
+ /* Fill the session key. */
+ bfd_session_get_key(mhop, args->dnode, &bs->key);
+ /* Set configuration flags. */
+ bs->refcount = 1;
SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
- bs->refcount++;
+ if (mhop)
+ SET_FLAG(bs->flags, BFD_SESS_FLAG_MH);
+ if (bs->key.family == AF_INET6)
+ SET_FLAG(bs->flags, BFD_SESS_FLAG_IPV6);
args->resource->ptr = bs;
break;
- }
+ } else if (bfd_mode == BFD_MODE_TYPE_SBFD_ECHO ||
+ bfd_mode == BFD_MODE_TYPE_SBFD_INIT) {
+ sbfd_session_get_key(mhop, args->dnode, &bk);
+ bs = bfd_key_lookup(bk);
+
+ /* This session was already configured by another daemon. */
+ if (bs != NULL) {
+ /* Now it is configured also by CLI. */
+ SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
+ bs->refcount++;
+
+ args->resource->ptr = bs;
+ break;
+ }
- bs = bfd_session_new();
+ if (bfd_mode == BFD_MODE_TYPE_SBFD_ECHO &&
+ !yang_dnode_exists(args->dnode, "srv6-encap-data")) {
+ //srv6-encap-data should not be null for sbfd echo
+ snprintf(args->errmsg, args->errmsg_len,
+ "srv6-encap-data should not be null");
+ return NB_ERR_RESOURCE;
+ }
- /* Fill the session key. */
- bfd_session_get_key(mhop, args->dnode, &bs->key);
+ if (bfd_mode == BFD_MODE_TYPE_SBFD_ECHO &&
+ !yang_dnode_exists(args->dnode, "srv6-source-ipv6")) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "source_ipv6 should not be null");
+ return NB_ERR_RESOURCE;
+ }
- /* Set configuration flags. */
- bs->refcount = 1;
- SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
- if (mhop)
- SET_FLAG(bs->flags, BFD_SESS_FLAG_MH);
- if (bs->key.family == AF_INET6)
- SET_FLAG(bs->flags, BFD_SESS_FLAG_IPV6);
+ if (bfd_mode == BFD_MODE_TYPE_SBFD_INIT) {
+ if (!yang_dnode_exists(args->dnode, "remote-discr")) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "remote-discr should not be null");
+ return NB_ERR_RESOURCE;
+ }
+ }
- args->resource->ptr = bs;
- break;
+ bfd_name = yang_dnode_get_string(args->dnode, "bfd-name");
+
+ bs = bfd_session_new(bfd_mode);
+ if (bs == NULL) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "session-new: allocation failed");
+ return NB_ERR_RESOURCE;
+ }
+ /* Fill the session key. */
+ sbfd_session_get_key(mhop, args->dnode, &bs->key);
+ strlcpy(bs->bfd_name, bfd_name, BFD_NAME_SIZE);
+
+ if (yang_dnode_exists(args->dnode, "srv6-encap-data")) {
+ yang_dnode_iterate(segment_list_iter_cb, bs, args->dnode,
+ "./srv6-encap-data");
+
+
+ strtosa(yang_dnode_get_string(args->dnode, "./srv6-source-ipv6"),
+ &out_sip6);
+ memcpy(&bs->out_sip6, &out_sip6.sa_sin6.sin6_addr,
+ sizeof(struct in6_addr));
+ }
+
+ if (bfd_mode == BFD_MODE_TYPE_SBFD_INIT) {
+ bs->discrs.remote_discr = yang_dnode_get_uint32(args->dnode,
+ "./remote-discr");
+ }
+
+ /* Set configuration flags. */
+ bs->refcount = 1;
+ SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
+ if (mhop)
+ SET_FLAG(bs->flags, BFD_SESS_FLAG_MH);
+
+ if (bs->key.family == AF_INET6)
+ SET_FLAG(bs->flags, BFD_SESS_FLAG_IPV6);
+
+ if (bfd_mode == BFD_MODE_TYPE_SBFD_ECHO) {
+ memcpy(&bs->key.peer, &bs->key.local, sizeof(struct in6_addr));
+ } else {
+ bs->xmt_TO = bs->timers.desired_min_tx;
+ bs->detect_TO = bs->detect_mult * bs->xmt_TO;
+ }
+
+ args->resource->ptr = bs;
+ break;
+
+ } else {
+ snprintf(args->errmsg, args->errmsg_len, "bfd mode must be bfd or sbfd.");
+ return NB_ERR_VALIDATION;
+ }
case NB_EV_APPLY:
bs = args->resource->ptr;
@@ -177,15 +356,19 @@ static int bfd_session_create(struct nb_cb_create_args *args, bool mhop)
return NB_OK;
}
-static int bfd_session_destroy(enum nb_event event,
- const struct lyd_node *dnode, bool mhop)
+static int bfd_session_destroy(enum nb_event event, const struct lyd_node *dnode, bool mhop,
+ uint32_t bfd_mode)
{
struct bfd_session *bs;
struct bfd_key bk;
switch (event) {
case NB_EV_VALIDATE:
- bfd_session_get_key(mhop, dnode, &bk);
+ if (bfd_mode == BFD_MODE_TYPE_BFD)
+ bfd_session_get_key(mhop, dnode, &bk);
+ else
+ sbfd_session_get_key(mhop, dnode, &bk);
+
if (bfd_key_lookup(bk) == NULL)
return NB_ERR_INCONSISTENCY;
break;
@@ -206,6 +389,12 @@ static int bfd_session_destroy(enum nb_event event,
if (bs->refcount > 0)
break;
+ if (bglobal.debug_peer_event)
+ zlog_info("session destroy: %s", bs_to_string(bs));
+
+ if (bfd_mode == BFD_MODE_TYPE_SBFD_ECHO || bfd_mode == BFD_MODE_TYPE_SBFD_INIT)
+ ptm_bfd_notify(bs, PTM_BFD_DOWN);
+
bfd_session_free(bs);
break;
@@ -510,12 +699,12 @@ int bfdd_bfd_profile_required_echo_receive_interval_modify(
*/
int bfdd_bfd_sessions_single_hop_create(struct nb_cb_create_args *args)
{
- return bfd_session_create(args, false);
+ return bfd_session_create(args, false, BFD_MODE_TYPE_BFD);
}
int bfdd_bfd_sessions_single_hop_destroy(struct nb_cb_destroy_args *args)
{
- return bfd_session_destroy(args->event, args->dnode, false);
+ return bfd_session_destroy(args->event, args->dnode, false, BFD_MODE_TYPE_BFD);
}
/*
@@ -715,6 +904,45 @@ int bfdd_bfd_sessions_single_hop_passive_mode_modify(
}
/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-init/bfd-mode
+ * /frr-bfdd:bfdd/bfd/sessions/sbfd-echo/bfd-mode
+ */
+int bfdd_bfd_sessions_bfd_mode_modify(struct nb_cb_modify_args *args)
+{
+	uint32_t bfd_mode = yang_dnode_get_uint32(args->dnode, NULL);
+	struct bfd_session *bs;
+	bool mode_known;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+		/* Only the three known session modes are accepted. */
+		mode_known = bfd_mode == BFD_MODE_TYPE_BFD ||
+			     bfd_mode == BFD_MODE_TYPE_SBFD_ECHO ||
+			     bfd_mode == BFD_MODE_TYPE_SBFD_INIT;
+		if (!mode_known) {
+			snprintf(args->errmsg, args->errmsg_len, "bfd mode is invalid.");
+			return NB_ERR_VALIDATION;
+		}
+		return NB_OK;
+
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		/* Nothing to allocate or roll back for this leaf. */
+		return NB_OK;
+
+	case NB_EV_APPLY:
+		break;
+	}
+
+	/* Record the mode on the running session and re-apply it. */
+	bs = nb_running_get_entry(args->dnode, NULL, true);
+	bs->bfd_mode = bfd_mode;
+	bfd_session_apply(bs);
+
+	return NB_OK;
+}
+
+/* Nothing to undo: the mode lives on the session, which has its own
+ * destroy callback.
+ */
+int bfdd_bfd_sessions_bfd_mode_destroy(struct nb_cb_destroy_args *args)
+{
+	return NB_OK;
+}
+
+/*
* XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/echo-mode
*/
int bfdd_bfd_sessions_single_hop_echo_mode_modify(
@@ -811,12 +1039,12 @@ int bfdd_bfd_sessions_single_hop_required_echo_receive_interval_modify(
*/
int bfdd_bfd_sessions_multi_hop_create(struct nb_cb_create_args *args)
{
- return bfd_session_create(args, true);
+ return bfd_session_create(args, true, BFD_MODE_TYPE_BFD);
}
int bfdd_bfd_sessions_multi_hop_destroy(struct nb_cb_destroy_args *args)
{
- return bfd_session_destroy(args->event, args->dnode, true);
+ return bfd_session_destroy(args->event, args->dnode, true, BFD_MODE_TYPE_BFD);
}
/*
@@ -845,3 +1073,106 @@ int bfdd_bfd_sessions_multi_hop_minimum_ttl_modify(
return NB_OK;
}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-echo
+ */
+/* SBFD echo sessions reuse the common create/destroy path; the multihop
+ * flag is derived from the presence of the multi-hop leaf.
+ */
+int bfdd_bfd_sessions_sbfd_echo_create(struct nb_cb_create_args *args)
+{
+	return bfd_session_create(args, yang_dnode_exists(args->dnode, "multi-hop"),
+				  BFD_MODE_TYPE_SBFD_ECHO);
+}
+
+int bfdd_bfd_sessions_sbfd_echo_destroy(struct nb_cb_destroy_args *args)
+{
+	return bfd_session_destroy(args->event, args->dnode,
+				   yang_dnode_exists(args->dnode, "multi-hop"),
+				   BFD_MODE_TYPE_SBFD_ECHO);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-echo/srv6-encap-data
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-init/srv6-encap-data
+ */
+/* The segment list leaf-list is consumed wholesale during session create
+ * (see segment_list_iter_cb); individual entries need no handling here.
+ */
+int bfdd_bfd_sessions_segment_list_create(struct nb_cb_create_args *args)
+{
+	return NB_OK;
+}
+
+int bfdd_bfd_sessions_segment_list_destroy(struct nb_cb_destroy_args *args)
+{
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-echo/dest-addr
+ */
+/* dest-addr is read directly in sbfd_session_get_key() at session
+ * create time; no per-leaf action is required.
+ */
+int bfdd_bfd_sessions_sbfd_echo_dest_addr_modify(struct nb_cb_modify_args *args)
+{
+	return NB_OK;
+}
+
+int bfdd_bfd_sessions_sbfd_echo_dest_addr_destroy(struct nb_cb_destroy_args *args)
+{
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-echo/echo-mode
+ */
+/* Echo mode is implied by the session type (BFD_MODE_TYPE_SBFD_ECHO);
+ * nothing to do for the leaf itself.
+ */
+int bfdd_bfd_sessions_sbfd_echo_mode_modify(struct nb_cb_modify_args *args)
+{
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-echo/srv6-source-ipv6
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-init/srv6-source-ipv6
+ */
+/* srv6-source-ipv6 is read during session create (copied into
+ * bs->out_sip6); no per-leaf action is required here.
+ */
+int bfdd_bfd_sessions_sbfd_srv6_source_ipv6_modify(struct nb_cb_modify_args *args)
+{
+	return NB_OK;
+}
+
+int bfdd_bfd_sessions_sbfd_srv6_source_ipv6_destroy(struct nb_cb_destroy_args *args)
+{
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-init
+ */
+/* SBFD initiator sessions reuse the common create/destroy path; the
+ * multihop flag is derived from the presence of the multi-hop leaf.
+ */
+int bfdd_bfd_sessions_sbfd_init_create(struct nb_cb_create_args *args)
+{
+	return bfd_session_create(args, yang_dnode_exists(args->dnode, "multi-hop"),
+				  BFD_MODE_TYPE_SBFD_INIT);
+}
+
+int bfdd_bfd_sessions_sbfd_init_destroy(struct nb_cb_destroy_args *args)
+{
+	return bfd_session_destroy(args->event, args->dnode,
+				   yang_dnode_exists(args->dnode, "multi-hop"),
+				   BFD_MODE_TYPE_SBFD_INIT);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-init/remote-discr
+ */
+/* remote-discr is read during session create (stored in
+ * bs->discrs.remote_discr); nothing to do per-leaf.
+ */
+int bfdd_bfd_sessions_sbfd_init_remote_discr_modify(struct nb_cb_modify_args *args)
+{
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-echo/multi-hop
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-init/multi-hop
+ */
+/* The multi-hop leaf is only consulted at session create/destroy time
+ * (yang_dnode_exists in the create/destroy callbacks); no per-leaf action.
+ */
+int bfdd_bfd_sessions_sbfd_multi_hop_modify(struct nb_cb_modify_args *args)
+{
+	return NB_OK;
+}
+
+int bfdd_bfd_sessions_sbfd_multi_hop_destroy(struct nb_cb_destroy_args *args)
+{
+	return NB_OK;
+}
diff --git a/bfdd/bfdd_nb_state.c b/bfdd/bfdd_nb_state.c
index 12acda8fd8..c528478231 100644
--- a/bfdd/bfdd_nb_state.c
+++ b/bfdd/bfdd_nb_state.c
@@ -20,7 +20,7 @@
const void *
bfdd_bfd_sessions_single_hop_get_next(struct nb_cb_get_next_args *args)
{
- return bfd_session_next(args->list_entry, false);
+ return bfd_session_next(args->list_entry, false, BFD_MODE_TYPE_BFD);
}
int bfdd_bfd_sessions_single_hop_get_keys(struct nb_cb_get_keys_args *args)
@@ -50,7 +50,7 @@ bfdd_bfd_sessions_single_hop_lookup_entry(struct nb_cb_lookup_entry_args *args)
strtosa(dest_addr, &psa);
memset(&lsa, 0, sizeof(lsa));
- gen_bfd_key(&bk, &psa, &lsa, false, ifname, vrf);
+ gen_bfd_key(&bk, &psa, &lsa, false, ifname, vrf, NULL);
return bfd_key_lookup(bk);
}
@@ -323,7 +323,7 @@ bfdd_bfd_sessions_single_hop_stats_echo_packet_output_count_get_elem(
const void *
bfdd_bfd_sessions_multi_hop_get_next(struct nb_cb_get_next_args *args)
{
- return bfd_session_next(args->list_entry, true);
+ return bfd_session_next(args->list_entry, true, BFD_MODE_TYPE_BFD);
}
int bfdd_bfd_sessions_multi_hop_get_keys(struct nb_cb_get_keys_args *args)
@@ -354,7 +354,87 @@ bfdd_bfd_sessions_multi_hop_lookup_entry(struct nb_cb_lookup_entry_args *args)
strtosa(dest_addr, &psa);
strtosa(source_addr, &lsa);
- gen_bfd_key(&bk, &psa, &lsa, true, NULL, vrf);
+ gen_bfd_key(&bk, &psa, &lsa, true, NULL, vrf, NULL);
+
+ return bfd_key_lookup(bk);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-echo
+ */
+/* Iterate only SBFD-echo sessions; they are keyed as multihop (mhop=true),
+ * matching the key built in the sbfd lookup_entry callbacks below.
+ */
+const void *bfdd_bfd_sessions_sbfd_echo_get_next(struct nb_cb_get_next_args *args)
+{
+	return bfd_session_next(args->list_entry, true, BFD_MODE_TYPE_SBFD_ECHO);
+}
+
+/* List keys for an sbfd-echo entry: source address, bfd name, VRF name.
+ * The echo session's peer equals its local address, so only the source is
+ * emitted.
+ */
+int bfdd_bfd_sessions_sbfd_echo_get_keys(struct nb_cb_get_keys_args *args)
+{
+	const struct bfd_session *bs = args->list_entry;
+	char srcbuf[INET6_ADDRSTRLEN];
+
+	inet_ntop(bs->key.family, &bs->key.local, srcbuf, sizeof(srcbuf));
+
+	args->keys->num = 3;
+	strlcpy(args->keys->key[0], srcbuf, sizeof(args->keys->key[0]));
+	strlcpy(args->keys->key[1], bs->key.bfdname, sizeof(args->keys->key[1]));
+	strlcpy(args->keys->key[2], bs->key.vrfname, sizeof(args->keys->key[2]));
+
+	return NB_OK;
+}
+
+/* Reverse of the get_keys callback: rebuild the session key from the list
+ * keys and look the session up.  The peer address is zeroed here;
+ * NOTE(review): strtosa()'s return value is not checked -- assumes the
+ * northbound layer already validated the address string.
+ */
+const void *bfdd_bfd_sessions_sbfd_echo_lookup_entry(struct nb_cb_lookup_entry_args *args)
+{
+	const char *source_addr = args->keys->key[0];
+	const char *bfdname = args->keys->key[1];
+	const char *vrf = args->keys->key[2];
+	struct sockaddr_any psa, lsa;
+	struct bfd_key bk;
+
+	strtosa(source_addr, &lsa);
+	memset(&psa, 0, sizeof(psa));
+	gen_bfd_key(&bk, &psa, &lsa, true, NULL, vrf, bfdname);
+
+	return bfd_key_lookup(bk);
+}
+
+/*
+ * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-init
+ */
+/* Iterate only SBFD initiator sessions; keyed as multihop (mhop=true). */
+const void *bfdd_bfd_sessions_sbfd_init_get_next(struct nb_cb_get_next_args *args)
+{
+	return bfd_session_next(args->list_entry, true, BFD_MODE_TYPE_SBFD_INIT);
+}
+
+/* List keys for an sbfd-init entry: source address, destination address,
+ * bfd name, VRF name.
+ */
+int bfdd_bfd_sessions_sbfd_init_get_keys(struct nb_cb_get_keys_args *args)
+{
+	const struct bfd_session *bs = args->list_entry;
+	char srcbuf[INET6_ADDRSTRLEN];
+	char dstbuf[INET6_ADDRSTRLEN];
+
+	inet_ntop(bs->key.family, &bs->key.local, srcbuf, sizeof(srcbuf));
+	inet_ntop(bs->key.family, &bs->key.peer, dstbuf, sizeof(dstbuf));
+
+	args->keys->num = 4;
+	strlcpy(args->keys->key[0], srcbuf, sizeof(args->keys->key[0]));
+	strlcpy(args->keys->key[1], dstbuf, sizeof(args->keys->key[1]));
+	strlcpy(args->keys->key[2], bs->key.bfdname, sizeof(args->keys->key[2]));
+	strlcpy(args->keys->key[3], bs->key.vrfname, sizeof(args->keys->key[3]));
+
+	return NB_OK;
+}
+
+/* Rebuild the session key from the four list keys and look the session up.
+ * NOTE(review): strtosa() return values are unchecked -- assumes the
+ * northbound layer validated both address strings.
+ */
+const void *bfdd_bfd_sessions_sbfd_init_lookup_entry(struct nb_cb_lookup_entry_args *args)
+{
+	const char *source_addr = args->keys->key[0];
+	const char *dest_addr = args->keys->key[1];
+	const char *bfdname = args->keys->key[2];
+	const char *vrf = args->keys->key[3];
+	struct sockaddr_any psa, lsa;
+	struct bfd_key bk;
+
+	strtosa(source_addr, &lsa);
+	strtosa(dest_addr, &psa);
+	gen_bfd_key(&bk, &psa, &lsa, true, NULL, vrf, bfdname);
 
 	return bfd_key_lookup(bk);
 }
diff --git a/bfdd/bfdd_vty.c b/bfdd/bfdd_vty.c
index 26554e1496..c281197849 100644
--- a/bfdd/bfdd_vty.c
+++ b/bfdd/bfdd_vty.c
@@ -58,7 +58,49 @@ _find_peer_or_error(struct vty *vty, int argc, struct cmd_token **argv,
const char *label, const char *peer_str,
const char *local_str, const char *ifname,
const char *vrfname);
+static void _display_bfd_by_bfdname_json_iter(struct hash_bucket *hb, void *arg);
+static void _display_bfd_by_bfdname_iter(struct hash_bucket *hb, void *arg);
+static void _display_bfd_by_bfdname(struct vty *vty, const char *vrfname, const char *bfdname,
+ bool use_json);
+static void _display_bfd_counters_by_bfdname_iter(struct hash_bucket *hb, void *arg);
+static void _display_bfd_counters_json_by_bfdname_iter(struct hash_bucket *hb, void *arg);
+static void _display_bfd_counters_by_bfdname(struct vty *vty, const char *vrfname,
+ const char *bfdname, bool use_json);
+static void _clear_bfd_counters_by_bfdname(const char *vrfname, const char *bfdname);
+static void _clear_peer_counter(struct bfd_session *bs);
+
+/* Human-readable name for a BFD session mode. */
+static const char *bfd_mode_type_to_string(enum bfd_mode_type mode)
+{
+	if (mode == BFD_MODE_TYPE_BFD)
+		return "bfd";
+	if (mode == BFD_MODE_TYPE_SBFD_ECHO)
+		return "sbfd-echo";
+	if (mode == BFD_MODE_TYPE_SBFD_INIT)
+		return "sbfd-init";
+	return "Unknown";
+}
+
+/*
+ * Render an SRv6 segment list as a space-separated string of IPv6
+ * addresses.  Returns a pointer to a static buffer: not reentrant, and the
+ * result is only valid until the next call.
+ */
+static char *sbfd_sidlist_to_string(struct in6_addr *sidlist, uint8_t segnum)
+{
+	static char buf[INET6_ADDRSTRLEN * SRV6_MAX_SEGS];
+	int pos = 0;
+	uint8_t i = 0;
+	char addr_buf[INET6_ADDRSTRLEN];
+
+	/* Clear the whole buffer: the previous hard-coded 256 was smaller
+	 * than the buffer for larger SRV6_MAX_SEGS values, leaving stale
+	 * bytes beyond offset 256.
+	 */
+	memset(buf, 0, sizeof(buf));
+
+	pos = snprintf(buf, sizeof(buf), "%s",
+		       inet_ntop(AF_INET6, &sidlist[0], addr_buf, sizeof(addr_buf)));
+
+	for (i = 1; i < segnum; i++)
+		pos += snprintf(buf + pos, sizeof(buf) - pos, " %s",
+				inet_ntop(AF_INET6, &sidlist[i], addr_buf, sizeof(addr_buf)));
+
+	return buf;
+}
/*
* Show commands helper functions
@@ -71,6 +113,12 @@ static void _display_peer_header(struct vty *vty, struct bfd_session *bs)
inet_ntop(bs->key.family, &bs->key.peer, addr_buf,
sizeof(addr_buf)));
+ if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO || bs->bfd_mode == BFD_MODE_TYPE_SBFD_INIT)
+ vty_out(vty, " bfd-mode %s", bfd_mode_type_to_string(bs->bfd_mode));
+
+ if (bs->bfd_name[0])
+ vty_out(vty, " bfd-name %s", bs->bfd_name);
+
if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH))
vty_out(vty, " multihop");
@@ -83,6 +131,20 @@ static void _display_peer_header(struct vty *vty, struct bfd_session *bs)
vty_out(vty, " vrf %s", bs->key.vrfname);
if (bs->key.ifname[0])
vty_out(vty, " interface %s", bs->key.ifname);
+
+ if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO || bs->bfd_mode == BFD_MODE_TYPE_SBFD_INIT) {
+ if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_INIT)
+ vty_out(vty, " remote-discr %u", bs->discrs.remote_discr);
+
+ if (bs->bfd_name[0] && bs->segnum) {
+ vty_out(vty, " srv6-source-ipv6 %s",
+ inet_ntop(AF_INET6, &bs->out_sip6, addr_buf, sizeof(addr_buf)));
+
+ vty_out(vty, " srv6-encap-data %s",
+ sbfd_sidlist_to_string(bs->seg_list, bs->segnum));
+ }
+ }
+
vty_out(vty, "\n");
}
@@ -135,10 +197,16 @@ static void _display_peer(struct vty *vty, struct bfd_session *bs)
vty_out(vty, "\t\tDiagnostics: %s\n", diag2str(bs->local_diag));
vty_out(vty, "\t\tRemote diagnostics: %s\n", diag2str(bs->remote_diag));
- vty_out(vty, "\t\tPeer Type: %s\n",
- CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) ? "configured" : "dynamic");
- _display_rtt(&min, &avg, &max, bs);
- vty_out(vty, "\t\tRTT min/avg/max: %u/%u/%u usec\n", min, avg, max);
+ if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_INIT) {
+ vty_out(vty, "\t\tPeer Type: sbfd initiator\n");
+ } else if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO) {
+ vty_out(vty, "\t\tPeer Type: echo\n");
+ } else {
+ vty_out(vty, "\t\tPeer Type: %s\n",
+ CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) ? "configured" : "dynamic");
+ _display_rtt(&min, &avg, &max, bs);
+ vty_out(vty, "\t\tRTT min/avg/max: %u/%u/%u usec\n", min, avg, max);
+ }
vty_out(vty, "\t\tLocal timers:\n");
vty_out(vty, "\t\t\tDetect-multiplier: %u\n",
@@ -152,24 +220,32 @@ static void _display_peer(struct vty *vty, struct bfd_session *bs)
bs->timers.required_min_echo_rx / 1000);
else
vty_out(vty, "\t\t\tEcho receive interval: disabled\n");
- if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO))
+ if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO) || bs->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO)
vty_out(vty, "\t\t\tEcho transmission interval: %ums\n",
bs->timers.desired_min_echo_tx / 1000);
else
vty_out(vty, "\t\t\tEcho transmission interval: disabled\n");
- vty_out(vty, "\t\tRemote timers:\n");
- vty_out(vty, "\t\t\tDetect-multiplier: %u\n",
- bs->remote_detect_mult);
- vty_out(vty, "\t\t\tReceive interval: %ums\n",
- bs->remote_timers.required_min_rx / 1000);
- vty_out(vty, "\t\t\tTransmission interval: %ums\n",
- bs->remote_timers.desired_min_tx / 1000);
- if (bs->remote_timers.required_min_echo != 0)
- vty_out(vty, "\t\t\tEcho receive interval: %ums\n",
- bs->remote_timers.required_min_echo / 1000);
- else
- vty_out(vty, "\t\t\tEcho receive interval: disabled\n");
+
+ if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_INIT || bs->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO) {
+ vty_out(vty, "\t\tRemote timers:\n");
+ vty_out(vty, "\t\t\tDetect-multiplier: -\n");
+ vty_out(vty, "\t\t\tReceive interval: -\n");
+ vty_out(vty, "\t\t\tTransmission interval: -\n");
+ vty_out(vty, "\t\t\tEcho receive interval: -\n");
+ } else {
+ vty_out(vty, "\t\tRemote timers:\n");
+ vty_out(vty, "\t\t\tDetect-multiplier: %u\n", bs->remote_detect_mult);
+ vty_out(vty, "\t\t\tReceive interval: %ums\n",
+ bs->remote_timers.required_min_rx / 1000);
+ vty_out(vty, "\t\t\tTransmission interval: %ums\n",
+ bs->remote_timers.desired_min_tx / 1000);
+ if (bs->remote_timers.required_min_echo != 0)
+ vty_out(vty, "\t\t\tEcho receive interval: %ums\n",
+ bs->remote_timers.required_min_echo / 1000);
+ else
+ vty_out(vty, "\t\t\tEcho receive interval: disabled\n");
+ }
vty_out(vty, "\n");
}
@@ -216,6 +292,9 @@ static struct json_object *__display_peer_json(struct bfd_session *bs)
if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH))
json_object_int_add(jo, "minimum-ttl", bs->mh_ttl);
+ if (bs->bfd_mode != BFD_MODE_TYPE_BFD)
+ json_object_string_add(jo, "bfd-name", bs->bfd_name);
+
switch (bs->ses_state) {
case PTM_BFD_ADM_DOWN:
json_object_string_add(jo, "status", "shutdown");
@@ -253,7 +332,13 @@ static struct json_object *__display_peer_json(struct bfd_session *bs)
bs->timers.desired_min_tx / 1000);
json_object_int_add(jo, "echo-receive-interval",
bs->timers.required_min_echo_rx / 1000);
- if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO))
+ if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_INIT || bs->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO) {
+ json_object_int_add(jo, "configured-echo-transmit-interval",
+ bs->timers.desired_min_echo_tx / 1000);
+ json_object_int_add(jo, "current-echo-transmit-interval", bs->echo_xmt_TO / 1000);
+ json_object_int_add(jo, "current-detect-echo-receive-interval",
+ bs->echo_detect_TO / 1000);
+ } else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO))
json_object_int_add(jo, "echo-transmit-interval",
bs->timers.desired_min_echo_tx / 1000);
else
@@ -289,6 +374,7 @@ struct bfd_vrf_tuple {
const char *vrfname;
struct vty *vty;
struct json_object *jo;
+ const char *bfdname;
};
static void _display_peer_iter(struct hash_bucket *hb, void *arg)
@@ -309,6 +395,30 @@ static void _display_peer_iter(struct hash_bucket *hb, void *arg)
_display_peer(vty, bs);
}
+/* Hash-walk callback: print one session if it matches the requested VRF
+ * and BFD name filters.
+ */
+static void _display_bfd_by_bfdname_iter(struct hash_bucket *hb, void *arg)
+{
+	struct bfd_vrf_tuple *bvt = (struct bfd_vrf_tuple *)arg;
+	struct bfd_session *bs = hb->data;
+
+	if (!bvt)
+		return;
+
+	/* Filter by VRF name when one was requested. */
+	if (bvt->vrfname &&
+	    (!bs->key.vrfname[0] || !strmatch(bs->key.vrfname, bvt->vrfname)))
+		return;
+
+	/* Filter by BFD name: either the key name or the session name may
+	 * match.
+	 */
+	if (bvt->bfdname &&
+	    (!bs->key.bfdname[0] || !strmatch(bs->key.bfdname, bvt->bfdname)) &&
+	    (!bs->bfd_name[0] || !strmatch(bs->bfd_name, bvt->bfdname)))
+		return;
+
+	_display_peer(bvt->vty, bs);
+}
+
static void _display_peer_json_iter(struct hash_bucket *hb, void *arg)
{
struct bfd_vrf_tuple *bvt = (struct bfd_vrf_tuple *)arg;
@@ -334,6 +444,58 @@ static void _display_peer_json_iter(struct hash_bucket *hb, void *arg)
json_object_array_add(jo, jon);
}
+/* Hash-walk callback: append one session's JSON object to the output
+ * array if it matches the requested VRF and BFD name filters.
+ */
+static void _display_bfd_by_bfdname_json_iter(struct hash_bucket *hb, void *arg)
+{
+	struct bfd_vrf_tuple *bvt = (struct bfd_vrf_tuple *)arg;
+	struct bfd_session *bs = hb->data;
+	struct json_object *jon;
+
+	if (!bvt)
+		return;
+
+	if (bvt->vrfname &&
+	    (!bs->key.vrfname[0] || !strmatch(bs->key.vrfname, bvt->vrfname)))
+		return;
+
+	/* Either the key name or the session name may match the filter. */
+	if (bvt->bfdname &&
+	    (!bs->key.bfdname[0] || !strmatch(bs->key.bfdname, bvt->bfdname)) &&
+	    (!bs->bfd_name[0] || !strmatch(bs->bfd_name, bvt->bfdname)))
+		return;
+
+	jon = __display_peer_json(bs);
+	if (jon == NULL) {
+		zlog_warn("%s: not enough memory", __func__);
+		return;
+	}
+
+	json_object_array_add(bvt->jo, jon);
+}
+/* Show all sessions matching @vrfname/@bfdname, as text or JSON. */
+static void _display_bfd_by_bfdname(struct vty *vty, const char *vrfname, const char *bfdname,
+				    bool use_json)
+{
+	struct bfd_vrf_tuple bvt = { 0 };
+	struct json_object *jo;
+
+	bvt.vrfname = vrfname;
+	bvt.bfdname = bfdname;
+
+	if (use_json) {
+		jo = json_object_new_array();
+		bvt.jo = jo;
+		bfd_id_iterate(_display_bfd_by_bfdname_json_iter, &bvt);
+		vty_json(vty, jo);
+		return;
+	}
+
+	bvt.vty = vty;
+	vty_out(vty, "BFD Peers:\n");
+	bfd_id_iterate(_display_bfd_by_bfdname_iter, &bvt);
+}
+
static void _display_all_peers(struct vty *vty, char *vrfname, bool use_json)
{
struct json_object *jo;
@@ -378,6 +540,7 @@ static void _display_peer_counter(struct vty *vty, struct bfd_session *bs)
bs->stats.session_down);
vty_out(vty, "\t\tZebra notifications: %" PRIu64 "\n",
bs->stats.znotification);
+ vty_out(vty, "\t\tTx fail packet: %" PRIu64 "\n", bs->stats.tx_fail_pkt);
vty_out(vty, "\n");
}
@@ -398,6 +561,9 @@ static struct json_object *__display_peer_counters_json(struct bfd_session *bs)
json_object_int_add(jo, "session-down", bs->stats.session_down);
json_object_int_add(jo, "zebra-notifications", bs->stats.znotification);
+ if (bs->bfd_mode == BFD_MODE_TYPE_SBFD_INIT || bs->bfd_mode == BFD_MODE_TYPE_SBFD_ECHO)
+ json_object_int_add(jo, "tx-fail-packet", bs->stats.tx_fail_pkt);
+
return jo;
}
@@ -472,6 +638,109 @@ static void _display_peers_counter(struct vty *vty, char *vrfname, bool use_json
vty_json(vty, jo);
}
+static void _display_bfd_counters_by_bfdname_iter(struct hash_bucket *hb, void *arg)
+{
+ struct bfd_vrf_tuple *bvt = arg;
+ struct vty *vty;
+ struct bfd_session *bs = hb->data;
+
+ if (!bvt)
+ return;
+ vty = bvt->vty;
+
+ if (bvt->vrfname) {
+ if (!bs->key.vrfname[0] || !strmatch(bs->key.vrfname, bvt->vrfname))
+ return;
+ }
+
+ if (bvt->bfdname) {
+ if (!bs->key.bfdname[0] || !strmatch(bs->key.bfdname, bvt->bfdname))
+ return;
+ }
+
+ _display_peer_counter(vty, bs);
+}
+static void _display_bfd_counters_json_by_bfdname_iter(struct hash_bucket *hb, void *arg)
+{
+ struct json_object *jo, *jon = NULL;
+ struct bfd_session *bs = hb->data;
+ struct bfd_vrf_tuple *bvt = arg;
+
+ if (!bvt)
+ return;
+ jo = bvt->jo;
+
+ if (bvt->vrfname) {
+ if (!bs->key.vrfname[0] || !strmatch(bs->key.vrfname, bvt->vrfname))
+ return;
+ }
+
+ if (bvt->bfdname) {
+ if (!bs->key.bfdname[0] || !strmatch(bs->key.bfdname, bvt->bfdname))
+ return;
+ }
+
+ jon = __display_peer_counters_json(bs);
+ if (jon == NULL) {
+ zlog_warn("%s: not enough memory", __func__);
+ return;
+ }
+
+ json_object_array_add(jo, jon);
+}
+static void _display_bfd_counters_by_bfdname(struct vty *vty, const char *vrfname,
+ const char *bfdname, bool use_json)
+{
+ struct json_object *jo;
+ struct bfd_vrf_tuple bvt = { 0 };
+
+ bvt.vrfname = vrfname;
+ bvt.bfdname = bfdname;
+
+ if (!use_json) {
+ bvt.vty = vty;
+ vty_out(vty, "BFD Peers:\n");
+ bfd_id_iterate(_display_bfd_counters_by_bfdname_iter, &bvt);
+ return;
+ }
+
+ jo = json_object_new_array();
+ bvt.jo = jo;
+ bfd_id_iterate(_display_bfd_counters_json_by_bfdname_iter, &bvt);
+
+ vty_json(vty, jo);
+}
+static void _clear_bfd_counters_by_bfdname_iter(struct hash_bucket *hb, void *arg)
+{
+ struct bfd_vrf_tuple *bvt = arg;
+ struct bfd_session *bs = hb->data;
+
+ if (!bvt)
+ return;
+
+ if (bvt->vrfname) {
+ if (!bs->key.vrfname[0] || !strmatch(bs->key.vrfname, bvt->vrfname))
+ return;
+ }
+
+ if (bvt->bfdname) {
+ if (!bs->key.bfdname[0] || !strmatch(bs->key.bfdname, bvt->bfdname))
+ return;
+ }
+
+ _clear_peer_counter(bs);
+}
+
+static void _clear_bfd_counters_by_bfdname(const char *vrfname, const char *bfdname)
+{
+ struct bfd_vrf_tuple bvt = { 0 };
+
+ bvt.vrfname = vrfname;
+ bvt.bfdname = bfdname;
+
+ bfd_id_iterate(_clear_bfd_counters_by_bfdname_iter, &bvt);
+}
+
static void _clear_peer_counter(struct bfd_session *bs)
{
/* Clear only pkt stats, intention is not to loose system
@@ -486,12 +755,21 @@ static void _display_peer_brief(struct vty *vty, struct bfd_session *bs)
{
char addr_buf[INET6_ADDRSTRLEN];
- vty_out(vty, "%-10u", bs->discrs.my_discr);
- inet_ntop(bs->key.family, &bs->key.local, addr_buf, sizeof(addr_buf));
- vty_out(vty, " %-40s", addr_buf);
- inet_ntop(bs->key.family, &bs->key.peer, addr_buf, sizeof(addr_buf));
- vty_out(vty, " %-40s", addr_buf);
- vty_out(vty, "%-15s\n", state_list[bs->ses_state].str);
+ if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) {
+ vty_out(vty, "%-10u", bs->discrs.my_discr);
+ inet_ntop(bs->key.family, &bs->key.local, addr_buf, sizeof(addr_buf));
+ vty_out(vty, " %-40s", addr_buf);
+ inet_ntop(bs->key.family, &bs->key.peer, addr_buf, sizeof(addr_buf));
+ vty_out(vty, " %-40s", addr_buf);
+ vty_out(vty, "%-15s\n", state_list[bs->ses_state].str);
+ } else {
+ vty_out(vty, "%-10u", bs->discrs.my_discr);
+ vty_out(vty, " %-40s", satostr(&bs->local_address));
+ inet_ntop(bs->key.family, &bs->key.peer, addr_buf, sizeof(addr_buf));
+ vty_out(vty, " %-40s", addr_buf);
+
+ vty_out(vty, "%-15s\n", state_list[bs->ses_state].str);
+ }
}
static void _display_peer_brief_iter(struct hash_bucket *hb, void *arg)
@@ -558,6 +836,8 @@ _find_peer_or_error(struct vty *vty, int argc, struct cmd_token **argv,
struct bfd_peer_cfg bpc;
struct sockaddr_any psa, lsa, *lsap;
char errormsg[128];
+ struct vrf *vrf = NULL;
+ char *tmpName = NULL;
if (peer_str) {
strtosa(peer_str, &psa);
@@ -570,9 +850,18 @@ _find_peer_or_error(struct vty *vty, int argc, struct cmd_token **argv,
idx = 0;
mhop = argv_find(argv, argc, "multihop", &idx);
- if (bfd_configure_peer(&bpc, mhop, &psa, lsap, ifname, vrfname,
- errormsg, sizeof(errormsg))
- != 0) {
+ tmpName = (char *)vrfname;
+ if (vrfname) {
+ vrf = vrf_lookup_by_name(vrfname);
+ if (vrf == NULL) {
+ vty_out(vty, "%% Vrf is not exist: %s\n", vrfname);
+ return NULL;
+ }
+ tmpName = vrf->name;
+ }
+
+ if (bfd_configure_peer(&bpc, mhop, &psa, lsap, ifname, tmpName, errormsg,
+ sizeof(errormsg)) != 0) {
vty_out(vty, "%% Invalid peer configuration: %s\n",
errormsg);
return NULL;
@@ -631,6 +920,49 @@ void _display_rtt(uint32_t *min, uint32_t *avg, uint32_t *max,
/*
* Show commands.
*/
/*
 * "show bfd [vrf NAME] bfd-name BFDNAME [json]"
 *
 * Display the BFD session(s) matching the given session name,
 * optionally restricted to one VRF, as text or JSON.
 */
DEFPY(bfd_show_by_bfdname, bfd_show_by_bfdname_cmd,
      "show bfd [vrf NAME$vrf_name] bfd-name BFDNAME$bfdname [json]",
      SHOW_STR
      "Bidirection Forwarding Detection\n"
      VRF_CMD_HELP_STR
      "Specify bfd session name\n"
      "bfd session name\n"
      JSON_STR)
{
	_display_bfd_by_bfdname(vty, vrf_name, bfdname, use_json(argc, argv));

	return CMD_SUCCESS;
}
+
/*
 * "show bfd [vrf NAME] bfd-name BFDNAME counters [json]"
 *
 * Display counters for the BFD session(s) matching the given session
 * name, optionally restricted to one VRF, as text or JSON.
 */
DEFPY(bfd_show_counters_by_bfdname, bfd_show_counters_by_bfdname_cmd,
      "show bfd [vrf NAME$vrf_name] bfd-name BFDNAME$bfdname counters [json]",
      SHOW_STR
      "Bidirection Forwarding Detection\n"
      VRF_CMD_HELP_STR
      "Specify bfd session name\n"
      "bfd session name\n"
      "Show BFD peer counters information\n"
      JSON_STR)
{
	_display_bfd_counters_by_bfdname(vty, vrf_name, bfdname, use_json(argc, argv));

	return CMD_SUCCESS;
}
+
/*
 * "clear bfd [vrf NAME] bfd-name BFDNAME counters"
 *
 * Reset counters for the BFD session(s) matching the given session
 * name, optionally restricted to one VRF.
 */
DEFPY(bfd_clear_counters_by_bfdname, bfd_clear_counters_by_bfdname_cmd,
      "clear bfd [vrf NAME$vrfname] bfd-name BFDNAME$bfdname counters",
      CLEAR_STR
      "Bidirection Forwarding Detection\n"
      VRF_CMD_HELP_STR
      "Specify bfd session name\n"
      "bfd session name\n"
      "clear BFD peer counters information\n")
{
	_clear_bfd_counters_by_bfdname(vrfname, bfdname);

	return CMD_SUCCESS;
}
+
DEFPY(bfd_show_peers, bfd_show_peers_cmd, "show bfd [vrf NAME] peers [json]",
SHOW_STR
"Bidirection Forwarding Detection\n"
@@ -971,6 +1303,22 @@ struct cmd_node bfd_peer_node = {
.prompt = "%s(config-bfd-peer)# ",
};
+static void _sbfd_reflector_write_config(struct hash_bucket *hb, void *arg)
+{
+ struct sbfd_reflector *sr = hb->data;
+ char buf[INET6_ADDRSTRLEN];
+ struct vty *vty;
+
+ vty = (struct vty *)arg;
+ inet_ntop(AF_INET6, &sr->local, buf, sizeof(buf));
+ vty_out(vty, " sbfd reflector source-address %s discriminator %u\n", buf, sr->discr);
+}
+
/* Emit every configured SBFD reflector as a config line on `vty`. */
static void sbfd_reflector_write_config(struct vty *vty)
{
	sbfd_discr_iterate(_sbfd_reflector_write_config, vty);
}
+
static int bfdd_write_config(struct vty *vty)
{
struct lyd_node *dnode;
@@ -1002,6 +1350,9 @@ static int bfdd_write_config(struct vty *vty)
written = 1;
}
+ /*sbfd config*/
+ sbfd_reflector_write_config(vty);
+
return written;
}
@@ -1011,6 +1362,9 @@ void bfdd_vty_init(void)
install_element(ENABLE_NODE, &bfd_show_peer_counters_cmd);
install_element(ENABLE_NODE, &bfd_clear_peer_counters_cmd);
install_element(ENABLE_NODE, &bfd_show_peers_cmd);
+ install_element(ENABLE_NODE, &bfd_show_by_bfdname_cmd);
+ install_element(ENABLE_NODE, &bfd_show_counters_by_bfdname_cmd);
+ install_element(ENABLE_NODE, &bfd_clear_counters_by_bfdname_cmd);
install_element(ENABLE_NODE, &bfd_show_peer_cmd);
install_element(ENABLE_NODE, &bfd_show_peers_brief_cmd);
install_element(ENABLE_NODE, &show_bfd_distributed_cmd);
diff --git a/bfdd/event.c b/bfdd/event.c
index e797e71f05..e5f43b6cc6 100644
--- a/bfdd/event.c
+++ b/bfdd/event.c
@@ -58,6 +58,73 @@ void bfd_echo_recvtimer_update(struct bfd_session *bs)
&bs->echo_recvtimer_ev);
}
+void sbfd_init_recvtimer_update(struct bfd_session *bs)
+{
+ struct timeval tv = { .tv_sec = 0, .tv_usec = bs->detect_TO };
+
+ /* Remove previous schedule if any. */
+ sbfd_init_recvtimer_delete(bs);
+
+ /* Don't add event if peer is deactivated. */
+ if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1)
+ return;
+
+ tv_normalize(&tv);
+#ifdef BFD_EVENT_DEBUG
+ log_debug("%s: sec = %ld, usec = %ld", __func__, tv.tv_sec, tv.tv_usec);
+#endif /* BFD_EVENT_DEBUG */
+
+ event_add_timer_tv(master, sbfd_init_recvtimer_cb, bs, &tv, &bs->recvtimer_ev);
+}
+
+void sbfd_echo_recvtimer_update(struct bfd_session *bs)
+{
+ struct timeval tv = { .tv_sec = 0, .tv_usec = bs->echo_detect_TO };
+
+ /* Remove previous schedule if any. */
+ sbfd_echo_recvtimer_delete(bs);
+
+ /* Don't add event if peer is deactivated. */
+ if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1)
+ return;
+
+ tv_normalize(&tv);
+
+ event_add_timer_tv(master, sbfd_echo_recvtimer_cb, bs, &tv, &bs->echo_recvtimer_ev);
+}
+
+void sbfd_init_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
+{
+ struct timeval tv = { .tv_sec = 0, .tv_usec = jitter };
+
+ /* Remove previous schedule if any. */
+ sbfd_init_xmttimer_delete(bs);
+
+ /* Don't add event if peer is deactivated. */
+ if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1)
+ return;
+
+ tv_normalize(&tv);
+
+ event_add_timer_tv(master, sbfd_init_xmt_cb, bs, &tv, &bs->xmttimer_ev);
+}
+
+void sbfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
+{
+ struct timeval tv = { .tv_sec = 0, .tv_usec = jitter };
+
+ /* Remove previous schedule if any. */
+ sbfd_echo_xmttimer_delete(bs);
+
+ /* Don't add event if peer is deactivated. */
+ if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN) || bs->sock == -1)
+ return;
+
+ tv_normalize(&tv);
+
+ event_add_timer_tv(master, sbfd_echo_xmt_cb, bs, &tv, &bs->echo_xmttimer_ev);
+}
+
void bfd_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
{
struct timeval tv = {.tv_sec = 0, .tv_usec = jitter};
@@ -112,3 +179,23 @@ void bfd_echo_xmttimer_delete(struct bfd_session *bs)
{
EVENT_OFF(bs->echo_xmttimer_ev);
}
+
/* Cancel the SBFD initiator receive-timeout event, if scheduled. */
void sbfd_init_recvtimer_delete(struct bfd_session *bs)
{
	EVENT_OFF(bs->recvtimer_ev);
}
+
/* Cancel the SBFD echo receive-timeout event, if scheduled. */
void sbfd_echo_recvtimer_delete(struct bfd_session *bs)
{
	EVENT_OFF(bs->echo_recvtimer_ev);
}
+
/* Cancel the SBFD initiator transmission event, if scheduled. */
void sbfd_init_xmttimer_delete(struct bfd_session *bs)
{
	EVENT_OFF(bs->xmttimer_ev);
}
+
/* Cancel the SBFD echo transmission event, if scheduled. */
void sbfd_echo_xmttimer_delete(struct bfd_session *bs)
{
	EVENT_OFF(bs->echo_xmttimer_ev);
}
diff --git a/bfdd/ptm_adapter.c b/bfdd/ptm_adapter.c
index f6ebefb7be..74f2d39878 100644
--- a/bfdd/ptm_adapter.c
+++ b/bfdd/ptm_adapter.c
@@ -70,10 +70,10 @@ static void bfdd_client_deregister(struct stream *msg);
PRINTFRR(2, 3)
static void debug_printbpc(const struct bfd_peer_cfg *bpc, const char *fmt, ...)
{
- char timers[3][128] = {};
+ char timers[3][160] = {};
char minttl_str[32] = {};
- char addr[3][128] = {};
- char profile[128] = {};
+ char addr[3][160] = {};
+ char profile[160] = {};
char cbit_str[32];
char msgbuf[512];
va_list vl;
@@ -134,7 +134,9 @@ static void _ptm_bfd_session_del(struct bfd_session *bs, uint8_t diag)
/* Change state and notify peer. */
bs->ses_state = PTM_BFD_DOWN;
bs->local_diag = diag;
- ptm_bfd_snd(bs, 0);
+
+ if (bs->bfd_mode == BFD_MODE_TYPE_BFD)
+ ptm_bfd_snd(bs, 0);
/* Session reached refcount == 0, lets delete it. */
if (bs->refcount == 0) {
@@ -200,6 +202,8 @@ int ptm_bfd_notify(struct bfd_session *bs, uint8_t notify_state)
* - 16 bytes: ipv6
* - c: prefix length
* - c: cbit
+ * - c: bfd name len
+ * - Xbytes: bfd name
*
* Commands: ZEBRA_BFD_DEST_REPLAY
*
@@ -238,9 +242,12 @@ int ptm_bfd_notify(struct bfd_session *bs, uint8_t notify_state)
case PTM_BFD_DOWN:
case PTM_BFD_INIT:
- stream_putl(msg, BFD_STATUS_DOWN);
- break;
+ if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN))
+ stream_putl(msg, BFD_STATUS_ADMIN_DOWN);
+ else
+ stream_putl(msg, BFD_STATUS_DOWN);
+ break;
default:
stream_putl(msg, BFD_STATUS_UNKNOWN);
break;
@@ -251,6 +258,9 @@ int ptm_bfd_notify(struct bfd_session *bs, uint8_t notify_state)
stream_putc(msg, bs->remote_cbit);
+ stream_putc(msg, strlen(bs->bfd_name));
+ stream_put(msg, bs->bfd_name, strlen(bs->bfd_name));
+
/* Write packet size. */
stream_putw_at(msg, 0, stream_get_endp(msg));
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 0e3ed9f0d1..1a30cb37f4 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -2163,6 +2163,9 @@ bgp_establish(struct peer_connection *connection)
peer->established++;
bgp_fsm_change_status(connection, Established);
+ if (peer->last_reset == PEER_DOWN_WAITING_OPEN)
+ peer->last_reset = 0;
+
/* bgp log-neighbor-changes of neighbor Up */
if (CHECK_FLAG(peer->bgp->flags, BGP_FLAG_LOG_NEIGHBOR_CHANGES)) {
struct vrf *vrf = vrf_lookup_by_id(peer->bgp->vrf_id);
diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c
index 9ca20c949a..9dbef791b0 100644
--- a/bgpd/bgp_main.c
+++ b/bgpd/bgp_main.c
@@ -327,7 +327,7 @@ static int bgp_vrf_disable(struct vrf *vrf)
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("VRF disable %s id %d", vrf->name, vrf->vrf_id);
- bgp = bgp_lookup_by_name(vrf->name);
+ bgp = bgp_lookup_by_name_filter(vrf->name, false);
if (bgp) {
vpn_leak_zebra_vrf_label_withdraw(bgp, AFI_IP);
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 164e2300c0..2ef7ec97e3 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -1278,6 +1278,25 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
}
LIST_FOREACH (path, &(bnc->paths), nh_thread) {
+ /*
+ * Currently when a peer goes down, bgp immediately
+ * sees this via the interface events( if it is directly
+ * connected). And in this case it takes and puts on
+ * a special peer queue all path info's associated with
+ * but these items are not yet processed typically when
+ * the nexthop is being handled here. Thus we end
+ * up in a situation where the process Queue for BGP
+ * is being asked to look at the same path info multiple
+ * times. Let's just cut to the chase here and if
+ * the bnc has a peer associated with it and the path info
+ * being looked at uses that peer and the peer is no
+ * longer established we know the path_info is being
+ * handled elsewhere and we do not need to process
+ * it here at all since the pathinfo is going away
+ */
+ if (peer && path->peer == peer && !peer_established(peer->connection))
+ continue;
+
if (path->type == ZEBRA_ROUTE_BGP &&
(path->sub_type == BGP_ROUTE_NORMAL ||
path->sub_type == BGP_ROUTE_STATIC ||
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index 0780b4f72d..f8726ffff9 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -1225,6 +1225,9 @@ void bgp_capability_send(struct peer *peer, afi_t afi, safi_t safi,
const char *hostname = cmd_hostname_get();
const char *domainname = cmd_domainname_get();
+ if (!peer)
+ return;
+
if (!peer_established(peer->connection))
return;
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index f2e61e1e7f..e932738cd4 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -3885,6 +3885,12 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
BGP_PATH_ATTR_CHANGED);
UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG);
UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG);
+ } else {
+ /*
+ * Ensure that on uninstall that the INSTALL_PENDING
+ * is no longer set
+ */
+ UNSET_FLAG(dest->flags, BGP_NODE_FIB_INSTALL_PENDING);
}
/* call bmp hook for loc-rib route update / withdraw after flags were
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 04b9ce5ea6..6290e1e5b1 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -122,6 +122,12 @@ FRR_CFG_DEFAULT_BOOL(BGP_ENFORCE_FIRST_AS,
{ .val_bool = false, .match_version = "< 9.1", },
{ .val_bool = true },
);
+FRR_CFG_DEFAULT_BOOL(BGP_RR_ALLOW_OUTBOUND_POLICY,
+ { .val_bool = false },
+);
+FRR_CFG_DEFAULT_BOOL(BGP_COMPARE_AIGP,
+ { .val_bool = false },
+);
DEFINE_HOOK(bgp_inst_config_write,
(struct bgp *bgp, struct vty *vty),
@@ -622,6 +628,10 @@ int bgp_get_vty(struct bgp **bgp, as_t *as, const char *name,
BGP_FLAG_DYNAMIC_CAPABILITY);
if (DFLT_BGP_ENFORCE_FIRST_AS)
SET_FLAG((*bgp)->flags, BGP_FLAG_ENFORCE_FIRST_AS);
+ if (DFLT_BGP_RR_ALLOW_OUTBOUND_POLICY)
+ SET_FLAG((*bgp)->flags, BGP_FLAG_RR_ALLOW_OUTBOUND_POLICY);
+ if (DFLT_BGP_COMPARE_AIGP)
+ SET_FLAG((*bgp)->flags, BGP_FLAG_COMPARE_AIGP);
ret = BGP_SUCCESS;
}
@@ -17575,12 +17585,6 @@ DEFUN (bgp_redistribute_ipv4_ospf,
if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0)
protocol = ZEBRA_ROUTE_OSPF;
else {
- if (bgp->vrf_id != VRF_DEFAULT) {
- vty_out(vty,
- "%% Only default BGP instance can use '%s'\n",
- argv[idx_ospf_table]->arg);
- return CMD_WARNING_CONFIG_FAILED;
- }
if (strncmp(argv[idx_ospf_table]->arg, "table-direct",
strlen("table-direct")) == 0) {
protocol = ZEBRA_ROUTE_TABLE_DIRECT;
@@ -17634,12 +17638,6 @@ DEFUN (bgp_redistribute_ipv4_ospf_rmap,
if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0)
protocol = ZEBRA_ROUTE_OSPF;
else {
- if (bgp->vrf_id != VRF_DEFAULT) {
- vty_out(vty,
- "%% Only default BGP instance can use '%s'\n",
- argv[idx_ospf_table]->arg);
- return CMD_WARNING_CONFIG_FAILED;
- }
if (strncmp(argv[idx_ospf_table]->arg, "table-direct",
strlen("table-direct")) == 0) {
protocol = ZEBRA_ROUTE_TABLE_DIRECT;
@@ -17697,12 +17695,6 @@ DEFUN (bgp_redistribute_ipv4_ospf_metric,
if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0)
protocol = ZEBRA_ROUTE_OSPF;
else {
- if (bgp->vrf_id != VRF_DEFAULT) {
- vty_out(vty,
- "%% Only default BGP instance can use '%s'\n",
- argv[idx_ospf_table]->arg);
- return CMD_WARNING_CONFIG_FAILED;
- }
if (strncmp(argv[idx_ospf_table]->arg, "table-direct",
strlen("table-direct")) == 0) {
protocol = ZEBRA_ROUTE_TABLE_DIRECT;
@@ -17767,12 +17759,6 @@ DEFUN (bgp_redistribute_ipv4_ospf_rmap_metric,
if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0)
protocol = ZEBRA_ROUTE_OSPF;
else {
- if (bgp->vrf_id != VRF_DEFAULT) {
- vty_out(vty,
- "%% Only default BGP instance can use '%s'\n",
- argv[idx_ospf_table]->arg);
- return CMD_WARNING_CONFIG_FAILED;
- }
if (strncmp(argv[idx_ospf_table]->arg, "table-direct",
strlen("table-direct")) == 0) {
protocol = ZEBRA_ROUTE_TABLE_DIRECT;
@@ -17842,13 +17828,7 @@ DEFUN (bgp_redistribute_ipv4_ospf_metric_rmap,
if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0)
protocol = ZEBRA_ROUTE_OSPF;
else {
- if (bgp->vrf_id != VRF_DEFAULT) {
- vty_out(vty,
- "%% Only default BGP instance can use '%s'\n",
- argv[idx_ospf_table]->arg);
- return CMD_WARNING_CONFIG_FAILED;
- } else if (strncmp(argv[idx_ospf_table]->arg, "table-direct",
- strlen("table-direct")) == 0) {
+ if (strncmp(argv[idx_ospf_table]->arg, "table-direct", strlen("table-direct")) == 0) {
protocol = ZEBRA_ROUTE_TABLE_DIRECT;
if (instance == RT_TABLE_MAIN ||
instance == RT_TABLE_LOCAL) {
@@ -17911,12 +17891,6 @@ DEFUN (no_bgp_redistribute_ipv4_ospf,
if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0)
protocol = ZEBRA_ROUTE_OSPF;
else {
- if (bgp->vrf_id != VRF_DEFAULT) {
- vty_out(vty,
- "%% Only default BGP instance can use '%s'\n",
- argv[idx_ospf_table]->arg);
- return CMD_WARNING_CONFIG_FAILED;
- }
if (strncmp(argv[idx_ospf_table]->arg, "table-direct",
strlen("table-direct")) == 0) {
protocol = ZEBRA_ROUTE_TABLE_DIRECT;
@@ -19816,14 +19790,19 @@ int bgp_config_write(struct vty *vty)
}
}
- if (CHECK_FLAG(bgp->flags, BGP_FLAG_RR_ALLOW_OUTBOUND_POLICY)) {
- vty_out(vty,
- " bgp route-reflector allow-outbound-policy\n");
- }
+ if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_RR_ALLOW_OUTBOUND_POLICY) !=
+ SAVE_BGP_RR_ALLOW_OUTBOUND_POLICY)
+ vty_out(vty, " %sbgp route-reflector allow-outbound-policy\n",
+ CHECK_FLAG(bgp->flags, BGP_FLAG_RR_ALLOW_OUTBOUND_POLICY) ? ""
+ : "no ");
+
if (CHECK_FLAG(bgp->flags, BGP_FLAG_COMPARE_ROUTER_ID))
vty_out(vty, " bgp bestpath compare-routerid\n");
- if (CHECK_FLAG(bgp->flags, BGP_FLAG_COMPARE_AIGP))
- vty_out(vty, " bgp bestpath aigp\n");
+
+ if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_COMPARE_AIGP) != SAVE_BGP_COMPARE_AIGP)
+ vty_out(vty, " %sbgp bestpath aigp\n",
+ CHECK_FLAG(bgp->flags, BGP_FLAG_COMPARE_AIGP) ? "" : "no ");
+
if (CHECK_FLAG(bgp->flags, BGP_FLAG_MED_CONFED)
|| CHECK_FLAG(bgp->flags, BGP_FLAG_MED_MISSING_AS_WORST)) {
vty_out(vty, " bgp bestpath med");
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 8e8616c155..1669aabc60 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -1674,11 +1674,23 @@ void bgp_zebra_announce_table(struct bgp *bgp, afi_t afi, safi_t safi)
for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest))
for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next)
if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED) &&
- (pi->type == ZEBRA_ROUTE_BGP
- && (pi->sub_type == BGP_ROUTE_NORMAL
- || pi->sub_type == BGP_ROUTE_IMPORTED)))
- bgp_zebra_route_install(dest, pi, bgp, true,
- NULL, false);
+ (pi->type == ZEBRA_ROUTE_BGP && (pi->sub_type == BGP_ROUTE_NORMAL ||
+ pi->sub_type == BGP_ROUTE_IMPORTED))) {
+ bool is_add = true;
+
+ if (bgp->table_map[afi][safi].name) {
+ struct attr local_attr = *pi->attr;
+ struct bgp_path_info local_info = *pi;
+
+ local_info.attr = &local_attr;
+
+ is_add = bgp_table_map_apply(bgp->table_map[afi][safi].map,
+ bgp_dest_get_prefix(dest),
+ &local_info);
+ }
+
+ bgp_zebra_route_install(dest, pi, bgp, is_add, NULL, false);
+ }
}
/* Announce routes of any bgp subtype of a table to zebra */
@@ -2042,11 +2054,34 @@ int bgp_redistribute_set(struct bgp *bgp, afi_t afi, int type,
/* Return if already redistribute flag is set. */
if (instance) {
- if (redist_check_instance(&zclient->mi_redist[afi][type],
- instance))
- return CMD_WARNING;
+ if (type == ZEBRA_ROUTE_TABLE_DIRECT) {
+ /*
+ * When redistribution type is `table-direct` the
+ * instance means `table identification`.
+ *
+ * `table_id` support 32bit integers, however since
+ * `instance` is being overloaded to `table_id` it
+ * will only be possible to use the first 65535
+ * entries.
+ *
+ * Also the ZAPI must also support `int`
+ * (see `zebra_redistribute_add`).
+ */
+ struct redist_table_direct table = {
+ .table_id = instance,
+ .vrf_id = bgp->vrf_id,
+ };
+ if (redist_lookup_table_direct(&zclient->mi_redist[afi][type], &table) !=
+ NULL)
+ return CMD_WARNING;
+
+ redist_add_table_direct(&zclient->mi_redist[afi][type], &table);
+ } else {
+ if (redist_check_instance(&zclient->mi_redist[afi][type], instance))
+ return CMD_WARNING;
- redist_add_instance(&zclient->mi_redist[afi][type], instance);
+ redist_add_instance(&zclient->mi_redist[afi][type], instance);
+ }
} else {
if (vrf_bitmap_check(&zclient->redist[afi][type], bgp->vrf_id))
return CMD_WARNING;
@@ -2174,10 +2209,22 @@ int bgp_redistribute_unreg(struct bgp *bgp, afi_t afi, int type,
/* Return if zebra connection is disabled. */
if (instance) {
- if (!redist_check_instance(&zclient->mi_redist[afi][type],
- instance))
- return CMD_WARNING;
- redist_del_instance(&zclient->mi_redist[afi][type], instance);
+ if (type == ZEBRA_ROUTE_TABLE_DIRECT) {
+ struct redist_table_direct table = {
+ .table_id = instance,
+ .vrf_id = bgp->vrf_id,
+ };
+ if (redist_lookup_table_direct(&zclient->mi_redist[afi][type], &table) ==
+ NULL)
+ return CMD_WARNING;
+
+ redist_del_table_direct(&zclient->mi_redist[afi][type], &table);
+ } else {
+ if (!redist_check_instance(&zclient->mi_redist[afi][type], instance))
+ return CMD_WARNING;
+
+ redist_del_instance(&zclient->mi_redist[afi][type], instance);
+ }
} else {
if (!vrf_bitmap_check(&zclient->redist[afi][type], bgp->vrf_id))
return CMD_WARNING;
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 6de403b30c..efb2c00fa5 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -2026,8 +2026,11 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
if (bgp->autoshutdown)
peer_flag_set(peer, PEER_FLAG_SHUTDOWN);
/* Set up peer's events and timers. */
- else if (!active && peer_active(peer->connection))
+ else if (!active && peer_active(peer->connection)) {
+ if (peer->last_reset == PEER_DOWN_NOAFI_ACTIVATED)
+ peer->last_reset = 0;
bgp_timer_set(peer->connection);
+ }
bgp_peer_gr_flags_update(peer);
BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(bgp, bgp->peer);
@@ -3985,6 +3988,7 @@ int bgp_delete(struct bgp *bgp)
uint32_t a_ann_cnt = 0, a_l2_cnt = 0, a_l3_cnt = 0;
struct bgp *bgp_to_proc = NULL;
struct bgp *bgp_to_proc_next = NULL;
+ struct bgp *bgp_default = bgp_get_default();
assert(bgp);
@@ -4038,13 +4042,26 @@ int bgp_delete(struct bgp *bgp)
bgp_soft_reconfig_table_task_cancel(bgp, NULL, NULL);
/* make sure we withdraw any exported routes */
- vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP, bgp_get_default(),
- bgp);
- vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP6, bgp_get_default(),
- bgp);
+ vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP, bgp_default, bgp);
+ vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP6, bgp_default, bgp);
bgp_vpn_leak_unimport(bgp);
+ /*
+ * Release SRv6 SIDs, like it's done in `vpn_leak_postchange()`
+ * and bgp_sid_vpn_export_cmd/af_sid_vpn_export_cmd commands.
+ */
+ bgp->tovpn_sid_index = 0;
+ UNSET_FLAG(bgp->vrf_flags, BGP_VRF_TOVPN_SID_AUTO);
+ delete_vrf_tovpn_sid_per_vrf(bgp_default, bgp);
+ for (afi = AFI_IP; afi < AFI_MAX; afi++) {
+ bgp->vpn_policy[afi].tovpn_sid_index = 0;
+ UNSET_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_SID_AUTO);
+ delete_vrf_tovpn_sid_per_af(bgp_default, bgp, afi);
+
+ vpn_leak_zebra_vrf_sid_withdraw(bgp, afi);
+ }
+
bgp_vpn_release_label(bgp, AFI_IP, true);
bgp_vpn_release_label(bgp, AFI_IP6, true);
diff --git a/doc/developer/sbfd.rst b/doc/developer/sbfd.rst
new file mode 100644
index 0000000000..7bbd2428dd
--- /dev/null
+++ b/doc/developer/sbfd.rst
@@ -0,0 +1,140 @@
+.. _sbfd:
+
+****
+SBFD
+****
+
+:abbr:`SBFD (Seamless Bidirectional Forwarding Detection)` is:
+
+ Seamless Bidirectional Forwarding Detection, a simplified mechanism for using BFD with a large
+ proportion of negotiation aspects eliminated, thus providing benefits
+ such as quick provisioning, as well as improved control and
+ flexibility for network nodes initiating path monitoring.
+
+ -- :rfc:`7880`
+
+It is described and extended by the following RFCs:
+
+* :rfc:`7880`
+* :rfc:`7881`
+
+.. _sbfd-sate-machine:
+
+SBFD state machine
+==================
+
+SBFD takes the same data packet format as BFD, but with a much simpler state machine.
+According to RFC7880, SBFD has a stateless SBFDReflector and a stateful SBFDInitiator with the state machine as below:
+
+::
+
+ +--+
+ ADMIN DOWN, | |
+ TIMER | V
+ +------+ UP +------+
+ | |-------------------->| |----+
+ | DOWN | | UP | | UP
+ | |<--------------------| |<---+
+ +------+ ADMIN DOWN, +------+
+ TIMER
+
+ Figure 1: SBFDInitiator Finite State Machine
+
+.. _sbfd-extention:
+
+SBFD extension - SRv6 encapsulation
+===================================
+
+SBFDInitiator periodically sends packets to monitor the connection to SBFDReflector. We set up an SBFD connection between the source and the destination node of a path,
+with the source node serving as Initiator and the destination node as Reflector. The communicated SBFD packets should also follow every exact hop in the path,
+from the source to the destination, which could be achieved by segment routing. This requirement extends the node verification to the path verification.
+
+.. _sbfd-implement:
+
+Implementation
+===============
+
+Some considerations when implementing sbfd.
+
+
+
+.. _sbfd-implement-coexist:
+
+SBFD Co-exist with BFD
+--------------------------
+
+Both SBFD and Classical BFD have their own unique discriminators; SBFD can co-exist with BFD since they share the same discriminator pool in bfdd.
+Also in bfdd SBFD and BFD share most of the code logic; SBFD packets and BFD packets are demultiplexed by their different discriminators.
+
+
+.. _sbfd-implement-bfdname:
+
+SBFD name
+---------
+
+We introduced a bfd-name for every sbfd session. A unique bfd-name can be used to identify an sbfd session quickly. This is quite useful in our SRv6 deployment for the path protection case.
+A bfd-name is always associated with a TE path, for example if we use the sbfd session to protect the path A-B-D, we would assign the name 'path-a-b-d' or 'a-b-d' to the session.
+
+Meanwhile, bfdd notifies pathd of the sbfd status; for this we add the bfd-name field to the PTM bfd notify message ZEBRA_BFD_DEST_REPLAY:
+
+::
+ * Message format:
+ * - header: command, vrf
+ * - l: interface index
+ * - c: family
+ * - AF_INET:
+ * - 4 bytes: ipv4
+ * - AF_INET6:
+ * - 16 bytes: ipv6
+ * - c: prefix length
+ * - l: bfd status
+ * - c: family
+ * - AF_INET:
+ * - 4 bytes: ipv4
+ * - AF_INET6:
+ * - 16 bytes: ipv6
+ * - c: prefix length
+ * - c: cbit
+ * - c: bfd name len <---- new field
+ * - Xbytes: bfd name <---- new field
+ *
+ * Commands: ZEBRA_BFD_DEST_REPLAY
+ *
+ * q(64), l(32), w(16), c(8)
+
+
+
+.. _sbfd-implement-port:
+
+SBFD UDP port
+-------------
+
+According to RFC7881, the SBFD Control packet dst port should be 7784, and the src port can be any port but NOT 7784. In our implementation, the UDP ports in the packet are set as:
+
+
+::
+ UDP(sport=4784, dport=7784)/BFD() or UDP(sport=3784, dport=7784)/BFD()
+
+If "multihop" is specified for the sbfd initiator, we choose 4784 as the source port, so the reflected packet will take 4784 as the dst port; this is the local BFD_MULTI_HOP_PORT, so the reflected packet can be handled by the existing bfd_recv_cb function.
+If "multihop" is not specified for the sbfd initiator, we choose 3784 as the source port; this is the local BFD_DEFDESTPORT, so the reflected packet can be handled by the existing bfd_recv_cb function.
+
+
+For echo SBFD with SRv6 encapsulation case, we re-use the BFD Echo port, the UDP ports in packet are set as:
+
+::
+ UDP(sport=3785, dport=3785)/BFD()
+
+
+We choose 3785 as the source port, so the echoed-back packet will take 3785 as the dst port; this is the local BFD_DEF_ECHO_PORT, so the packet can be handled by the existing bfd_recv_cb function.
+
+
+.. _sbfd-not-implemented:
+
+Todo list for SBFD
+------------------
+
+ Currently some features are not yet implemented for SBFD; they will be added in the future:
+ 1) SBFD in IPv4 only packet
+ 2) The ADMIN DOWN logic
+ 3) SBFD echo function in an initiator session
+ 4) SBFD over MPLS
diff --git a/doc/developer/subdir.am b/doc/developer/subdir.am
index bdf93a05c1..9f6986cd33 100644
--- a/doc/developer/subdir.am
+++ b/doc/developer/subdir.am
@@ -83,6 +83,7 @@ dev_RSTFILES = \
doc/developer/northbound/transactional-cli.rst \
doc/developer/northbound/yang-module-translator.rst \
doc/developer/northbound/yang-tools.rst \
+ doc/developer/sbfd.rst \
# end
EXTRA_DIST += \
diff --git a/doc/user/sbfd.rst b/doc/user/sbfd.rst
new file mode 100644
index 0000000000..390d82a6c0
--- /dev/null
+++ b/doc/user/sbfd.rst
@@ -0,0 +1,304 @@
+.. _sbfd:
+
+****
+SBFD
+****
+
+:abbr:`SBFD (Seamless Bidirectional Forwarding Detection)` is:
+
+ Seamless Bidirectional Forwarding Detection, a simplified mechanism for using BFD with a large
+ proportion of negotiation aspects eliminated, thus providing benefits
+ such as quick provisioning, as well as improved control and
+ flexibility for network nodes initiating path monitoring.
+
+ -- :rfc:`7880`
+
+It is described and extended by the following RFCs:
+
+* :rfc:`7880`
+* :rfc:`7881`
+
+.. _sbfd-sate-machine:
+
+SBFD state machine
+==================
+
+SBFD takes the same data packet format as BFD, but with a much simpler state machine.
+According to RFC7880, SBFD has a stateless SBFDReflector and a stateful SBFDInitiator with the state machine as below:
+
+::
+
+ +--+
+ ADMIN DOWN, | |
+ TIMER | V
+ +------+ UP +------+
+ | |-------------------->| |----+
+ | DOWN | | UP | | UP
+ | |<--------------------| |<---+
+ +------+ ADMIN DOWN, +------+
+ TIMER
+
+ Figure 1: SBFDInitiator Finite State Machine
+
+* If SBFDInitiator doesn't receive the response packet in time, session is DOWN.
+* If SBFDInitiator receives the response packet in time: if the response state is ADMINDOWN, the session goes DOWN; if the response state is UP, the session goes UP.
+
+.. note::
+
+ SBFDReflector is stateless; it just transmits a packet in response to a received S-BFD packet having a valid S-BFD Discriminator in the Your Discriminator field.
+
+
+.. _sbfd-extention:
+
+SBFD extension - SRv6 encapsulation
+===================================
+
+SBFDInitiator periodically sends packets to monitor the connection to SBFDReflector. We set up an SBFD connection between the source and the destination node of a path,
+with the source node serving as Initiator and the destination node as Reflector. The communicated SBFD packets should also follow every exact hop in the path,
+from the source to the destination, which could be achieved by segment routing. This requirement extends the node verification to the path verification.
+In the following example, we set up a sbfd session to monitor the path A-B-D (all nodes in the topo are SRv6 ready, which can decap and forward SRv6 packets).
+
+::
+
+ +------------C-----------+
+ / \
+ A---------------B---------------D
+ ^ ^ ^
+ | | |
+ End: 100::A End: 100::B End: 100::D
+ Loopback: 200::A Loopback: 200::D
+ BFD Discrim: 123 BFD Discrim: 456
+
+
+A is the SBFDInitiator, and D is the SBFDReflector; A will transmit the SBFD packet to B in the format:
+
+::
+ IPv6(src="200::A", dst="100::B", nh=43)/IPv6ExtHdrSegmentRouting(addresses=["100::D"], nh=41, segleft=1)/IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
+
+
+Upon receiving the packet, B will take the Srv6 End action since the dst ip 100::B is the End address; B will then shift the dst address according to the Srv6 spec, then transmit the SBFD packet to D in the format:
+
+::
+ IPv6(src="200::A", dst="100::D", nh=41)/IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
+
+
+After D receives the packet, it will decap the outer IPv6 header since the dst ip 100::D is the End address; the decapped packet is:
+
+::
+ IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
+
+
+This packet will be routed to the kernel stack of D since its dst is 200::D. Then the SBFDReflector service on D will get the packet and reflect it. The response packet will be:
+
+::
+ IPv6(src="200::D", dst="200::A")/UDP(sport=7784)/BFD(my_dis=456, your_disc=123, state=UP)
+
+
+This packet will be routed in the topo according to the dst ip 200::A, it will go back to A by D-B-A or D-C-A in this case.
+
+
+
+ In this example, Command used to configure the SBFDInitiator on A is:
+
+.. clicmd:: peer 200::D bfd-mode sbfd-init bfd-name a-b-d multihop local-address 200::A remote-discr 456 srv6-source-ipv6 200::A srv6-encap-data 100::B 100::D
+
+
+ Command used to configure the SBFDReflector on D is:
+
+.. clicmd:: sbfd reflector source-address 200::D discriminator 456
+
+
+.. _sbfd-echo:
+
+Echo SBFD with SRv6 encapsulation
+=================================
+
+The SBFD Initiator-Reflector mode requires configuration on both the source and destination nodes. It cannot work if the remote node does not support the SBFD feature, especially on some third-party devices.
+The Echo SBFD can solve this kind of deployment issue since it only requires configuration on the source node. This is also known as One-Arm BFD Echo or unaffiliated BFD Echo.
+For example, we use Echo SBFD session to protect Srv6 path: A-B-D
+
+::
+
+ +------------C-----------+
+ / \
+ A---------------B---------------D
+ ^ ^ ^
+ | | |
+ End: 100::A End: 100::B End: 100::D
+ Loopback: 200::A Loopback: 200::D
+ BFD Discrim: 123
+
+
+A is also the SBFDInitiator, and B, C, D are Srv6-ready nodes; A will transmit the SBFD packet to B in the format:
+
+::
+ IPv6(src="200::A", dst="100::B", nh=43)/IPv6ExtHdrSegmentRouting(addresses=["100::D"], nh=41, segleft=1)/IPv6(src="200::A", dst="200::A")/UDP(dport=3785)/BFD(my_dis=123, your_disc=123, state=UP)
+
+
+Upon receiving the packet, B will take the Srv6 End action since the dst ip 100::B is the End address; B will then shift the dst address according to the Srv6 spec, then transmit the SBFD packet to D in the format:
+
+::
+ IPv6(src="200::A", dst="100::D", nh=41)/IPv6(src="200::A", dst="200::A")/UDP(dport=3785)/BFD(my_dis=123, your_disc=123, state=UP)
+
+
+After D receives the packet, it will decap the outer IPv6 header since the dst ip 100::D is the End address; the decapped packet is:
+
+::
+ IPv6(src="200::A", dst="200::A")/UDP(dport=3785)/BFD(my_dis=123, your_disc=123, state=UP)
+
+
+This packet will be routed in the topo according to the dst ip 200::A, it will go back to A by D-B-A or D-C-A in this case.
+
+
+
+ In this example, Command used to configure the SBFDInitiator on A is:
+
+.. clicmd:: peer 200::A bfd-mode sbfd-echo bfd-name a-b-d local-address 200::A srv6-source-ipv6 200::A srv6-encap-data 100::B 100::D
+
+
+ no configuration needed on D.
+
+
+.. _sbfd-normal:
+
+normal SBFD with no SRv6 encapsulation
+======================================
+
+We can also configure an SBFD Initiator-Reflector session based on a simple IPv6/IPv4 packet; no Srv6 is involved in this case.
+
+::
+
+ +------------C-----------+
+ / \
+ A---------------B---------------D
+ ^ ^ ^
+ | | |
+ Loopback: 200::A Loopback: 200::D
+ BFD Discrim: 123 BFD Discrim: 456
+
+
+
+A is the SBFDInitiator, and D is the SBFDReflector; A will transmit the SBFD packet to B or C in the format:
+
+::
+ IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
+
+
+Upon receiving the packet, B/C will route the packet to D according to the dst ip 200::D.
+
+After D receives the packet, it will be sent to the kernel stack of D since its dst is 200::D. Then the SBFDReflector service on D will get the packet and reflect it. The response packet will be:
+
+::
+ IPv6(src="200::D", dst="200::A")/UDP(sport=7784)/BFD(my_dis=456, your_disc=123, state=UP)
+
+
+This packet will be routed in the topo according to the dst ip 200::A, it will go back to A by D-B-A or D-C-A in this case.
+
+
+ In this example, Command used to configure the SBFDInitiator on A is:
+
+.. clicmd:: peer 200::D bfd-mode sbfd-init bfd-name a-d local-address 200::A remote-discr 456
+
+
+ Command used to configure the SBFDReflector on D is:
+
+.. clicmd:: sbfd reflector source-address 200::D discriminator 456
+
+
+.. note::
+
+ Currently some features are not yet implemented:
+ 1) SBFD in IPv4 only packet
+ 2) The ADMIN DOWN logic
+ 3) SBFD echo function in an initiator session
+ 4) SBFD over MPLS
+
+
+.. _sbfd-show:
+
+show command
+============
+
+The existing bfd show command is also applicable to SBFD sessions, for example:
+This command will show all the BFD and SBFD sessions in bfdd:
+
+.. clicmd:: show bfd peers
+
+
+::
+ BFD Peers:
+ peer 200::D bfd-mode sbfd-init bfd-name a-d multihop local-address 200::A vrf default remote-discr 456
+ ID: 1421669725
+ Remote ID: 456
+ Active mode
+ Minimum TTL: 254
+ Status: up
+ Uptime: 5 hour(s), 48 minute(s), 39 second(s)
+ Diagnostics: ok
+ Remote diagnostics: ok
+ Peer Type: sbfd initiator
+ Local timers:
+ Detect-multiplier: 3
+ Receive interval: 300ms
+ Transmission interval: 1000ms
+ Echo receive interval: 50ms
+ Echo transmission interval: disabled
+ Remote timers:
+ Detect-multiplier: -
+ Receive interval: -
+ Transmission interval: -
+ Echo receive interval: -
+
+This command will show all the BFD and SBFD session packet counters:
+
+.. clicmd:: show bfd peers counters
+
+::
+ BFD Peers:
+ peer 200::A bfd-mode sbfd-echo bfd-name a-b-d local-address 200::A vrf default srv6-source-ipv6 200::A srv6-encap-data 100::B 100::D
+ Control packet input: 0 packets
+ Control packet output: 0 packets
+ Echo packet input: 23807 packets
+ Echo packet output: 23807 packets
+ Session up events: 1
+ Session down events: 0
+ Zebra notifications: 1
+ Tx fail packet: 0
+
+ peer 200::D bfd-mode sbfd-init bfd-name a-d local-address 200::A vrf default remote-discr 456
+ Control packet input: 25289 packets
+ Control packet output: 51812 packets
+ Echo packet input: 0 packets
+ Echo packet output: 0 packets
+ Session up events: 5
+ Session down events: 4
+ Zebra notifications: 9
+ Tx fail packet: 0
+
+
+We also implemented a new show command to display BFD sessions with a bfd-name; the bfd-name is the key to search for the session.
+
+.. clicmd:: show bfd bfd-name a-b-d
+
+::
+ BFD Peers:
+ peer 200::A bfd-mode sbfd-echo bfd-name a-b-d local-address 200::A vrf default srv6-source-ipv6 200::A srv6-encap-data 100::B 100::D
+ ID: 123
+ Remote ID: 123
+ Active mode
+ Status: up
+ Uptime: 5 hour(s), 39 minute(s), 34 second(s)
+ Diagnostics: ok
+ Remote diagnostics: ok
+ Peer Type: echo
+ Local timers:
+ Detect-multiplier: 3
+ Receive interval: 300ms
+ Transmission interval: 300ms
+ Echo receive interval: 300ms
+ Echo transmission interval: 1000ms
+ Remote timers:
+ Detect-multiplier: -
+ Receive interval: -
+ Transmission interval: -
+ Echo receive interval: -
diff --git a/doc/user/subdir.am b/doc/user/subdir.am
index 395ce305fe..e4e12788e7 100644
--- a/doc/user/subdir.am
+++ b/doc/user/subdir.am
@@ -55,6 +55,7 @@ user_RSTFILES = \
doc/user/watchfrr.rst \
doc/user/wecmp_linkbw.rst \
doc/user/mgmtd.rst \
+ doc/user/sbfd.rst \
# end
EXTRA_DIST += \
diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c
index 18b13547a5..8c97dcda2f 100644
--- a/isisd/isis_tlvs.c
+++ b/isisd/isis_tlvs.c
@@ -1053,9 +1053,8 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts,
adj->algorithm));
json_object_int_add(srv6_endx_sid_json,
"weight", adj->weight);
- json_object_string_add(srv6_endx_sid_json,
- "behavior",
- seg6local_action2str(
+ json_object_string_add(srv6_endx_sid_json, "behavior",
+ srv6_endpoint_behavior_codepoint2str(
adj->behavior));
json_object_boolean_add(
srv6_endx_sid_json, "flagB",
@@ -1081,22 +1080,17 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts,
for (adj = (struct isis_srv6_endx_sid_subtlv *)
exts->srv6_endx_sid.head;
adj; adj = adj->next) {
- sbuf_push(
- buf, indent,
- "SRv6 End.X SID: %pI6, Algorithm: %s, Weight: %hhu, Endpoint Behavior: %s, Flags: B:%c, S:%c, P:%c\n",
- &adj->sid,
- sr_algorithm_string(adj->algorithm),
- adj->weight,
- seg6local_action2str(adj->behavior),
- adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_BFLG
- ? '1'
- : '0',
- adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_SFLG
- ? '1'
- : '0',
- adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_PFLG
- ? '1'
- : '0');
+ sbuf_push(buf, indent,
+ "SRv6 End.X SID: %pI6, Algorithm: %s, Weight: %hhu, Endpoint Behavior: %s, Flags: B:%c, S:%c, P:%c\n",
+ &adj->sid, sr_algorithm_string(adj->algorithm),
+ adj->weight,
+ srv6_endpoint_behavior_codepoint2str(adj->behavior),
+ adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_BFLG ? '1'
+ : '0',
+ adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_SFLG ? '1'
+ : '0',
+ adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_PFLG ? '1'
+ : '0');
if (adj->subsubtlvs)
isis_format_subsubtlvs(adj->subsubtlvs,
buf, NULL,
@@ -1131,9 +1125,8 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts,
lan->algorithm));
json_object_int_add(srv6_lan_endx_sid_json,
"weight", lan->weight);
- json_object_string_add(srv6_lan_endx_sid_json,
- "behavior",
- seg6local_action2str(
+ json_object_string_add(srv6_lan_endx_sid_json, "behavior",
+ srv6_endpoint_behavior_codepoint2str(
lan->behavior));
json_object_boolean_add(
srv6_lan_endx_sid_json, "flagB",
@@ -1162,24 +1155,19 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts,
for (lan = (struct isis_srv6_lan_endx_sid_subtlv *)
exts->srv6_lan_endx_sid.head;
lan; lan = lan->next) {
- sbuf_push(
- buf, indent,
- "SRv6 Lan End.X SID: %pI6, Algorithm: %s, Weight: %hhu, Endpoint Behavior: %s, Flags: B:%c, S:%c, P:%c "
- "Neighbor-ID: %pSY\n",
- &lan->sid,
- sr_algorithm_string(lan->algorithm),
- lan->weight,
- seg6local_action2str(lan->behavior),
- lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_BFLG
- ? '1'
- : '0',
- lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_SFLG
- ? '1'
- : '0',
- lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_PFLG
- ? '1'
- : '0',
- lan->neighbor_id);
+ sbuf_push(buf, indent,
+ "SRv6 Lan End.X SID: %pI6, Algorithm: %s, Weight: %hhu, Endpoint Behavior: %s, Flags: B:%c, S:%c, P:%c "
+ "Neighbor-ID: %pSY\n",
+ &lan->sid, sr_algorithm_string(lan->algorithm),
+ lan->weight,
+ srv6_endpoint_behavior_codepoint2str(lan->behavior),
+ lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_BFLG ? '1'
+ : '0',
+ lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_SFLG ? '1'
+ : '0',
+ lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_PFLG ? '1'
+ : '0',
+ lan->neighbor_id);
if (lan->subsubtlvs)
isis_format_subsubtlvs(lan->subsubtlvs,
buf, NULL,
@@ -2823,7 +2811,7 @@ static void format_item_srv6_end_sid(uint16_t mtid, struct isis_item *i,
sid_json = json_object_new_object();
json_object_object_add(json, "srv6EndSid", sid_json);
json_object_string_add(sid_json, "endpointBehavior",
- seg6local_action2str(sid->behavior));
+ srv6_endpoint_behavior_codepoint2str(sid->behavior));
json_object_string_addf(sid_json, "sidValue", "%pI6", &sid->sid);
if (sid->subsubtlvs) {
struct json_object *subtlvs_json;
@@ -2836,7 +2824,7 @@ static void format_item_srv6_end_sid(uint16_t mtid, struct isis_item *i,
} else {
sbuf_push(buf, indent, "SRv6 End SID ");
sbuf_push(buf, 0, "Endpoint Behavior: %s, ",
- seg6local_action2str(sid->behavior));
+ srv6_endpoint_behavior_codepoint2str(sid->behavior));
sbuf_push(buf, 0, "SID value: %pI6\n", &sid->sid);
if (sid->subsubtlvs) {
diff --git a/lib/bfd.c b/lib/bfd.c
index bc4b1c5b51..6300f6f5c0 100644
--- a/lib/bfd.c
+++ b/lib/bfd.c
@@ -18,6 +18,7 @@
#include "table.h"
#include "vty.h"
#include "bfd.h"
+#include "bfdd/bfd.h"
DEFINE_MTYPE_STATIC(LIB, BFD_INFO, "BFD info");
DEFINE_MTYPE_STATIC(LIB, BFD_SOURCE, "BFD source cache");
@@ -140,14 +141,15 @@ static void bfd_source_cache_put(struct bfd_session_params *session);
* bfd_get_peer_info - Extract the Peer information for which the BFD session
* went down from the message sent from Zebra to clients.
*/
-static struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp,
- struct prefix *sp, int *status,
- int *remote_cbit, vrf_id_t vrf_id)
+static struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp, struct prefix *sp,
+ int *status, int *remote_cbit, vrf_id_t vrf_id,
+ char *bfd_name)
{
unsigned int ifindex;
struct interface *ifp = NULL;
int plen;
int local_remote_cbit;
+ uint8_t bfd_name_len = 0;
/*
* If the ifindex lookup fails the
@@ -194,6 +196,13 @@ static struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp,
STREAM_GETC(s, local_remote_cbit);
if (remote_cbit)
*remote_cbit = local_remote_cbit;
+
+ STREAM_GETC(s, bfd_name_len);
+ if (bfd_name_len) {
+ STREAM_GET(bfd_name, s, bfd_name_len);
+ *(bfd_name + bfd_name_len) = 0;
+ }
+
return ifp;
stream_failure:
@@ -918,6 +927,7 @@ int zclient_bfd_session_update(ZAPI_CALLBACK_ARGS)
struct prefix dp;
struct prefix sp;
char ifstr[128], cbitstr[32];
+ char bfd_name[BFD_NAME_SIZE + 1] = { 0 };
if (!zclient->bfd_integration)
return 0;
@@ -926,8 +936,7 @@ int zclient_bfd_session_update(ZAPI_CALLBACK_ARGS)
if (bsglobal.shutting_down)
return 0;
- ifp = bfd_get_peer_info(zclient->ibuf, &dp, &sp, &state, &remote_cbit,
- vrf_id);
+ ifp = bfd_get_peer_info(zclient->ibuf, &dp, &sp, &state, &remote_cbit, vrf_id, bfd_name);
/*
* When interface lookup fails or an invalid stream is read, we must
* not proceed otherwise it will trigger an assertion while checking
diff --git a/lib/bfd.h b/lib/bfd.h
index 99790f96a5..07d4c9781d 100644
--- a/lib/bfd.h
+++ b/lib/bfd.h
@@ -26,6 +26,8 @@ extern "C" {
#define BFD_PROFILE_NAME_LEN 64
+#define BFD_NAME_SIZE 255
+
const char *bfd_get_status_str(int status);
extern void bfd_client_sendmsg(struct zclient *zclient, int command,
@@ -409,6 +411,8 @@ struct bfd_session_arg {
uint32_t min_tx;
/** Detection multiplier. */
uint32_t detection_multiplier;
+ /* bfd session name*/
+ char bfd_name[BFD_NAME_SIZE + 1];
};
/**
diff --git a/lib/privs.c b/lib/privs.c
index b0809bf690..e7df383e5d 100644
--- a/lib/privs.c
+++ b/lib/privs.c
@@ -210,10 +210,11 @@ int zprivs_change_caps(zebra_privs_ops_t op)
{
cap_flag_value_t cflag;
- /* should be no possibility of being called without valid caps */
- assert(zprivs_state.syscaps_p && zprivs_state.caps);
- if (!(zprivs_state.syscaps_p && zprivs_state.caps))
- exit(1);
+ /* Called without valid caps - just return. Not every daemon needs
+ * privs.
+ */
+ if (zprivs_state.syscaps_p == NULL || zprivs_state.caps == NULL)
+ return 0;
if (op == ZPRIVS_RAISE)
cflag = CAP_SET;
diff --git a/lib/srcdest_table.c b/lib/srcdest_table.c
index 3247a0372c..7203c8ac8e 100644
--- a/lib/srcdest_table.c
+++ b/lib/srcdest_table.c
@@ -309,13 +309,3 @@ static ssize_t printfrr_rn(struct fbuf *buf, struct printfrr_eargs *ea,
cbuf, sizeof(cbuf));
return bputs(buf, cbuf);
}
-
-struct route_table *srcdest_srcnode_table(struct route_node *rn)
-{
- if (rnode_is_dstnode(rn)) {
- struct srcdest_rnode *srn = srcdest_rnode_from_rnode(rn);
-
- return srn->src_table;
- }
- return NULL;
-}
diff --git a/lib/srcdest_table.h b/lib/srcdest_table.h
index ff97f9b735..a699d4a11b 100644
--- a/lib/srcdest_table.h
+++ b/lib/srcdest_table.h
@@ -87,8 +87,6 @@ static inline void *srcdest_rnode_table_info(struct route_node *rn)
return route_table_get_info(srcdest_rnode_table(rn));
}
-extern struct route_table *srcdest_srcnode_table(struct route_node *rn);
-
#ifdef __cplusplus
}
#endif
diff --git a/lib/zclient.c b/lib/zclient.c
index 063944fd3b..d8c75c9029 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -31,6 +31,7 @@
DEFINE_MTYPE_STATIC(LIB, ZCLIENT, "Zclient");
DEFINE_MTYPE_STATIC(LIB, REDIST_INST, "Redistribution instance IDs");
+DEFINE_MTYPE_STATIC(LIB, REDIST_TABLE_DIRECT, "Redistribution table direct");
/* Zebra client events. */
enum zclient_event { ZCLIENT_SCHEDULE, ZCLIENT_READ, ZCLIENT_CONNECT };
@@ -104,6 +105,11 @@ void zclient_free(struct zclient *zclient)
XFREE(MTYPE_ZCLIENT, zclient);
}
+static void redist_free_instance(void *data)
+{
+ XFREE(MTYPE_REDIST_INST, data);
+}
+
unsigned short *redist_check_instance(struct redist_proto *red,
unsigned short instance)
{
@@ -126,8 +132,10 @@ void redist_add_instance(struct redist_proto *red, unsigned short instance)
red->enabled = 1;
- if (!red->instances)
+ if (!red->instances) {
red->instances = list_new();
+ red->instances->del = redist_free_instance;
+ }
in = XMALLOC(MTYPE_REDIST_INST, sizeof(unsigned short));
*in = instance;
@@ -143,23 +151,100 @@ void redist_del_instance(struct redist_proto *red, unsigned short instance)
return;
listnode_delete(red->instances, id);
- XFREE(MTYPE_REDIST_INST, id);
+ red->instances->del(id);
if (!red->instances->count) {
red->enabled = 0;
list_delete(&red->instances);
}
}
-void redist_del_all_instances(struct redist_proto *red)
+static void redist_free_table_direct(void *data)
{
- struct listnode *ln, *nn;
- unsigned short *id;
+ XFREE(MTYPE_REDIST_TABLE_DIRECT, data);
+}
+
+struct redist_table_direct *redist_lookup_table_direct(const struct redist_proto *red,
+ const struct redist_table_direct *table)
+{
+ struct redist_table_direct *ntable;
+ struct listnode *node;
+
+ if (red->instances == NULL)
+ return NULL;
+
+ for (ALL_LIST_ELEMENTS_RO(red->instances, node, ntable)) {
+ if (table->vrf_id != ntable->vrf_id)
+ continue;
+ if (table->table_id != ntable->table_id)
+ continue;
+
+ return ntable;
+ }
+
+ return NULL;
+}
+
+bool redist_table_direct_has_id(const struct redist_proto *red, int table_id)
+{
+ struct redist_table_direct *table;
+ struct listnode *node;
+
+ if (red->instances == NULL)
+ return false;
+
+ for (ALL_LIST_ELEMENTS_RO(red->instances, node, table)) {
+ if (table->table_id != table_id)
+ continue;
+
+ return true;
+ }
+
+ return false;
+}
+
+void redist_add_table_direct(struct redist_proto *red, const struct redist_table_direct *table)
+{
+ struct redist_table_direct *ntable;
+
+ ntable = redist_lookup_table_direct(red, table);
+ if (ntable != NULL)
+ return;
+
+ if (red->instances == NULL) {
+ red->instances = list_new();
+ red->instances->del = redist_free_table_direct;
+ }
+
+ red->enabled = 1;
+
+ ntable = XCALLOC(MTYPE_REDIST_TABLE_DIRECT, sizeof(*ntable));
+ ntable->vrf_id = table->vrf_id;
+ ntable->table_id = table->table_id;
+ listnode_add(red->instances, ntable);
+}
+
+void redist_del_table_direct(struct redist_proto *red, const struct redist_table_direct *table)
+{
+ struct redist_table_direct *ntable;
+ ntable = redist_lookup_table_direct(red, table);
+ if (ntable == NULL)
+ return;
+
+ listnode_delete(red->instances, ntable);
+ red->instances->del(ntable);
+ if (red->instances->count == 0) {
+ red->enabled = 0;
+ list_delete(&red->instances);
+ }
+}
+
+void redist_del_all_instances(struct redist_proto *red)
+{
if (!red->instances)
return;
- for (ALL_LIST_ELEMENTS(red->instances, ln, nn, id))
- redist_del_instance(red, *id);
+ list_delete(&red->instances);
}
/* Stop zebra client services. */
@@ -480,6 +565,17 @@ enum zclient_send_status zclient_send_localsid(struct zclient *zclient,
return zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
}
+static void zclient_send_table_direct(struct zclient *zclient, afi_t afi, int type)
+{
+ struct redist_table_direct *table;
+ struct redist_proto *red = &zclient->mi_redist[afi][ZEBRA_ROUTE_TABLE_DIRECT];
+ struct listnode *node;
+
+ for (ALL_LIST_ELEMENTS_RO(red->instances, node, table))
+ zebra_redistribute_send(type, zclient, afi, ZEBRA_ROUTE_TABLE_DIRECT,
+ table->table_id, table->vrf_id);
+}
+
/* Send register requests to zebra daemon for the information in a VRF. */
void zclient_send_reg_requests(struct zclient *zclient, vrf_id_t vrf_id)
{
@@ -513,6 +609,12 @@ void zclient_send_reg_requests(struct zclient *zclient, vrf_id_t vrf_id)
if (!zclient->mi_redist[afi][i].enabled)
continue;
+ if (i == ZEBRA_ROUTE_TABLE_DIRECT) {
+ zclient_send_table_direct(zclient, afi,
+ ZEBRA_REDISTRIBUTE_ADD);
+ continue;
+ }
+
struct listnode *node;
unsigned short *id;
@@ -580,6 +682,12 @@ void zclient_send_dereg_requests(struct zclient *zclient, vrf_id_t vrf_id)
if (!zclient->mi_redist[afi][i].enabled)
continue;
+ if (i == ZEBRA_ROUTE_TABLE_DIRECT) {
+ zclient_send_table_direct(zclient, afi,
+ ZEBRA_REDISTRIBUTE_DELETE);
+ continue;
+ }
+
struct listnode *node;
unsigned short *id;
@@ -2016,6 +2124,15 @@ bool zapi_route_notify_decode(struct stream *s, struct prefix *p,
enum zapi_route_notify_owner *note,
afi_t *afi, safi_t *safi)
{
+ struct prefix dummy;
+
+ return zapi_route_notify_decode_srcdest(s, p, &dummy, tableid, note, afi, safi);
+}
+
+bool zapi_route_notify_decode_srcdest(struct stream *s, struct prefix *p, struct prefix *src_p,
+ uint32_t *tableid, enum zapi_route_notify_owner *note,
+ afi_t *afi, safi_t *safi)
+{
uint32_t t;
afi_t afi_val;
safi_t safi_val;
@@ -2025,6 +2142,9 @@ bool zapi_route_notify_decode(struct stream *s, struct prefix *p,
STREAM_GETC(s, p->family);
STREAM_GETC(s, p->prefixlen);
STREAM_GET(&p->u.prefix, s, prefix_blen(p));
+ src_p->family = p->family;
+ STREAM_GETC(s, src_p->prefixlen);
+ STREAM_GET(&src_p->u.prefix, s, prefix_blen(src_p));
STREAM_GETL(s, t);
STREAM_GETC(s, afi_val);
STREAM_GETC(s, safi_val);
@@ -4634,9 +4754,52 @@ static void zclient_read(struct event *thread)
zclient_event(ZCLIENT_READ, zclient);
}
+static void zclient_redistribute_table_direct(struct zclient *zclient, vrf_id_t vrf_id, afi_t afi,
+ int instance, int command)
+{
+ struct redist_proto *red = &zclient->mi_redist[afi][ZEBRA_ROUTE_TABLE_DIRECT];
+ bool has_table;
+ struct redist_table_direct table = {
+ .vrf_id = vrf_id,
+ .table_id = instance,
+ };
+
+ has_table = redist_lookup_table_direct(red, &table);
+
+ if (command == ZEBRA_REDISTRIBUTE_ADD) {
+ if (has_table)
+ return;
+
+ redist_add_table_direct(red, &table);
+ } else {
+ if (!has_table)
+ return;
+
+ redist_del_table_direct(red, &table);
+ }
+
+ if (zclient->sock > 0)
+ zebra_redistribute_send(command, zclient, afi, ZEBRA_ROUTE_TABLE_DIRECT, instance,
+ vrf_id);
+}
+
void zclient_redistribute(int command, struct zclient *zclient, afi_t afi,
int type, unsigned short instance, vrf_id_t vrf_id)
{
+ /*
+ * When asking for table-direct redistribution the parameter
+ * `instance` has a different meaning: it means table
+ * identification.
+ *
+ * The table identification information is stored in
+ * `zclient->mi_redist` along with the VRF identification
+ * information in a pair (different from the usual single protocol
+ * instance value).
+ */
+ if (type == ZEBRA_ROUTE_TABLE_DIRECT) {
+ zclient_redistribute_table_direct(zclient, vrf_id, afi, instance, command);
+ return;
+ }
if (instance) {
if (command == ZEBRA_REDISTRIBUTE_ADD) {
diff --git a/lib/zclient.h b/lib/zclient.h
index 2385a8a219..afd84acce2 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -268,6 +268,21 @@ struct redist_proto {
struct list *instances;
};
+/**
+ * Redistribute table direct instance data structure: keeps the VRF
+ * that subscribed to the table ID.
+ *
+ * **NOTE**
+ * `table_id` is an integer because that is what the netlink interface
+ * uses for route attribute RTA_TABLE (32bit int), however the whole
+ * zclient API uses `unsigned short` (and CLI commands) so it will be
+ * limited to the range 1 to 65535.
+ */
+struct redist_table_direct {
+ vrf_id_t vrf_id;
+ int table_id;
+};
+
struct zclient_capabilities {
uint32_t ecmp;
bool mpls_enabled;
@@ -924,6 +939,15 @@ extern void redist_add_instance(struct redist_proto *, unsigned short);
extern void redist_del_instance(struct redist_proto *, unsigned short);
extern void redist_del_all_instances(struct redist_proto *red);
+extern struct redist_table_direct *
+redist_lookup_table_direct(const struct redist_proto *red, const struct redist_table_direct *table);
+extern bool redist_table_direct_has_id(const struct redist_proto *red, int table_id);
+extern void redist_add_table_direct(struct redist_proto *red,
+ const struct redist_table_direct *table);
+extern void redist_del_table_direct(struct redist_proto *red,
+ const struct redist_table_direct *table);
+
+
/*
* Send to zebra that the specified vrf is using label to resolve
* itself for L3VPN's. Repeated calls of this function with
@@ -1144,6 +1168,9 @@ bool zapi_route_notify_decode(struct stream *s, struct prefix *p,
uint32_t *tableid,
enum zapi_route_notify_owner *note,
afi_t *afi, safi_t *safi);
+bool zapi_route_notify_decode_srcdest(struct stream *s, struct prefix *p, struct prefix *src_p,
+ uint32_t *tableid, enum zapi_route_notify_owner *note,
+ afi_t *afi, safi_t *safi);
bool zapi_rule_notify_decode(struct stream *s, uint32_t *seqno,
uint32_t *priority, uint32_t *unique, char *ifname,
enum zapi_rule_notify_owner *note);
diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c
index b718d498ae..f45135f44f 100644
--- a/ospfd/ospf_zebra.c
+++ b/ospfd/ospf_zebra.c
@@ -304,6 +304,27 @@ void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p,
if (api.nexthop_num >= ospf->max_multipath)
break;
+ /*
+ * Prune duplicate next-hops from the route that is
+ * installed in the zebra IP route table. OSPF Intra-Area
+ * routes never have duplicates.
+ */
+ if (or->path_type != OSPF_PATH_INTRA_AREA) {
+ struct zapi_nexthop *api_nh = &api.nexthops[0];
+ unsigned int nh_index;
+ bool duplicate_next_hop = false;
+
+ for (nh_index = 0; nh_index < api.nexthop_num; api_nh++, nh_index++) {
+ if (IPV4_ADDR_SAME(&api_nh->gate.ipv4, &path->nexthop) &&
+ (api_nh->ifindex == path->ifindex)) {
+ duplicate_next_hop = true;
+ break;
+ }
+ }
+ if (duplicate_next_hop)
+ continue;
+ }
+
ospf_zebra_add_nexthop(ospf, path, &api);
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
diff --git a/pimd/pim_autorp.c b/pimd/pim_autorp.c
index caed914a87..dc077dbbd6 100644
--- a/pimd/pim_autorp.c
+++ b/pimd/pim_autorp.c
@@ -113,6 +113,12 @@ static void pim_autorp_free(struct pim_autorp *autorp)
XFREE(MTYPE_PIM_AUTORP_ANNOUNCE, autorp->announce_pkt);
}
+static bool pim_autorp_should_close(struct pim_autorp *autorp)
+{
+ /* If discovery or mapping agent is active, then we need the socket open */
+ return !autorp->do_discovery && !autorp->send_rp_discovery;
+}
+
static bool pim_autorp_join_groups(struct interface *ifp)
{
struct pim_interface *pim_ifp;
@@ -670,10 +676,19 @@ static void autorp_send_discovery(struct event *evt)
&(autorp->send_discovery_timer));
}
+static bool pim_autorp_socket_enable(struct pim_autorp *autorp);
+static bool pim_autorp_socket_disable(struct pim_autorp *autorp);
+
static void autorp_send_discovery_on(struct pim_autorp *autorp)
{
int interval = 5;
+ /* Make sure the socket is open and ready */
+ if (!pim_autorp_socket_enable(autorp)) {
+ zlog_err("%s: AutoRP failed to open socket", __func__);
+ return;
+ }
+
/* Send the first discovery shortly after being enabled.
* If the configured interval is less than 5 seconds, then just use that.
*/
@@ -695,6 +710,10 @@ static void autorp_send_discovery_off(struct pim_autorp *autorp)
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: AutoRP discovery sending disabled", __func__);
event_cancel(&(autorp->send_discovery_timer));
+
+ /* Close the socket if we need to */
+ if (pim_autorp_should_close(autorp) && !pim_autorp_socket_disable(autorp))
+ zlog_warn("%s: AutoRP failed to close socket", __func__);
}
static bool autorp_recv_discovery(struct pim_autorp *autorp, uint8_t rpcnt, uint16_t holdtime,
@@ -949,6 +968,10 @@ static bool pim_autorp_socket_enable(struct pim_autorp *autorp)
{
int fd;
+ /* Return early if socket is already enabled */
+ if (autorp->sock != -1)
+ return true;
+
frr_with_privs (&pimd_privs) {
fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
if (fd < 0) {
@@ -975,6 +998,10 @@ static bool pim_autorp_socket_enable(struct pim_autorp *autorp)
static bool pim_autorp_socket_disable(struct pim_autorp *autorp)
{
+ /* Return early if socket is already disabled */
+ if (autorp->sock == -1)
+ return true;
+
if (close(autorp->sock)) {
zlog_warn("Failure closing autorp socket: fd=%d errno=%d: %s", autorp->sock, errno,
safe_strerror(errno));
@@ -1453,6 +1480,12 @@ void pim_autorp_start_discovery(struct pim_instance *pim)
struct interface *ifp;
struct pim_autorp *autorp = pim->autorp;
+ /* Make sure the socket is open and ready */
+ if (!pim_autorp_socket_enable(autorp)) {
+ zlog_err("%s: AutoRP failed to open socket", __func__);
+ return;
+ }
+
if (!autorp->do_discovery) {
autorp->do_discovery = true;
autorp_read_on(autorp);
@@ -1482,6 +1515,10 @@ void pim_autorp_stop_discovery(struct pim_instance *pim)
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: AutoRP Discovery stopped", __func__);
}
+
+ /* Close the socket if we need to */
+ if (pim_autorp_should_close(autorp) && !pim_autorp_socket_disable(autorp))
+ zlog_warn("%s: AutoRP failed to close socket", __func__);
}
void pim_autorp_init(struct pim_instance *pim)
@@ -1510,12 +1547,6 @@ void pim_autorp_init(struct pim_instance *pim)
pim->autorp = autorp;
- if (!pim_autorp_socket_enable(autorp)) {
- zlog_warn("%s: AutoRP failed to initialize, feature will not work correctly",
- __func__);
- return;
- }
-
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: AutoRP Initialized", __func__);
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index 8aebce7d27..9f09852a94 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -608,26 +608,14 @@ int pim_process_no_rp_plist_cmd(struct vty *vty, const char *rp_str,
int pim_process_autorp_cmd(struct vty *vty)
{
- char xpath[XPATH_MAXLEN];
-
- snprintf(xpath, sizeof(xpath), "%s/%s", FRR_PIM_AUTORP_XPATH,
- "discovery-enabled");
-
- nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, "true");
-
- return nb_cli_apply_changes(vty, NULL);
+ nb_cli_enqueue_change(vty, "./discovery-enabled", NB_OP_MODIFY, "true");
+ return nb_cli_apply_changes(vty, "%s", FRR_PIM_AUTORP_XPATH);
}
int pim_process_no_autorp_cmd(struct vty *vty)
{
- char xpath[XPATH_MAXLEN];
-
- snprintf(xpath, sizeof(xpath), "%s/%s", FRR_PIM_AUTORP_XPATH,
- "discovery-enabled");
-
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
-
- return nb_cli_apply_changes(vty, NULL);
+ nb_cli_enqueue_change(vty, "./discovery-enabled", NB_OP_MODIFY, "false");
+ return nb_cli_apply_changes(vty, "%s", FRR_PIM_AUTORP_XPATH);
}
int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no, const char *rpaddr_str,
diff --git a/staticd/static_nb.c b/staticd/static_nb.c
index 356324126a..ef363bfe7e 100644
--- a/staticd/static_nb.c
+++ b/staticd/static_nb.c
@@ -135,96 +135,6 @@ const struct frr_yang_module_info frr_staticd_info = {
}
},
{
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list",
- .cbs = {
- .create = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_create,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_destroy,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list",
- .cbs = {
- .create = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_create,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_destroy,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/tag",
- .cbs = {
- .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_tag_modify,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop",
- .cbs = {
- .apply_finish = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_apply_finish,
- .create = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_create,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_destroy,
- .pre_validate = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_pre_validate,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/bh-type",
- .cbs = {
- .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_bh_type_modify,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/onlink",
- .cbs = {
- .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srte-color",
- .cbs = {
- .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_modify,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_destroy,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srv6-segs-stack/entry",
- .cbs = {
- .create = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_create,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_destroy,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srv6-segs-stack/entry/seg",
- .cbs = {
- .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_modify,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_destroy,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry",
- .cbs = {
- .create = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_create,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_destroy,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/label",
- .cbs = {
- .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_modify,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_destroy,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/ttl",
- .cbs = {
- .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_modify,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_destroy,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/traffic-class",
- .cbs = {
- .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_modify,
- .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_destroy,
- }
- },
- {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing/srv6/static-sids/sid",
.cbs = {
.apply_finish = routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_apply_finish,
diff --git a/staticd/static_nb.h b/staticd/static_nb.h
index d11bf5363b..aa11f34021 100644
--- a/staticd/static_nb.h
+++ b/staticd/static_nb.h
@@ -72,52 +72,6 @@ int route_next_hop_bfd_source_destroy(struct nb_cb_destroy_args *args);
int route_next_hop_bfd_profile_modify(struct nb_cb_modify_args *args);
int route_next_hop_bfd_profile_destroy(struct nb_cb_destroy_args *args);
int route_next_hop_bfd_multi_hop_modify(struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_create(
- struct nb_cb_create_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_destroy(
- struct nb_cb_destroy_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_create(
- struct nb_cb_create_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_destroy(
- struct nb_cb_destroy_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_tag_modify(
- struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_create(
- struct nb_cb_create_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_destroy(
- struct nb_cb_destroy_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_bh_type_modify(
- struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify(
- struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_modify(
- struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_destroy(
- struct nb_cb_destroy_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_create(
- struct nb_cb_create_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_destroy(
- struct nb_cb_destroy_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_modify(
- struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_destroy(
- struct nb_cb_destroy_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_create(
- struct nb_cb_create_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_destroy(
- struct nb_cb_destroy_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_modify(
- struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_destroy(
- struct nb_cb_destroy_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_modify(
- struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_destroy(
- struct nb_cb_destroy_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_modify(
- struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_destroy(
- struct nb_cb_destroy_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_create(
struct nb_cb_create_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_destroy(
@@ -151,8 +105,6 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routi
void routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_apply_finish(
struct nb_cb_apply_finish_args *args);
-void routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_apply_finish(
- struct nb_cb_apply_finish_args *args);
void routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_srv6_local_sids_sid_apply_finish(
struct nb_cb_apply_finish_args *args);
@@ -169,16 +121,16 @@ int routing_control_plane_protocols_name_validate(
/* xpath macros */
/* route-list */
-#define FRR_STATIC_ROUTE_INFO_KEY_XPATH \
- "/frr-routing:routing/control-plane-protocols/" \
- "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
- "frr-staticd:staticd/route-list[prefix='%s'][afi-safi='%s']/" \
+#define FRR_STATIC_ROUTE_INFO_KEY_XPATH \
+ "/frr-routing:routing/control-plane-protocols/" \
+ "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
+ "frr-staticd:staticd/route-list[prefix='%s'][src-prefix='%s'][afi-safi='%s']/" \
"path-list[table-id='%u'][distance='%u']"
-#define FRR_STATIC_ROUTE_INFO_KEY_NO_DISTANCE_XPATH \
- "/frr-routing:routing/control-plane-protocols/" \
- "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
- "frr-staticd:staticd/route-list[prefix='%s'][afi-safi='%s']/" \
+#define FRR_STATIC_ROUTE_INFO_KEY_NO_DISTANCE_XPATH \
+ "/frr-routing:routing/control-plane-protocols/" \
+ "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
+ "frr-staticd:staticd/route-list[prefix='%s'][src-prefix='%s'][afi-safi='%s']/" \
"path-list[table-id='%u']"
@@ -203,19 +155,6 @@ int routing_control_plane_protocols_name_validate(
#define FRR_STATIC_ROUTE_NH_SRV6_KEY_SEG_XPATH "/entry[id='%u']/seg"
-/* route-list/srclist */
-#define FRR_S_ROUTE_SRC_INFO_KEY_XPATH \
- "/frr-routing:routing/control-plane-protocols/" \
- "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
- "frr-staticd:staticd/route-list[prefix='%s'][afi-safi='%s']/" \
- "src-list[src-prefix='%s']/path-list[table-id='%u'][distance='%u']"
-
-#define FRR_S_ROUTE_SRC_INFO_KEY_NO_DISTANCE_XPATH \
- "/frr-routing:routing/control-plane-protocols/" \
- "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
- "frr-staticd:staticd/route-list[prefix='%s'][afi-safi='%s']/" \
- "src-list[src-prefix='%s']/path-list[table-id='%u']"
-
/* route-list/frr-nexthops */
#define FRR_DEL_S_ROUTE_NH_KEY_XPATH \
FRR_STATIC_ROUTE_INFO_KEY_XPATH \
@@ -226,16 +165,6 @@ int routing_control_plane_protocols_name_validate(
FRR_STATIC_ROUTE_INFO_KEY_NO_DISTANCE_XPATH \
FRR_STATIC_ROUTE_NH_KEY_XPATH
-/* route-list/src/src-list/frr-nexthops*/
-#define FRR_DEL_S_ROUTE_SRC_NH_KEY_XPATH \
- FRR_S_ROUTE_SRC_INFO_KEY_XPATH \
- FRR_STATIC_ROUTE_NH_KEY_XPATH
-
-/* route-list/src/src-list/frr-nexthops*/
-#define FRR_DEL_S_ROUTE_SRC_NH_KEY_NO_DISTANCE_XPATH \
- FRR_S_ROUTE_SRC_INFO_KEY_NO_DISTANCE_XPATH \
- FRR_STATIC_ROUTE_NH_KEY_XPATH
-
/* srv6 */
#define FRR_STATIC_SRV6_INFO_KEY_XPATH \
"/frr-routing:routing/control-plane-protocols/" \
diff --git a/staticd/static_nb_config.c b/staticd/static_nb_config.c
index 51de05c2ea..e2ab1f2ffe 100644
--- a/staticd/static_nb_config.c
+++ b/staticd/static_nb_config.c
@@ -502,16 +502,6 @@ void routing_control_plane_protocols_control_plane_protocol_staticd_route_list_p
static_install_nexthop(nh);
}
-void routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_apply_finish(
- struct nb_cb_apply_finish_args *args)
-{
- struct static_nexthop *nh;
-
- nh = nb_running_get_entry(args->dnode, NULL, true);
-
- static_install_nexthop(nh);
-}
-
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_pre_validate(
struct nb_cb_pre_validate_args *args)
{
@@ -576,7 +566,7 @@ int routing_control_plane_protocols_staticd_destroy(
if (!stable)
continue;
- for (rn = route_top(stable); rn; rn = route_next(rn))
+ for (rn = route_top(stable); rn; rn = srcdest_route_next(rn))
static_del_route(rn);
}
@@ -595,7 +585,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_cr
struct static_vrf *svrf;
struct route_node *rn;
const struct lyd_node *vrf_dnode;
- struct prefix prefix;
+ struct prefix prefix, src_prefix, *src_p;
const char *afi_safi;
afi_t prefix_afi;
afi_t afi;
@@ -604,6 +594,8 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_cr
switch (args->event) {
case NB_EV_VALIDATE:
yang_dnode_get_prefix(&prefix, args->dnode, "prefix");
+ yang_dnode_get_prefix(&src_prefix, args->dnode, "src-prefix");
+ src_p = src_prefix.prefixlen ? &src_prefix : NULL;
afi_safi = yang_dnode_get_string(args->dnode, "afi-safi");
yang_afi_safi_identity2value(afi_safi, &afi, &safi);
prefix_afi = family2afi(prefix.family);
@@ -614,6 +606,14 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_cr
yang_dnode_get_string(args->dnode, "prefix"));
return NB_ERR_VALIDATION;
}
+
+ if (src_p && afi != AFI_IP6) {
+ flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE,
+ "invalid use of IPv6 dst-src prefix %s on %s",
+ yang_dnode_get_string(args->dnode, "src-prefix"),
+ yang_dnode_get_string(args->dnode, "prefix"));
+ return NB_ERR_VALIDATION;
+ }
break;
case NB_EV_PREPARE:
case NB_EV_ABORT:
@@ -624,10 +624,12 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_cr
svrf = nb_running_get_entry(vrf_dnode, NULL, true);
yang_dnode_get_prefix(&prefix, args->dnode, "prefix");
+ yang_dnode_get_prefix(&src_prefix, args->dnode, "src-prefix");
+ src_p = src_prefix.prefixlen ? &src_prefix : NULL;
afi_safi = yang_dnode_get_string(args->dnode, "afi-safi");
yang_afi_safi_identity2value(afi_safi, &afi, &safi);
- rn = static_add_route(afi, safi, &prefix, NULL, svrf);
+ rn = static_add_route(afi, safi, &prefix, (struct prefix_ipv6 *)src_p, svrf);
if (!svrf->vrf || svrf->vrf->vrf_id == VRF_UNKNOWN)
snprintf(
args->errmsg, args->errmsg_len,
@@ -1048,331 +1050,6 @@ int route_next_hop_bfd_profile_destroy(struct nb_cb_destroy_args *args)
/*
* XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_create(
- struct nb_cb_create_args *args)
-{
- struct static_vrf *s_vrf;
- struct route_node *rn;
- struct route_node *src_rn;
- struct prefix_ipv6 src_prefix = {};
- struct stable_info *info;
- afi_t afi;
- safi_t safi = SAFI_UNICAST;
-
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- break;
- case NB_EV_APPLY:
- rn = nb_running_get_entry(args->dnode, NULL, true);
- info = route_table_get_info(rn->table);
- s_vrf = info->svrf;
- yang_dnode_get_ipv6p(&src_prefix, args->dnode, "src-prefix");
- afi = family2afi(src_prefix.family);
- src_rn =
- static_add_route(afi, safi, &rn->p, &src_prefix, s_vrf);
- nb_running_set_entry(args->dnode, src_rn);
- break;
- }
- return NB_OK;
-}
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_destroy(
- struct nb_cb_destroy_args *args)
-{
- struct route_node *src_rn;
-
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- break;
- case NB_EV_APPLY:
- src_rn = nb_running_unset_entry(args->dnode);
- static_del_route(src_rn);
- break;
- }
-
- return NB_OK;
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_create(
- struct nb_cb_create_args *args)
-{
- return static_path_list_create(args);
-}
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_destroy(
- struct nb_cb_destroy_args *args)
-{
- return static_path_list_destroy(args);
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/tag
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_tag_modify(
- struct nb_cb_modify_args *args)
-{
- return static_path_list_tag_modify(args);
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_create(
- struct nb_cb_create_args *args)
-{
- return static_nexthop_create(args);
-}
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_destroy(
- struct nb_cb_destroy_args *args)
-{
- return static_nexthop_destroy(args);
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/bh-type
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_bh_type_modify(
- struct nb_cb_modify_args *args)
-{
- return static_nexthop_bh_type_modify(args);
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/onlink
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify(
- struct nb_cb_modify_args *args)
-{
- return static_nexthop_onlink_modify(args);
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srte-color
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_modify(
- struct nb_cb_modify_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- break;
- case NB_EV_APPLY:
- if (static_nexthop_color_modify(args) != NB_OK)
- return NB_ERR;
-
- break;
- }
- return NB_OK;
-}
-
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_destroy(
- struct nb_cb_destroy_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- break;
- case NB_EV_APPLY:
- if (static_nexthop_color_destroy(args) != NB_OK)
- return NB_ERR;
- break;
- }
- return NB_OK;
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srv6-segs-stack/entry
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_create(
- struct nb_cb_create_args *args)
-{
- return nexthop_srv6_segs_stack_entry_create(args);
-}
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_destroy(
- struct nb_cb_destroy_args *args)
-{
- return nexthop_srv6_segs_stack_entry_destroy(args);
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/srv6-segs-stack/entry/seg
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_modify(
- struct nb_cb_modify_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- break;
- case NB_EV_APPLY:
- if (static_nexthop_srv6_segs_modify(args) != NB_OK)
- return NB_ERR;
- break;
- }
- return NB_OK;
-}
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_srv6_segs_stack_entry_seg_destroy(
- struct nb_cb_destroy_args *args)
-{
- /*
- * No operation is required in this call back.
- * nexthop_mpls_seg_stack_entry_destroy() will take care
- * to reset the seg vaue.
- */
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- break;
- }
- return NB_OK;
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_create(
- struct nb_cb_create_args *args)
-{
- return nexthop_mpls_label_stack_entry_create(args);
-}
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_destroy(
- struct nb_cb_destroy_args *args)
-{
- return nexthop_mpls_label_stack_entry_destroy(args);
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/label
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_modify(
- struct nb_cb_modify_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- break;
- case NB_EV_APPLY:
- if (static_nexthop_mpls_label_modify(args) != NB_OK)
- return NB_ERR;
- break;
- }
- return NB_OK;
-}
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_label_destroy(
- struct nb_cb_destroy_args *args)
-{
- /*
- * No operation is required in this call back.
- * nexthop_mpls_label_stack_entry_destroy() will take care
- * to reset the label vaue.
- */
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- break;
- }
- return NB_OK;
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/ttl
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_modify(
- struct nb_cb_modify_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- break;
- }
-
- return NB_OK;
-}
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_ttl_destroy(
- struct nb_cb_destroy_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- break;
- }
-
- return NB_OK;
-}
-
-/*
- * XPath:
- * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/mpls-label-stack/entry/traffic-class
- */
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_modify(
- struct nb_cb_modify_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- break;
- }
-
- return NB_OK;
-}
-
-int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_destroy(
- struct nb_cb_destroy_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- break;
- }
-
- return NB_OK;
-}
-
-/*
- * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing
*/
int routing_control_plane_protocols_control_plane_protocol_staticd_segment_routing_create(
diff --git a/staticd/static_nht.c b/staticd/static_nht.c
index 06d27c6f59..367ee85040 100644
--- a/staticd/static_nht.c
+++ b/staticd/static_nht.c
@@ -49,8 +49,8 @@ static void static_nht_update_path(struct static_path *pn, struct prefix *nhp,
static_zebra_route_add(pn, true);
}
-static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp,
- uint32_t nh_num, afi_t afi, safi_t safi,
+static void static_nht_update_safi(const struct prefix *sp, const struct prefix *ssrc_p,
+ struct prefix *nhp, uint32_t nh_num, afi_t afi, safi_t safi,
struct static_vrf *svrf, vrf_id_t nh_vrf_id)
{
struct route_table *stable;
@@ -63,7 +63,7 @@ static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp,
return;
if (sp) {
- rn = srcdest_rnode_lookup(stable, sp, NULL);
+ rn = srcdest_rnode_lookup(stable, sp, (const struct prefix_ipv6 *)ssrc_p);
if (rn && rn->info) {
si = static_route_info_from_rnode(rn);
frr_each(static_path_list, &si->path_list, pn) {
@@ -75,7 +75,7 @@ static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp,
return;
}
- for (rn = route_top(stable); rn; rn = route_next(rn)) {
+ for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) {
si = static_route_info_from_rnode(rn);
if (!si)
continue;
@@ -85,14 +85,13 @@ static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp,
}
}
-void static_nht_update(struct prefix *sp, struct prefix *nhp, uint32_t nh_num,
- afi_t afi, safi_t safi, vrf_id_t nh_vrf_id)
+void static_nht_update(const struct prefix *sp, const struct prefix *ssrc_p, struct prefix *nhp,
+ uint32_t nh_num, afi_t afi, safi_t safi, vrf_id_t nh_vrf_id)
{
struct static_vrf *svrf;
RB_FOREACH (svrf, svrf_name_head, &svrfs)
- static_nht_update_safi(sp, nhp, nh_num, afi, safi, svrf,
- nh_vrf_id);
+ static_nht_update_safi(sp, ssrc_p, nhp, nh_num, afi, safi, svrf, nh_vrf_id);
}
static void static_nht_reset_start_safi(struct prefix *nhp, afi_t afi,
@@ -109,7 +108,7 @@ static void static_nht_reset_start_safi(struct prefix *nhp, afi_t afi,
if (!stable)
return;
- for (rn = route_top(stable); rn; rn = route_next(rn)) {
+ for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) {
si = static_route_info_from_rnode(rn);
if (!si)
continue;
@@ -150,8 +149,8 @@ void static_nht_reset_start(struct prefix *nhp, afi_t afi, safi_t safi,
static_nht_reset_start_safi(nhp, afi, safi, svrf, nh_vrf_id);
}
-static void static_nht_mark_state_safi(struct prefix *sp, afi_t afi,
- safi_t safi, struct vrf *vrf,
+static void static_nht_mark_state_safi(const struct prefix *sp, const struct prefix *ssrc_p,
+ afi_t afi, safi_t safi, struct vrf *vrf,
enum static_install_states state)
{
struct static_vrf *svrf;
@@ -169,7 +168,7 @@ static void static_nht_mark_state_safi(struct prefix *sp, afi_t afi,
if (!stable)
return;
- rn = srcdest_rnode_lookup(stable, sp, NULL);
+ rn = srcdest_rnode_lookup(stable, sp, (const struct prefix_ipv6 *)ssrc_p);
if (!rn)
return;
si = rn->info;
@@ -184,8 +183,8 @@ static void static_nht_mark_state_safi(struct prefix *sp, afi_t afi,
route_unlock_node(rn);
}
-void static_nht_mark_state(struct prefix *sp, safi_t safi, vrf_id_t vrf_id,
- enum static_install_states state)
+void static_nht_mark_state(const struct prefix *sp, const struct prefix *ssrc_p, safi_t safi,
+ vrf_id_t vrf_id, enum static_install_states state)
{
struct vrf *vrf;
@@ -198,5 +197,5 @@ void static_nht_mark_state(struct prefix *sp, safi_t safi, vrf_id_t vrf_id,
if (!vrf || !vrf->info)
return;
- static_nht_mark_state_safi(sp, afi, safi, vrf, state);
+ static_nht_mark_state_safi(sp, ssrc_p, afi, safi, vrf, state);
}
diff --git a/staticd/static_nht.h b/staticd/static_nht.h
index 74f4401e49..41ff30cd52 100644
--- a/staticd/static_nht.h
+++ b/staticd/static_nht.h
@@ -16,15 +16,14 @@ extern "C" {
* us call this function to find the nexthop we are tracking so it
* can be installed or removed.
*
- * sp -> The route we are looking at. If NULL then look at all
- * routes.
+ * sp + ssrc_p -> The route we are looking at. If NULL then look at all routes.
* nhp -> The nexthop that is being tracked.
* nh_num -> number of valid nexthops.
* afi -> The afi we are working in.
* vrf_id -> The vrf the nexthop is in.
*/
-extern void static_nht_update(struct prefix *sp, struct prefix *nhp,
- uint32_t nh_num, afi_t afi, safi_t safi,
+extern void static_nht_update(const struct prefix *sp, const struct prefix *ssrc_p,
+ struct prefix *nhp, uint32_t nh_num, afi_t afi, safi_t safi,
vrf_id_t vrf_id);
/*
@@ -35,11 +34,10 @@ extern void static_nht_reset_start(struct prefix *nhp, afi_t afi, safi_t safi,
vrf_id_t nh_vrf_id);
/*
- * For the given prefix, sp, mark it as in a particular state
+ * For the given prefix, sp + ssrc_p, mark it as in a particular state
*/
-extern void static_nht_mark_state(struct prefix *sp, safi_t safi,
- vrf_id_t vrf_id,
- enum static_install_states state);
+extern void static_nht_mark_state(const struct prefix *sp, const struct prefix *ssrc_p, safi_t safi,
+ vrf_id_t vrf_id, enum static_install_states state);
/*
* For the given nexthop, returns the string
diff --git a/staticd/static_routes.c b/staticd/static_routes.c
index cba38183bb..cbe1c3c8c0 100644
--- a/staticd/static_routes.c
+++ b/staticd/static_routes.c
@@ -33,10 +33,6 @@ void zebra_stable_node_cleanup(struct route_table *table,
struct static_nexthop *nh;
struct static_path *pn;
struct static_route_info *si;
- struct route_table *src_table;
- struct route_node *src_node;
- struct static_path *src_pn;
- struct static_route_info *src_si;
si = node->info;
@@ -50,36 +46,6 @@ void zebra_stable_node_cleanup(struct route_table *table,
static_path_list_del(&si->path_list, pn);
XFREE(MTYPE_STATIC_PATH, pn);
}
-
- /* clean up for dst table */
- src_table = srcdest_srcnode_table(node);
- if (src_table) {
- /* This means the route_node is part of the top
- * hierarchy and refers to a destination prefix.
- */
- for (src_node = route_top(src_table); src_node;
- src_node = route_next(src_node)) {
- src_si = src_node->info;
-
- frr_each_safe(static_path_list,
- &src_si->path_list, src_pn) {
- frr_each_safe(static_nexthop_list,
- &src_pn->nexthop_list,
- nh) {
- static_nexthop_list_del(
- &src_pn->nexthop_list,
- nh);
- XFREE(MTYPE_STATIC_NEXTHOP, nh);
- }
- static_path_list_del(&src_si->path_list,
- src_pn);
- XFREE(MTYPE_STATIC_PATH, src_pn);
- }
-
- XFREE(MTYPE_STATIC_ROUTE, src_node->info);
- }
- }
-
XFREE(MTYPE_STATIC_ROUTE, node->info);
}
}
@@ -124,28 +90,10 @@ struct route_node *static_add_route(afi_t afi, safi_t safi, struct prefix *p,
return rn;
}
-/* To delete the srcnodes */
-static void static_del_src_route(struct route_node *rn)
-{
- struct static_path *pn;
- struct static_route_info *si;
-
- si = rn->info;
-
- frr_each_safe(static_path_list, &si->path_list, pn) {
- static_del_path(pn);
- }
-
- XFREE(MTYPE_STATIC_ROUTE, rn->info);
- route_unlock_node(rn);
-}
-
void static_del_route(struct route_node *rn)
{
struct static_path *pn;
struct static_route_info *si;
- struct route_table *src_table;
- struct route_node *src_node;
si = rn->info;
@@ -153,17 +101,6 @@ void static_del_route(struct route_node *rn)
static_del_path(pn);
}
- /* clean up for dst table */
- src_table = srcdest_srcnode_table(rn);
- if (src_table) {
- /* This means the route_node is part of the top hierarchy
- * and refers to a destination prefix.
- */
- for (src_node = route_top(src_table); src_node;
- src_node = route_next(src_node)) {
- static_del_src_route(src_node);
- }
- }
XFREE(MTYPE_STATIC_ROUTE, rn->info);
route_unlock_node(rn);
}
@@ -477,7 +414,7 @@ static void static_fixup_vrf(struct vrf *vrf, struct route_table *stable,
struct static_path *pn;
struct static_route_info *si;
- for (rn = route_top(stable); rn; rn = route_next(rn)) {
+ for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) {
si = static_route_info_from_rnode(rn);
if (!si)
continue;
@@ -517,7 +454,7 @@ static void static_enable_vrf(struct route_table *stable, afi_t afi, safi_t safi
struct static_path *pn;
struct static_route_info *si;
- for (rn = route_top(stable); rn; rn = route_next(rn)) {
+ for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) {
si = static_route_info_from_rnode(rn);
if (!si)
continue;
@@ -575,7 +512,7 @@ static void static_cleanup_vrf(struct vrf *vrf, struct route_table *stable,
struct static_path *pn;
struct static_route_info *si;
- for (rn = route_top(stable); rn; rn = route_next(rn)) {
+ for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) {
si = static_route_info_from_rnode(rn);
if (!si)
continue;
@@ -608,7 +545,7 @@ static void static_disable_vrf(struct route_table *stable,
struct static_path *pn;
struct static_route_info *si;
- for (rn = route_top(stable); rn; rn = route_next(rn)) {
+ for (rn = route_top(stable); rn; rn = srcdest_route_next(rn)) {
si = static_route_info_from_rnode(rn);
if (!si)
continue;
diff --git a/staticd/static_vrf.c b/staticd/static_vrf.c
index 710827a9ff..78bc30500b 100644
--- a/staticd/static_vrf.c
+++ b/staticd/static_vrf.c
@@ -51,10 +51,8 @@ struct static_vrf *static_vrf_alloc(const char *name)
for (afi = AFI_IP; afi <= AFI_IP6; afi++) {
for (safi = SAFI_UNICAST; safi <= SAFI_MULTICAST; safi++) {
- if (afi == AFI_IP6)
- table = srcdest_table_init();
- else
- table = route_table_init();
+ table = srcdest_table_init();
+ table->cleanup = zebra_stable_node_cleanup;
info = XCALLOC(MTYPE_STATIC_RTABLE_INFO,
sizeof(struct stable_info));
@@ -63,7 +61,6 @@ struct static_vrf *static_vrf_alloc(const char *name)
info->safi = safi;
route_table_set_info(table, info);
- table->cleanup = zebra_stable_node_cleanup;
svrf->stable[afi][safi] = table;
}
}
diff --git a/staticd/static_vty.c b/staticd/static_vty.c
index 2fadc1f0d4..f93e81e8dc 100644
--- a/staticd/static_vty.c
+++ b/staticd/static_vty.c
@@ -79,7 +79,7 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
char xpath_seg[XPATH_MAXLEN];
char ab_xpath[XPATH_MAXLEN];
char buf_prefix[PREFIX_STRLEN];
- char buf_src_prefix[PREFIX_STRLEN] = {};
+ char buf_src_prefix[PREFIX_STRLEN] = "::/0";
char buf_nh_type[PREFIX_STRLEN] = {};
char buf_tag[PREFIX_STRLEN];
uint8_t label_stack_id = 0;
@@ -116,6 +116,7 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
}
assert(!!str2prefix(args->prefix, &p));
+ src = (struct prefix){ .family = p.family, .prefixlen = 0 };
switch (args->afi) {
case AFI_IP:
@@ -146,7 +147,7 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
return CMD_WARNING_CONFIG_FAILED;
}
- if (args->source)
+ if (src.prefixlen)
prefix2str(&src, buf_src_prefix, sizeof(buf_src_prefix));
if (args->gateway)
buf_gate_str = args->gateway;
@@ -183,25 +184,10 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
static_get_nh_type(type, buf_nh_type, sizeof(buf_nh_type));
if (!args->delete) {
- if (args->source)
- snprintf(ab_xpath, sizeof(ab_xpath),
- FRR_DEL_S_ROUTE_SRC_NH_KEY_NO_DISTANCE_XPATH,
- "frr-staticd:staticd", "staticd", args->vrf,
- buf_prefix,
- yang_afi_safi_value2identity(args->afi,
- args->safi),
- buf_src_prefix, table_id, buf_nh_type,
- args->nexthop_vrf, buf_gate_str,
- args->interface_name);
- else
- snprintf(ab_xpath, sizeof(ab_xpath),
- FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH,
- "frr-staticd:staticd", "staticd", args->vrf,
- buf_prefix,
- yang_afi_safi_value2identity(args->afi,
- args->safi),
- table_id, buf_nh_type, args->nexthop_vrf,
- buf_gate_str, args->interface_name);
+ snprintf(ab_xpath, sizeof(ab_xpath), FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH,
+ "frr-staticd:staticd", "staticd", args->vrf, buf_prefix, buf_src_prefix,
+ yang_afi_safi_value2identity(args->afi, args->safi), table_id, buf_nh_type,
+ args->nexthop_vrf, buf_gate_str, args->interface_name);
/*
* If there's already the same nexthop but with a different
@@ -218,22 +204,9 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
}
/* route + path procesing */
- if (args->source)
- snprintf(xpath_prefix, sizeof(xpath_prefix),
- FRR_S_ROUTE_SRC_INFO_KEY_XPATH,
- "frr-staticd:staticd", "staticd", args->vrf,
- buf_prefix,
- yang_afi_safi_value2identity(args->afi,
- args->safi),
- buf_src_prefix, table_id, distance);
- else
- snprintf(xpath_prefix, sizeof(xpath_prefix),
- FRR_STATIC_ROUTE_INFO_KEY_XPATH,
- "frr-staticd:staticd", "staticd", args->vrf,
- buf_prefix,
- yang_afi_safi_value2identity(args->afi,
- args->safi),
- table_id, distance);
+ snprintf(xpath_prefix, sizeof(xpath_prefix), FRR_STATIC_ROUTE_INFO_KEY_XPATH,
+ "frr-staticd:staticd", "staticd", args->vrf, buf_prefix, buf_src_prefix,
+ yang_afi_safi_value2identity(args->afi, args->safi), table_id, distance);
nb_cli_enqueue_change(vty, xpath_prefix, NB_OP_CREATE, NULL);
@@ -412,51 +385,18 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
if (orig_seg)
XFREE(MTYPE_TMP, orig_seg);
} else {
- if (args->source) {
- if (args->distance)
- snprintf(ab_xpath, sizeof(ab_xpath),
- FRR_DEL_S_ROUTE_SRC_NH_KEY_XPATH,
- "frr-staticd:staticd", "staticd",
- args->vrf, buf_prefix,
- yang_afi_safi_value2identity(
- args->afi, args->safi),
- buf_src_prefix, table_id, distance,
- buf_nh_type, args->nexthop_vrf,
- buf_gate_str, args->interface_name);
- else
- snprintf(
- ab_xpath, sizeof(ab_xpath),
- FRR_DEL_S_ROUTE_SRC_NH_KEY_NO_DISTANCE_XPATH,
- "frr-staticd:staticd", "staticd",
- args->vrf, buf_prefix,
- yang_afi_safi_value2identity(
- args->afi, args->safi),
- buf_src_prefix, table_id, buf_nh_type,
- args->nexthop_vrf, buf_gate_str,
- args->interface_name);
- } else {
- if (args->distance)
- snprintf(ab_xpath, sizeof(ab_xpath),
- FRR_DEL_S_ROUTE_NH_KEY_XPATH,
- "frr-staticd:staticd", "staticd",
- args->vrf, buf_prefix,
- yang_afi_safi_value2identity(
- args->afi, args->safi),
- table_id, distance, buf_nh_type,
- args->nexthop_vrf, buf_gate_str,
- args->interface_name);
- else
- snprintf(
- ab_xpath, sizeof(ab_xpath),
- FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH,
- "frr-staticd:staticd", "staticd",
- args->vrf, buf_prefix,
- yang_afi_safi_value2identity(
- args->afi, args->safi),
- table_id, buf_nh_type,
- args->nexthop_vrf, buf_gate_str,
- args->interface_name);
- }
+ if (args->distance)
+ snprintf(ab_xpath, sizeof(ab_xpath), FRR_DEL_S_ROUTE_NH_KEY_XPATH,
+ "frr-staticd:staticd", "staticd", args->vrf, buf_prefix,
+ buf_src_prefix, yang_afi_safi_value2identity(args->afi, args->safi),
+ table_id, distance, buf_nh_type, args->nexthop_vrf, buf_gate_str,
+ args->interface_name);
+ else
+ snprintf(ab_xpath, sizeof(ab_xpath),
+ FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH, "frr-staticd:staticd",
+ "staticd", args->vrf, buf_prefix, buf_src_prefix,
+ yang_afi_safi_value2identity(args->afi, args->safi), table_id,
+ buf_nh_type, args->nexthop_vrf, buf_gate_str, args->interface_name);
dnode = yang_dnode_get(vty->candidate_config->dnode, ab_xpath);
if (!dnode) {
@@ -1333,8 +1273,8 @@ DEFPY_YANG(no_srv6_sid, no_srv6_sid_cmd,
{
char xpath[XPATH_MAXLEN + 37];
- snprintf(xpath, sizeof(xpath), FRR_STATIC_SRV6_INFO_KEY_XPATH, "frr-staticd:staticd",
- "staticd", VRF_DEFAULT_NAME);
+ snprintf(xpath, sizeof(xpath), FRR_STATIC_SRV6_SID_KEY_XPATH, "frr-staticd:staticd",
+ "staticd", VRF_DEFAULT_NAME, sid_str);
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
@@ -1439,9 +1379,8 @@ static int srv6_seg_iter_cb(const struct lyd_node *dnode, void *arg)
}
static void nexthop_cli_show(struct vty *vty, const struct lyd_node *route,
- const struct lyd_node *src,
- const struct lyd_node *path,
- const struct lyd_node *nexthop, bool show_defaults)
+ const struct lyd_node *path, const struct lyd_node *nexthop,
+ bool show_defaults)
{
const char *vrf;
const char *afi_safi;
@@ -1455,6 +1394,7 @@ static void nexthop_cli_show(struct vty *vty, const struct lyd_node *route,
struct srv6_seg_iter seg_iter;
const char *nexthop_vrf;
uint32_t table_id;
+ struct prefix src_prefix;
bool onlink;
vrf = yang_dnode_get_string(route, "../../vrf");
@@ -1476,9 +1416,9 @@ static void nexthop_cli_show(struct vty *vty, const struct lyd_node *route,
vty_out(vty, " %s", yang_dnode_get_string(route, "prefix"));
- if (src)
- vty_out(vty, " from %s",
- yang_dnode_get_string(src, "src-prefix"));
+ yang_dnode_get_prefix(&src_prefix, route, "src-prefix");
+ if (src_prefix.prefixlen)
+ vty_out(vty, " from %pFX", &src_prefix);
nh_type = yang_dnode_get_enum(nexthop, "nh-type");
switch (nh_type) {
@@ -1582,18 +1522,7 @@ static void static_nexthop_cli_show(struct vty *vty,
const struct lyd_node *route =
yang_dnode_get_parent(path, "route-list");
- nexthop_cli_show(vty, route, NULL, path, dnode, show_defaults);
-}
-
-static void static_src_nexthop_cli_show(struct vty *vty,
- const struct lyd_node *dnode,
- bool show_defaults)
-{
- const struct lyd_node *path = yang_dnode_get_parent(dnode, "path-list");
- const struct lyd_node *src = yang_dnode_get_parent(path, "src-list");
- const struct lyd_node *route = yang_dnode_get_parent(src, "route-list");
-
- nexthop_cli_show(vty, route, src, path, dnode, show_defaults);
+ nexthop_cli_show(vty, route, path, dnode, show_defaults);
}
static int static_nexthop_cli_cmp(const struct lyd_node *dnode1,
@@ -1658,6 +1587,8 @@ static int static_route_list_cli_cmp(const struct lyd_node *dnode1,
afi_t afi1, afi2;
safi_t safi1, safi2;
struct prefix prefix1, prefix2;
+ struct prefix src_prefix1, src_prefix2;
+ int rv;
afi_safi1 = yang_dnode_get_string(dnode1, "afi-safi");
yang_afi_safi_identity2value(afi_safi1, &afi1, &safi1);
@@ -1673,19 +1604,13 @@ static int static_route_list_cli_cmp(const struct lyd_node *dnode1,
yang_dnode_get_prefix(&prefix1, dnode1, "prefix");
yang_dnode_get_prefix(&prefix2, dnode2, "prefix");
+ rv = prefix_cmp(&prefix1, &prefix2);
+ if (rv)
+ return rv;
- return prefix_cmp(&prefix1, &prefix2);
-}
-
-static int static_src_list_cli_cmp(const struct lyd_node *dnode1,
- const struct lyd_node *dnode2)
-{
- struct prefix prefix1, prefix2;
-
- yang_dnode_get_prefix(&prefix1, dnode1, "src-prefix");
- yang_dnode_get_prefix(&prefix2, dnode2, "src-prefix");
-
- return prefix_cmp(&prefix1, &prefix2);
+ yang_dnode_get_prefix(&src_prefix1, dnode1, "src-prefix");
+ yang_dnode_get_prefix(&src_prefix2, dnode2, "src-prefix");
+ return prefix_cmp(&src_prefix1, &src_prefix2);
}
static int static_path_list_cli_cmp(const struct lyd_node *dnode1,
@@ -1831,25 +1756,6 @@ const struct frr_yang_module_info frr_staticd_cli_info = {
}
},
{
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list",
- .cbs = {
- .cli_cmp = static_src_list_cli_cmp,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list",
- .cbs = {
- .cli_cmp = static_path_list_cli_cmp,
- }
- },
- {
- .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop",
- .cbs = {
- .cli_show = static_src_nexthop_cli_show,
- .cli_cmp = static_nexthop_cli_cmp,
- }
- },
- {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/segment-routing",
.cbs = {
.cli_show = static_segment_routing_cli_show,
diff --git a/staticd/static_zebra.c b/staticd/static_zebra.c
index e87eaed008..6da2dfec90 100644
--- a/staticd/static_zebra.c
+++ b/staticd/static_zebra.c
@@ -132,35 +132,37 @@ static int static_ifp_down(struct interface *ifp)
static int route_notify_owner(ZAPI_CALLBACK_ARGS)
{
- struct prefix p;
+ struct prefix p, src_p, *src_pp;
enum zapi_route_notify_owner note;
uint32_t table_id;
safi_t safi;
- if (!zapi_route_notify_decode(zclient->ibuf, &p, &table_id, &note, NULL,
- &safi))
+ if (!zapi_route_notify_decode_srcdest(zclient->ibuf, &p, &src_p, &table_id, &note, NULL,
+ &safi))
return -1;
+ src_pp = src_p.prefixlen ? &src_p : NULL;
+
switch (note) {
case ZAPI_ROUTE_FAIL_INSTALL:
- static_nht_mark_state(&p, safi, vrf_id, STATIC_NOT_INSTALLED);
+ static_nht_mark_state(&p, src_pp, safi, vrf_id, STATIC_NOT_INSTALLED);
zlog_warn("%s: Route %pFX failed to install for table: %u",
__func__, &p, table_id);
break;
case ZAPI_ROUTE_BETTER_ADMIN_WON:
- static_nht_mark_state(&p, safi, vrf_id, STATIC_NOT_INSTALLED);
+ static_nht_mark_state(&p, src_pp, safi, vrf_id, STATIC_NOT_INSTALLED);
zlog_warn(
"%s: Route %pFX over-ridden by better route for table: %u",
__func__, &p, table_id);
break;
case ZAPI_ROUTE_INSTALLED:
- static_nht_mark_state(&p, safi, vrf_id, STATIC_INSTALLED);
+ static_nht_mark_state(&p, src_pp, safi, vrf_id, STATIC_INSTALLED);
break;
case ZAPI_ROUTE_REMOVED:
- static_nht_mark_state(&p, safi, vrf_id, STATIC_NOT_INSTALLED);
+ static_nht_mark_state(&p, src_pp, safi, vrf_id, STATIC_NOT_INSTALLED);
break;
case ZAPI_ROUTE_REMOVE_FAIL:
- static_nht_mark_state(&p, safi, vrf_id, STATIC_INSTALLED);
+ static_nht_mark_state(&p, src_pp, safi, vrf_id, STATIC_INSTALLED);
zlog_warn("%s: Route %pFX failure to remove for table: %u",
__func__, &p, table_id);
break;
@@ -226,8 +228,8 @@ static void static_zebra_nexthop_update(struct vrf *vrf, struct prefix *matched,
nhtd->nh_num = nhr->nexthop_num;
static_nht_reset_start(matched, afi, nhr->safi, nhtd->nh_vrf_id);
- static_nht_update(NULL, matched, nhr->nexthop_num, afi,
- nhr->safi, nhtd->nh_vrf_id);
+ static_nht_update(NULL, NULL, matched, nhr->nexthop_num, afi, nhr->safi,
+ nhtd->nh_vrf_id);
} else
zlog_err("No nhtd?");
}
@@ -312,10 +314,13 @@ void static_zebra_nht_register(struct static_nexthop *nh, bool reg)
{
struct static_path *pn = nh->pn;
struct route_node *rn = pn->rn;
+ const struct prefix *p, *src_p;
struct static_route_info *si = static_route_info_from_rnode(rn);
struct static_nht_data *nhtd, lookup = {};
uint32_t cmd;
+ srcdest_rnode_prefixes(rn, &p, &src_p);
+
if (!static_zebra_nht_get_prefix(nh, &lookup.nh))
return;
lookup.nh_vrf_id = nh->nh_vrf_id;
@@ -351,8 +356,8 @@ void static_zebra_nht_register(struct static_nexthop *nh, bool reg)
if (nh->state == STATIC_NOT_INSTALLED ||
nh->state == STATIC_SENT_TO_ZEBRA)
nh->state = STATIC_START;
- static_nht_update(&rn->p, &nhtd->nh, nhtd->nh_num, afi,
- si->safi, nh->nh_vrf_id);
+ static_nht_update(p, src_p, &nhtd->nh, nhtd->nh_num, afi, si->safi,
+ nh->nh_vrf_id);
return;
}
@@ -1164,6 +1169,19 @@ static int static_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS)
DEBUGD(&static_dbg_srv6, "%s: SRv6 SID %pI6 %s: RELEASED", __func__, &sid_addr,
srv6_sid_ctx2str(buf, sizeof(buf), &ctx));
+ for (ALL_LIST_ELEMENTS_RO(srv6_sids, node, sid)) {
+ if (IPV6_ADDR_SAME(&sid->addr.prefix, &sid_addr)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found || !sid) {
+ zlog_err("SRv6 SID %pI6 %s: not found", &sid_addr,
+ srv6_sid_ctx2str(buf, sizeof(buf), &ctx));
+ return 0;
+ }
+
UNSET_FLAG(sid->flags, STATIC_FLAG_SRV6_SID_VALID);
break;
diff --git a/tests/topotests/bgp_srv6_sid_reachability/test_bgp_srv6_sid_reachability.py b/tests/topotests/bgp_srv6_sid_reachability/test_bgp_srv6_sid_reachability.py
index cf590ad01d..5c0b909517 100755
--- a/tests/topotests/bgp_srv6_sid_reachability/test_bgp_srv6_sid_reachability.py
+++ b/tests/topotests/bgp_srv6_sid_reachability/test_bgp_srv6_sid_reachability.py
@@ -159,6 +159,36 @@ def test_sid_reachable_again_bgp_update():
check_ping("c11", "192.168.2.1", True, 10, 1)
+def test_sid_unreachable_no_router():
+ get_topogen().gears["r2"].vtysh_cmd(
+ """
+ configure terminal
+ no router bgp 65002 vrf vrf10
+ """
+ )
+ check_ping("c11", "192.168.2.1", False, 10, 1)
+
+
+def test_sid_reachable_again_no_router():
+ get_topogen().gears["r2"].vtysh_cmd(
+ """
+ configure terminal
+ router bgp 65002 vrf vrf10
+ bgp router-id 192.0.2.2
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ sid vpn export 1
+ rd vpn export 65002:10
+ rt vpn both 0:10
+ import vpn
+ export vpn
+ exit-address-family
+ """
+ )
+ check_ping("c11", "192.168.2.1", True, 10, 1)
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_table_direct_topo1/__init__.py b/tests/topotests/bgp_table_direct_topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_table_direct_topo1/__init__.py
diff --git a/tests/topotests/bgp_table_direct_topo1/r1/frr.conf b/tests/topotests/bgp_table_direct_topo1/r1/frr.conf
new file mode 100644
index 0000000000..c45e3456a4
--- /dev/null
+++ b/tests/topotests/bgp_table_direct_topo1/r1/frr.conf
@@ -0,0 +1,31 @@
+log commands
+!
+debug bgp zebra
+debug zebra events
+!
+ip route 10.254.254.1/32 lo table 2000
+ip route 10.254.254.2/32 lo table 2000
+ip route 10.254.254.3/32 lo table 2000
+!
+interface r1-eth0
+ ip address 192.168.10.1/24
+!
+interface r1-eth1 vrf blue
+ ip address 192.168.20.1/24
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.10.2 remote-as external
+ address-family ipv4 unicast
+ redistribute table-direct 2000
+ exit-address-family
+!
+router bgp 65001 vrf blue
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.20.2 remote-as external
+ address-family ipv4 unicast
+ redistribute table-direct 2000
+ exit-address-family
+! \ No newline at end of file
diff --git a/tests/topotests/bgp_table_direct_topo1/r2/frr.conf b/tests/topotests/bgp_table_direct_topo1/r2/frr.conf
new file mode 100644
index 0000000000..04787be0b3
--- /dev/null
+++ b/tests/topotests/bgp_table_direct_topo1/r2/frr.conf
@@ -0,0 +1,10 @@
+log commands
+!
+interface r2-eth0
+ ip address 192.168.10.2/24
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.10.1 remote-as external
+! \ No newline at end of file
diff --git a/tests/topotests/bgp_table_direct_topo1/r3/frr.conf b/tests/topotests/bgp_table_direct_topo1/r3/frr.conf
new file mode 100644
index 0000000000..2530b28bfd
--- /dev/null
+++ b/tests/topotests/bgp_table_direct_topo1/r3/frr.conf
@@ -0,0 +1,10 @@
+log commands
+!
+interface r3-eth0
+ ip address 192.168.20.2/24
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.20.1 remote-as external
+! \ No newline at end of file
diff --git a/tests/topotests/bgp_table_direct_topo1/test_bgp_table_direct_topo1.py b/tests/topotests/bgp_table_direct_topo1/test_bgp_table_direct_topo1.py
new file mode 100644
index 0000000000..70257be3e7
--- /dev/null
+++ b/tests/topotests/bgp_table_direct_topo1/test_bgp_table_direct_topo1.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_bgp_table_direct_topo1.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2025 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+test_bgp_table_direct_topo1.py: Test BGP redistribution of table-direct routes.
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import re
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+
+# Required to instantiate the topology builder class.
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+from lib.pim import McastTesterHelper
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
+
+app_helper = McastTesterHelper()
+
+
+def build_topo(tgen):
+ """
+ +----+ +----+
+ | r1 | <-> | r2 |
+ +----+ +----+
+ |
+ | +----+
+ --------| r3 |
+ +----+
+ """
+
+ # Create 3 routers
+ for routern in range(1, 4):
+ tgen.add_router(f"r{routern}")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for _, router in router_list.items():
+ router.load_frr_config(os.path.join(CWD, f"{router.name}/frr.conf"))
+
+ tgen.gears["r1"].run("ip link add blue type vrf table 10")
+ tgen.gears["r1"].run("ip link set blue up")
+ tgen.gears["r1"].run("ip link set r1-eth1 master blue")
+
+ # Initialize all routers.
+ tgen.start_router()
+
+ app_helper.init(tgen)
+
+
+def teardown_module():
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ app_helper.cleanup()
+ tgen.stop_topology()
+
+
+def expect_bgp_route(router, iptype, route, missing=False):
+ "Wait until route is present on RIB for protocol."
+ if missing:
+        logger.info("waiting for route {} to go missing in {}".format(route, router))
+ else:
+ logger.info("waiting route {} in {}".format(route, router))
+
+ tgen = get_topogen()
+ expected_output = {route: [{"protocol": "bgp"}]}
+ wait_time = 130
+ if missing:
+ expected_output = {route: None}
+ wait_time = 5
+
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[router],
+ "show {} route json".format(iptype),
+ expected_output
+ )
+
+ _, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
+ assertmsg = f'"{router}" convergence failure'
+ assert result is None, assertmsg
+
+
+def test_bgp_convergence():
+ "Wait for BGP protocol convergence"
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("waiting for protocols to converge")
+
+ # Wait for R2
+ expect_bgp_route("r2", "ip", "10.254.254.1/32")
+ expect_bgp_route("r2", "ip", "10.254.254.2/32")
+ expect_bgp_route("r2", "ip", "10.254.254.3/32")
+
+ # Wait for R3
+ expect_bgp_route("r3", "ip", "10.254.254.1/32")
+ expect_bgp_route("r3", "ip", "10.254.254.2/32")
+ expect_bgp_route("r3", "ip", "10.254.254.3/32")
+
+
+def test_route_change_convergence():
+ "Change routes in table 2000 to test zebra redistribution."
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["r1"].vtysh_cmd("""
+ configure terminal
+ no ip route 10.254.254.2/32 lo table 2000
+ ip route 10.254.254.10/32 lo table 2000
+ """)
+
+ # Check R2
+ expect_bgp_route("r2", "ip", "10.254.254.2/32", missing=True)
+ expect_bgp_route("r2", "ip", "10.254.254.10/32")
+
+ # Check R3
+ expect_bgp_route("r3", "ip", "10.254.254.2/32", missing=True)
+ expect_bgp_route("r3", "ip", "10.254.254.10/32")
+
+
+def test_configuration_removal_convergence():
+ "Remove table direct configuration and check if routes went missing."
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["r1"].vtysh_cmd("""
+ configure terminal
+ router bgp 65001
+ address-family ipv4 unicast
+ no redistribute table-direct 2000
+ exit-address-family
+ exit
+
+ router bgp 65001 vrf blue
+ address-family ipv4 unicast
+ no redistribute table-direct 2000
+ exit-address-family
+ exit
+ """)
+
+ # Check R2
+ expect_bgp_route("r2", "ip", "10.254.254.1/32", missing=True)
+ expect_bgp_route("r2", "ip", "10.254.254.3/32", missing=True)
+ expect_bgp_route("r2", "ip", "10.254.254.10/32", missing=True)
+
+ # Check R3
+ expect_bgp_route("r3", "ip", "10.254.254.1/32", missing=True)
+ expect_bgp_route("r3", "ip", "10.254.254.3/32", missing=True)
+ expect_bgp_route("r3", "ip", "10.254.254.10/32", missing=True)
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_table_map/r1/frr.conf b/tests/topotests/bgp_table_map/r1/frr.conf
new file mode 100644
index 0000000000..f74440c384
--- /dev/null
+++ b/tests/topotests/bgp_table_map/r1/frr.conf
@@ -0,0 +1,22 @@
+!
+int r1-eth0
+ ip address 10.255.0.1/24
+!
+access-list AccList seq 5 permit 10.0.0.1/32
+!
+route-map TableMap permit 10
+ match ip address AccList
+exit
+!
+router bgp 65001
+ bgp router-id 10.255.0.1
+ no bgp ebgp-requires-policy
+ neighbor 10.255.0.2 remote-as external
+ neighbor 10.255.0.2 timers 1 3
+ neighbor 10.255.0.2 timers connect 1
+ !
+ address-family ipv4 unicast
+ table-map TableMap
+ exit-address-family
+exit
+!
diff --git a/tests/topotests/bgp_table_map/r2/frr.conf b/tests/topotests/bgp_table_map/r2/frr.conf
new file mode 100644
index 0000000000..4523fe49ea
--- /dev/null
+++ b/tests/topotests/bgp_table_map/r2/frr.conf
@@ -0,0 +1,18 @@
+!
+int r2-eth0
+ ip address 10.255.0.2/24
+!
+router bgp 65002
+ bgp router-id 10.255.0.2
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 10.255.0.1 remote-as external
+ neighbor 10.255.0.1 timers 1 3
+ neighbor 10.255.0.1 timers connect 1
+ !
+ address-family ipv4 unicast
+ network 10.0.0.1/32
+ network 10.0.0.2/32
+ exit-address-family
+exit
+!
diff --git a/tests/topotests/bgp_table_map/test_bgp_table_map.py b/tests/topotests/bgp_table_map/test_bgp_table_map.py
new file mode 100644
index 0000000000..b10680f741
--- /dev/null
+++ b/tests/topotests/bgp_table_map/test_bgp_table_map.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+import functools, json, os, pytest, re, sys
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+from lib import topotest
+from lib.topogen import Topogen, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def setup_module(mod):
+ topodef = {"s1": ("r1", "r2")}
+ tgen = Topogen(topodef, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for _, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(
+ os.path.join(CWD, "{}/frr.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_table_map():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ def _bgp_converge():
+ output = json.loads(
+ r1.vtysh_cmd( "show bgp ipv4 unicast summary json")
+ )
+ expected = {
+ "peers": {
+ "10.255.0.2": {
+ "remoteAs": 65002,
+ "state": "Established",
+ "peerState": "OK",
+ },
+ },
+ "totalPeers": 1,
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(
+ _bgp_converge,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Can't converge initial state"
+
+ def _bgp_with_table_map():
+ output = json.loads(r1.vtysh_cmd("show ip fib json"))
+ expected = {
+ "10.0.0.1/32": [],
+ "10.0.0.2/32": None,
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(
+ _bgp_with_table_map,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Should contain only one of two shared networks"
+
+ #
+ # Unset table-map
+ #
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ router bgp 65001
+ address-family ipv4 unicast
+ no table-map TableMap
+ """
+ )
+
+ def _bgp_without_table_map():
+ output = json.loads(r1.vtysh_cmd("show ip fib json"))
+ expected = {
+ "10.0.0.1/32": [],
+ "10.0.0.2/32": [],
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(
+ _bgp_without_table_map,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Shouldn't contain both shared routes"
+
+ #
+ # Reset table-map
+ #
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ router bgp 65001
+ address-family ipv4 unicast
+ table-map TableMap
+ """
+ )
+
+ test_func = functools.partial(
+ _bgp_with_table_map,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Should contain only one of two shared networks"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/isis_srv6_topo1/test_isis_srv6_topo1.py b/tests/topotests/isis_srv6_topo1/test_isis_srv6_topo1.py
index 9c1a23f54f..d17b4702f7 100644
--- a/tests/topotests/isis_srv6_topo1/test_isis_srv6_topo1.py
+++ b/tests/topotests/isis_srv6_topo1/test_isis_srv6_topo1.py
@@ -245,7 +245,7 @@ def check_ping6(name, dest_addr, expect_connected):
if match not in output:
return "ping fail"
- match = "{} packet loss".format("0%" if expect_connected else "100%")
+ match = "{} packet loss".format(", 0%" if expect_connected else ", 100%")
logger.info("[+] check {} {} {}".format(name, dest_addr, match))
tgen = get_topogen()
func = functools.partial(_check, name, dest_addr, match)
@@ -333,7 +333,7 @@ def test_ping_step1():
# Setup encap route on rt1, decap route on rt2
tgen.gears["rt1"].vtysh_cmd(
- "sharp install seg6-routes fc00:0:9::1 nexthop-seg6 2001:db8:1::2 encap fc00:0:1:2:6:f00d:: 1"
+ "sharp install seg6-routes fc00:0:9::1 nexthop-seg6 2001:db8:1::2 encap fc00:0:2:6:f00d:: 1"
)
tgen.gears["rt6"].vtysh_cmd(
"sharp install seg6local-routes fc00:0:f00d:: nexthop-seg6local eth-dst End_DT6 254 1"
@@ -443,7 +443,8 @@ def test_ping_step2():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_ping6("rt1", "fc00:0:9::1", False)
+ # ping should pass because route to fc00:0:2:6:f00d:: is still valid
+ check_ping6("rt1", "fc00:0:9::1", True)
#
@@ -643,7 +644,8 @@ def test_ping_step4():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_ping6("rt1", "fc00:0:9::1", False)
+ # ping should pass because route to fc00:0:2:6:f00d:: is still valid
+ check_ping6("rt1", "fc00:0:9::1", True)
#
@@ -838,7 +840,8 @@ def test_ping_step6():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_ping6("rt1", "fc00:0:9::1", False)
+ # ping should pass because route to fc00:0:2:6:f00d:: is still valid
+ check_ping6("rt1", "fc00:0:9::1", True)
#
@@ -1033,7 +1036,8 @@ def test_ping_step8():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_ping6("rt1", "fc00:0:9::1", False)
+ # ping should pass because route to fc00:0:2:6:f00d:: is still valid
+ check_ping6("rt1", "fc00:0:9::1", True)
#
diff --git a/tests/topotests/mgmt_tests/test_yang_mgmt.py b/tests/topotests/mgmt_tests/test_yang_mgmt.py
index 52f6ba4db7..7b74eab6b7 100644
--- a/tests/topotests/mgmt_tests/test_yang_mgmt.py
+++ b/tests/topotests/mgmt_tests/test_yang_mgmt.py
@@ -181,7 +181,7 @@ def test_mgmt_commit_check(request):
raw_config = {
"r1": {
"raw_config": [
- "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
"mgmt commit check",
]
}
@@ -194,7 +194,7 @@ def test_mgmt_commit_check(request):
raw_config = {
"r1": {
"raw_config": [
- "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
"mgmt commit check",
]
}
@@ -245,7 +245,7 @@ def test_mgmt_commit_apply(request):
raw_config = {
"r1": {
"raw_config": [
- "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
"mgmt commit apply",
]
}
@@ -258,7 +258,7 @@ def test_mgmt_commit_apply(request):
raw_config = {
"r1": {
"raw_config": [
- "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
"mgmt commit apply",
]
}
@@ -298,7 +298,7 @@ def test_mgmt_commit_abort(request):
raw_config = {
"r1": {
"raw_config": [
- "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.3/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.3/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
"mgmt commit abort",
]
}
@@ -350,7 +350,7 @@ def test_mgmt_delete_config(request):
raw_config = {
"r1": {
"raw_config": [
- "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
"mgmt commit apply",
]
}
@@ -381,7 +381,7 @@ def test_mgmt_delete_config(request):
raw_config = {
"r1": {
"raw_config": [
- "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][afi-safi='frr-routing:ipv4-unicast']",
+ "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']",
"mgmt commit apply",
]
}
@@ -657,7 +657,7 @@ def test_mgmt_chaos_stop_start_frr(request):
raw_config = {
"r1": {
"raw_config": [
- "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
"mgmt commit apply",
]
}
@@ -689,7 +689,7 @@ def test_mgmt_chaos_stop_start_frr(request):
raw_config = {
"r1": {
"raw_config": [
- "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']",
+ "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']",
"mgmt commit apply",
]
}
@@ -733,7 +733,7 @@ def test_mgmt_chaos_kill_daemon(request):
raw_config = {
"r1": {
"raw_config": [
- "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][src-prefix='::/0'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
"mgmt commit apply",
]
}
diff --git a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py
index 8d91826022..a32b82c7f4 100755
--- a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py
+++ b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py
@@ -638,12 +638,6 @@ def pre_config_for_source_dr_tests(
"interfaceName": "r5-r4-eth1",
"weight": 1,
},
- {
- "ip": "10.0.3.1",
- "afi": "ipv4",
- "interfaceName": "r5-r4-eth1",
- "weight": 1,
- },
],
}
]
diff --git a/tests/topotests/ospf_prune_next_hop/r1/frr.conf b/tests/topotests/ospf_prune_next_hop/r1/frr.conf
new file mode 100644
index 0000000000..130872e8d0
--- /dev/null
+++ b/tests/topotests/ospf_prune_next_hop/r1/frr.conf
@@ -0,0 +1,23 @@
+!
+hostname r1
+ip forwarding
+!
+interface r1-eth0
+ ip address 10.1.1.1/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+interface r1-eth1
+ ip address 10.1.2.1/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+!
+!
+router ospf
+ ospf router-id 1.1.1.1
+ distance 20
+ network 10.1.1.0/24 area 0
+ network 10.1.2.0/24 area 0
diff --git a/tests/topotests/ospf_prune_next_hop/r2/frr.conf b/tests/topotests/ospf_prune_next_hop/r2/frr.conf
new file mode 100644
index 0000000000..4268aea857
--- /dev/null
+++ b/tests/topotests/ospf_prune_next_hop/r2/frr.conf
@@ -0,0 +1,23 @@
+!
+hostname r2
+ip forwarding
+!
+interface r2-eth0
+ ip address 10.1.1.2/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+interface r2-eth1
+ ip address 10.1.2.2/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+!
+!
+router ospf
+ ospf router-id 2.2.2.2
+ distance 20
+ network 10.1.1.0/24 area 0
+ network 10.1.2.0/24 area 0
diff --git a/tests/topotests/ospf_prune_next_hop/r3/frr.conf b/tests/topotests/ospf_prune_next_hop/r3/frr.conf
new file mode 100644
index 0000000000..21d6506d7c
--- /dev/null
+++ b/tests/topotests/ospf_prune_next_hop/r3/frr.conf
@@ -0,0 +1,35 @@
+!
+hostname r3
+ip forwarding
+!
+interface r3-eth0
+ ip address 20.1.3.3/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+interface r3-eth1
+ ip address 10.1.3.3/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+interface r3-eth2
+ ip address 10.1.2.3/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+!
+!
+router ospf
+ ospf router-id 3.3.3.3
+ distance 20
+ network 10.1.2.0/24 area 0
+ network 10.1.3.0/24 area 0
+ network 20.1.3.0/24 area 1
+ area 1 range 20.1.0.0/16
+ redistribute static
+!
+!
+ip route 100.100.100.100/32 Null0
diff --git a/tests/topotests/ospf_prune_next_hop/r4/frr.conf b/tests/topotests/ospf_prune_next_hop/r4/frr.conf
new file mode 100644
index 0000000000..e66e93e20c
--- /dev/null
+++ b/tests/topotests/ospf_prune_next_hop/r4/frr.conf
@@ -0,0 +1,34 @@
+!
+hostname r4
+ip forwarding
+!
+interface r4-eth0
+ ip address 20.1.4.4/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+interface r4-eth1
+ ip address 10.1.3.4/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+interface r4-eth2
+ ip address 10.1.2.4/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+!
+router ospf
+ ospf router-id 4.4.4.4
+ distance 20
+ network 10.1.2.0/24 area 0
+ network 10.1.3.0/24 area 0
+ network 20.1.4.0/24 area 1
+ area 1 range 20.1.0.0/16
+ redistribute static
+!
+!
+ip route 100.100.100.100/32 Null0
diff --git a/tests/topotests/ospf_prune_next_hop/r5/frr.conf b/tests/topotests/ospf_prune_next_hop/r5/frr.conf
new file mode 100644
index 0000000000..2d1dad9925
--- /dev/null
+++ b/tests/topotests/ospf_prune_next_hop/r5/frr.conf
@@ -0,0 +1,34 @@
+!
+hostname r5
+ip forwarding
+!
+interface r5-eth0
+ ip address 20.1.5.5/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+interface r5-eth1
+ ip address 10.1.3.5/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+interface r5-eth2
+ ip address 10.1.2.5/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+!
+router ospf
+ ospf router-id 5.5.5.5
+ distance 20
+ network 10.1.2.0/24 area 0
+ network 10.1.3.0/24 area 0
+ network 20.1.5.0/24 area 1
+ area 1 range 20.1.0.0/16
+ redistribute static
+!
+!
+ip route 100.100.100.100/32 Null0
diff --git a/tests/topotests/ospf_prune_next_hop/r6/frr.conf b/tests/topotests/ospf_prune_next_hop/r6/frr.conf
new file mode 100644
index 0000000000..f343ee7c35
--- /dev/null
+++ b/tests/topotests/ospf_prune_next_hop/r6/frr.conf
@@ -0,0 +1,34 @@
+!
+hostname r6
+ip forwarding
+!
+interface r6-eth0
+ ip address 20.1.6.6/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+interface r6-eth1
+ ip address 10.1.3.6/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+interface r6-eth2
+ ip address 10.1.2.6/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+!
+router ospf
+ ospf router-id 6.6.6.6
+ distance 20
+ network 10.1.2.0/24 area 0
+ network 10.1.3.0/24 area 0
+ network 20.1.6.0/24 area 1
+ area 1 range 20.1.0.0/16
+ redistribute static
+!
+!
+ip route 100.100.100.100/32 Null0
diff --git a/tests/topotests/ospf_prune_next_hop/r7/frr.conf b/tests/topotests/ospf_prune_next_hop/r7/frr.conf
new file mode 100644
index 0000000000..1eeb88c9d0
--- /dev/null
+++ b/tests/topotests/ospf_prune_next_hop/r7/frr.conf
@@ -0,0 +1,14 @@
+!
+hostname r7
+ip forwarding
+!
+interface r7-eth0
+ ip address 10.1.3.7/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+router ospf
+ ospf router-id 7.7.7.7
+ distance 20
+ network 10.1.3.0/24 area 0
diff --git a/tests/topotests/ospf_prune_next_hop/r8/frr.conf b/tests/topotests/ospf_prune_next_hop/r8/frr.conf
new file mode 100644
index 0000000000..d8facbc01f
--- /dev/null
+++ b/tests/topotests/ospf_prune_next_hop/r8/frr.conf
@@ -0,0 +1,14 @@
+!
+hostname r8
+ip forwarding
+!
+interface r8-eth0
+ ip address 10.1.3.8/24
+ ip ospf network broadcast
+ ip ospf hello-interval 1
+ ip ospf dead-interval 10
+!
+router ospf
+ ospf router-id 8.8.8.8
+ distance 20
+ network 10.1.3.0/24 area 0
diff --git a/tests/topotests/ospf_prune_next_hop/test_ospf_prune_next_hop.py b/tests/topotests/ospf_prune_next_hop/test_ospf_prune_next_hop.py
new file mode 100644
index 0000000000..88aa6b2e36
--- /dev/null
+++ b/tests/topotests/ospf_prune_next_hop/test_ospf_prune_next_hop.py
@@ -0,0 +1,343 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_ospf_prune_next_hop
+#
+# Copyright (c) 2025 LabN Consulting
+# Acee Lindem
+#
+
+import os
+import sys
+from functools import partial
+import pytest
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, get_topogen
+from lib.topolog import logger
+
+from lib.common_config import (
+ step,
+)
+
+
+"""
+test_ospf_prune_next_hop.py: Test OSPF pruning of duplicate next hops
+"""
+
+TOPOLOGY = """
+ 20.1.3.0 20.1.4.0 20.1.5.0 20.1.6.0
+ eth0 | .3 eth0 | .4 eth0 | .5 eth0 | .6
+ +--+-+ +--+-+ +--+-+ +--+-+
+10.1.3.0 | R3 | | R4 | | R5 | | R6 |
+ +-----+ | | |eth1 | |eth1 | | 10.1.3.0/24
+ | | | | +---- | |--- + -+---+
+ | +--+-+ +--+-+ +--+-+ +--+-+ |
+ | eth2 | .3 eth2 | .4 eth2 | .5 eth2 | |
+eth0| | | | | | eth0
+ +--+--+ ++-------+ Switch Network +---------++ +--+---+
+ | R7 | | 10.1.2.0/24 | | R8 |
+ +-----+ +------------------------------------+ +------+
+ eth1 | .2
+ +--+--+
+ | R2 |
+ +--+--+
+ eth0 | .2
+ 10.1.1.0/24 |
+ eth0 | .1
+ +--+--+
+ | R1 |
+ +-----+
+
+"""
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# Required to instantiate the topology builder class.
+
+pytestmark = [pytest.mark.ospfd, pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 8 routers
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r3")
+ tgen.add_router("r4")
+ tgen.add_router("r5")
+ tgen.add_router("r6")
+ tgen.add_router("r7")
+ tgen.add_router("r8")
+
+ # Interconnect routers 1 and 2
+ switch = tgen.add_switch("s1-1-2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ # Add standalone networks to router 3
+ switch = tgen.add_switch("s2-3")
+ switch.add_link(tgen.gears["r3"])
+
+ # Add standalone network to router 4
+ switch = tgen.add_switch("s3-4")
+ switch.add_link(tgen.gears["r4"])
+
+ # Add standalone network to router 5
+ switch = tgen.add_switch("s4-5")
+ switch.add_link(tgen.gears["r5"])
+
+ # Add standalone network to router 6
+ switch = tgen.add_switch("s5-6")
+ switch.add_link(tgen.gears["r6"])
+
+ # Interconnect routers 3, 4, 5, and 6
+ switch = tgen.add_switch("s6-3")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r7"])
+ switch = tgen.add_switch("s7-4")
+ switch.add_link(tgen.gears["r4"])
+ switch = tgen.add_switch("s8-5")
+ switch.add_link(tgen.gears["r5"])
+ switch = tgen.add_switch("s9-6")
+ switch.add_link(tgen.gears["r6"])
+ switch.add_link(tgen.gears["r8"])
+
+ # Interconnect routers 2, 3, 4, 5, and 6
+ switch = tgen.add_switch("s10-lan")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
+ switch.add_link(tgen.gears["r5"])
+ switch.add_link(tgen.gears["r6"])
+
+
+def setup_module(mod):
+ logger.info("OSPF Prune Next Hops:\n {}".format(TOPOLOGY))
+
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ # Starting Routers
+ router_list = tgen.routers()
+
+ for rname, router in router_list.items():
+ logger.info("Loading router %s" % rname)
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module():
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_intra_area_route_prune():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip("Skipped because of router(s) failure")
+
+ step("Test OSPF intra-area route 10.1.3.0/24 duplicate nexthops already pruned")
+ # Verify OSPF route 10.1.3.0/24 nexthops pruned already.
+ r1 = tgen.gears["r1"]
+ input_dict = {
+ "10.1.3.0/24": {
+ "routeType": "N",
+ "transit": True,
+ "cost": 30,
+ "area": "0.0.0.0",
+ "nexthops": [
+ {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "8.8.8.8"}
+ ],
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip ospf route detail json", input_dict
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "OSPF Intra-Area route 10.1.3.0/24 mismatch on router r1"
+ assert result is None, assertmsg
+
+ step("Test IP route 10.1.3.0/24 installed")
+ input_dict = {
+ "10.1.3.0/24": [
+ {
+ "prefix": "10.1.3.0/24",
+ "prefixLen": 24,
+ "protocol": "ospf",
+ "vrfName": "default",
+ "distance": 20,
+ "metric": 30,
+ "installed": True,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "ip": "10.1.1.2",
+ "afi": "ipv4",
+ "interfaceName": "r1-eth0",
+ "active": True,
+ "weight": 1,
+ }
+ ],
+ }
+ ]
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route 10.1.3.0/24 json", input_dict
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "IP route 10.1.3.0/24 mismatch on router r1"
+ assert result is None, assertmsg
+
+
+def test_inter_area_route_prune():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip("Skipped because of router(s) failure")
+
+ step("Test OSPF inter-area route 20.1.0.0/16 duplicate nexthops installed")
+ # Verify OSPF route 20.1.0.0/16 duplicate nexthops
+ r1 = tgen.gears["r1"]
+ input_dict = {
+ "20.1.0.0/16": {
+ "routeType": "N IA",
+ "cost": 30,
+ "area": "0.0.0.0",
+ "nexthops": [
+ {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "3.3.3.3"},
+ {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "4.4.4.4"},
+ {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "5.5.5.5"},
+ {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "6.6.6.6"},
+ ],
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip ospf route detail json", input_dict
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "OSPF Inter-Area route 20.1.0.0/16 mismatch on router r1"
+ assert result is None, assertmsg
+
+ step("Test IP route 20.1.0.0/16 installed with pruned next-hops")
+ input_dict = {
+ "20.1.0.0/16": [
+ {
+ "prefix": "20.1.0.0/16",
+ "prefixLen": 16,
+ "protocol": "ospf",
+ "vrfName": "default",
+ "distance": 20,
+ "metric": 30,
+ "installed": True,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "ip": "10.1.1.2",
+ "afi": "ipv4",
+ "interfaceName": "r1-eth0",
+ "active": True,
+ "weight": 1,
+ }
+ ],
+ }
+ ]
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route 20.1.0.0/16 json", input_dict
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "IP route 20.1.0.0/16 mismatch on router r1"
+ assert result is None, assertmsg
+
+
+def test_as_external_route_prune():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip("Skipped because of router(s) failure")
+
+ step("Test OSPF AS external route 100.100.100.100 duplicate nexthops installed")
+ # Verify OSPF AS external route 100.100.100.100/32 duplicate nexthops
+ r1 = tgen.gears["r1"]
+ input_dict = {
+ "100.100.100.100/32": {
+ "routeType": "N E2",
+ "cost": 20,
+ "type2cost": 20,
+ "tag": 0,
+ "nexthops": [
+ {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "3.3.3.3"},
+ {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "4.4.4.4"},
+ {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "5.5.5.5"},
+ {"ip": "10.1.1.2", "via": "r1-eth0", "advertisedRouter": "6.6.6.6"},
+ ],
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip ospf route detail json", input_dict
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "OSPF AS external route 100.100.100.100/32 mismatch on router r1"
+ assert result is None, assertmsg
+
+ step("Test IP route 100.100.100.100/32 installed with pruned next-hops")
+ input_dict = {
+ "100.100.100.100/32": [
+ {
+ "prefix": "100.100.100.100/32",
+ "prefixLen": 32,
+ "protocol": "ospf",
+ "vrfName": "default",
+ "distance": 20,
+ "metric": 20,
+ "installed": True,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "ip": "10.1.1.2",
+ "afi": "ipv4",
+ "interfaceName": "r1-eth0",
+ "active": True,
+ "weight": 1,
+ }
+ ],
+ }
+ ]
+ }
+ test_func = partial(
+ topotest.router_json_cmp,
+ r1,
+ "show ip route 100.100.100.100/32 json",
+ input_dict,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "IP route 100.100.100.100/32 mismatch on router r1"
+ assert result is None, assertmsg
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/sbfd_topo1/__init__.py b/tests/topotests/sbfd_topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/sbfd_topo1/__init__.py
diff --git a/tests/topotests/sbfd_topo1/r1/frr.conf b/tests/topotests/sbfd_topo1/r1/frr.conf
new file mode 100644
index 0000000000..f865b81303
--- /dev/null
+++ b/tests/topotests/sbfd_topo1/r1/frr.conf
@@ -0,0 +1,8 @@
+ip forwarding
+ipv6 forwarding
+!
+
+interface r1-eth0
+ ipv6 address 2001::10/64
+!
+!
diff --git a/tests/topotests/sbfd_topo1/r2/frr.conf b/tests/topotests/sbfd_topo1/r2/frr.conf
new file mode 100644
index 0000000000..c9d166cabb
--- /dev/null
+++ b/tests/topotests/sbfd_topo1/r2/frr.conf
@@ -0,0 +1,8 @@
+ip forwarding
+ipv6 forwarding
+!
+
+interface r2-eth0
+ ipv6 address 2001::20/64
+!
+!
diff --git a/tests/topotests/sbfd_topo1/sbfd_topo1.dot b/tests/topotests/sbfd_topo1/sbfd_topo1.dot
new file mode 100644
index 0000000000..437e8230ce
--- /dev/null
+++ b/tests/topotests/sbfd_topo1/sbfd_topo1.dot
@@ -0,0 +1,45 @@
+## Color coding:
+#########################
+## Main FRR: #f08080 red
+## Switches: #d0e0d0 gray
+## RIP: #19e3d9 Cyan
+## RIPng: #fcb314 dark yellow
+## OSPFv2: #32b835 Green
+## OSPFv3: #19e3d9 Cyan
+## ISIS IPv4 #fcb314 dark yellow
+## ISIS IPv6 #9a81ec purple
+## BGP IPv4 #eee3d3 beige
+## BGP IPv6 #fdff00 yellow
+##### Colors (see http://www.color-hex.com/)
+
+graph template {
+ label="template";
+
+ # Routers
+ r1 [
+ shape=doubleoctagon,
+ label="A\nAS 100\n1.1.1.1",
+ fillcolor="#f08080",
+ style=filled,
+ ];
+ r2 [
+ shape=doubleoctagon
+ label="B\nAS 200\n1.1.1.2",
+ fillcolor="#f08080",
+ style=filled,
+ ];
+
+ # Switches
+ s1 [
+ shape=oval,
+ label="s1\n192.168.0.0/24",
+ fillcolor="#d0e0d0",
+ style=filled,
+ ];
+
+
+ # Connections
+ r1 -- s1 [label="A-eth0"];
+ r2 -- s1 [label="B-eth0"];
+
+}
diff --git a/tests/topotests/sbfd_topo1/test_sbfd_topo1.py b/tests/topotests/sbfd_topo1/test_sbfd_topo1.py
new file mode 100644
index 0000000000..e20902ebf5
--- /dev/null
+++ b/tests/topotests/sbfd_topo1/test_sbfd_topo1.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+
+#
+# test_sbfd_topo1.py
+# basic test cases for sbfd initiator and reflector
+#
+# Copyright (c) 2025 by Alibaba, Inc.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_sbfd_topo1.py: basic test cases for sbfd initiator and reflector.
+"""
+
+import os
+import sys
+import pytest
+import json
+import re
+import time
+import pdb
+from functools import partial
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.common_config import required_linux_kernel_version
+
+"""
+test_sbfd_topo1.py: test simple sbfd with IPv6 encap. RT1 is sbfd Initiator, RT2 is sbfd Reflector
+
+ +----+----+ +----+----+
+ | | | |
+ | RT1 | 1 | RT2 |
+ | +--------+ |
+ | 2001::10| | 2001::20|
+ +----+----+ +----+----+
+
+"""
+pytestmark = [pytest.mark.bfdd, pytest.mark.sbfd]
+
+def show_bfd_check(router, status, type='echo', encap=None):
+ output = router.cmd("vtysh -c 'show bfd peers'")
+ if encap:
+ # check encap data if any
+ pattern1 = re.compile(r'encap-data {}'.format(encap))
+ ret = pattern1.findall(output)
+ if len(ret) <= 0:
+ logger.info("encap-data not match")
+ return False
+
+ # check status
+ pattern2 = re.compile(r'Status: {}'.format(status))
+ ret = pattern2.findall(output)
+ if len(ret) <= 0:
+ logger.info("Status not match")
+ return False
+
+ # check type
+ pattern3 = re.compile(r'Peer Type: {}'.format(type))
+ ret = pattern3.findall(output)
+ if len(ret) <= 0:
+ logger.info("Peer Type not match")
+ return False
+
+ logger.info("all check passed")
+ return True
+
+def build_topo(tgen):
+ "Test topology builder"
+
+ # This function only purpose is to define allocation and relationship
+ # between routers, switches and hosts.
+ #
+ # Example
+ #
+ # Create 2 routers
+ for routern in range(1, 3):
+ tgen.add_router('r{}'.format(routern))
+
+ # Create a switch with just one router connected to it to simulate a
+ # empty network.
+ switch = tgen.add_switch('s1')
+ switch.add_link(tgen.gears['r1'])
+ switch.add_link(tgen.gears['r2'])
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(build_topo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+ tgen.start_topology()
+
+ # This is a sample of configuration loading.
+ router_list = tgen.routers()
+
+ for rname, router in router_list.items():
+ router.load_frr_config(
+ os.path.join(CWD, "{}/frr.conf".format(rname)),
+ [(TopoRouter.RD_ZEBRA, None), (TopoRouter.RD_BFD, None)])
+
+ # After loading the configurations, this function loads configured daemons.
+ tgen.start_router()
+
+ # Verify that we are using the proper version and that the BFD
+ # daemon exists.
+ for router in router_list.values():
+ # Check for Version
+ if router.has_version('<', '5.1'):
+ tgen.set_error('Unsupported FRR version')
+ break
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+# step 1 : config sbfd Initiator and reflector
+def test_sbfd_config_check():
+ "Assert that config sbfd and check sbfd status."
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.5")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # config sbfd
+ r1 = tgen.net['r1']
+ r1.cmd("ping -c 5 2001::20")
+ r1.cmd("vtysh -c 'config t' -c 'bfd' -c 'peer 2001::20 bfd-mode sbfd-init bfd-name 2-44 local-address 2001::10 remote-discr 1234'")
+
+ r2 = tgen.net['r2']
+ r2.cmd("vtysh -c 'config t' -c 'bfd' -c 'sbfd reflector source-address 2001::20 discriminator 1234'")
+
+ check_func = partial(
+ show_bfd_check, r1, 'up', type='sbfd initiator'
+ )
+ success, _ = topotest.run_and_expect(check_func, True, count=15, wait=1)
+ assert success is True, "sbfd not up in 15 seconds"
+
+# step 2: shutdown if and no shutdown if then check sbfd status
+def test_sbfd_updown_interface():
+ "Assert that updown interface then check sbfd status."
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.5")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.net['r1']
+ r2 = tgen.net['r2']
+
+ # shutdown interface
+ r2.cmd("vtysh -c 'config t' -c 'interface r2-eth0' -c 'shutdown'")
+
+ check_func = partial(
+ show_bfd_check, r1, 'down', type='sbfd initiator'
+ )
+ success, _ = topotest.run_and_expect(check_func, True, count=15, wait=1)
+ assert success is True, "sbfd not down in 15 seconds after shut"
+
+ # up interface
+ r2.cmd("vtysh -c 'config t' -c 'interface r2-eth0' -c 'no shutdown'")
+ check_func = partial(
+ show_bfd_check, r1, 'up', type='sbfd initiator'
+ )
+ success, _ = topotest.run_and_expect(check_func, True, count=15, wait=1)
+ assert success is True, "sbfd not up in 15 seconds after no shut"
+
+# step 3: change transmit-interval and check sbfd status according to the interval time
+def test_sbfd_change_transmit_interval():
+ "Assert that sbfd status changes align with transmit-interval."
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.5")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.net['r1']
+ r2 = tgen.net['r2']
+
+ r1.cmd("vtysh -c 'config t' -c 'bfd' -c 'peer 2001::20 bfd-mode sbfd-init bfd-name 2-44 local-address 2001::10 remote-discr 1234' -c 'transmit-interval 3000'")
+ # wait some time for the poll sequence to finish
+ time.sleep(1)
+
+ # shutdown interface
+ r2.cmd("vtysh -c 'config t' -c 'interface r2-eth0' -c 'shutdown'")
+
+ # wait long enough for the detection timeout to expire
+ check_func = partial(
+ show_bfd_check, r1, 'down', type='sbfd initiator'
+ )
+ success, _ = topotest.run_and_expect(check_func, True, count=5, wait=3)
+ assert success is True, "sbfd not down as expected"
+
+ r2.cmd("vtysh -c 'config t' -c 'interface r2-eth0' -c 'no shutdown'")
+ check_func = partial(
+ show_bfd_check, r1, 'up', type='sbfd initiator'
+ )
+ success, _ = topotest.run_and_expect(check_func, True, count=15, wait=1)
+ assert success is True, "sbfd not up in 15 seconds after no shut"
+
+ r1.cmd("vtysh -c 'config t' -c 'bfd' -c 'no peer 2001::20 bfd-mode sbfd-init bfd-name 2-44 local-address 2001::10 remote-discr 1234'")
+ success = show_bfd_check(r1, 'up', type='sbfd initiator')
+ assert success is False, "sbfd session was not deleted as expected"
+
+# Memory leak test template
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip('Memory leak test/report is disabled')
+
+ tgen.report_memory_leaks()
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/static_simple/test_static_simple.py b/tests/topotests/static_simple/test_static_simple.py
index bb3580a1d8..afde58fbf7 100644
--- a/tests/topotests/static_simple/test_static_simple.py
+++ b/tests/topotests/static_simple/test_static_simple.py
@@ -61,6 +61,15 @@ def get_ip_networks(super_prefix, count):
return tuple(network.subnets(count_log2))[0:count]
+def get_src_networks(src_prefix, count, default=""):
+ if src_prefix is not None:
+ for net in get_ip_networks(src_prefix, count):
+ yield " from {}".format(net)
+ else:
+ for i in range(0, count):
+ yield default
+
+
def enable_debug(router):
router.vtysh_cmd("debug northbound callbacks configuration")
@@ -70,7 +79,7 @@ def disable_debug(router):
@retry(retry_timeout=30, initial_wait=0.1)
-def check_kernel(r1, super_prefix, count, add, is_blackhole, vrf, matchvia):
+def check_kernel(r1, super_prefix, src_prefix, count, add, is_blackhole, vrf, matchvia):
network = ipaddress.ip_network(super_prefix)
vrfstr = f" vrf {vrf}" if vrf else ""
if network.version == 6:
@@ -79,26 +88,30 @@ def check_kernel(r1, super_prefix, count, add, is_blackhole, vrf, matchvia):
kernel = r1.run(f"ip -4 route show{vrfstr}")
logger.debug("checking kernel routing table%s:\n%s", vrfstr, kernel)
- for _, net in enumerate(get_ip_networks(super_prefix, count)):
+ for net, srcnet in zip(
+ get_ip_networks(super_prefix, count), get_src_networks(src_prefix, count)
+ ):
+ netfull = str(net) + srcnet
if not add:
- assert str(net) not in kernel
+ assert netfull + " nhid" not in kernel
+ assert netfull + " via" not in kernel
continue
if is_blackhole:
- route = f"blackhole {str(net)} proto (static|196) metric 20"
+ route = f"blackhole {netfull}(?: dev lo)? proto (static|196) metric 20"
else:
route = (
- f"{str(net)}(?: nhid [0-9]+)? {matchvia} "
- "proto (static|196) metric 20"
+ f"{netfull}(?: nhid [0-9]+)? {matchvia} proto (static|196) metric 20"
)
assert re.search(route, kernel), f"Failed to find \n'{route}'\n in \n'{kernel}'"
-def do_config(
+def do_config_inner(
r1,
count,
add=True,
do_ipv6=False,
+ do_sadr=False,
via=None,
vrf=None,
use_cli=False,
@@ -109,11 +122,18 @@ def do_config(
#
# Set the route details
#
-
- if vrf:
- super_prefix = "2002::/48" if do_ipv6 else "20.0.0.0/8"
+ src_prefs = [None, None]
+ if do_ipv6 and do_sadr:
+ # intentionally using overlapping prefix
+ super_prefs = ["2001::/48", "2002::/48"]
+ src_prefs = ["2001:db8:1111::/48", "2001:db8:2222::/48"]
+ elif do_ipv6:
+ super_prefs = ["2001::/48", "2002::/48"]
else:
- super_prefix = "2001::/48" if do_ipv6 else "10.0.0.0/8"
+ super_prefs = ["10.0.0.0/8", "20.0.0.0/8"]
+
+ super_prefix = super_prefs[1 if vrf else 0]
+ src_prefix = src_prefs[1 if vrf else 0]
matchvia = ""
if via == "blackhole":
@@ -144,11 +164,13 @@ def do_config(
if vrf:
f.write("vrf {}\n".format(vrf))
- for _, net in enumerate(get_ip_networks(super_prefix, count)):
+ for net, srcnet in zip(
+ get_ip_networks(super_prefix, count), get_src_networks(src_prefix, count)
+ ):
if add:
- f.write("ip route {} {}\n".format(net, via))
+ f.write("ip route {}{} {}\n".format(net, srcnet, via))
else:
- f.write("no ip route {} {}\n".format(net, via))
+ f.write("no ip route {}{} {}\n".format(net, srcnet, via))
#
# Load config file.
@@ -165,7 +187,9 @@ def do_config(
#
# Verify the results are in the kernel
#
- check_kernel(r1, super_prefix, count, add, via == "blackhole", vrf, matchvia)
+ check_kernel(
+ r1, super_prefix, src_prefix, count, add, via == "blackhole", vrf, matchvia
+ )
optyped = "added" if add else "removed"
logger.debug(
@@ -175,6 +199,12 @@ def do_config(
)
+def do_config(*args, **kwargs):
+ do_config_inner(*args, do_ipv6=False, do_sadr=False, **kwargs)
+ do_config_inner(*args, do_ipv6=True, do_sadr=False, **kwargs)
+ do_config_inner(*args, do_ipv6=True, do_sadr=True, **kwargs)
+
+
def guts(tgen, vrf, use_cli):
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -183,20 +213,20 @@ def guts(tgen, vrf, use_cli):
count = 10
step(f"add {count} via gateway", reset=True)
- do_config(r1, count, True, False, vrf=vrf, use_cli=use_cli)
+ do_config(r1, count, True, vrf=vrf, use_cli=use_cli)
step(f"remove {count} via gateway")
- do_config(r1, count, False, False, vrf=vrf, use_cli=use_cli)
+ do_config(r1, count, False, vrf=vrf, use_cli=use_cli)
via = f"lo-{vrf}" if vrf else "lo"
step("add via loopback")
- do_config(r1, 1, True, False, via=via, vrf=vrf, use_cli=use_cli)
+ do_config(r1, 1, True, via=via, vrf=vrf, use_cli=use_cli)
step("remove via loopback")
- do_config(r1, 1, False, False, via=via, vrf=vrf, use_cli=use_cli)
+ do_config(r1, 1, False, via=via, vrf=vrf, use_cli=use_cli)
step("add via blackhole")
- do_config(r1, 1, True, False, via="blackhole", vrf=vrf, use_cli=use_cli)
+ do_config(r1, 1, True, via="blackhole", vrf=vrf, use_cli=use_cli)
step("remove via blackhole")
- do_config(r1, 1, False, False, via="blackhole", vrf=vrf, use_cli=use_cli)
+ do_config(r1, 1, False, via="blackhole", vrf=vrf, use_cli=use_cli)
def test_static_cli(tgen):
diff --git a/tests/topotests/static_srv6_sids/expected_srv6_sids.json b/tests/topotests/static_srv6_sids/expected_srv6_sids.json
index e1a2a16afe..5799d97988 100644
--- a/tests/topotests/static_srv6_sids/expected_srv6_sids.json
+++ b/tests/topotests/static_srv6_sids/expected_srv6_sids.json
@@ -1,4 +1,39 @@
{
+ "fcbb:bbbb:1::/48": [
+ {
+ "prefix": "fcbb:bbbb:1::/48",
+ "prefixLen": 48,
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 9,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "sr0",
+ "active": true,
+ "weight": 1,
+ "seg6local": {
+ "action": "End"
+ },
+ "seg6localContext": {
+
+ }
+ }
+ ]
+ }
+ ],
"fcbb:bbbb:1:fe10::/64": [
{
"prefix": "fcbb:bbbb:1:fe10::/64",
diff --git a/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_1.json b/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_1.json
new file mode 100644
index 0000000000..e1a2a16afe
--- /dev/null
+++ b/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_1.json
@@ -0,0 +1,107 @@
+{
+ "fcbb:bbbb:1:fe10::/64": [
+ {
+ "prefix": "fcbb:bbbb:1:fe10::/64",
+ "prefixLen": 64,
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 9,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "Vrf10",
+ "active": true,
+ "weight": 1,
+ "seg6local": {
+ "action": "End.DT4"
+ },
+ "seg6localContext": {
+ "table": 10
+ }
+ }
+ ]
+ }
+ ],
+ "fcbb:bbbb:1:fe20::/64": [
+ {
+ "prefix": "fcbb:bbbb:1:fe20::/64",
+ "prefixLen": 64,
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 9,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "Vrf20",
+ "active": true,
+ "weight": 1,
+ "seg6local": {
+ "action": "End.DT6"
+ },
+ "seg6localContext": {
+ "table": 20
+ }
+ }
+ ]
+ }
+ ],
+ "fcbb:bbbb:1:fe30::/64": [
+ {
+ "prefix": "fcbb:bbbb:1:fe30::/64",
+ "prefixLen": 64,
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 9,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "Vrf30",
+ "active": true,
+ "weight": 1,
+ "seg6local": {
+ "action": "End.DT46"
+ },
+ "seg6localContext": {
+ "table": 30
+ }
+ }
+ ]
+ }
+ ]
+} \ No newline at end of file
diff --git a/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_2.json b/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_2.json
new file mode 100644
index 0000000000..b5801d354b
--- /dev/null
+++ b/tests/topotests/static_srv6_sids/expected_srv6_sids_sid_delete_2.json
@@ -0,0 +1,72 @@
+{
+ "fcbb:bbbb:1:fe10::/64": [
+ {
+ "prefix": "fcbb:bbbb:1:fe10::/64",
+ "prefixLen": 64,
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 9,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "Vrf10",
+ "active": true,
+ "weight": 1,
+ "seg6local": {
+ "action": "End.DT4"
+ },
+ "seg6localContext": {
+ "table": 10
+ }
+ }
+ ]
+ }
+ ],
+ "fcbb:bbbb:1:fe30::/64": [
+ {
+ "prefix": "fcbb:bbbb:1:fe30::/64",
+ "prefixLen": 64,
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 9,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "Vrf30",
+ "active": true,
+ "weight": 1,
+ "seg6local": {
+ "action": "End.DT46"
+ },
+ "seg6localContext": {
+ "table": 30
+ }
+ }
+ ]
+ }
+ ]
+} \ No newline at end of file
diff --git a/tests/topotests/static_srv6_sids/r1/frr.conf b/tests/topotests/static_srv6_sids/r1/frr.conf
index 999e35c35b..b4904d9ac2 100644
--- a/tests/topotests/static_srv6_sids/r1/frr.conf
+++ b/tests/topotests/static_srv6_sids/r1/frr.conf
@@ -8,6 +8,7 @@ segment-routing
!
!
static-sids
+ sid fcbb:bbbb:1::/48 locator MAIN behavior uN
sid fcbb:bbbb:1:fe10::/64 locator MAIN behavior uDT4 vrf Vrf10
sid fcbb:bbbb:1:fe20::/64 locator MAIN behavior uDT6 vrf Vrf20
sid fcbb:bbbb:1:fe30::/64 locator MAIN behavior uDT46 vrf Vrf30
diff --git a/tests/topotests/static_srv6_sids/test_static_srv6_sids.py b/tests/topotests/static_srv6_sids/test_static_srv6_sids.py
index 453a30af48..cdcc6fd29e 100755
--- a/tests/topotests/static_srv6_sids/test_static_srv6_sids.py
+++ b/tests/topotests/static_srv6_sids/test_static_srv6_sids.py
@@ -78,6 +78,100 @@ def test_srv6_static_sids():
check_srv6_static_sids(router, "expected_srv6_sids.json")
+def test_srv6_static_sids_sid_delete():
+ """
+ Remove the static SID and verify it gets removed
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ def _check_srv6_static_sids(router, expected_route_file):
+ logger.info("checking zebra srv6 static sids")
+ output = json.loads(router.vtysh_cmd("show ipv6 route static json"))
+ expected = open_json_file("{}/{}".format(CWD, expected_route_file))
+ return topotest.json_cmp(output, expected)
+
+ def check_srv6_static_sids(router, expected_file):
+ func = functools.partial(_check_srv6_static_sids, router, expected_file)
+ _, result = topotest.run_and_expect(func, None, count=15, wait=1)
+ assert result is None, "Failed"
+
+ router.vtysh_cmd(
+ """
+ configure terminal
+ segment-routing
+ srv6
+ static-sids
+ no sid fcbb:bbbb:1::/48
+ """
+ )
+
+ # FOR DEVELOPER:
+ # If you want to stop some specific line and start interactive shell,
+ # please use tgen.mininet_cli() to start it.
+
+ logger.info("Test for srv6 sids configuration")
+ check_srv6_static_sids(router, "expected_srv6_sids_sid_delete_1.json")
+
+ router.vtysh_cmd(
+ """
+ configure terminal
+ segment-routing
+ srv6
+ static-sids
+ no sid fcbb:bbbb:1:fe20::/64 locator MAIN behavior uDT6 vrf Vrf20
+ """
+ )
+
+ # FOR DEVELOPER:
+ # If you want to stop some specific line and start interactive shell,
+ # please use tgen.mininet_cli() to start it.
+
+ logger.info("Test for srv6 sids configuration")
+ check_srv6_static_sids(router, "expected_srv6_sids_sid_delete_2.json")
+
+
+def test_srv6_static_sids_sid_readd():
+ """
+ Re-add the static SID and verify the routing table
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ def _check_srv6_static_sids(router, expected_route_file):
+ logger.info("checking zebra srv6 static sids")
+ output = json.loads(router.vtysh_cmd("show ipv6 route static json"))
+ expected = open_json_file("{}/{}".format(CWD, expected_route_file))
+ return topotest.json_cmp(output, expected)
+
+ def check_srv6_static_sids(router, expected_file):
+ func = functools.partial(_check_srv6_static_sids, router, expected_file)
+ _, result = topotest.run_and_expect(func, None, count=15, wait=1)
+ assert result is None, "Failed"
+
+ router.vtysh_cmd(
+ """
+ configure terminal
+ segment-routing
+ srv6
+ static-sids
+ sid fcbb:bbbb:1::/48 locator MAIN behavior uN
+ sid fcbb:bbbb:1:fe20::/64 locator MAIN behavior uDT6 vrf Vrf20
+ """
+ )
+
+ # FOR DEVELOPER:
+ # If you want to stop some specific line and start interactive shell,
+ # please use tgen.mininet_cli() to start it.
+
+ logger.info("Test for srv6 sids configuration")
+ check_srv6_static_sids(router, "expected_srv6_sids.json")
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/v6_nexthop_group_recursive_resolution/r1/frr.conf b/tests/topotests/v6_nexthop_group_recursive_resolution/r1/frr.conf
new file mode 100644
index 0000000000..f4da11af06
--- /dev/null
+++ b/tests/topotests/v6_nexthop_group_recursive_resolution/r1/frr.conf
@@ -0,0 +1,4 @@
+int r1-eth0
+ ipv6 address fc00::1/64
+
+ipv6 route 1::1/128 fc00::2
diff --git a/tests/topotests/v6_nexthop_group_recursive_resolution/test_v6_nexthop_group_recursive_resolution.py b/tests/topotests/v6_nexthop_group_recursive_resolution/test_v6_nexthop_group_recursive_resolution.py
new file mode 100644
index 0000000000..587a951c85
--- /dev/null
+++ b/tests/topotests/v6_nexthop_group_recursive_resolution/test_v6_nexthop_group_recursive_resolution.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2024 by Nvidia Corporation
+# Donald Sharp
+#
+
+"""
+Check that the v6 nexthop recursive resolution works when it changes
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.staticd]
+
+
+def build_topo(tgen):
+
+ tgen.add_router("r1")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for rname, router in router_list.items():
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)),
+ [(TopoRouter.RD_MGMTD, None),
+ (TopoRouter.RD_ZEBRA, None),
+ (TopoRouter.RD_STATIC, None),
+ (TopoRouter.RD_SHARP, None)])
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_recursive_v6_nexthop_generation():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Testing v6 nexthop resolution")
+
+ #assert False
+ router = tgen.gears["r1"]
+
+ def _v6_converge_1_1_initial():
+ output = json.loads(
+ router.vtysh_cmd("show ipv6 route 1::1 json"))
+
+ expected = {
+ "1::1/128":[
+ {
+ "prefix":"1::1/128",
+ "prefixLen":128,
+ "protocol":"static",
+ "vrfName":"default",
+ "selected":True,
+ "destSelected":True,
+ "distance":1,
+ "metric":0,
+ "installed":True,
+ "table":254,
+ "nexthops":[
+ {
+ "fib":True,
+ "ip":"fc00::2",
+ "afi":"ipv6",
+ "interfaceName":"r1-eth0",
+ "active":True,
+ "weight":1
+ }
+ ]
+ }
+ ]
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_v6_converge_1_1_initial)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to install v6 1::1 route"
+
+ router.vtysh_cmd("sharp install routes 2::2 nexthop 1::1 1")
+ router.vtysh_cmd("conf\nipv6 route 1::1/128 fc00::3\nno ipv6 route 1::1/128 fc00::2")
+
+ def _v6_converge_1_1_post():
+ output = json.loads(
+ router.vtysh_cmd("show ipv6 route 1::1 json"))
+
+ expected = {
+ "1::1/128":[
+ {
+ "prefix":"1::1/128",
+ "prefixLen":128,
+ "protocol":"static",
+ "vrfName":"default",
+ "selected":True,
+ "destSelected":True,
+ "distance":1,
+ "metric":0,
+ "installed":True,
+ "table":254,
+ "nexthops":[
+ {
+ "fib":True,
+ "ip":"fc00::3",
+ "afi":"ipv6",
+ "interfaceName":"r1-eth0",
+ "active":True,
+ "weight":1
+ }
+ ]
+ }
+ ]
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_v6_converge_1_1_post)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to change v6 1::1 route"
+
+ router.vtysh_cmd("sharp install routes 2::2 nexthop 1::1 1")
+
+ def _v6_change_2_2_post():
+ output = json.loads(
+ router.vtysh_cmd("show ipv6 route 2::2 json"))
+
+ expected = {
+ "2::2/128":[
+ {
+ "prefix":"2::2/128",
+ "prefixLen":128,
+ "protocol":"sharp",
+ "vrfName":"default",
+ "selected":True,
+ "destSelected":True,
+ "distance":150,
+ "metric":0,
+ "installed":True,
+ "table":254,
+ "nexthops":[
+ {
+ "fib":True,
+ "ip":"fc00::3",
+ "afi":"ipv6",
+ "interfaceName":"r1-eth0",
+ "active":True,
+ "weight":1
+ }
+ ]
+ }
+ ]
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_v6_change_2_2_post)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see sharpd route correctly"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tools/frr-reload.py b/tools/frr-reload.py
index a138e4e239..f124cae713 100755
--- a/tools/frr-reload.py
+++ b/tools/frr-reload.py
@@ -237,6 +237,14 @@ def get_normalized_interface_vrf(line):
return line
+def get_normalized_ebgp_multihop_line(line):
+ obj = re.search(r"(.*)ebgp-multihop\s+255", line)
+ if obj:
+ line = obj.group(1) + "ebgp-multihop"
+
+ return line
+
+
# This dictionary contains a tree of all commands that we know start a
# new multi-line context. All other commands are treated either as
# commands inside a multi-line context or as single-line contexts. This
@@ -382,6 +390,9 @@ class Config(object):
if ":" in line:
line = get_normalized_mac_ip_line(line)
+ if "ebgp-multihop" in line:
+ line = get_normalized_ebgp_multihop_line(line)
+
# vrf static routes can be added in two ways. The old way is:
#
# "ip route x.x.x.x/x y.y.y.y vrf <vrfname>"
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index a1b16c2b66..0559e89f92 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -2400,6 +2400,79 @@ DEFUNSH(VTYSH_BFDD, bfd_peer_enter, bfd_peer_enter_cmd,
return CMD_SUCCESS;
}
+DEFUNSH(VTYSH_BFDD, sbfd_echo_peer_enter, sbfd_echo_peer_enter_cmd,
+ "peer <A.B.C.D|X:X::X:X> bfd-mode sbfd-echo bfd-name BFDNAME [multihop$multihop] local-address <A.B.C.D|X:X::X:X> [vrf NAME] srv6-source-ipv6 X:X::X:X srv6-encap-data X:X::X:X...",
+ "Configure peer\n"
+ "IPv4 peer address\n"
+ "IPv6 peer address\n"
+ "Specify bfd session mode\n"
+ "Enable sbfd-echo mode\n"
+ "Specify bfd session name\n"
+ "bfd session name\n"
+ "Configure multihop\n"
+ "Configure local\n"
+ "IPv4 local address\n"
+ "IPv6 local address\n"
+ "Configure VRF\n"
+ "Configure VRF name\n"
+ "Configure source ipv6 address for srv6 encap\n"
+ "IPv6 local address\n"
+ "Configure sidlist data for srv6 encap\n"
+ "X:X::X:X IPv6 sid address\n")
+{
+ vty->node = BFD_PEER_NODE;
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_BFDD, sbfd_init_peer_enter, sbfd_init_peer_enter_cmd,
+ "peer <A.B.C.D|X:X::X:X> bfd-mode sbfd-init bfd-name BFDNAME [multihop$multihop] local-address <A.B.C.D|X:X::X:X> [vrf NAME] remote-discr (1-4294967295) srv6-source-ipv6 X:X::X:X srv6-encap-data X:X::X:X...",
+ "Configure peer\n"
+ "IPv4 peer address\n"
+ "IPv6 peer address\n"
+ "Specify bfd session mode\n"
+ "Enable sbfd-init mode\n"
+ "Specify bfd session name\n"
+ "bfd session name\n"
+ "Configure multihop\n"
+ "Configure local\n"
+ "IPv4 local address\n"
+ "IPv6 local address\n"
+ "Configure VRF\n"
+ "Configure VRF name\n"
+ "Configure bfd session remote discriminator\n"
+ "Configure remote discriminator\n"
+ "Configure source ipv6 address for srv6 encap\n"
+ "IPv6 local address\n"
+ "Configure sidlist data for srv6 encap\n"
+ "X:X::X:X IPv6 sid address\n"
+ )
+{
+ vty->node = BFD_PEER_NODE;
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_BFDD, sbfd_init_peer_raw_enter, sbfd_init_peer_raw_enter_cmd,
+ "peer <A.B.C.D|X:X::X:X> bfd-mode sbfd-init bfd-name BFDNAME [multihop$multihop] local-address <A.B.C.D|X:X::X:X> [vrf NAME] remote-discr (1-4294967295)",
+ "Configure peer\n"
+ "IPv4 peer address\n"
+ "IPv6 peer address\n"
+ "Specify bfd session mode\n"
+ "Enable sbfd-init mode\n"
+ "Specify bfd session name\n"
+ "bfd session name\n"
+ "Configure multihop\n"
+ "Configure local\n"
+ "IPv4 local address\n"
+ "IPv6 local address\n"
+ "Configure VRF\n"
+ "Configure VRF name\n"
+ "Configure bfd session remote discriminator\n"
+ "Configure remote discriminator\n")
+{
+ vty->node = BFD_PEER_NODE;
+ return CMD_SUCCESS;
+}
+
DEFUNSH(VTYSH_BFDD, bfd_profile_enter, bfd_profile_enter_cmd,
"profile BFDPROF",
BFD_PROFILE_STR
@@ -5296,6 +5369,9 @@ void vtysh_init_vty(void)
install_element(BFD_NODE, &vtysh_end_all_cmd);
install_element(BFD_NODE, &bfd_peer_enter_cmd);
+ install_element(BFD_NODE, &sbfd_init_peer_enter_cmd);
+ install_element(BFD_NODE, &sbfd_init_peer_raw_enter_cmd);
+ install_element(BFD_NODE, &sbfd_echo_peer_enter_cmd);
install_element(BFD_PEER_NODE, &vtysh_exit_bfdd_cmd);
install_element(BFD_PEER_NODE, &vtysh_quit_bfdd_cmd);
install_element(BFD_PEER_NODE, &vtysh_end_all_cmd);
diff --git a/yang/frr-bfdd.yang b/yang/frr-bfdd.yang
index c5c824f792..ceed6fa5a1 100644
--- a/yang/frr-bfdd.yang
+++ b/yang/frr-bfdd.yang
@@ -503,6 +503,136 @@ module frr-bfdd {
config false;
}
}
+
+ list sbfd-echo {
+ key "source-addr bfd-name vrf";
+ description "support a special echo SBFD session in RFC7880, and enhanced with SRv6 encapsulation";
+
+ leaf source-addr {
+ type inet:ip-address;
+ description "Local IP address";
+ }
+
+ leaf dest-addr {
+ type inet:ip-address;
+ description "IP address of the peer";
+ }
+
+ leaf bfd-name {
+ type string;
+ default "";
+ description "Bfd session name.";
+ }
+
+ leaf vrf {
+ type frr-vrf:vrf-ref;
+ description "Virtual Routing Domain name";
+ }
+
+ leaf profile {
+ type profile-ref;
+ description "Override defaults with profile.";
+ }
+
+ leaf-list srv6-encap-data {
+ type inet:ipv6-address;
+ max-elements 6;
+
+ description "segment list ipv6 addresses for srv6 encapsulation";
+ }
+
+ leaf srv6-source-ipv6 {
+ type inet:ipv6-address;
+ description "source ipv6 address for srv6 encapsulation";
+ }
+
+ leaf bfd-mode {
+ type uint32;
+ description "Bfd session mode.";
+ }
+
+ leaf multi-hop {
+ type boolean;
+ description "Use multi hop session instead of single hop.";
+ }
+
+ uses session-common;
+ uses session-multi-hop;
+ uses session-echo;
+
+ container stats {
+ uses session-states;
+ config false;
+ }
+ }
+
+ list sbfd-init {
+ key "source-addr dest-addr bfd-name vrf";
+ description "Supports the SBFD session described in RFC 7880, optionally with SRv6 encapsulation";
+
+ leaf source-addr {
+ type inet:ip-address;
+ description "Local IP address";
+ }
+
+ leaf dest-addr {
+ type inet:ip-address;
+ description "IP address of the peer";
+ }
+
+ leaf bfd-name {
+ type string;
+ default "";
+ description "Bfd session name.";
+ }
+
+ leaf vrf {
+ type frr-vrf:vrf-ref;
+ description "Virtual Routing Domain name";
+ }
+
+ leaf profile {
+ type profile-ref;
+ description "Override defaults with profile.";
+ }
+
+ leaf-list srv6-encap-data {
+ type inet:ipv6-address;
+ max-elements 6;
+
+ description "segment list ipv6 addresses for srv6 encapsulation";
+ }
+
+ leaf srv6-source-ipv6 {
+ type inet:ipv6-address;
+ description "source ipv6 address for srv6 encapsulation";
+ }
+
+ leaf remote-discr {
+ type uint32;
+ default 0;
+ description
+ "Remote session identifier";
+ }
+
+ leaf bfd-mode {
+ type uint32;
+ description "Bfd session mode.";
+ }
+
+ leaf multi-hop {
+ type boolean;
+ description "Use multi hop session instead of single hop.";
+ }
+
+ uses session-common;
+ uses session-multi-hop;
+
+ container stats {
+ uses session-states;
+ config false;
+ }
+ }
}
}
}
diff --git a/yang/frr-staticd.yang b/yang/frr-staticd.yang
index 904e2058e9..8d0e58c0a5 100644
--- a/yang/frr-staticd.yang
+++ b/yang/frr-staticd.yang
@@ -165,7 +165,7 @@ module frr-staticd {
"Support for a 'staticd' pseudo-protocol instance
consists of a list of routes.";
list route-list {
- key "prefix afi-safi";
+ key "prefix src-prefix afi-safi";
description
"List of staticd IP routes.";
leaf prefix {
@@ -173,6 +173,11 @@ module frr-staticd {
description
"IP prefix.";
}
+ leaf src-prefix {
+ type inet:ipv6-prefix;
+ description
+ "IPv6 source prefix for dst-src routes";
+ }
leaf afi-safi {
type identityref {
base frr-rt:afi-safi-type;
@@ -180,6 +185,12 @@ module frr-staticd {
description
"AFI-SAFI type.";
}
+ /* note dst-src routes are semantically invalid in MRIB */
+ must "afi-safi = 'frr-rt:ipv6-unicast'
+ or afi-safi = 'frr-rt:ipv6-labeled-unicast'
+ or afi-safi = 'frr-rt:l3vpn-ipv6-unicast'
+ or src-prefix = '::/0'
+ ";
uses staticd-prefix-attributes {
augment "path-list/frr-nexthops/nexthop" {
@@ -194,17 +205,6 @@ module frr-staticd {
}
}
}
-
- list src-list {
- key "src-prefix";
- leaf src-prefix {
- type inet:ipv6-prefix;
- description
- "IPv6 source prefix";
- }
-
- uses staticd-prefix-attributes;
- }
}
container segment-routing {
diff --git a/zebra/redistribute.c b/zebra/redistribute.c
index 66dc5b4b5f..9bf7e2cbb5 100644
--- a/zebra/redistribute.c
+++ b/zebra/redistribute.c
@@ -82,8 +82,8 @@ static void zebra_redistribute_default(struct zserv *client, vrf_id_t vrf_id)
RNODE_FOREACH_RE (rn, newre) {
if (CHECK_FLAG(newre->flags, ZEBRA_FLAG_SELECTED))
- zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD,
- client, rn, newre, false);
+ zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, client, rn,
+ newre, NULL);
}
route_unlock_node(rn);
@@ -91,6 +91,24 @@ static void zebra_redistribute_default(struct zserv *client, vrf_id_t vrf_id)
}
/* Redistribute routes. */
+static void redistribute_table_direct(struct zserv *client, int type, const struct route_node *rn,
+ const struct route_entry *re)
+{
+ struct redist_table_direct *table;
+ struct redist_proto *red;
+ struct listnode *node;
+ afi_t afi = family2afi(rn->p.family);
+
+ red = &client->mi_redist[afi][ZEBRA_ROUTE_TABLE_DIRECT];
+
+ for (ALL_LIST_ELEMENTS_RO(red->instances, node, table)) {
+ if (table->table_id != (int)re->table)
+ continue;
+
+ zsend_redistribute_route(type, client, rn, re, &table->vrf_id);
+ }
+}
+
static void zebra_redistribute(struct zserv *client, int type,
unsigned short instance, struct zebra_vrf *zvrf,
int afi)
@@ -102,13 +120,9 @@ static void zebra_redistribute(struct zserv *client, int type,
vrf_id_t vrf_id = zvrf_id(zvrf);
if (type == ZEBRA_ROUTE_TABLE_DIRECT) {
- if (vrf_id == VRF_DEFAULT) {
- table = zebra_router_find_table(zvrf, instance, afi,
- SAFI_UNICAST);
- type = ZEBRA_ROUTE_ALL;
- is_table_direct = true;
- } else
- return;
+ table = zebra_router_find_table(zvrf, instance, afi, SAFI_UNICAST);
+ type = ZEBRA_ROUTE_ALL;
+ is_table_direct = true;
} else
table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id);
@@ -140,15 +154,20 @@ static void zebra_redistribute(struct zserv *client, int type,
if (!zebra_check_addr(&rn->p))
continue;
- zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD,
- client, rn, newre, is_table_direct);
+ if (is_table_direct)
+ redistribute_table_direct(client, ZEBRA_REDISTRIBUTE_ROUTE_ADD, rn,
+ newre);
+ else
+ zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, client, rn,
+ newre, NULL);
}
}
/*
- * Function to return a valid table id value if table-direct is used
- * return 0 otherwise
- * This function can be called only if zebra_redistribute_check returns TRUE
+ * Checks if the route entry can be used as table-direct or not.
+ * `table-direct` routes always belong to `VRF_DEFAULT` and have a table
+ * ID different from the one of the VRF they belong to (example: the main
+ * VRF table is 254, so a `table-direct` route's table ID must be != 254).
*/
static bool zebra_redistribute_is_table_direct(const struct route_entry *re)
{
@@ -177,15 +196,14 @@ static bool zebra_redistribute_check(const struct route_node *rn,
afi = family2afi(rn->p.family);
zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
- if (re->vrf_id == VRF_DEFAULT && zvrf->table_id != re->table) {
+ if (zvrf->table_id != re->table) {
+ /*
+	 * Routes with a table ID different from the VRF's table ID can
+	 * be used as `table-direct` if enabled.
+ */
if (re->table &&
- redist_check_instance(&client->mi_redist
- [afi][ZEBRA_ROUTE_TABLE_DIRECT],
- re->table)) {
- /* table-direct redistribution only for route entries which
- * are on the default vrf, and that have table id different
- * from the default table.
- */
+ redist_table_direct_has_id(&client->mi_redist[afi][ZEBRA_ROUTE_TABLE_DIRECT],
+ re->table)) {
return true;
}
return false;
@@ -227,7 +245,6 @@ void redistribute_update(const struct route_node *rn,
{
struct listnode *node, *nnode;
struct zserv *client;
- bool is_table_direct;
if (IS_ZEBRA_DEBUG_RIB)
zlog_debug(
@@ -242,7 +259,6 @@ void redistribute_update(const struct route_node *rn,
return;
}
-
for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) {
if (zebra_redistribute_check(rn, re, client)) {
if (IS_ZEBRA_DEBUG_RIB) {
@@ -253,15 +269,19 @@ void redistribute_update(const struct route_node *rn,
re->vrf_id, re->table, re->type,
re->distance, re->metric);
}
- is_table_direct = zebra_redistribute_is_table_direct(re);
- zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD,
- client, rn, re,
- is_table_direct);
+ if (zebra_redistribute_is_table_direct(re))
+ redistribute_table_direct(client, ZEBRA_REDISTRIBUTE_ROUTE_ADD, rn,
+ re);
+ else
+ zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, client, rn,
+ re, NULL);
} else if (zebra_redistribute_check(rn, prev_re, client)) {
- is_table_direct = zebra_redistribute_is_table_direct(prev_re);
- zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL,
- client, rn, prev_re,
- is_table_direct);
+ if (zebra_redistribute_is_table_direct(prev_re))
+ redistribute_table_direct(client, ZEBRA_REDISTRIBUTE_ROUTE_DEL, rn,
+ prev_re);
+ else
+ zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, client, rn,
+ prev_re, NULL);
}
}
}
@@ -281,7 +301,6 @@ void redistribute_delete(const struct route_node *rn,
struct listnode *node, *nnode;
struct zserv *client;
vrf_id_t vrfid;
- bool is_table_direct;
if (old_re)
vrfid = old_re->vrf_id;
@@ -344,10 +363,12 @@ void redistribute_delete(const struct route_node *rn,
* happy.
*/
assert(old_re);
- is_table_direct = zebra_redistribute_is_table_direct(old_re);
- zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL,
- client, rn, old_re,
- is_table_direct);
+ if (zebra_redistribute_is_table_direct(old_re))
+ redistribute_table_direct(client, ZEBRA_REDISTRIBUTE_ROUTE_DEL, rn,
+ old_re);
+ else
+ zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, client, rn,
+ old_re, NULL);
}
}
}
@@ -383,8 +404,16 @@ void zebra_redistribute_add(ZAPI_HANDLER_ARGS)
}
if (instance) {
- if (!redist_check_instance(&client->mi_redist[afi][type],
- instance)) {
+ if (type == ZEBRA_ROUTE_TABLE_DIRECT) {
+ struct redist_table_direct table = {
+ .vrf_id = zvrf->vrf->vrf_id,
+ .table_id = instance,
+ };
+ if (!redist_lookup_table_direct(&client->mi_redist[afi][type], &table)) {
+ redist_add_table_direct(&client->mi_redist[afi][type], &table);
+ zebra_redistribute(client, type, instance, zvrf, afi);
+ }
+ } else if (!redist_check_instance(&client->mi_redist[afi][type], instance)) {
redist_add_instance(&client->mi_redist[afi][type],
instance);
zebra_redistribute(client, type, instance, zvrf, afi);
@@ -443,7 +472,13 @@ void zebra_redistribute_delete(ZAPI_HANDLER_ARGS)
* themselves should keep track of the received routes from zebra and
* withdraw them when necessary.
*/
- if (instance)
+ if (type == ZEBRA_ROUTE_TABLE_DIRECT) {
+ struct redist_table_direct table = {
+ .vrf_id = zvrf->vrf->vrf_id,
+ .table_id = instance,
+ };
+ redist_del_table_direct(&client->mi_redist[afi][type], &table);
+ } else if (instance)
redist_del_instance(&client->mi_redist[afi][type], instance);
else
vrf_bitmap_unset(&client->redist[afi][type], zvrf_id(zvrf));
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index b32882e858..d696b19859 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -2588,10 +2588,10 @@ ssize_t netlink_route_multipath_msg_encode(int cmd, struct zebra_dplane_ctx *ctx
}
}
- if ((!fpm && kernel_nexthops_supported()
- && (!proto_nexthops_only()
- || is_proto_nhg(dplane_ctx_get_nhe_id(ctx), 0)))
- || (fpm && force_nhg)) {
+ if ((!fpm && kernel_nexthops_supported() &&
+ (!proto_nexthops_only() || is_proto_nhg(dplane_ctx_get_nhe_id(ctx), 0)) &&
+ (!src_p || !src_p->prefixlen)) ||
+ (fpm && force_nhg)) {
/* Kernel supports nexthop objects */
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug("%s: %pFX nhg_id is %u", __func__, p,
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index ab55998af0..e9d554ba3d 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -509,9 +509,8 @@ int zsend_interface_update(int cmd, struct zserv *client, struct interface *ifp)
return zserv_send_message(client, s);
}
-int zsend_redistribute_route(int cmd, struct zserv *client,
- const struct route_node *rn,
- const struct route_entry *re, bool is_table_direct)
+int zsend_redistribute_route(int cmd, struct zserv *client, const struct route_node *rn,
+ const struct route_entry *re, vrf_id_t *to_vrf)
{
struct zapi_route api;
struct zapi_nexthop *api_nh;
@@ -527,9 +526,10 @@ int zsend_redistribute_route(int cmd, struct zserv *client,
api.vrf_id = re->vrf_id;
api.type = re->type;
api.safi = SAFI_UNICAST;
- if (is_table_direct) {
+ if (to_vrf != NULL) {
api.instance = re->table;
api.type = ZEBRA_ROUTE_TABLE_DIRECT;
+ api.vrf_id = *to_vrf;
} else
api.instance = re->instance;
api.flags = re->flags;
@@ -598,7 +598,7 @@ int zsend_redistribute_route(int cmd, struct zserv *client,
/* Attributes. */
SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE);
- if (is_table_direct)
+ if (to_vrf != NULL)
api.distance = ZEBRA_TABLEDIRECT_DISTANCE_DEFAULT;
else
api.distance = re->distance;
@@ -740,6 +740,10 @@ static int route_notify_internal(const struct route_node *rn, int type,
struct zserv *client;
struct stream *s;
uint8_t blen;
+ const struct prefix *p, *src_p;
+ struct prefix src_dummy = {};
+
+ srcdest_rnode_prefixes(rn, &p, &src_p);
client = zserv_find_client(type, instance);
if (!client || !client->notify_owner) {
@@ -771,9 +775,17 @@ static int route_notify_internal(const struct route_node *rn, int type,
stream_putc(s, rn->p.family);
- blen = prefix_blen(&rn->p);
- stream_putc(s, rn->p.prefixlen);
- stream_put(s, &rn->p.u.prefix, blen);
+ blen = prefix_blen(p);
+ stream_putc(s, p->prefixlen);
+ stream_put(s, &p->u.prefix, blen);
+
+ if (!src_p) {
+ src_dummy.family = p->family;
+ src_p = &src_dummy;
+ }
+ blen = prefix_blen(src_p);
+ stream_putc(s, src_p->prefixlen);
+ stream_put(s, &src_p->u.prefix, blen);
stream_putl(s, table_id);
diff --git a/zebra/zapi_msg.h b/zebra/zapi_msg.h
index a59ccc838b..29a5b69f18 100644
--- a/zebra/zapi_msg.h
+++ b/zebra/zapi_msg.h
@@ -51,10 +51,8 @@ extern void nbr_connected_delete_ipv6(struct interface *ifp,
struct in6_addr *address);
extern int zsend_interface_update(int cmd, struct zserv *client,
struct interface *ifp);
-extern int zsend_redistribute_route(int cmd, struct zserv *zclient,
- const struct route_node *rn,
- const struct route_entry *re,
- bool is_table_direct);
+extern int zsend_redistribute_route(int cmd, struct zserv *zclient, const struct route_node *rn,
+ const struct route_entry *re, vrf_id_t *to_vrf);
extern int zsend_router_id_update(struct zserv *zclient, afi_t afi,
struct prefix *p, vrf_id_t vrf_id);
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index 4f6bc02c6e..f5141c8f23 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -572,8 +572,7 @@ bool zebra_nhg_hash_equal(const void *arg1, const void *arg2)
/* Nexthops should be in-order, so we simply compare them in-place */
for (nexthop1 = nhe1->nhg.nexthop, nexthop2 = nhe2->nhg.nexthop;
nexthop1 && nexthop2;
- nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
-
+ nexthop1 = nexthop_next(nexthop1), nexthop2 = nexthop_next(nexthop2)) {
if (!nhg_compare_nexthops(nexthop1, nexthop2))
return false;
}
@@ -608,8 +607,7 @@ bool zebra_nhg_hash_equal(const void *arg1, const void *arg2)
for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop,
nexthop2 = nhe2->backup_info->nhe->nhg.nexthop;
nexthop1 && nexthop2;
- nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
-
+ nexthop1 = nexthop_next(nexthop1), nexthop2 = nexthop_next(nexthop2)) {
if (!nhg_compare_nexthops(nexthop1, nexthop2))
return false;
}
diff --git a/zebra/zebra_srv6.c b/zebra/zebra_srv6.c
index 624f60e815..6d228c5e24 100644
--- a/zebra/zebra_srv6.c
+++ b/zebra/zebra_srv6.c
@@ -1547,9 +1547,26 @@ static int get_srv6_sid_explicit(struct zebra_srv6_sid **sid,
}
if (ctx->behavior == ZEBRA_SEG6_LOCAL_ACTION_END) {
- zlog_err("%s: invalid SM request arguments: explicit SID allocation not allowed for End/uN behavior",
- __func__);
- return -1;
+ zctx = zebra_srv6_sid_ctx_alloc();
+ zctx->ctx = *ctx;
+
+ *sid = zebra_srv6_sid_alloc(zctx, sid_value, locator, block, sid_func,
+ SRV6_SID_ALLOC_MODE_EXPLICIT);
+ if (!(*sid)) {
+ flog_err(EC_ZEBRA_SM_CANNOT_ASSIGN_SID,
+ "%s: failed to create SRv6 SID %s (%pI6)", __func__,
+ srv6_sid_ctx2str(buf, sizeof(buf), ctx), sid_value);
+ return -1;
+ }
+ (*sid)->ctx = zctx;
+ zctx->sid = *sid;
+ listnode_add(srv6->sids, zctx);
+
+ if (IS_ZEBRA_DEBUG_SRV6)
+ zlog_debug("%s: allocated explicit SRv6 SID %pI6 for context %s", __func__,
+ &(*sid)->value, srv6_sid_ctx2str(buf, sizeof(buf), ctx));
+
+ return 1;
}
/* Allocate an explicit SID function for the SID */