summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMark Stapp <mjs@voltanet.io>2021-06-04 13:41:55 -0400
committerGitHub <noreply@github.com>2021-06-04 13:41:55 -0400
commite4768d32b82f0054192f1bf1a455f5fd4e5d895e (patch)
tree7f16a15f33ce2356bf013dad4ac8eb82c0e25576
parentdd553fb39b02cd4391d50ffb84b45da7248906ed (diff)
parent2ba6be5b24c9c572d167248004fb1e3c4b57e0a2 (diff)
Merge pull request #5865 from slankdev/slankdev-zebra-srv6-manager
zebra: srv6 manager
-rw-r--r--bgpd/bgp_attr.c26
-rw-r--r--bgpd/bgp_main.c2
-rw-r--r--bgpd/bgp_memory.c2
-rw-r--r--bgpd/bgp_memory.h2
-rw-r--r--bgpd/bgp_mplsvpn.c264
-rw-r--r--bgpd/bgp_mplsvpn.h12
-rw-r--r--bgpd/bgp_nb_config.c45
-rw-r--r--bgpd/bgp_nht.c15
-rw-r--r--bgpd/bgp_vty.c192
-rw-r--r--bgpd/bgp_zebra.c61
-rw-r--r--bgpd/bgp_zebra.h1
-rw-r--r--bgpd/bgpd.c18
-rw-r--r--bgpd/bgpd.h20
-rw-r--r--doc/user/bgp.rst47
-rw-r--r--doc/user/sharp.rst139
-rw-r--r--doc/user/zebra.rst131
-rw-r--r--lib/command.c9
-rw-r--r--lib/command.h4
-rw-r--r--lib/nexthop.c110
-rw-r--r--lib/nexthop.h10
-rw-r--r--lib/srv6.c88
-rw-r--r--lib/srv6.h70
-rw-r--r--lib/zclient.c231
-rw-r--r--lib/zclient.h33
-rw-r--r--sharpd/sharp_globals.h16
-rw-r--r--sharpd/sharp_main.c1
-rw-r--r--sharpd/sharp_vty.c301
-rw-r--r--sharpd/sharp_zebra.c67
-rw-r--r--sharpd/sharp_zebra.h10
-rw-r--r--tests/topotests/bgp_prefix_sid2/peer1/exabgp.cfg29
-rw-r--r--tests/topotests/bgp_prefix_sid2/peer1/exabgp.env53
-rw-r--r--tests/topotests/bgp_prefix_sid2/r1/bgpd.conf26
-rw-r--r--tests/topotests/bgp_prefix_sid2/r1/vpnv6_rib_entry1.json50
-rw-r--r--tests/topotests/bgp_prefix_sid2/r1/vpnv6_rib_entry2.json50
-rw-r--r--tests/topotests/bgp_prefix_sid2/r1/zebra.conf7
-rwxr-xr-xtests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py121
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/bgpd.conf8
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/ipv6_rib.json58
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/zebra.conf14
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/bgpd.conf8
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/ipv6_rib.json58
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/zebra.conf14
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/bgpd.conf8
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/ipv6_rib.json58
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/zebra.conf14
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/bgpd.conf8
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/ipv6_rib.json58
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/zebra.conf14
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/bgpd.conf8
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/ipv6_rib.json58
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/zebra.conf14
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/bgpd.conf8
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/ipv6_rib.json58
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/zebra.conf14
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/bgpd.conf64
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vpnv6_rib.json170
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vrf10_rib.json89
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vrf20_rib.json98
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/zebra.conf40
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/bgpd.conf65
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vpnv6_rib.json170
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vrf10_rib.json98
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vrf20_rib.json89
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/zebra.conf40
-rwxr-xr-xtests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py204
-rw-r--r--tests/topotests/srv6_locator/__init__.py0
-rw-r--r--tests/topotests/srv6_locator/expected_chunks1.json1
-rw-r--r--tests/topotests/srv6_locator/expected_chunks2.json8
-rw-r--r--tests/topotests/srv6_locator/expected_chunks3.json1
-rw-r--r--tests/topotests/srv6_locator/expected_chunks4.json6
-rw-r--r--tests/topotests/srv6_locator/expected_chunks5.json8
-rw-r--r--tests/topotests/srv6_locator/expected_ipv6_routes.json29
-rw-r--r--tests/topotests/srv6_locator/expected_locators1.json26
-rw-r--r--tests/topotests/srv6_locator/expected_locators2.json26
-rw-r--r--tests/topotests/srv6_locator/expected_locators3.json26
-rw-r--r--tests/topotests/srv6_locator/expected_locators4.json36
-rw-r--r--tests/topotests/srv6_locator/expected_locators5.json38
-rw-r--r--tests/topotests/srv6_locator/r1/setup.sh2
-rw-r--r--tests/topotests/srv6_locator/r1/sharpd.conf7
-rw-r--r--tests/topotests/srv6_locator/r1/zebra.conf22
-rwxr-xr-xtests/topotests/srv6_locator/test_srv6_locator.py142
-rw-r--r--tests/topotests/zebra_seg6_route/r1/routes.json25
-rw-r--r--tests/topotests/zebra_seg6_route/r1/setup.sh5
-rw-r--r--tests/topotests/zebra_seg6_route/r1/sharpd.conf0
-rw-r--r--tests/topotests/zebra_seg6_route/r1/zebra.conf13
-rwxr-xr-xtests/topotests/zebra_seg6_route/test_zebra_seg6_route.py109
-rw-r--r--tests/topotests/zebra_seg6local_route/r1/routes.json98
-rw-r--r--tests/topotests/zebra_seg6local_route/r1/setup.sh3
-rw-r--r--tests/topotests/zebra_seg6local_route/r1/sharpd.conf0
-rw-r--r--tests/topotests/zebra_seg6local_route/r1/zebra.conf9
-rwxr-xr-xtests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py107
-rw-r--r--vtysh/vtysh.c134
-rw-r--r--vtysh/vtysh.h1
-rw-r--r--vtysh/vtysh_config.c4
-rw-r--r--zebra/main.c4
-rw-r--r--zebra/rt_netlink.c343
-rw-r--r--zebra/subdir.am6
-rw-r--r--zebra/zapi_msg.c103
-rw-r--r--zebra/zapi_msg.h8
-rw-r--r--zebra/zebra_errors.c6
-rw-r--r--zebra/zebra_errors.h1
-rw-r--r--zebra/zebra_nhg.c4
-rw-r--r--zebra/zebra_srv6.c350
-rw-r--r--zebra/zebra_srv6.h80
-rw-r--r--zebra/zebra_srv6_vty.c356
-rw-r--r--zebra/zebra_srv6_vty.h25
-rw-r--r--zebra/zebra_vty.c29
107 files changed, 6049 insertions, 22 deletions
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index 71e4b56a00..2f0751a5f0 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -676,6 +676,10 @@ unsigned int attrhash_key_make(const void *p)
MIX(transit_hash_key_make(bgp_attr_get_transit(attr)));
if (attr->encap_subtlvs)
MIX(encap_hash_key_make(attr->encap_subtlvs));
+ if (attr->srv6_l3vpn)
+ MIX(srv6_l3vpn_hash_key_make(attr->srv6_l3vpn));
+ if (attr->srv6_vpn)
+ MIX(srv6_vpn_hash_key_make(attr->srv6_vpn));
#ifdef ENABLE_BGP_VNC
struct bgp_attr_encap_subtlv *vnc_subtlvs =
bgp_attr_get_vnc_subtlvs(attr);
@@ -1141,6 +1145,16 @@ void bgp_attr_undup(struct attr *new, struct attr *old)
if (new->lcommunity != old->lcommunity)
lcommunity_free(&new->lcommunity);
+
+ if (new->srv6_l3vpn != old->srv6_l3vpn) {
+ srv6_l3vpn_free(new->srv6_l3vpn);
+ new->srv6_l3vpn = NULL;
+ }
+
+ if (new->srv6_vpn != old->srv6_vpn) {
+ srv6_vpn_free(new->srv6_vpn);
+ new->srv6_vpn = NULL;
+ }
}
/* Free bgp attribute and aspath. */
@@ -1202,6 +1216,14 @@ void bgp_attr_flush(struct attr *attr)
encap_free(attr->encap_subtlvs);
attr->encap_subtlvs = NULL;
}
+ if (attr->srv6_l3vpn && !attr->srv6_l3vpn->refcnt) {
+ srv6_l3vpn_free(attr->srv6_l3vpn);
+ attr->srv6_l3vpn = NULL;
+ }
+ if (attr->srv6_vpn && !attr->srv6_vpn->refcnt) {
+ srv6_vpn_free(attr->srv6_vpn);
+ attr->srv6_vpn = NULL;
+ }
#ifdef ENABLE_BGP_VNC
struct bgp_attr_encap_subtlv *vnc_subtlvs =
bgp_attr_get_vnc_subtlvs(attr);
@@ -2676,6 +2698,7 @@ static bgp_attr_parse_ret_t bgp_attr_psid_sub(uint8_t type, uint16_t length,
sizeof(struct bgp_attr_srv6_vpn));
attr->srv6_vpn->sid_flags = sid_flags;
sid_copy(&attr->srv6_vpn->sid, &ipv6_sid);
+ attr->srv6_vpn = srv6_vpn_intern(attr->srv6_vpn);
}
/* Placeholder code for the SRv6 L3 Service type */
@@ -2718,6 +2741,7 @@ static bgp_attr_parse_ret_t bgp_attr_psid_sub(uint8_t type, uint16_t length,
attr->srv6_l3vpn->sid_flags = sid_flags;
attr->srv6_l3vpn->endpoint_behavior = endpoint_behavior;
sid_copy(&attr->srv6_l3vpn->sid, &ipv6_sid);
+ attr->srv6_l3vpn = srv6_l3vpn_intern(attr->srv6_l3vpn);
}
/* Placeholder code for Unsupported TLV */
@@ -4098,7 +4122,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer,
}
/* SRv6 Service Information Attribute. */
- if (afi == AFI_IP && safi == SAFI_MPLS_VPN) {
+ if ((afi == AFI_IP || afi == AFI_IP6) && safi == SAFI_MPLS_VPN) {
if (attr->srv6_l3vpn) {
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c
index d545becded..6736671f37 100644
--- a/bgpd/bgp_main.c
+++ b/bgpd/bgp_main.c
@@ -317,6 +317,8 @@ static int bgp_vrf_enable(struct vrf *vrf)
bgp_instance_up(bgp);
vpn_leak_zebra_vrf_label_update(bgp, AFI_IP);
vpn_leak_zebra_vrf_label_update(bgp, AFI_IP6);
+ vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP);
+ vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP6);
vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP,
bgp_get_default(), bgp);
vpn_leak_postchange(BGP_VPN_POLICY_DIR_FROMVPN, AFI_IP,
diff --git a/bgpd/bgp_memory.c b/bgpd/bgp_memory.c
index fc508496cc..eb85936f0f 100644
--- a/bgpd/bgp_memory.c
+++ b/bgpd/bgp_memory.c
@@ -142,3 +142,5 @@ DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_INDEX, "BGP flowspec index");
DEFINE_MTYPE(BGPD, BGP_SRV6_L3VPN, "BGP prefix-sid srv6 l3vpn servcie");
DEFINE_MTYPE(BGPD, BGP_SRV6_VPN, "BGP prefix-sid srv6 vpn service");
+DEFINE_MTYPE(BGPD, BGP_SRV6_SID, "BGP srv6 segment-id");
+DEFINE_MTYPE(BGPD, BGP_SRV6_FUNCTION, "BGP srv6 function");
diff --git a/bgpd/bgp_memory.h b/bgpd/bgp_memory.h
index 4080248038..c5ba371498 100644
--- a/bgpd/bgp_memory.h
+++ b/bgpd/bgp_memory.h
@@ -139,5 +139,7 @@ DECLARE_MTYPE(BGP_FLOWSPEC_INDEX);
DECLARE_MTYPE(BGP_SRV6_L3VPN);
DECLARE_MTYPE(BGP_SRV6_VPN);
+DECLARE_MTYPE(BGP_SRV6_SID);
+DECLARE_MTYPE(BGP_SRV6_FUNCTION);
#endif /* _QUAGGA_BGP_MEMORY_H */
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index eb68d84c06..f99d672c8a 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -47,6 +47,7 @@
#include "bgpd/bgp_nexthop.h"
#include "bgpd/bgp_nht.h"
#include "bgpd/bgp_evpn.h"
+#include "bgpd/bgp_memory.h"
#ifdef ENABLE_BGP_VNC
#include "bgpd/rfapi/rfapi_backend.h"
@@ -356,6 +357,83 @@ void vpn_leak_zebra_vrf_label_withdraw(struct bgp *bgp, afi_t afi)
bgp->vpn_policy[afi].tovpn_zebra_vrf_label_last_sent = label;
}
+/*
+ * This function informs zebra of the srv6-function this vrf sets on routes
+ * leaked to VPN. Zebra should install this srv6-function in the kernel with
+ * an action of "End.DT4/6's IP FIB to route the PDU."
+ */
+void vpn_leak_zebra_vrf_sid_update(struct bgp *bgp, afi_t afi)
+{
+	int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL);
+	enum seg6local_action_t act;
+	struct seg6local_context ctx = {};
+	struct in6_addr *tovpn_sid = NULL;
+	struct in6_addr *tovpn_sid_ls = NULL;
+	struct vrf *vrf;
+	char buf[256] = {0};
+
+	if (bgp->vrf_id == VRF_UNKNOWN) {
+		if (debug)
+			zlog_debug("%s: vrf %s: afi %s: vrf_id not set, can't set zebra vrf sid",
+				   __func__, bgp->name_pretty, afi2str(afi));
+		return;
+	}
+
+	tovpn_sid = bgp->vpn_policy[afi].tovpn_sid;
+	if (!tovpn_sid) {
+		if (debug)
+			zlog_debug("%s: vrf %s: afi %s: sid not set", __func__,
+				   bgp->name_pretty, afi2str(afi));
+		return;
+	}
+
+	if (debug) {
+		inet_ntop(AF_INET6, tovpn_sid, buf, sizeof(buf));
+		zlog_debug("%s: vrf %s: afi %s: setting sid %s for vrf id %d",
+			   __func__, bgp->name_pretty, afi2str(afi), buf,
+			   bgp->vrf_id);
+	}
+
+	vrf = vrf_lookup_by_id(bgp->vrf_id);
+	if (!vrf)
+		return;
+
+	/* Bind an End.DT4/DT6 behavior to the vrf's table so decapsulated
+	 * traffic is routed via that vrf's FIB. */
+	ctx.table = vrf->data.l.table_id;
+	act = afi == AFI_IP ? ZEBRA_SEG6_LOCAL_ACTION_END_DT4
+			    : ZEBRA_SEG6_LOCAL_ACTION_END_DT6;
+	zclient_send_localsid(zclient, tovpn_sid, bgp->vrf_id, act, &ctx);
+
+	/* Remember what was sent so a later withdraw/update can refer to it.
+	 * Free any previously recorded value first to avoid leaking it when
+	 * this function runs more than once for the same afi. */
+	tovpn_sid_ls = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr));
+	*tovpn_sid_ls = *tovpn_sid;
+	XFREE(MTYPE_BGP_SRV6_SID,
+	      bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent);
+	bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent = tovpn_sid_ls;
+}
+
+/*
+ * If zebra tells us vrf has become unconfigured, tell zebra not to
+ * use this srv6-function to forward to the vrf anymore
+ */
+void vpn_leak_zebra_vrf_sid_withdraw(struct bgp *bgp, afi_t afi)
+{
+	int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL);
+
+	if (bgp->vrf_id == VRF_UNKNOWN) {
+		if (debug)
+			zlog_debug("%s: vrf %s: afi %s: vrf_id not set, can't withdraw zebra vrf sid",
+				   __func__, bgp->name_pretty, afi2str(afi));
+		return;
+	}
+
+	/* Nothing was ever announced for this afi; avoid asking zebra to
+	 * withdraw a NULL sid. */
+	if (!bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent)
+		return;
+
+	if (debug)
+		zlog_debug("%s: deleting sid for vrf %s afi (id=%d)", __func__,
+			   bgp->name_pretty, bgp->vrf_id);
+
+	zclient_send_localsid(zclient,
+		bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent,
+		bgp->vrf_id, ZEBRA_SEG6_LOCAL_ACTION_UNSPEC, NULL);
+	XFREE(MTYPE_BGP_SRV6_SID,
+	      bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent);
+}
+
int vpn_leak_label_callback(
mpls_label_t label,
void *labelid,
@@ -417,6 +495,129 @@ int vpn_leak_label_callback(
return 0;
}
+/* Record a SID (and the locator it was carved from) as in-use on this
+ * bgp instance, so subsequent allocations can detect collisions via
+ * sid_exist(). */
+static void sid_register(struct bgp *bgp, const struct in6_addr *sid,
+			 const char *locator_name)
+{
+	struct bgp_srv6_function *func;
+	func = XCALLOC(MTYPE_BGP_SRV6_FUNCTION,
+		       sizeof(struct bgp_srv6_function));
+	func->sid = *sid;
+	snprintf(func->locator_name, sizeof(func->locator_name),
+		 "%s", locator_name);
+	listnode_add(bgp->srv6_functions, func);
+}
+
+/* Return true if @sid has already been registered on this bgp instance
+ * (linear scan of the srv6_functions list). */
+static bool sid_exist(struct bgp *bgp, const struct in6_addr *sid)
+{
+	struct listnode *node;
+	struct bgp_srv6_function *func;
+
+	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_functions, node, func))
+		if (sid_same(&func->sid, sid))
+			return true;
+	return false;
+}
+
+/*
+ * Allocate a new SID out of one of the locator chunks learned from zebra.
+ *
+ * if index != 0: try to allocate as index-mode
+ * else: try to allocate as auto-mode
+ *
+ * Returns true and fills *sid on success.
+ */
+static bool alloc_new_sid(struct bgp *bgp, uint32_t index,
+			  struct in6_addr *sid)
+{
+	struct listnode *node;
+	struct prefix_ipv6 *chunk;
+	struct in6_addr sid_buf;
+	bool alloced = false;
+
+	if (!bgp || !sid)
+		return false;
+
+	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) {
+		sid_buf = chunk->prefix;
+		if (index != 0) {
+			/* index-mode: the requested function value goes in
+			 * the SID's last byte; fail if it is taken. */
+			sid_buf.s6_addr[15] = index;
+			if (sid_exist(bgp, &sid_buf))
+				return false;
+			alloced = true;
+			break;
+		}
+
+		/* auto-mode: scan for the first unused function value,
+		 * encoded big-endian in the SID's last two bytes (high
+		 * byte in s6_addr[14], low byte in s6_addr[15], matching
+		 * the index-mode placement above). */
+		for (size_t i = 1; i < 255; i++) {
+			sid_buf.s6_addr[14] = (i & 0xff00) >> 8;
+			sid_buf.s6_addr[15] = (i & 0x00ff);
+
+			if (sid_exist(bgp, &sid_buf))
+				continue;
+			alloced = true;
+			break;
+		}
+		/* stop at the first chunk that yielded a free value, so
+		 * sid_buf is not clobbered by the next iteration */
+		if (alloced)
+			break;
+	}
+
+	if (!alloced)
+		return false;
+
+	sid_register(bgp, &sid_buf, bgp->srv6_locator_name);
+	*sid = sid_buf;
+	return true;
+}
+
+/* Allocate (if needed) the tovpn SID for a vrf instance, either by the
+ * configured index or automatically, and store it in the vrf's
+ * vpn_policy. No-op when a SID already exists or SRv6-VPN isn't
+ * configured for this afi. */
+void ensure_vrf_tovpn_sid(struct bgp *bgp_vpn, struct bgp *bgp_vrf, afi_t afi)
+{
+	int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF);
+	bool alloced = false;
+	char buf[256];
+	struct in6_addr *sid;
+	uint32_t tovpn_sid_index = 0;
+	bool tovpn_sid_auto = false;
+
+	if (debug)
+		zlog_debug("%s: try to allocate new SID for vrf %s: afi %s",
+			   __func__, bgp_vrf->name_pretty, afi2str(afi));
+
+	/* skip when tovpn sid is already allocated on vrf instance */
+	if (bgp_vrf->vpn_policy[afi].tovpn_sid)
+		return;
+
+	/*
+	 * skip when bgp vpn instance isn't allocated
+	 * or srv6 locator chunk isn't allocated
+	 */
+	if (!bgp_vpn || !bgp_vpn->srv6_locator_chunks || !bgp_vrf)
+		return;
+
+	tovpn_sid_index = bgp_vrf->vpn_policy[afi].tovpn_sid_index;
+	tovpn_sid_auto = CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags,
+				    BGP_VPN_POLICY_TOVPN_SID_AUTO);
+
+	/* skip when VPN isn't configured on vrf-instance */
+	if (tovpn_sid_index == 0 && !tovpn_sid_auto)
+		return;
+
+	/* check invalid case both configured index and auto */
+	if (tovpn_sid_index != 0 && tovpn_sid_auto) {
+		zlog_err("%s: index-mode and auto-mode both selected. ignored.",
+			 __func__);
+		return;
+	}
+
+	sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr));
+	alloced = alloc_new_sid(bgp_vpn, tovpn_sid_index, sid);
+	if (!alloced) {
+		zlog_debug("%s: not allocated new sid for vrf %s: afi %s",
+			   __func__, bgp_vrf->name_pretty, afi2str(afi));
+		/* free the unused buffer instead of leaking it */
+		XFREE(MTYPE_BGP_SRV6_SID, sid);
+		return;
+	}
+
+	if (debug) {
+		inet_ntop(AF_INET6, sid, buf, sizeof(buf));
+		zlog_debug("%s: new sid %s allocated for vrf %s: afi %s",
+			   __func__, buf, bgp_vrf->name_pretty,
+			   afi2str(afi));
+	}
+	bgp_vrf->vpn_policy[afi].tovpn_sid = sid;
+}
+
static bool ecom_intersect(struct ecommunity *e1, struct ecommunity *e2)
{
uint32_t i, j;
@@ -488,6 +689,32 @@ static void setlabels(struct bgp_path_info *bpi,
}
/*
+ * make encoded route SIDs match specified encoded sid set
+ */
+static void setsids(struct bgp_path_info *bpi,
+		    struct in6_addr *sid,
+		    uint32_t num_sids)
+{
+	uint32_t i;
+	struct bgp_path_info_extra *extra;
+
+	if (num_sids)
+		assert(sid);
+	assert(num_sids <= BGP_MAX_SIDS);
+
+	/* clearing the path's SIDs must not allocate extra; only reset the
+	 * count if extra already exists */
+	if (!num_sids) {
+		if (bpi->extra)
+			bpi->extra->num_sids = 0;
+		return;
+	}
+
+	/* copy the SID set into the path's extra storage */
+	extra = bgp_path_info_extra_get(bpi);
+	for (i = 0; i < num_sids; i++)
+		memcpy(&extra->sid[i], &sid[i], sizeof(struct in6_addr));
+	extra->num_sids = num_sids;
+}
+
+/*
* returns pointer to new bgp_path_info upon success
*/
static struct bgp_path_info *
@@ -502,6 +729,10 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
struct bgp_path_info *bpi;
struct bgp_path_info *bpi_ultimate;
struct bgp_path_info *new;
+ uint32_t num_sids = 0;
+
+ if (new_attr->srv6_l3vpn || new_attr->srv6_vpn)
+ num_sids = 1;
if (debug)
zlog_debug(
@@ -580,6 +811,18 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
if (!labelssame)
setlabels(bpi, label, num_labels);
+ /*
+ * rewrite sid
+ */
+ if (num_sids) {
+ if (new_attr->srv6_l3vpn)
+ setsids(bpi, &new_attr->srv6_l3vpn->sid,
+ num_sids);
+ else if (new_attr->srv6_vpn)
+ setsids(bpi, &new_attr->srv6_vpn->sid,
+ num_sids);
+ }
+
if (nexthop_self_flag)
bgp_path_info_set_flag(bn, bpi, BGP_PATH_ANNC_NH_SELF);
@@ -642,6 +885,16 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
bgp_path_info_extra_get(new);
+ /*
+ * rewrite sid
+ */
+ if (num_sids) {
+ if (new_attr->srv6_l3vpn)
+ setsids(new, &new_attr->srv6_l3vpn->sid, num_sids);
+ else if (new_attr->srv6_vpn)
+ setsids(new, &new_attr->srv6_vpn->sid, num_sids);
+ }
+
if (num_labels)
setlabels(new, label, num_labels);
@@ -898,6 +1151,17 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
SET_FLAG(static_attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID));
static_attr.originator_id = bgp_vpn->router_id;
+ /* Set SID for SRv6 VPN */
+ if (bgp_vrf->vpn_policy[afi].tovpn_sid) {
+ static_attr.srv6_l3vpn = XCALLOC(MTYPE_BGP_SRV6_L3VPN,
+ sizeof(struct bgp_attr_srv6_l3vpn));
+ static_attr.srv6_l3vpn->sid_flags = 0x00;
+ static_attr.srv6_l3vpn->endpoint_behavior = 0xffff;
+ memcpy(&static_attr.srv6_l3vpn->sid,
+ bgp_vrf->vpn_policy[afi].tovpn_sid,
+ sizeof(static_attr.srv6_l3vpn->sid));
+ }
+
new_attr = bgp_attr_intern(
&static_attr); /* hashed refcounted everything */
diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index 91a073d5d7..38193721b3 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -77,7 +77,10 @@ extern void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn,
extern void vpn_leak_zebra_vrf_label_update(struct bgp *bgp, afi_t afi);
extern void vpn_leak_zebra_vrf_label_withdraw(struct bgp *bgp, afi_t afi);
+extern void vpn_leak_zebra_vrf_sid_update(struct bgp *bgp, afi_t afi);
+extern void vpn_leak_zebra_vrf_sid_withdraw(struct bgp *bgp, afi_t afi);
extern int vpn_leak_label_callback(mpls_label_t label, void *lblid, bool alloc);
+extern void ensure_vrf_tovpn_sid(struct bgp *vpn, struct bgp *vrf, afi_t afi);
extern void vrf_import_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
afi_t afi, safi_t safi);
void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
@@ -237,6 +240,15 @@ static inline void vpn_leak_postchange(vpn_policy_direction_t direction,
vpn_leak_zebra_vrf_label_update(bgp_vrf, afi);
}
+ if (!bgp_vrf->vpn_policy[afi].tovpn_sid)
+ ensure_vrf_tovpn_sid(bgp_vpn, bgp_vrf, afi);
+
+ if (sid_diff(bgp_vrf->vpn_policy[afi].tovpn_sid,
+ bgp_vrf->vpn_policy[afi]
+ .tovpn_zebra_vrf_sid_last_sent)) {
+ vpn_leak_zebra_vrf_sid_update(bgp_vrf, afi);
+ }
+
vpn_leak_from_vrf_update_all(bgp_vpn, bgp_vrf, afi);
}
}
diff --git a/bgpd/bgp_nb_config.c b/bgpd/bgp_nb_config.c
index 3b7d95c0f3..5189d7ba8f 100644
--- a/bgpd/bgp_nb_config.c
+++ b/bgpd/bgp_nb_config.c
@@ -11442,12 +11442,33 @@ int bgp_global_afi_safis_afi_safi_ipv6_unicast_vpn_config_nexthop_destroy(
int bgp_global_afi_safis_afi_safi_ipv6_unicast_vpn_config_import_vpn_modify(
struct nb_cb_modify_args *args)
{
+ bool is_enable = false;
+ struct bgp *bgp;
+
switch (args->event) {
case NB_EV_VALIDATE:
+ bgp = nb_running_get_entry(args->dnode, NULL, false);
+ if (!bgp)
+ return NB_OK;
+
+ if (bgp->inst_type != BGP_INSTANCE_TYPE_VRF
+ && bgp->inst_type != BGP_INSTANCE_TYPE_DEFAULT) {
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "import|export vpn valid only for bgp vrf or default instance");
+ return NB_ERR_VALIDATION;
+ }
+
+ break;
case NB_EV_PREPARE:
case NB_EV_ABORT:
+ return NB_OK;
case NB_EV_APPLY:
- /* TODO: implement me. */
+ if (yang_dnode_get_bool(args->dnode, NULL))
+ is_enable = true;
+
+ return bgp_global_afi_safi_ip_unicast_vpn_config_import_export_vpn_modify(
+ args, "import", is_enable);
break;
}
@@ -11461,12 +11482,32 @@ int bgp_global_afi_safis_afi_safi_ipv6_unicast_vpn_config_import_vpn_modify(
int bgp_global_afi_safis_afi_safi_ipv6_unicast_vpn_config_export_vpn_modify(
struct nb_cb_modify_args *args)
{
+ bool is_enable = false;
+ struct bgp *bgp;
+
switch (args->event) {
case NB_EV_VALIDATE:
+ bgp = nb_running_get_entry(args->dnode, NULL, false);
+ if (!bgp)
+ return NB_OK;
+
+ if (bgp->inst_type != BGP_INSTANCE_TYPE_VRF
+ && bgp->inst_type != BGP_INSTANCE_TYPE_DEFAULT) {
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "import|export vpn valid only for bgp vrf or default instance");
+ return NB_ERR_VALIDATION;
+ }
+ break;
case NB_EV_PREPARE:
case NB_EV_ABORT:
+ return NB_OK;
case NB_EV_APPLY:
- /* TODO: implement me. */
+ if (yang_dnode_get_bool(args->dnode, NULL))
+ is_enable = true;
+
+ return bgp_global_afi_safi_ip_unicast_vpn_config_import_export_vpn_modify(
+ args, "export", is_enable);
break;
}
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index e94f63541d..4b4a3716e6 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -66,9 +66,20 @@ static int bgp_isvalid_nexthop(struct bgp_nexthop_cache *bnc)
static int bgp_isvalid_labeled_nexthop(struct bgp_nexthop_cache *bnc)
{
+ /*
+ * In the case of MPLS-VPN, the label is learned from LDP or other
+ * protocols, and nexthop tracking is enabled for the label.
+ * The value is recorded as BGP_NEXTHOP_LABELED_VALID.
+ * In the case of SRv6-VPN, we need to track the reachability to the
+ * SID (in other words, IPv6 address). As in MPLS, we need to record
+ * the value as BGP_NEXTHOP_SID_VALID. However, this function is
+ * currently not implemented, and this function assumes that all
+ * Transit routes for SRv6-VPN are valid.
+ */
return (bgp_zebra_num_connects() == 0
- || (bnc && CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID)
- && bnc->nexthop_num > 0));
+ || (bnc && bnc->nexthop_num > 0
+ && (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID)
+ || bnc->bgp->srv6_enabled)));
}
static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 9748df9f96..2feba00806 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -9252,6 +9252,78 @@ DEFPY (af_label_vpn_export,
return CMD_SUCCESS;
}
+DEFPY (af_sid_vpn_export,
+       af_sid_vpn_export_cmd,
+       "[no] sid vpn export <(1-255)$sid_idx|auto$sid_auto>",
+       NO_STR
+       "sid value for VRF\n"
+       "Between current address-family and vpn\n"
+       "For routes leaked from current address-family to vpn\n"
+       "Sid allocation index\n"
+       "Automatically assign a sid\n")
+{
+	VTY_DECLVAR_CONTEXT(bgp, bgp);
+	afi_t afi;
+	int debug = 0;
+	int idx = 0;
+	bool yes = true;
+
+	if (argv_find(argv, argc, "no", &idx))
+		yes = false;
+	debug = (BGP_DEBUG(vpn, VPN_LEAK_TO_VRF) |
+		 BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF));
+
+	afi = vpn_policy_getafi(vty, bgp, false);
+	if (afi == AFI_MAX)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	if (!yes) {
+		/* implement me */
+		vty_out(vty, "It's not implemented\n");
+		return CMD_WARNING_CONFIG_FAILED;
+	}
+
+	/* skip when it's already configured */
+	if ((sid_idx != 0 && bgp->vpn_policy[afi].tovpn_sid_index != 0)
+	    || (sid_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags,
+				       BGP_VPN_POLICY_TOVPN_SID_AUTO)))
+		return CMD_SUCCESS;
+
+	/*
+	 * mode change between sid_idx and sid_auto isn't supported.
+	 * user must negate sid vpn export when they want to change the mode
+	 */
+	if ((sid_auto && bgp->vpn_policy[afi].tovpn_sid_index != 0)
+	    || (sid_idx != 0 && CHECK_FLAG(bgp->vpn_policy[afi].flags,
+					   BGP_VPN_POLICY_TOVPN_SID_AUTO))) {
+		vty_out(vty, "it's already configured as %s.\n",
+			sid_auto ? "auto-mode" : "idx-mode");
+		return CMD_WARNING_CONFIG_FAILED;
+	}
+
+	/* pre-change */
+	vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+			   bgp_get_default(), bgp);
+
+	if (sid_auto) {
+		/* SID allocation auto-mode */
+		if (debug)
+			zlog_debug("%s: auto sid alloc.", __func__);
+		SET_FLAG(bgp->vpn_policy[afi].flags,
+			 BGP_VPN_POLICY_TOVPN_SID_AUTO);
+	} else {
+		/* SID allocation index-mode */
+		if (debug)
+			zlog_debug("%s: idx %ld sid alloc.", __func__, sid_idx);
+		bgp->vpn_policy[afi].tovpn_sid_index = sid_idx;
+	}
+
+	/* post-change: triggers actual SID allocation / leaking */
+	vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+			    bgp_get_default(), bgp);
+	return CMD_SUCCESS;
+}
+
ALIAS (af_label_vpn_export,
af_no_label_vpn_export_cmd,
"no label vpn export",
@@ -9878,6 +9950,102 @@ DEFUN_NOSH (address_family_evpn,
return CMD_SUCCESS;
}
+/* Enter the "segment-routing srv6" sub-node of a bgp instance; entering
+ * the node is what marks the instance as srv6-enabled. */
+DEFUN_NOSH (bgp_segment_routing_srv6,
+	    bgp_segment_routing_srv6_cmd,
+	    "segment-routing srv6",
+	    "Segment-Routing configuration\n"
+	    "Segment-Routing SRv6 configuration\n")
+{
+	VTY_DECLVAR_CONTEXT(bgp, bgp);
+	bgp->srv6_enabled = true;
+	vty->node = BGP_SRV6_NODE;
+	return CMD_SUCCESS;
+}
+
+/* Configure the SRv6 locator name and request a chunk of it from zebra.
+ * Only one locator is supported per instance; a different name is
+ * rejected until the current one is removed. */
+DEFPY (bgp_srv6_locator,
+       bgp_srv6_locator_cmd,
+       "locator NAME$name",
+       "Specify SRv6 locator\n"
+       "Specify SRv6 locator\n")
+{
+	VTY_DECLVAR_CONTEXT(bgp, bgp);
+	int ret;
+
+	if (strlen(bgp->srv6_locator_name) > 0
+	    && strcmp(name, bgp->srv6_locator_name) != 0) {
+		vty_out(vty, "srv6 locator is already configured\n");
+		return CMD_WARNING_CONFIG_FAILED;
+	}
+
+	snprintf(bgp->srv6_locator_name,
+		 sizeof(bgp->srv6_locator_name), "%s", name);
+
+	/* the chunk arrives asynchronously via the
+	 * process_srv6_locator_chunk zclient callback */
+	ret = bgp_zebra_srv6_manager_get_locator_chunk(name);
+	if (ret < 0)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	return CMD_SUCCESS;
+}
+
+/* Display SRv6 state: the default instance's locator and chunks, the
+ * registered functions, and each instance's tovpn SIDs. */
+DEFPY (show_bgp_srv6,
+       show_bgp_srv6_cmd,
+       "show bgp segment-routing srv6",
+       SHOW_STR
+       BGP_STR
+       "BGP Segment Routing\n"
+       "BGP Segment Routing SRv6\n")
+{
+	struct bgp *bgp;
+	struct listnode *node;
+	struct prefix_ipv6 *chunk;
+	struct bgp_srv6_function *func;
+	struct in6_addr *tovpn4_sid;
+	struct in6_addr *tovpn6_sid;
+	char buf[256];
+	char buf_tovpn4_sid[256];
+	char buf_tovpn6_sid[256];
+
+	/* locator/chunk/function state lives on the default instance */
+	bgp = bgp_get_default();
+	if (!bgp)
+		return CMD_SUCCESS;
+
+	vty_out(vty, "locator_name: %s\n", bgp->srv6_locator_name);
+	vty_out(vty, "locator_chunks:\n");
+	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) {
+		prefix2str(chunk, buf, sizeof(buf));
+		vty_out(vty, "- %s\n", buf);
+	}
+
+	vty_out(vty, "functions:\n");
+	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_functions, node, func)) {
+		inet_ntop(AF_INET6, &func->sid, buf, sizeof(buf));
+		vty_out(vty, "- sid: %s\n", buf);
+		vty_out(vty, "  locator: %s\n", func->locator_name);
+	}
+
+	/* per-instance tovpn SIDs, both address families */
+	vty_out(vty, "bgps:\n");
+	for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
+		vty_out(vty, "- name: %s\n",
+			bgp->name ? bgp->name : "default");
+
+		tovpn4_sid = bgp->vpn_policy[AFI_IP].tovpn_sid;
+		tovpn6_sid = bgp->vpn_policy[AFI_IP6].tovpn_sid;
+		if (tovpn4_sid)
+			inet_ntop(AF_INET6, tovpn4_sid, buf_tovpn4_sid,
+				  sizeof(buf_tovpn4_sid));
+		if (tovpn6_sid)
+			inet_ntop(AF_INET6, tovpn6_sid, buf_tovpn6_sid,
+				  sizeof(buf_tovpn6_sid));
+
+		vty_out(vty, "  vpn_policy[AFI_IP].tovpn_sid: %s\n",
+			tovpn4_sid ? buf_tovpn4_sid : "none");
+		vty_out(vty, "  vpn_policy[AFI_IP6].tovpn_sid: %s\n",
+			tovpn6_sid ? buf_tovpn6_sid : "none");
+	}
+
+	return CMD_SUCCESS;
+}
+
DEFUN_NOSH (exit_address_family,
exit_address_family_cmd,
"exit-address-family",
@@ -17895,6 +18063,14 @@ int bgp_config_write(struct vty *vty)
if (CHECK_FLAG(bgp->flags, BGP_FLAG_SHUTDOWN))
vty_out(vty, " bgp shutdown\n");
+ if (bgp->srv6_enabled) {
+ vty_frame(vty, " !\n segment-routing srv6\n");
+ if (strlen(bgp->srv6_locator_name))
+ vty_out(vty, " locator %s\n",
+ bgp->srv6_locator_name);
+ }
+
+
/* IPv4 unicast configuration. */
bgp_config_write_family(vty, bgp, AFI_IP, SAFI_UNICAST);
@@ -18040,6 +18216,13 @@ static struct cmd_node bgp_flowspecv6_node = {
.prompt = "%s(config-router-af-vpnv6)# ",
};
+static struct cmd_node bgp_srv6_node = {
+ .name = "bgp srv6",
+ .node = BGP_SRV6_NODE,
+ .parent_node = BGP_NODE,
+ .prompt = "%s(config-router-srv6)# ",
+};
+
static void community_list_vty(void);
static void bgp_ac_neighbor(vector comps, struct cmd_token *token)
@@ -18114,6 +18297,7 @@ void bgp_vty_init(void)
install_node(&bgp_evpn_vni_node);
install_node(&bgp_flowspecv4_node);
install_node(&bgp_flowspecv6_node);
+ install_node(&bgp_srv6_node);
/* Install default VTY commands to new nodes. */
install_default(BGP_NODE);
@@ -18129,6 +18313,7 @@ void bgp_vty_init(void)
install_default(BGP_FLOWSPECV6_NODE);
install_default(BGP_EVPN_NODE);
install_default(BGP_EVPN_VNI_NODE);
+ install_default(BGP_SRV6_NODE);
/* "bgp local-mac" hidden commands. */
install_element(CONFIG_NODE, &bgp_local_mac_cmd);
@@ -19457,6 +19642,13 @@ void bgp_vty_init(void)
/* tcp-mss command */
install_element(BGP_NODE, &neighbor_tcp_mss_cmd);
install_element(BGP_NODE, &no_neighbor_tcp_mss_cmd);
+
+ /* srv6 commands */
+ install_element(VIEW_NODE, &show_bgp_srv6_cmd);
+ install_element(BGP_NODE, &bgp_segment_routing_srv6_cmd);
+ install_element(BGP_SRV6_NODE, &bgp_srv6_locator_cmd);
+ install_element(BGP_IPV4_NODE, &af_sid_vpn_export_cmd);
+ install_element(BGP_IPV6_NODE, &af_sid_vpn_export_cmd);
}
#include "memory.h"
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 97f781b2bf..e3a795c6f1 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -1171,6 +1171,7 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
unsigned int valid_nh_count = 0;
int has_valid_label = 0;
bool allow_recursion = false;
+ int has_valid_sid = 0;
uint8_t distance;
struct peer *peer;
struct bgp_path_info *mpinfo;
@@ -1395,9 +1396,20 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
sizeof(struct ethaddr));
api_nh->weight = nh_weight;
+ if (mpinfo->extra
+ && !sid_zero(&mpinfo->extra->sid[0])
+ && !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ has_valid_sid = 1;
+ memcpy(&api_nh->seg6_segs, &mpinfo->extra->sid[0],
+ sizeof(api_nh->seg6_segs));
+ }
+
valid_nh_count++;
}
+ if (has_valid_sid && !(CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)))
+ SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6);
+
is_add = (valid_nh_count || nhg_id) ? true : false;
if (is_add && CHECK_FLAG(bm->flags, BM_FLAG_SEND_EXTRA_DATA_TO_ZEBRA)) {
@@ -1453,6 +1465,8 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
char eth_buf[ETHER_ADDR_STRLEN + 7] = {'\0'};
char buf1[ETHER_ADDR_STRLEN];
char label_buf[20];
+ char sid_buf[20];
+ char segs_buf[256];
int i;
zlog_debug(
@@ -1495,15 +1509,22 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
&& !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE))
snprintf(label_buf, sizeof(label_buf),
"label %u", api_nh->labels[0]);
+ if (has_valid_sid
+ && !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ inet_ntop(AF_INET6, &api_nh->seg6_segs,
+ sid_buf, sizeof(sid_buf));
+ snprintf(segs_buf, sizeof(segs_buf), "segs %s",
+ sid_buf);
+ }
if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)
&& !is_zero_mac(&api_nh->rmac))
snprintf(eth_buf, sizeof(eth_buf), " RMAC %s",
prefix_mac2str(&api_nh->rmac,
buf1, sizeof(buf1)));
- zlog_debug(" nhop [%d]: %s if %u VRF %u wt %u %s %s",
+ zlog_debug(" nhop [%d]: %s if %u VRF %u wt %u %s %s %s",
i + 1, nh_buf, api_nh->ifindex,
api_nh->vrf_id, api_nh->weight,
- label_buf, eth_buf);
+ label_buf, segs_buf, eth_buf);
}
int recursion_flag = 0;
@@ -2976,6 +2997,35 @@ static int bgp_ifp_create(struct interface *ifp)
return 0;
}
+static void bgp_zebra_process_srv6_locator_chunk(ZAPI_CALLBACK_ARGS)
+{
+ struct stream *s = NULL;
+ struct bgp *bgp = bgp_get_default();
+ struct listnode *node;
+ struct prefix_ipv6 *c;
+ struct srv6_locator_chunk s6c = {};
+ struct prefix_ipv6 *chunk = NULL;
+
+ s = zclient->ibuf;
+ zapi_srv6_locator_chunk_decode(s, &s6c);
+
+ if (strcmp(bgp->srv6_locator_name, s6c.locator_name) != 0) {
+ zlog_err("%s: Locator name unmatch %s:%s", __func__,
+ bgp->srv6_locator_name, s6c.locator_name);
+ return;
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, c)) {
+ if (!prefix_cmp(c, &s6c.prefix))
+ return;
+ }
+
+ chunk = prefix_ipv6_new();
+ *chunk = s6c.prefix;
+ listnode_add(bgp->srv6_locator_chunks, chunk);
+ vpn_leak_postchange_all();
+}
+
void bgp_zebra_init(struct thread_master *master, unsigned short instance)
{
zclient_num_connects = 0;
@@ -3018,6 +3068,8 @@ void bgp_zebra_init(struct thread_master *master, unsigned short instance)
zclient->iptable_notify_owner = iptable_notify_owner;
zclient->route_notify_owner = bgp_zebra_route_notify_owner;
zclient->instance = instance;
+ zclient->process_srv6_locator_chunk =
+ bgp_zebra_process_srv6_locator_chunk;
}
void bgp_zebra_destroy(void)
@@ -3415,3 +3467,8 @@ int bgp_zebra_stale_timer_update(struct bgp *bgp)
zlog_debug("send capabilty success");
return BGP_GR_SUCCESS;
}
+
+int bgp_zebra_srv6_manager_get_locator_chunk(const char *name)
+{
+ return srv6_manager_get_locator_chunk(zclient, name);
+}
diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h
index 4b357c380a..02b6484943 100644
--- a/bgpd/bgp_zebra.h
+++ b/bgpd/bgp_zebra.h
@@ -113,4 +113,5 @@ extern void bgp_zebra_announce_default(struct bgp *bgp, struct nexthop *nh,
extern int bgp_zebra_send_capabilities(struct bgp *bgp, bool disable);
extern int bgp_zebra_update(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type);
extern int bgp_zebra_stale_timer_update(struct bgp *bgp);
+extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name);
#endif /* _QUAGGA_BGP_ZEBRA_H */
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 6f2f2c9f34..33429d1d78 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -1327,6 +1327,22 @@ int bgp_peer_gr_init(struct peer *peer)
return BGP_GR_SUCCESS;
}
+static void bgp_srv6_init(struct bgp *bgp)
+{
+ bgp->srv6_enabled = false;
+ memset(bgp->srv6_locator_name, 0, sizeof(bgp->srv6_locator_name));
+ bgp->srv6_locator_chunks = list_new();
+ bgp->srv6_functions = list_new();
+}
+
+static void bgp_srv6_cleanup(struct bgp *bgp)
+{
+ if (bgp->srv6_locator_chunks)
+ list_delete(&bgp->srv6_locator_chunks);
+ if (bgp->srv6_functions)
+ list_delete(&bgp->srv6_functions);
+}
+
/* Allocate new peer object, implicitely locked. */
struct peer *peer_new(struct bgp *bgp)
{
@@ -3238,6 +3254,7 @@ static struct bgp *bgp_create(as_t *as, const char *name,
bgp_evpn_init(bgp);
bgp_evpn_vrf_es_init(bgp);
bgp_pbr_init(bgp);
+ bgp_srv6_init(bgp);
/*initilize global GR FSM */
bgp_global_gr_init(bgp);
@@ -3754,6 +3771,7 @@ void bgp_free(struct bgp *bgp)
bgp_evpn_cleanup(bgp);
bgp_pbr_cleanup(bgp);
+ bgp_srv6_cleanup(bgp);
XFREE(MTYPE_BGP_EVPN_INFO, bgp->evpn_info);
for (afi = AFI_IP; afi < AFI_MAX; afi++) {
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 4a17b72b7f..ffac20c218 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -29,6 +29,7 @@
#include "lib/json.h"
#include "vrf.h"
#include "vty.h"
+#include "srv6.h"
#include "iana_afi.h"
/* For union sockunion. */
@@ -222,6 +223,7 @@ struct vpn_policy {
#define BGP_VPN_POLICY_TOVPN_LABEL_AUTO (1 << 0)
#define BGP_VPN_POLICY_TOVPN_RD_SET (1 << 1)
#define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET (1 << 2)
+#define BGP_VPN_POLICY_TOVPN_SID_AUTO (1 << 3)
/*
* If we are importing another vrf into us keep a list of
@@ -234,6 +236,13 @@ struct vpn_policy {
* vrf names that we are being exported to.
*/
struct list *export_vrf;
+
+ /*
+ * Segment-Routing SRv6 Mode
+ */
+ uint32_t tovpn_sid_index; /* unset => set to 0 */
+ struct in6_addr *tovpn_sid;
+ struct in6_addr *tovpn_zebra_vrf_sid_last_sent;
};
/*
@@ -322,6 +331,11 @@ struct bgp_snmp_stats {
uint32_t routes_deleted;
};
+struct bgp_srv6_function {
+ struct in6_addr sid;
+ char locator_name[SRV6_LOCNAME_SIZE];
+};
+
/* BGP instance structure. */
struct bgp {
/* AS number of this BGP instance. */
@@ -718,6 +732,12 @@ struct bgp {
/* BGP route flap dampening configuration */
struct bgp_damp_config damp[AFI_MAX][SAFI_MAX];
+ /* BGP VPN SRv6 backend */
+ bool srv6_enabled;
+ char srv6_locator_name[SRV6_LOCNAME_SIZE];
+ struct list *srv6_locator_chunks;
+ struct list *srv6_functions;
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(bgp);
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index dd042e2584..0e01b8c3e4 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -2597,6 +2597,19 @@ address-family:
The CLI will disallow attempts to configure incompatible leaking
modes.
+.. _bgp-l3vpn-srv6:
+
+L3VPN SRv6
+----------
+
+.. clicmd:: segment-routing srv6
+
+ Use SRv6 backend with BGP L3VPN, and go to its configuration node.
+
+.. clicmd:: locator NAME
+
+ Specify the SRv6 locator to be used for SRv6 L3VPN. The locator must also
+ be configured in zebra, but the two may be configured in either order.
.. _bgp-evpn:
@@ -3528,6 +3541,40 @@ Displaying Update Group Information
Display Information about update-group events in FRR.
+Segment-Routing IPv6
+--------------------
+
+.. clicmd:: show bgp segment-routing srv6
+
+ This command displays information about SRv6 L3VPN in bgpd. Specifically,
+ what kind of Locator is being used, and its Locator chunk information.
+ And the SID of the SRv6 Function that is actually managed on bgpd.
+ In the following example, bgpd is using a Locator named loc1, and two SRv6
+ Functions are managed to perform VPNv6 VRF redirect for vrf10 and vrf20.
+
+::
+
+ router# show bgp segment-routing srv6
+ locator_name: loc1
+ locator_chunks:
+ - 2001:db8:1:1::/64
+ functions:
+ - sid: 2001:db8:1:1::100
+ locator: loc1
+ - sid: 2001:db8:1:1::200
+ locator: loc1
+ bgps:
+ - name: default
+ vpn_policy[AFI_IP].tovpn_sid: none
+ vpn_policy[AFI_IP6].tovpn_sid: none
+ - name: vrf10
+ vpn_policy[AFI_IP].tovpn_sid: none
+ vpn_policy[AFI_IP6].tovpn_sid: 2001:db8:1:1::100
+ - name: vrf20
+ vpn_policy[AFI_IP].tovpn_sid: none
+ vpn_policy[AFI_IP6].tovpn_sid: 2001:db8:1:1::200
+
+
.. _bgp-route-reflector:
Route Reflector
diff --git a/doc/user/sharp.rst b/doc/user/sharp.rst
index aa0ad6a102..e088c2f75b 100644
--- a/doc/user/sharp.rst
+++ b/doc/user/sharp.rst
@@ -147,4 +147,143 @@ keyword. At present, no sharp commands will be preserved in the config.
Show imported Traffic Engineering Data Base
+.. clicmd:: sharp install seg6-routes [vrf NAME] <A.B.C.D|X:X::X:X> nexthop-seg6 X:X::X:X encap X:X::X:X (1-1000000)
+ This command installs a route for SRv6 Transit behavior (on Linux it is
+ known as seg6 route). The count, destination, vrf, etc. have the same
+ meaning as in the ``sharp install routes`` command. With this command,
+ sharpd will request zebra to configure seg6 route via ZEBRA_ROUTE_ADD
+ ZAPI. As in the following example.
+
+::
+
+ router# sharp install seg6-routes 1::A nexthop-seg6 2001::2 encap A:: 1
+ router# sharp install seg6-routes 1::B nexthop-seg6 2001::2 encap B:: 1
+
+ router# show ipv6 route
+ D>* 1::A/128 [150/0] via 2001::2, dum0, seg6 a::, weight 1, 00:00:01
+ D>* 1::B/128 [150/0] via 2001::2, dum0, seg6 b::, weight 1, 00:00:01
+
+ bash# ip -6 route list
+ 1::A encap seg6 mode encap segs 1 [ a:: ] via 2001::2 dev dum0 proto 194 metric 20 pref medium
+ 1::B encap seg6 mode encap segs 1 [ b:: ] via 2001::2 dev dum0 proto 194 metric 20 pref medium
+
+.. clicmd:: sharp install seg6local-routes [vrf NAME] X:X::X:X nexthop-seg6local NAME ACTION ARGS.. (1-1000000)
+
+ This command installs a route for SRv6 Endpoint behavior (on Linux it is
+ known as seg6local route). The count, destination, vrf, etc. have the same
+ meaning as in the ``sharp install routes`` command. With this command,
+ sharpd will request zebra to configure seg6local route via ZEBRA_ROUTE_ADD
+ ZAPI. As in the following example.
+
+ There are many End Functions defined in SRv6, which have been standardized
+ in RFC 8986. The current implementation supports End, End.X, End.T, End.DX4,
+ and End.DT6, which can be configured as follows.
+
+::
+
+ router# sharp install seg6local-routes 1::1 nexthop-seg6local dum0 End 1
+ router# sharp install seg6local-routes 1::2 nexthop-seg6local dum0 End_X 2001::1 1
+ router# sharp install seg6local-routes 1::3 nexthop-seg6local dum0 End_T 10 1
+ router# sharp install seg6local-routes 1::4 nexthop-seg6local dum0 End_DX4 10.0.0.1 1
+ router# sharp install seg6local-routes 1::5 nexthop-seg6local dum0 End_DT6 10 1
+
+ router# show ipv6 route
+ D>* 1::1/128 [150/0] is directly connected, dum0, seg6local End USP, weight 1, 00:00:05
+ D>* 1::2/128 [150/0] is directly connected, dum0, seg6local End.X nh6 2001::1, weight 1, 00:00:05
+ D>* 1::3/128 [150/0] is directly connected, dum0, seg6local End.T table 10, weight 1, 00:00:05
+ D>* 1::4/128 [150/0] is directly connected, dum0, seg6local End.DX4 nh4 10.0.0.1, weight 1, 00:00:05
+ D>* 1::5/128 [150/0] is directly connected, dum0, seg6local End.DT6 table 10, weight 1, 00:00:05
+
+ bash# ip -6 route
+ 1::1 encap seg6local action End dev dum0 proto 194 metric 20 pref medium
+ 1::2 encap seg6local action End.X nh6 2001::1 dev dum0 proto 194 metric 20 pref medium
+ 1::3 encap seg6local action End.T table 10 dev dum0 proto 194 metric 20 pref medium
+ 1::4 encap seg6local action End.DX4 nh4 10.0.0.1 dev dum0 proto 194 metric 20 pref medium
+ 1::5 encap seg6local action End.DT6 table 10 dev dum0 proto 194 metric 20 pref medium
+
+.. clicmd:: show sharp segment-routing srv6
+
+ This command shows which SRv6 locator chunks sharpd is holding as a zclient.
+ An SRv6 locator is defined for each SRv6 router, and a single locator may
+ be shared by multiple protocols.
+
+ In the FRRouting implementation, the Locator chunk get request is executed
+ by a routing protocol daemon such as sharpd or bgpd, and then Zebra
+ allocates a Locator Chunk, which is a subset of the Locator Prefix, and
+ notifies the requesting protocol daemon of this information.
+
+ This command example shows how the locator chunk of sharpd itself is
+ allocated.
+
+::
+
+ router# show segment-routing srv6 locator
+ Locator:
+ Name ID 2 2001:db8:2:2::/64 Up
+
+ router# show sharp segment-routing srv6
+ Locator loc1 has 1 prefix chunks
+ 2001:db8:1:1::/64
+
+.. clicmd:: sharp srv6-manager get-locator-chunk
+
+ This command requests the SRv6 locator to allocate a locator chunk via ZAPI.
+ This chunk can be owned by the protocol daemon, and the chunk obtained by
+ sharpd will not be used by the SRv6 mechanism of another routing protocol.
+
+ Since this request is made asynchronously, it can be issued before the SRv6
+ locator is configured on the zebra side, and as soon as it is ready on the
+ zebra side, sharpd can check the allocated locator chunk via zapi.
+
+::
+
+ router# show segment-routing srv6 locator loc1 detail
+ Name: loc1
+ Prefix: 2001:db8:1:1::/64
+ Chunks:
+ - prefix: 2001:db8:1:1::/64, owner: system
+
+ router# show sharp segment-routing srv6
+ (nothing)
+
+ router# sharp srv6-manager get-locator-chunk loc1
+
+ router# show segment-routing srv6 locator loc1 detail
+ Name: loc1
+ Prefix: 2001:db8:1:1::/64
+ Chunks:
+ - prefix: 2001:db8:1:1::/64, owner: sharp
+
+ router# show sharp segment-routing srv6
+ Locator loc1 has 1 prefix chunks
+ 2001:db8:1:1::/64
+
+.. clicmd:: sharp srv6-manager release-locator-chunk
+
+ This command releases a locator chunk that has already been allocated by
+ ZAPI. The freed chunk will have its owner returned to the system and will
+ be available to another protocol daemon.
+
+::
+
+ router# show segment-routing srv6 locator loc1 detail
+ Name: loc1
+ Prefix: 2001:db8:1:1::/64
+ Chunks:
+ - prefix: 2001:db8:1:1::/64, owner: sharp
+
+ router# show sharp segment-routing srv6
+ Locator loc1 has 1 prefix chunks
+ 2001:db8:1:1::/64
+
+ router# sharp srv6-manager release-locator-chunk loc1
+
+ router# show segment-routing srv6 locator loc1 detail
+ Name: loc1
+ Prefix: 2001:db8:1:1::/64
+ Chunks:
+ - prefix: 2001:db8:1:1::/64, owner: system
+
+ router# show sharp segment-routing srv6
+ (nothing)
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 6eebf71f80..ee9da63445 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -630,6 +630,137 @@ presence of the entry.
21 Static 10.125.0.2 IPv4 Explicit Null
+.. _zebra-srv6:
+
+Segment-Routing IPv6
+====================
+
+Segment-Routing is a source-routing paradigm that allows
+network operators to encode network intent into the packets.
+SRv6 is an implementation of Segment-Routing
+based on IPv6 and the segment-routing-header.
+
+All routing daemons can use the Segment-Routing base
+framework implemented in zebra to use the SRv6 routing mechanism.
+In that case, the user must configure the initial SRv6 settings via
+FRR's CLI, frr.conf or zebra.conf. This section shows how
+to configure SRv6 on FRR. SRv6 can also be used standalone,
+and this section covers that case as well.
+
+.. index:: show segment-routing srv6 locator [json]
+.. clicmd:: show segment-routing srv6 locator [json]
+
+ This command dumps the SRv6-locators configured on zebra. An SRv6-locator is
+ used to route to the node before performing the SRv6-function, and it works
+ as an aggregation of SRv6-function IDs. The following console log shows two
+ SRv6-locators, loc1 and loc2. All locators are identified by a unique IPv6
+ prefix. The user can get this information as a JSON string when the ``json``
+ keyword is presented at the end of the CLI.
+
+::
+
+ router# sh segment-routing srv6 locator
+ Locator:
+ Name ID Prefix Status
+ -------------------- ------- ------------------------ -------
+ loc1 1 2001:db8:1:1::/64 Up
+ loc2 2 2001:db8:2:2::/64 Up
+
+.. index:: show segment-routing srv6 locator NAME detail [json]
+.. clicmd:: show segment-routing srv6 locator NAME detail [json]
+
+ As shown in the example, by specifying the name of the locator, you
+ can see the detailed information for each locator. Locator can be
+ represented by a single IPv6 prefix, but SRv6 is designed to share this
+ Locator among multiple Routing Protocols. For this purpose, zebra divides
+ the IPv6 prefix block that makes the Locator unique into multiple chunks,
+ and manages the ownership of each chunk.
+
+ For example, loc1 is owned by system, which means that it is not yet
+ claimed by any routing protocol. In contrast, loc2 is owned by sharp,
+ which means that sharpd (the daemon used for function development) owns
+ the chunk of this locator, and no other routing protocol will use this
+ area.
+
+::
+
+ router# show segment-routing srv6 locator loc1 detail
+ Name: loc1
+ Prefix: 2001:db8:1:1::/64
+ Chunks:
+ - prefix: 2001:db8:1:1::/64, owner: system
+
+ router# show segment-routing srv6 locator loc2 detail
+ Name: loc2
+ Prefix: 2001:db8:2:2::/64
+ Chunks:
+ - prefix: 2001:db8:2:2::/64, owner: sharp
+
+.. index:: segment-routing
+.. clicmd:: segment-routing
+
+ Move from configure mode to segment-routing node.
+
+.. index:: srv6
+.. clicmd:: srv6
+
+ Move from segment-routing node to srv6 node.
+
+.. index:: locators
+.. clicmd:: locators
+
+ Move from srv6 node to locator node. In this locator node, user can
+ configure detailed settings such as the actual srv6 locator.
+
+.. index:: locator NAME
+.. clicmd:: locator NAME
+
+ Create a new locator. If the name of an existing locator is specified,
+ move to the specified locator's configuration node to change its settings.
+
+.. index:: prefix X:X::X:X/M [function-bits-length 32]
+.. clicmd:: prefix X:X::X:X/M [function-bits-length 32]
+
+ Set the ipv6 prefix block of the locator. SRv6 locator is defined by
+ RFC8986. The actual routing protocol specifies the locator and allocates a
+ SID to be used by each routing protocol. This SID is included in the locator
+ as an IPv6 prefix.
+
+ Following example console log shows the typical configuration of SRv6
+ data-plane. After a new SRv6 locator, named loc1, is created, loc1's prefix
+ is configured as ``2001:db8:1:1::/64``. If user or some routing daemon
+ allocates new SID on this locator, new SID will allocated in range of this
+ prefix. For example, if some routing daemon creates new SID on locator
+ (``2001:db8:1:1::/64``), Then new SID will be ``2001:db8:1:1:7::/80``,
+ ``2001:db8:1:1:8::/80``, and so on. Each locator has default SID that is
+ SRv6 local function "End". Usually default SID is allocated as
+ ``PREFIX:1::``. (``PREFIX`` is locator's prefix) For example, if user
+ configure the locator's prefix as ``2001:db8:1:1::/64``, then default SID
+ will be ``2001:db8:1:1:1::``)
+
+ The function bits range is 16 bits by default. If operators want to change
+ the function bits range, they can configure it with the
+ ``function-bits-length`` option.
+
+::
+
+ router# configure terminal
+ router(config)# segment-routing
+ router(config-sr)# srv6
+ router(config-srv6)# locators
+ router(config-srv6-locs)# locator loc1
+ router(config-srv6-loc)# prefix 2001:db8:1:1::/64
+
+ router(config-srv6-loc)# show run
+ ...
+ segment-routing
+ srv6
+ locators
+ locator loc1
+ prefix 2001:db8:1:1::/64
+ !
+ ...
+
.. _multicast-rib-commands:
Multicast RIB Commands
diff --git a/lib/command.c b/lib/command.c
index 5cf1a4f57a..c955087437 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -889,6 +889,15 @@ enum node_type node_parent(enum node_type node)
case PCEP_PCC_NODE:
ret = PCEP_NODE;
break;
+ case SRV6_NODE:
+ ret = SEGMENT_ROUTING_NODE;
+ break;
+ case SRV6_LOCS_NODE:
+ ret = SRV6_NODE;
+ break;
+ case SRV6_LOC_NODE:
+ ret = SRV6_LOCS_NODE;
+ break;
default:
ret = CONFIG_NODE;
break;
diff --git a/lib/command.h b/lib/command.h
index 51da4c52e6..c36797d125 100644
--- a/lib/command.h
+++ b/lib/command.h
@@ -120,6 +120,7 @@ enum node_type {
BGP_VNC_L2_GROUP_NODE, /* BGP VNC L2 group */
RFP_DEFAULTS_NODE, /* RFP defaults node */
BGP_EVPN_NODE, /* BGP EVPN node. */
+ BGP_SRV6_NODE, /* BGP SRv6 node. */
OSPF_NODE, /* OSPF protocol mode */
OSPF6_NODE, /* OSPF protocol for IPv6 mode */
LDP_NODE, /* LDP protocol mode */
@@ -155,6 +156,9 @@ enum node_type {
PCEP_PCE_CONFIG_NODE, /* PCE shared configuration node */
PCEP_PCE_NODE, /* PCE configuration node */
PCEP_PCC_NODE, /* PCC configuration node */
+ SRV6_NODE, /* SRv6 node */
+ SRV6_LOCS_NODE, /* SRv6 locators node */
+ SRV6_LOC_NODE, /* SRv6 locator node */
VTY_NODE, /* Vty node. */
FPM_NODE, /* Dataplane FPM node. */
LINK_PARAMS_NODE, /* Link-parameters node */
diff --git a/lib/nexthop.c b/lib/nexthop.c
index 0ac6c0ae1b..23e3a2b733 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -36,6 +36,7 @@
DEFINE_MTYPE_STATIC(LIB, NEXTHOP, "Nexthop");
DEFINE_MTYPE_STATIC(LIB, NH_LABEL, "Nexthop label");
+DEFINE_MTYPE_STATIC(LIB, NH_SRV6, "Nexthop srv6");
static int _nexthop_labels_cmp(const struct nexthop *nh1,
const struct nexthop *nh2)
@@ -66,6 +67,39 @@ static int _nexthop_labels_cmp(const struct nexthop *nh1,
(nhl1->num_labels * sizeof(mpls_label_t)));
}
+static int _nexthop_srv6_cmp(const struct nexthop *nh1,
+			     const struct nexthop *nh2)
+{
+	int ret = 0;
+
+	if (!nh1->nh_srv6 && !nh2->nh_srv6)
+		return 0;
+
+	if (nh1->nh_srv6 && !nh2->nh_srv6)
+		return 1;
+
+	if (!nh1->nh_srv6 && nh2->nh_srv6)
+		return -1;
+
+	if (nh1->nh_srv6->seg6local_action > nh2->nh_srv6->seg6local_action)
+		return 1;
+
+	if (nh1->nh_srv6->seg6local_action < nh2->nh_srv6->seg6local_action) /* was nh2 < nh1: unreachable */
+		return -1;
+
+	ret = memcmp(&nh1->nh_srv6->seg6local_ctx,
+		     &nh2->nh_srv6->seg6local_ctx,
+		     sizeof(struct seg6local_context));
+	if (ret != 0)
+		return ret;
+
+	ret = memcmp(&nh1->nh_srv6->seg6_segs,
+		     &nh2->nh_srv6->seg6_segs,
+		     sizeof(struct in6_addr));
+
+	return ret;
+}
+
int nexthop_g_addr_cmp(enum nexthop_types_t type, const union g_addr *addr1,
const union g_addr *addr2)
{
@@ -199,6 +233,10 @@ int nexthop_cmp(const struct nexthop *next1, const struct nexthop *next2)
return ret;
ret = _nexthop_labels_cmp(next1, next2);
+ if (ret != 0)
+ return ret;
+
+ ret = _nexthop_srv6_cmp(next1, next2);
return ret;
}
@@ -353,6 +391,8 @@ struct nexthop *nexthop_new(void)
void nexthop_free(struct nexthop *nexthop)
{
nexthop_del_labels(nexthop);
+ nexthop_del_srv6_seg6local(nexthop);
+ nexthop_del_srv6_seg6(nexthop);
if (nexthop->resolved)
nexthops_free(nexthop->resolved);
XFREE(MTYPE_NEXTHOP, nexthop);
@@ -523,6 +563,57 @@ void nexthop_del_labels(struct nexthop *nexthop)
nexthop->nh_label_type = ZEBRA_LSP_NONE;
}
+void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action,
+ const struct seg6local_context *ctx)
+{
+ if (action == ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ return;
+
+ if (!nexthop->nh_srv6)
+ nexthop->nh_srv6 = XCALLOC(MTYPE_NH_SRV6,
+ sizeof(struct nexthop_srv6));
+
+ nexthop->nh_srv6->seg6local_action = action;
+ nexthop->nh_srv6->seg6local_ctx = *ctx;
+}
+
+void nexthop_del_srv6_seg6local(struct nexthop *nexthop)
+{
+ if (!nexthop->nh_srv6)
+ return;
+
+ nexthop->nh_srv6->seg6local_action = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
+
+ if (sid_zero(&nexthop->nh_srv6->seg6_segs))
+ XFREE(MTYPE_NH_SRV6, nexthop->nh_srv6);
+}
+
+void nexthop_add_srv6_seg6(struct nexthop *nexthop,
+ const struct in6_addr *segs)
+{
+ if (!segs)
+ return;
+
+ if (!nexthop->nh_srv6)
+ nexthop->nh_srv6 = XCALLOC(MTYPE_NH_SRV6,
+ sizeof(struct nexthop_srv6));
+
+ nexthop->nh_srv6->seg6_segs = *segs;
+}
+
+void nexthop_del_srv6_seg6(struct nexthop *nexthop)
+{
+ if (!nexthop->nh_srv6)
+ return;
+
+ memset(&nexthop->nh_srv6->seg6_segs, 0,
+ sizeof(nexthop->nh_srv6->seg6_segs));
+
+ if (nexthop->nh_srv6->seg6local_action ==
+ ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ XFREE(MTYPE_NH_SRV6, nexthop->nh_srv6);
+}
+
const char *nexthop2str(const struct nexthop *nexthop, char *str, int size)
{
switch (nexthop->type) {
@@ -668,6 +759,14 @@ uint32_t nexthop_hash_quick(const struct nexthop *nexthop)
key = jhash_1word(nexthop->backup_idx[i], key);
}
+ if (nexthop->nh_srv6) {
+ key = jhash_1word(nexthop->nh_srv6->seg6local_action, key);
+ key = jhash(&nexthop->nh_srv6->seg6local_ctx,
+ sizeof(nexthop->nh_srv6->seg6local_ctx), key);
+ key = jhash(&nexthop->nh_srv6->seg6_segs,
+ sizeof(nexthop->nh_srv6->seg6_segs), key);
+ }
+
return key;
}
@@ -720,6 +819,17 @@ void nexthop_copy_no_recurse(struct nexthop *copy,
nexthop_add_labels(copy, nexthop->nh_label_type,
nexthop->nh_label->num_labels,
&nexthop->nh_label->label[0]);
+
+ if (nexthop->nh_srv6) {
+ if (nexthop->nh_srv6->seg6local_action !=
+ ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ nexthop_add_srv6_seg6local(copy,
+ nexthop->nh_srv6->seg6local_action,
+ &nexthop->nh_srv6->seg6local_ctx);
+ if (!sid_zero(&nexthop->nh_srv6->seg6_segs))
+ nexthop_add_srv6_seg6(copy,
+ &nexthop->nh_srv6->seg6_segs);
+ }
}
void nexthop_copy(struct nexthop *copy, const struct nexthop *nexthop,
diff --git a/lib/nexthop.h b/lib/nexthop.h
index d6ea83cf06..dd65509aec 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -26,6 +26,7 @@
#include "prefix.h"
#include "mpls.h"
#include "vxlan.h"
+#include "srv6.h"
#ifdef __cplusplus
extern "C" {
@@ -139,6 +140,9 @@ struct nexthop {
/* SR-TE color used for matching SR-TE policies */
uint32_t srte_color;
+
+ /* SRv6 information */
+ struct nexthop_srv6 *nh_srv6;
};
/* Utility to append one nexthop to another. */
@@ -157,6 +161,12 @@ void nexthops_free(struct nexthop *nexthop);
void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype,
uint8_t num_labels, const mpls_label_t *labels);
void nexthop_del_labels(struct nexthop *);
+void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action,
+ const struct seg6local_context *ctx);
+void nexthop_del_srv6_seg6local(struct nexthop *nexthop);
+void nexthop_add_srv6_seg6(struct nexthop *nexthop,
+ const struct in6_addr *segs);
+void nexthop_del_srv6_seg6(struct nexthop *nexthop);
/*
* Allocate a new nexthop object and initialize it from various args.
diff --git a/lib/srv6.c b/lib/srv6.c
index 287bf56089..ceb769ef76 100644
--- a/lib/srv6.c
+++ b/lib/srv6.c
@@ -22,6 +22,10 @@
#include "srv6.h"
#include "log.h"
+DEFINE_QOBJ_TYPE(srv6_locator);
+DEFINE_MTYPE_STATIC(LIB, SRV6_LOCATOR, "SRV6 locator");
+DEFINE_MTYPE_STATIC(LIB, SRV6_LOCATOR_CHUNK, "SRV6 locator chunk");
+
const char *seg6local_action2str(uint32_t action)
{
switch (action) {
@@ -76,7 +80,8 @@ int snprintf_seg6_segs(char *str,
}
const char *seg6local_context2str(char *str, size_t size,
- struct seg6local_context *ctx, uint32_t action)
+ const struct seg6local_context *ctx,
+ uint32_t action)
{
char b0[128];
@@ -116,3 +121,84 @@ const char *seg6local_context2str(char *str, size_t size,
return str;
}
}
+
+struct srv6_locator *srv6_locator_alloc(const char *name)
+{
+ struct srv6_locator *locator = NULL;
+
+ locator = XCALLOC(MTYPE_SRV6_LOCATOR, sizeof(struct srv6_locator));
+ strlcpy(locator->name, name, sizeof(locator->name));
+ locator->chunks = list_new();
+ QOBJ_REG(locator, srv6_locator);
+ return locator;
+}
+
+struct srv6_locator_chunk *srv6_locator_chunk_alloc(void)
+{
+ struct srv6_locator_chunk *chunk = NULL;
+
+ chunk = XCALLOC(MTYPE_SRV6_LOCATOR_CHUNK,
+ sizeof(struct srv6_locator_chunk));
+ return chunk;
+}
+
+void srv6_locator_free(struct srv6_locator *locator)
+{
+ XFREE(MTYPE_SRV6_LOCATOR, locator);
+}
+
+void srv6_locator_chunk_free(struct srv6_locator_chunk *chunk)
+{
+ XFREE(MTYPE_SRV6_LOCATOR_CHUNK, chunk);
+}
+
+json_object *srv6_locator_chunk_json(const struct srv6_locator_chunk *chunk)
+{
+ char str[256];
+ json_object *jo_root = NULL;
+
+ jo_root = json_object_new_object();
+ prefix2str(&chunk->prefix, str, sizeof(str));
+ json_object_string_add(jo_root, "prefix", str);
+ json_object_string_add(jo_root, "proto",
+ zebra_route_string(chunk->proto));
+
+ return jo_root;
+}
+
+json_object *srv6_locator_json(const struct srv6_locator *loc)
+{
+ char str[256];
+ struct listnode *node;
+ struct srv6_locator_chunk *chunk;
+ json_object *jo_root = NULL;
+ json_object *jo_chunk = NULL;
+ json_object *jo_chunks = NULL;
+
+ jo_root = json_object_new_object();
+
+ /* set name */
+ json_object_string_add(jo_root, "name", loc->name);
+
+ /* set prefix */
+ prefix2str(&loc->prefix, str, sizeof(str));
+ json_object_string_add(jo_root, "prefix", str);
+
+ /* set function_bits_length */
+ json_object_int_add(jo_root, "functionBitsLength",
+ loc->function_bits_length);
+
+ /* set status_up */
+ json_object_boolean_add(jo_root, "statusUp",
+ loc->status_up);
+
+ /* set chunks */
+ jo_chunks = json_object_new_array();
+ json_object_object_add(jo_root, "chunks", jo_chunks);
+ for (ALL_LIST_ELEMENTS_RO((struct list *)loc->chunks, node, chunk)) {
+ jo_chunk = srv6_locator_chunk_json(chunk);
+ json_object_array_add(jo_chunks, jo_chunk);
+ }
+
+ return jo_root;
+}
diff --git a/lib/srv6.h b/lib/srv6.h
index 24c7ffc3a2..715fc3723b 100644
--- a/lib/srv6.h
+++ b/lib/srv6.h
@@ -21,10 +21,14 @@
#define _FRR_SRV6_H
#include <zebra.h>
+#include "prefix.h"
+#include "json.h"
+
#include <arpa/inet.h>
#include <netinet/in.h>
#define SRV6_MAX_SIDS 16
+#define SRV6_LOCNAME_SIZE 256
#ifdef __cplusplus
extern "C" {
@@ -69,6 +73,59 @@ struct seg6local_context {
uint32_t table;
};
+struct srv6_locator {
+ char name[SRV6_LOCNAME_SIZE];
+ struct prefix_ipv6 prefix;
+
+ /*
+ * Bit length of SRv6 locator described in
+ * draft-ietf-bess-srv6-services-05#section-3.2.1
+ */
+ uint8_t block_bits_length;
+ uint8_t node_bits_length;
+ uint8_t function_bits_length;
+ uint8_t argument_bits_length;
+
+ int algonum;
+ uint64_t current;
+ bool status_up;
+ struct list *chunks;
+
+ QOBJ_FIELDS;
+};
+DECLARE_QOBJ_TYPE(srv6_locator);
+
+struct srv6_locator_chunk {
+ char locator_name[SRV6_LOCNAME_SIZE];
+ struct prefix_ipv6 prefix;
+
+ /*
+ * Bit length of SRv6 locator described in
+ * draft-ietf-bess-srv6-services-05#section-3.2.1
+ */
+ uint8_t block_bits_length;
+ uint8_t node_bits_length;
+ uint8_t function_bits_length;
+ uint8_t argument_bits_length;
+
+ /*
+ * For Zclient communication values
+ */
+ uint8_t keep;
+ uint8_t proto;
+ uint16_t instance;
+ uint32_t session_id;
+};
+
+struct nexthop_srv6 {
+ /* SRv6 localsid info for Endpoint-behaviour */
+ enum seg6local_action_t seg6local_action;
+ struct seg6local_context seg6local_ctx;
+
+ /* SRv6 Headend-behaviour */
+ struct in6_addr seg6_segs;
+};
+
static inline const char *seg6_mode2str(enum seg6_mode_t mode)
{
switch (mode) {
@@ -119,13 +176,20 @@ static inline void *sid_copy(struct in6_addr *dst,
const char *
seg6local_action2str(uint32_t action);
-const char *
-seg6local_context2str(char *str, size_t size,
- struct seg6local_context *ctx, uint32_t action);
+const char *seg6local_context2str(char *str, size_t size,
+ const struct seg6local_context *ctx,
+ uint32_t action);
int snprintf_seg6_segs(char *str,
size_t size, const struct seg6_segs *segs);
+extern struct srv6_locator *srv6_locator_alloc(const char *name);
+extern struct srv6_locator_chunk *srv6_locator_chunk_alloc(void);
+extern void srv6_locator_free(struct srv6_locator *locator);
+extern void srv6_locator_chunk_free(struct srv6_locator_chunk *chunk);
+json_object *srv6_locator_chunk_json(const struct srv6_locator_chunk *chunk);
+json_object *srv6_locator_json(const struct srv6_locator *loc);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/zclient.c b/lib/zclient.c
index 3ea1789441..10dda5ba0e 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -41,6 +41,7 @@
#include "lib_errors.h"
#include "srte.h"
#include "printfrr.h"
+#include "srv6.h"
DEFINE_MTYPE_STATIC(LIB, ZCLIENT, "Zclient");
DEFINE_MTYPE_STATIC(LIB, REDIST_INST, "Redistribution instance IDs");
@@ -435,6 +436,42 @@ enum zclient_send_status zclient_send_vrf_label(struct zclient *zclient,
return zclient_send_message(zclient);
}
+enum zclient_send_status zclient_send_localsid(struct zclient *zclient,
+ const struct in6_addr *sid, ifindex_t oif,
+ enum seg6local_action_t action,
+ const struct seg6local_context *context)
+{
+ struct prefix_ipv6 p = {};
+ struct zapi_route api = {};
+ struct nexthop nh = {};
+
+ p.family = AF_INET6;
+ p.prefixlen = 128;
+ p.prefix = *sid;
+
+ api.vrf_id = VRF_DEFAULT;
+ api.type = ZEBRA_ROUTE_BGP;
+ api.instance = 0;
+ api.safi = SAFI_UNICAST;
+ memcpy(&api.prefix, &p, sizeof(p));
+
+ if (action == ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ return zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);
+
+ SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);
+ SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
+
+ nh.type = NEXTHOP_TYPE_IFINDEX;
+ nh.ifindex = oif;
+ SET_FLAG(nh.flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL);
+ nexthop_add_srv6_seg6local(&nh, action, context);
+
+ zapi_nexthop_from_nexthop(&api.nexthops[0], &nh);
+ api.nexthop_num = 1;
+
+ return zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
+}
+
/* Send register requests to zebra daemon for the information in a VRF. */
void zclient_send_reg_requests(struct zclient *zclient, vrf_id_t vrf_id)
{
@@ -796,6 +833,26 @@ static int zapi_nexthop_labels_cmp(const struct zapi_nexthop *next1,
return memcmp(next1->labels, next2->labels, next1->label_num);
}
+static int zapi_nexthop_srv6_cmp(const struct zapi_nexthop *next1,
+ const struct zapi_nexthop *next2)
+{
+ int ret = 0;
+
+ ret = memcmp(&next1->seg6_segs, &next2->seg6_segs,
+ sizeof(struct in6_addr));
+ if (ret != 0)
+ return ret;
+
+ if (next1->seg6local_action > next2->seg6local_action)
+ return 1;
+
+ if (next1->seg6local_action < next2->seg6local_action)
+ return -1;
+
+ return memcmp(&next1->seg6local_ctx, &next2->seg6local_ctx,
+ sizeof(struct seg6local_context));
+}
+
static int zapi_nexthop_cmp_no_labels(const struct zapi_nexthop *next1,
const struct zapi_nexthop *next2)
{
@@ -896,6 +953,10 @@ static int zapi_nexthop_cmp(const void *item1, const void *item2)
return ret;
ret = zapi_nexthop_labels_cmp(next1, next2);
+ if (ret != 0)
+ return ret;
+
+ ret = zapi_nexthop_srv6_cmp(next1, next2);
return ret;
}
@@ -992,10 +1053,58 @@ int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh,
stream_putc(s, api_nh->backup_idx[i]);
}
+ if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL)) {
+ stream_putl(s, api_nh->seg6local_action);
+ stream_write(s, &api_nh->seg6local_ctx,
+ sizeof(struct seg6local_context));
+ }
+
+ if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_SEG6))
+ stream_write(s, &api_nh->seg6_segs,
+ sizeof(struct in6_addr));
+
done:
return ret;
}
+int zapi_srv6_locator_chunk_encode(struct stream *s,
+ const struct srv6_locator_chunk *c)
+{
+ stream_putw(s, strlen(c->locator_name));
+ stream_put(s, c->locator_name, strlen(c->locator_name));
+ stream_putw(s, c->prefix.prefixlen);
+ stream_put(s, &c->prefix.prefix, sizeof(c->prefix.prefix));
+ stream_putc(s, c->block_bits_length);
+ stream_putc(s, c->node_bits_length);
+ stream_putc(s, c->function_bits_length);
+ stream_putc(s, c->argument_bits_length);
+ return 0;
+}
+
+int zapi_srv6_locator_chunk_decode(struct stream *s,
+ struct srv6_locator_chunk *c)
+{
+ uint16_t len = 0;
+
+ c->prefix.family = AF_INET6;
+
+ STREAM_GETW(s, len);
+ if (len > SRV6_LOCNAME_SIZE)
+ goto stream_failure;
+
+ STREAM_GET(c->locator_name, s, len);
+ STREAM_GETW(s, c->prefix.prefixlen);
+ STREAM_GET(&c->prefix.prefix, s, sizeof(c->prefix.prefix));
+ STREAM_GETC(s, c->block_bits_length);
+ STREAM_GETC(s, c->node_bits_length);
+ STREAM_GETC(s, c->function_bits_length);
+ STREAM_GETC(s, c->argument_bits_length);
+ return 0;
+
+stream_failure:
+ return -1;
+}
+
static int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg)
{
int i;
@@ -1273,6 +1382,16 @@ int zapi_nexthop_decode(struct stream *s, struct zapi_nexthop *api_nh,
STREAM_GETC(s, api_nh->backup_idx[i]);
}
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL)) {
+ STREAM_GETL(s, api_nh->seg6local_action);
+ STREAM_GET(&api_nh->seg6local_ctx, s,
+ sizeof(struct seg6local_context));
+ }
+
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6))
+ STREAM_GET(&api_nh->seg6_segs, s,
+ sizeof(struct in6_addr));
+
/* Success */
ret = 0;
@@ -1637,6 +1756,13 @@ struct nexthop *nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh)
memcpy(n->backup_idx, znh->backup_idx, n->backup_num);
}
+ if (znh->seg6local_action != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ nexthop_add_srv6_seg6local(n, znh->seg6local_action,
+ &znh->seg6local_ctx);
+
+ if (!sid_zero(&znh->seg6_segs))
+ nexthop_add_srv6_seg6(n, &znh->seg6_segs);
+
return n;
}
@@ -1681,6 +1807,23 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh,
memcpy(znh->backup_idx, nh->backup_idx, znh->backup_num);
}
+ if (nh->nh_srv6) {
+ if (nh->nh_srv6->seg6local_action !=
+ ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) {
+ SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL);
+ znh->seg6local_action = nh->nh_srv6->seg6local_action;
+ memcpy(&znh->seg6local_ctx,
+ &nh->nh_srv6->seg6local_ctx,
+ sizeof(struct seg6local_context));
+ }
+
+ if (!sid_zero(&nh->nh_srv6->seg6_segs)) {
+ SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6);
+ memcpy(&znh->seg6_segs, &nh->nh_srv6->seg6_segs,
+ sizeof(struct in6_addr));
+ }
+ }
+
return 0;
}
@@ -2598,6 +2741,76 @@ stream_failure:
return -1;
}
+/**
+ * Function to request an SRv6-locator chunk in an asynchronous way
+ *
+ * @param zclient Zclient used to connect to the SRv6 manager (zebra)
+ * @param locator_name Name of SRv6-locator
+ * @result 0 on success, -1 otherwise
+ */
+int srv6_manager_get_locator_chunk(struct zclient *zclient,
+ const char *locator_name)
+{
+ struct stream *s;
+ const size_t len = strlen(locator_name);
+
+ if (zclient_debug)
+ zlog_debug("Getting SRv6-Locator Chunk %s", locator_name);
+
+ if (zclient->sock < 0)
+ return -1;
+
+ /* send request */
+ s = zclient->obuf;
+ stream_reset(s);
+ zclient_create_header(s, ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK,
+ VRF_DEFAULT);
+
+ /* locator_name */
+ stream_putw(s, len);
+ stream_put(s, locator_name, len);
+
+ /* Put length at the first point of the stream. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ return zclient_send_message(zclient);
+}
+
+/**
+ * Function to release an SRv6-locator chunk
+ *
+ * @param zclient Zclient used to connect to the SRv6 manager (zebra)
+ * @param locator_name Name of SRv6-locator
+ * @result 0 on success, -1 otherwise
+ */
+int srv6_manager_release_locator_chunk(struct zclient *zclient,
+ const char *locator_name)
+{
+ struct stream *s;
+ const size_t len = strlen(locator_name);
+
+ if (zclient_debug)
+ zlog_debug("Releasing SRv6-Locator Chunk %s", locator_name);
+
+ if (zclient->sock < 0)
+ return -1;
+
+ /* send request */
+ s = zclient->obuf;
+ stream_reset(s);
+ zclient_create_header(s, ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK,
+ VRF_DEFAULT);
+
+ /* locator_name */
+ stream_putw(s, len);
+ stream_put(s, locator_name, len);
+
+ /* Put length at the first point of the stream. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ return zclient_send_message(zclient);
+}
+
/*
* Asynchronous label chunk request
*
@@ -3345,7 +3558,8 @@ enum zclient_send_status zclient_send_mlag_register(struct zclient *client,
enum zclient_send_status zclient_send_mlag_deregister(struct zclient *client)
{
- return zebra_message_send(client, ZEBRA_MLAG_CLIENT_UNREGISTER, VRF_DEFAULT);
+ return zebra_message_send(client, ZEBRA_MLAG_CLIENT_UNREGISTER,
+ VRF_DEFAULT);
}
enum zclient_send_status zclient_send_mlag_data(struct zclient *client,
@@ -3888,6 +4102,21 @@ static int zclient_read(struct thread *thread)
case ZEBRA_MLAG_FORWARD_MSG:
zclient_mlag_handle_msg(command, zclient, length, vrf_id);
break;
+ case ZEBRA_SRV6_LOCATOR_ADD:
+ if (zclient->srv6_locator_add)
+ (*zclient->srv6_locator_add)(command, zclient, length,
+ vrf_id);
+ break;
+ case ZEBRA_SRV6_LOCATOR_DELETE:
+ if (zclient->srv6_locator_delete)
+ (*zclient->srv6_locator_delete)(command, zclient,
+ length, vrf_id);
+ break;
+ case ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK:
+ if (zclient->process_srv6_locator_chunk)
+ (*zclient->process_srv6_locator_chunk)(command, zclient,
+ length, vrf_id);
+ break;
case ZEBRA_ERROR:
zclient_handle_error(command, zclient, length, vrf_id);
break;
diff --git a/lib/zclient.h b/lib/zclient.h
index 8c27916542..48de3425be 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -39,6 +39,7 @@
#include "mlag.h"
#include "srte.h"
+#include "srv6.h"
#ifdef __cplusplus
extern "C" {
@@ -216,6 +217,10 @@ typedef enum {
ZEBRA_NHG_NOTIFY_OWNER,
ZEBRA_EVPN_REMOTE_NH_ADD,
ZEBRA_EVPN_REMOTE_NH_DEL,
+ ZEBRA_SRV6_LOCATOR_ADD,
+ ZEBRA_SRV6_LOCATOR_DELETE,
+ ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK,
+ ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK,
ZEBRA_ERROR,
ZEBRA_CLIENT_CAPABILITIES,
ZEBRA_OPAQUE_MESSAGE,
@@ -387,6 +392,11 @@ struct zclient {
int (*mlag_process_down)(void);
int (*mlag_handle_msg)(struct stream *msg, int len);
int (*nhg_notify_owner)(ZAPI_CALLBACK_ARGS);
+ int (*srv6_locator_add)(ZAPI_CALLBACK_ARGS);
+ int (*srv6_locator_delete)(ZAPI_CALLBACK_ARGS);
+ int (*srv6_function_add)(ZAPI_CALLBACK_ARGS);
+ int (*srv6_function_delete)(ZAPI_CALLBACK_ARGS);
+ void (*process_srv6_locator_chunk)(ZAPI_CALLBACK_ARGS);
int (*handle_error)(enum zebra_error_types error);
int (*opaque_msg_handler)(ZAPI_CALLBACK_ARGS);
int (*opaque_register_handler)(ZAPI_CALLBACK_ARGS);
@@ -459,6 +469,13 @@ struct zapi_nexthop {
/* SR-TE color. */
uint32_t srte_color;
+
+ /* SRv6 localsid info for Endpoint-behaviour */
+ uint32_t seg6local_action;
+ struct seg6local_context seg6local_ctx;
+
+ /* SRv6 Headend-behaviour */
+ struct in6_addr seg6_segs;
};
/*
@@ -471,6 +488,8 @@ struct zapi_nexthop {
#define ZAPI_NEXTHOP_FLAG_LABEL 0x02
#define ZAPI_NEXTHOP_FLAG_WEIGHT 0x04
#define ZAPI_NEXTHOP_FLAG_HAS_BACKUP 0x08 /* Nexthop has a backup */
+#define ZAPI_NEXTHOP_FLAG_SEG6 0x10
+#define ZAPI_NEXTHOP_FLAG_SEG6LOCAL 0x20
/*
* ZAPI Nexthop Group. For use with protocol creation of nexthop groups.
@@ -905,6 +924,11 @@ extern enum zclient_send_status
zclient_send_vrf_label(struct zclient *zclient, vrf_id_t vrf_id, afi_t afi,
mpls_label_t label, enum lsp_types_t ltype);
+extern enum zclient_send_status
+zclient_send_localsid(struct zclient *zclient, const struct in6_addr *sid,
+ ifindex_t oif, enum seg6local_action_t action,
+ const struct seg6local_context *context);
+
extern void zclient_send_reg_requests(struct zclient *, vrf_id_t);
extern void zclient_send_dereg_requests(struct zclient *, vrf_id_t);
extern enum zclient_send_status
@@ -1037,6 +1061,10 @@ extern int tm_get_table_chunk(struct zclient *zclient, uint32_t chunk_size,
uint32_t *start, uint32_t *end);
extern int tm_release_table_chunk(struct zclient *zclient, uint32_t start,
uint32_t end);
+extern int srv6_manager_get_locator_chunk(struct zclient *zclient,
+ const char *locator_name);
+extern int srv6_manager_release_locator_chunk(struct zclient *zclient,
+ const char *locator_name);
extern enum zclient_send_status zebra_send_sr_policy(struct zclient *zclient,
int cmd,
@@ -1054,6 +1082,11 @@ extern int zapi_labels_encode(struct stream *s, int cmd,
struct zapi_labels *zl);
extern int zapi_labels_decode(struct stream *s, struct zapi_labels *zl);
+extern int zapi_srv6_locator_chunk_encode(struct stream *s,
+ const struct srv6_locator_chunk *c);
+extern int zapi_srv6_locator_chunk_decode(struct stream *s,
+ struct srv6_locator_chunk *c);
+
extern enum zclient_send_status zebra_send_pw(struct zclient *zclient,
int command, struct zapi_pw *pw);
extern int zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS,
diff --git a/sharpd/sharp_globals.h b/sharpd/sharp_globals.h
index 0b3776cd90..46096f4aa7 100644
--- a/sharpd/sharp_globals.h
+++ b/sharpd/sharp_globals.h
@@ -22,6 +22,8 @@
#ifndef __SHARP_GLOBAL_H__
#define __SHARP_GLOBAL_H__
+#include "lib/srv6.h"
+
DECLARE_MGROUP(SHARPD);
struct sharp_routes {
@@ -40,6 +42,9 @@ struct sharp_routes {
uint32_t removed_routes;
int32_t repeat;
+ /* ZAPI_ROUTE's flag */
+ uint32_t flags;
+
uint8_t inst;
vrf_id_t vrf_id;
@@ -49,6 +54,14 @@ struct sharp_routes {
char opaque[ZAPI_MESSAGE_OPAQUE_LENGTH];
};
+struct sharp_srv6_locator {
+ /* name of locator */
+ char name[SRV6_LOCNAME_SIZE];
+
+ /* list of struct prefix_ipv6 */
+ struct list *chunks;
+};
+
struct sharp_global {
/* Global data about route install/deletions */
struct sharp_routes r;
@@ -58,6 +71,9 @@ struct sharp_global {
/* Traffic Engineering Database */
struct ls_ted *ted;
+
+ /* list of sharp_srv6_locator */
+ struct list *srv6_locators;
};
extern struct sharp_global sg;
diff --git a/sharpd/sharp_main.c b/sharpd/sharp_main.c
index e93db34ffa..75cf145385 100644
--- a/sharpd/sharp_main.c
+++ b/sharpd/sharp_main.c
@@ -140,6 +140,7 @@ static void sharp_global_init(void)
memset(&sg, 0, sizeof(sg));
sg.nhs = list_new();
sg.ted = NULL;
+ sg.srv6_locators = list_new();
}
static void sharp_start_configuration(void)
diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c
index 1ff0591d5e..250151b1fa 100644
--- a/sharpd/sharp_vty.c
+++ b/sharpd/sharp_vty.c
@@ -39,6 +39,8 @@
#include "sharpd/sharp_vty_clippy.c"
#endif
+DEFINE_MTYPE_STATIC(SHARPD, SRV6_LOCATOR, "SRv6 Locator");
+
DEFPY(watch_redistribute, watch_redistribute_cmd,
"sharp watch [vrf NAME$vrf_name] redistribute " FRR_REDIST_STR_SHARPD,
"Sharp routing Protocol\n"
@@ -332,7 +334,181 @@ DEFPY (install_routes,
rts = routes;
sharp_install_routes_helper(&prefix, sg.r.vrf_id, sg.r.inst, nhgid,
&sg.r.nhop_group, &sg.r.backup_nhop_group,
- rts, sg.r.opaque);
+ rts, 0, sg.r.opaque);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (install_seg6_routes,
+ install_seg6_routes_cmd,
+ "sharp install seg6-routes [vrf NAME$vrf_name]\
+ <A.B.C.D$start4|X:X::X:X$start6>\
+ nexthop-seg6 X:X::X:X$seg6_nh6 encap X:X::X:X$seg6_seg\
+ (1-1000000)$routes [repeat (2-1000)$rpt]",
+ "Sharp routing Protocol\n"
+ "install some routes\n"
+ "Routes to install\n"
+ "The vrf we would like to install into if non-default\n"
+ "The NAME of the vrf\n"
+ "v4 Address to start /32 generation at\n"
+ "v6 Address to start /32 generation at\n"
+ "Nexthop-seg6 to use\n"
+ "V6 Nexthop address to use\n"
+ "Encap mode\n"
+ "Segment List to use\n"
+ "How many to create\n"
+ "Should we repeat this command\n"
+ "How many times to repeat this command\n")
+{
+ struct vrf *vrf;
+ struct prefix prefix;
+ uint32_t route_flags = 0;
+
+ sg.r.total_routes = routes;
+ sg.r.installed_routes = 0;
+
+ if (rpt >= 2)
+ sg.r.repeat = rpt * 2;
+ else
+ sg.r.repeat = 0;
+
+ memset(&prefix, 0, sizeof(prefix));
+ memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix));
+ memset(&sg.r.nhop, 0, sizeof(sg.r.nhop));
+ memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group));
+ memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop));
+ memset(&sg.r.backup_nhop_group, 0, sizeof(sg.r.nhop_group));
+ sg.r.opaque[0] = '\0';
+ sg.r.inst = 0;
+
+ if (start4.s_addr != INADDR_ANY) {
+ prefix.family = AF_INET;
+ prefix.prefixlen = 32;
+ prefix.u.prefix4 = start4;
+ } else {
+ prefix.family = AF_INET6;
+ prefix.prefixlen = 128;
+ prefix.u.prefix6 = start6;
+ }
+ sg.r.orig_prefix = prefix;
+
+ if (!vrf_name)
+ vrf_name = VRF_DEFAULT_NAME;
+
+ vrf = vrf_lookup_by_name(vrf_name);
+ if (!vrf) {
+ vty_out(vty, "The vrf NAME specified: %s does not exist\n",
+ vrf_name);
+ return CMD_WARNING;
+ }
+
+ sg.r.nhop.type = NEXTHOP_TYPE_IPV6;
+ sg.r.nhop.gate.ipv6 = seg6_nh6;
+ sg.r.nhop.vrf_id = vrf->vrf_id;
+ sg.r.nhop_group.nexthop = &sg.r.nhop;
+ nexthop_add_srv6_seg6(&sg.r.nhop, &seg6_seg);
+
+ sg.r.vrf_id = vrf->vrf_id;
+ sharp_install_routes_helper(&prefix, sg.r.vrf_id, sg.r.inst, 0,
+ &sg.r.nhop_group, &sg.r.backup_nhop_group,
+ routes, route_flags, sg.r.opaque);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (install_seg6local_routes,
+ install_seg6local_routes_cmd,
+ "sharp install seg6local-routes [vrf NAME$vrf_name]\
+ X:X::X:X$start6\
+ nexthop-seg6local NAME$seg6l_oif\
+ <End$seg6l_end|\
+ End_X$seg6l_endx X:X::X:X$seg6l_endx_nh6|\
+ End_T$seg6l_endt (1-4294967295)$seg6l_endt_table|\
+ End_DX4$seg6l_enddx4 A.B.C.D$seg6l_enddx4_nh4|\
+ End_DT6$seg6l_enddt6 (1-4294967295)$seg6l_enddt6_table>\
+ (1-1000000)$routes [repeat (2-1000)$rpt]",
+ "Sharp routing Protocol\n"
+ "install some routes\n"
+ "Routes to install\n"
+ "The vrf we would like to install into if non-default\n"
+ "The NAME of the vrf\n"
+ "v6 Address to start /32 generation at\n"
+ "Nexthop-seg6local to use\n"
+ "Output device to use\n"
+ "SRv6 End function to use\n"
+ "SRv6 End.X function to use\n"
+ "V6 Nexthop address to use\n"
+ "SRv6 End.T function to use\n"
+ "Redirect table id to use\n"
+ "SRv6 End.DX4 function to use\n"
+ "V4 Nexthop address to use\n"
+ "SRv6 End.DT6 function to use\n"
+ "Redirect table id to use\n"
+ "How many to create\n"
+ "Should we repeat this command\n"
+ "How many times to repeat this command\n")
+{
+ struct vrf *vrf;
+ uint32_t route_flags = 0;
+ struct seg6local_context ctx = {};
+ enum seg6local_action_t action;
+
+ sg.r.total_routes = routes;
+ sg.r.installed_routes = 0;
+
+ if (rpt >= 2)
+ sg.r.repeat = rpt * 2;
+ else
+ sg.r.repeat = 0;
+
+ memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix));
+ memset(&sg.r.nhop, 0, sizeof(sg.r.nhop));
+ memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group));
+ memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop));
+ memset(&sg.r.backup_nhop_group, 0, sizeof(sg.r.nhop_group));
+ sg.r.opaque[0] = '\0';
+ sg.r.inst = 0;
+ sg.r.orig_prefix.family = AF_INET6;
+ sg.r.orig_prefix.prefixlen = 128;
+ sg.r.orig_prefix.u.prefix6 = start6;
+
+ if (!vrf_name)
+ vrf_name = VRF_DEFAULT_NAME;
+
+ vrf = vrf_lookup_by_name(vrf_name);
+ if (!vrf) {
+ vty_out(vty, "The vrf NAME specified: %s does not exist\n",
+ vrf_name);
+ return CMD_WARNING;
+ }
+
+ if (seg6l_enddx4) {
+ action = ZEBRA_SEG6_LOCAL_ACTION_END_DX4;
+ ctx.nh4 = seg6l_enddx4_nh4;
+ } else if (seg6l_endx) {
+ action = ZEBRA_SEG6_LOCAL_ACTION_END_X;
+ ctx.nh6 = seg6l_endx_nh6;
+ } else if (seg6l_endt) {
+ action = ZEBRA_SEG6_LOCAL_ACTION_END_T;
+ ctx.table = seg6l_endt_table;
+ } else if (seg6l_enddt6) {
+ action = ZEBRA_SEG6_LOCAL_ACTION_END_DT6;
+ ctx.table = seg6l_enddt6_table;
+ } else {
+ action = ZEBRA_SEG6_LOCAL_ACTION_END;
+ }
+
+ sg.r.nhop.type = NEXTHOP_TYPE_IFINDEX;
+ sg.r.nhop.ifindex = ifname2ifindex(seg6l_oif, vrf->vrf_id);
+ sg.r.nhop.vrf_id = vrf->vrf_id;
+ sg.r.nhop_group.nexthop = &sg.r.nhop;
+ nexthop_add_srv6_seg6local(&sg.r.nhop, action, &ctx);
+
+ sg.r.vrf_id = vrf->vrf_id;
+ sharp_install_routes_helper(&sg.r.orig_prefix, sg.r.vrf_id, sg.r.inst,
+ 0, &sg.r.nhop_group,
+ &sg.r.backup_nhop_group, routes,
+ route_flags, sg.r.opaque);
return CMD_SUCCESS;
}
@@ -740,6 +916,40 @@ DEFPY (import_te,
return CMD_SUCCESS;
}
+DEFPY (sharp_srv6_manager_get_locator_chunk,
+ sharp_srv6_manager_get_locator_chunk_cmd,
+ "sharp srv6-manager get-locator-chunk NAME$locator_name",
+ SHARP_STR
+ "Segment-Routing IPv6\n"
+ "Get SRv6 locator-chunk\n"
+ "SRv6 Locator name\n")
+{
+ int ret;
+ struct listnode *node;
+ struct sharp_srv6_locator *loc;
+ struct sharp_srv6_locator *loc_found = NULL;
+
+ for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, node, loc)) {
+ if (strcmp(loc->name, locator_name))
+ continue;
+ loc_found = loc;
+ break;
+ }
+ if (!loc_found) {
+ loc = XCALLOC(MTYPE_SRV6_LOCATOR,
+ sizeof(struct sharp_srv6_locator));
+ loc->chunks = list_new();
+ snprintf(loc->name, SRV6_LOCNAME_SIZE, "%s", locator_name);
+ listnode_add(sg.srv6_locators, loc);
+ }
+
+ ret = sharp_zebra_srv6_manager_get_locator_chunk(locator_name);
+ if (ret < 0)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ return CMD_SUCCESS;
+}
+
DEFUN (show_sharp_ted,
show_sharp_ted_cmd,
"show sharp ted [<vertex [A.B.C.D]|edge [A.B.C.D]|subnet [A.B.C.D/M]>] [verbose|json]",
@@ -861,6 +1071,88 @@ DEFUN (show_sharp_ted,
json, JSON_C_TO_STRING_PRETTY));
json_object_free(json);
}
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (sharp_srv6_manager_release_locator_chunk,
+ sharp_srv6_manager_release_locator_chunk_cmd,
+ "sharp srv6-manager release-locator-chunk NAME$locator_name",
+ SHARP_STR
+ "Segment-Routing IPv6\n"
+ "Release SRv6 locator-chunk\n"
+ "SRv6 Locator name\n")
+{
+ int ret;
+ struct listnode *loc_node;
+ struct sharp_srv6_locator *loc;
+
+ for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, loc_node, loc)) {
+ if (!strcmp(loc->name, locator_name)) {
+ list_delete_all_node(loc->chunks);
+ list_delete(&loc->chunks);
+ listnode_delete(sg.srv6_locators, loc);
+ break;
+ }
+ }
+
+ ret = sharp_zebra_srv6_manager_release_locator_chunk(locator_name);
+ if (ret < 0)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (show_sharp_segment_routing_srv6,
+ show_sharp_segment_routing_srv6_cmd,
+ "show sharp segment-routing srv6 [json]",
+ SHOW_STR
+ SHARP_STR
+ "Segment-Routing\n"
+ "Segment-Routing IPv6\n"
+ JSON_STR)
+{
+ char str[256];
+ struct listnode *loc_node;
+ struct listnode *chunk_node;
+ struct sharp_srv6_locator *loc;
+ struct prefix_ipv6 *chunk;
+ bool uj = use_json(argc, argv);
+ json_object *jo_locs = NULL;
+ json_object *jo_loc = NULL;
+ json_object *jo_chunks = NULL;
+
+ if (uj) {
+ jo_locs = json_object_new_array();
+ for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, loc_node, loc)) {
+ jo_loc = json_object_new_object();
+ json_object_array_add(jo_locs, jo_loc);
+ json_object_string_add(jo_loc, "name", loc->name);
+ jo_chunks = json_object_new_array();
+ json_object_object_add(jo_loc, "chunks", jo_chunks);
+ for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node,
+ chunk)) {
+ prefix2str(chunk, str, sizeof(str));
+ json_array_string_add(jo_chunks, str);
+ }
+ }
+
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ jo_locs, JSON_C_TO_STRING_PRETTY));
+ json_object_free(jo_locs);
+ } else {
+ for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, loc_node, loc)) {
+ vty_out(vty, "Locator %s has %d prefix chunks\n",
+ loc->name, listcount(loc->chunks));
+ for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node,
+ chunk)) {
+ prefix2str(chunk, str, sizeof(str));
+ vty_out(vty, " %s\n", str);
+ }
+ vty_out(vty, "\n");
+ }
+ }
+
return CMD_SUCCESS;
}
@@ -868,6 +1160,8 @@ void sharp_vty_init(void)
{
install_element(ENABLE_NODE, &install_routes_data_dump_cmd);
install_element(ENABLE_NODE, &install_routes_cmd);
+ install_element(ENABLE_NODE, &install_seg6_routes_cmd);
+ install_element(ENABLE_NODE, &install_seg6local_routes_cmd);
install_element(ENABLE_NODE, &remove_routes_cmd);
install_element(ENABLE_NODE, &vrf_label_cmd);
install_element(ENABLE_NODE, &sharp_nht_data_dump_cmd);
@@ -888,5 +1182,10 @@ void sharp_vty_init(void)
install_element(ENABLE_NODE, &show_debugging_sharpd_cmd);
install_element(ENABLE_NODE, &show_sharp_ted_cmd);
+ install_element(ENABLE_NODE, &sharp_srv6_manager_get_locator_chunk_cmd);
+ install_element(ENABLE_NODE,
+ &sharp_srv6_manager_release_locator_chunk_cmd);
+ install_element(ENABLE_NODE, &show_sharp_segment_routing_srv6_cmd);
+
return;
}
diff --git a/sharpd/sharp_zebra.c b/sharpd/sharp_zebra.c
index 128cfe2de6..2575475dd2 100644
--- a/sharpd/sharp_zebra.c
+++ b/sharpd/sharp_zebra.c
@@ -230,6 +230,7 @@ struct buffer_delay {
vrf_id_t vrf_id;
uint8_t instance;
uint32_t nhgid;
+ uint32_t flags;
const struct nexthop_group *nhg;
const struct nexthop_group *backup_nhg;
enum where_to_restart restart;
@@ -244,7 +245,8 @@ struct buffer_delay {
*/
static bool route_add(const struct prefix *p, vrf_id_t vrf_id, uint8_t instance,
uint32_t nhgid, const struct nexthop_group *nhg,
- const struct nexthop_group *backup_nhg, char *opaque)
+ const struct nexthop_group *backup_nhg, uint32_t flags,
+ char *opaque)
{
struct zapi_route api;
struct zapi_nexthop *api_nh;
@@ -258,6 +260,7 @@ static bool route_add(const struct prefix *p, vrf_id_t vrf_id, uint8_t instance,
api.safi = SAFI_UNICAST;
memcpy(&api.prefix, p, sizeof(*p));
+ api.flags = flags;
SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);
SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
@@ -335,7 +338,8 @@ static void sharp_install_routes_restart(struct prefix *p, uint32_t count,
uint32_t nhgid,
const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg,
- uint32_t routes, char *opaque)
+ uint32_t routes, uint32_t flags,
+ char *opaque)
{
uint32_t temp, i;
bool v4 = false;
@@ -348,7 +352,7 @@ static void sharp_install_routes_restart(struct prefix *p, uint32_t count,
for (i = count; i < routes; i++) {
bool buffered = route_add(p, vrf_id, (uint8_t)instance, nhgid,
- nhg, backup_nhg, opaque);
+ nhg, backup_nhg, flags, opaque);
if (v4)
p->u.prefix4.s_addr = htonl(++temp);
else
@@ -362,6 +366,7 @@ static void sharp_install_routes_restart(struct prefix *p, uint32_t count,
wb.instance = instance;
wb.nhgid = nhgid;
wb.nhg = nhg;
+ wb.flags = flags;
wb.backup_nhg = backup_nhg;
wb.opaque = opaque;
wb.restart = SHARP_INSTALL_ROUTES_RESTART;
@@ -375,7 +380,7 @@ void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id,
uint8_t instance, uint32_t nhgid,
const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg,
- uint32_t routes, char *opaque)
+ uint32_t routes, uint32_t flags, char *opaque)
{
zlog_debug("Inserting %u routes", routes);
@@ -385,7 +390,7 @@ void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id,
monotime(&sg.r.t_start);
sharp_install_routes_restart(p, 0, vrf_id, instance, nhgid, nhg,
- backup_nhg, routes, opaque);
+ backup_nhg, routes, flags, opaque);
}
static void sharp_remove_routes_restart(struct prefix *p, uint32_t count,
@@ -451,7 +456,8 @@ static void handle_repeated(bool installed)
sharp_install_routes_helper(&p, sg.r.vrf_id, sg.r.inst,
sg.r.nhgid, &sg.r.nhop_group,
&sg.r.backup_nhop_group,
- sg.r.total_routes, sg.r.opaque);
+ sg.r.total_routes, sg.r.flags,
+ sg.r.opaque);
}
}
@@ -461,7 +467,8 @@ static void sharp_zclient_buffer_ready(void)
case SHARP_INSTALL_ROUTES_RESTART:
sharp_install_routes_restart(
&wb.p, wb.count, wb.vrf_id, wb.instance, wb.nhgid,
- wb.nhg, wb.backup_nhg, wb.routes, wb.opaque);
+ wb.nhg, wb.backup_nhg, wb.routes, wb.flags,
+ wb.opaque);
return;
case SHARP_DELETE_ROUTES_RESTART:
sharp_remove_routes_restart(&wb.p, wb.count, wb.vrf_id,
@@ -918,6 +925,50 @@ static int nhg_notify_owner(ZAPI_CALLBACK_ARGS)
return 0;
}
+int sharp_zebra_srv6_manager_get_locator_chunk(const char *locator_name)
+{
+ return srv6_manager_get_locator_chunk(zclient, locator_name);
+}
+
+int sharp_zebra_srv6_manager_release_locator_chunk(const char *locator_name)
+{
+ return srv6_manager_release_locator_chunk(zclient, locator_name);
+}
+
+static void sharp_zebra_process_srv6_locator_chunk(ZAPI_CALLBACK_ARGS)
+{
+ struct stream *s = NULL;
+ struct srv6_locator_chunk s6c = {};
+ struct listnode *node, *nnode;
+ struct sharp_srv6_locator *loc;
+
+ s = zclient->ibuf;
+ zapi_srv6_locator_chunk_decode(s, &s6c);
+
+ for (ALL_LIST_ELEMENTS(sg.srv6_locators, node, nnode, loc)) {
+ struct prefix_ipv6 *chunk = NULL;
+ struct listnode *chunk_node;
+ struct prefix_ipv6 *c;
+
+ if (strcmp(loc->name, s6c.locator_name) != 0) {
+ zlog_err("%s: Locator name unmatch %s:%s", __func__,
+ loc->name, s6c.locator_name);
+ continue;
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node, c))
+ if (!prefix_cmp(c, &s6c.prefix))
+ return;
+
+ chunk = prefix_ipv6_new();
+ *chunk = s6c.prefix;
+ listnode_add(loc->chunks, chunk);
+ return;
+ }
+
+ zlog_err("%s: can't get locator_chunk!!", __func__);
+}
+
void sharp_zebra_init(void)
{
struct zclient_options opt = {.receive_notify = true};
@@ -939,4 +990,6 @@ void sharp_zebra_init(void)
zclient->redistribute_route_add = sharp_redistribute_route;
zclient->redistribute_route_del = sharp_redistribute_route;
zclient->opaque_msg_handler = sharp_opaque_handler;
+ zclient->process_srv6_locator_chunk =
+ sharp_zebra_process_srv6_locator_chunk;
}
diff --git a/sharpd/sharp_zebra.h b/sharpd/sharp_zebra.h
index 4355f49a2f..49f11a67e8 100644
--- a/sharpd/sharp_zebra.h
+++ b/sharpd/sharp_zebra.h
@@ -39,7 +39,8 @@ extern void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id,
uint8_t instance, uint32_t nhgid,
const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg,
- uint32_t routes, char *opaque);
+ uint32_t routes, uint32_t flags,
+ char *opaque);
extern void sharp_remove_routes_helper(struct prefix *p, vrf_id_t vrf_id,
uint8_t instance, uint32_t routes);
@@ -65,4 +66,11 @@ extern void sharp_zebra_register_te(void);
extern void sharp_redistribute_vrf(struct vrf *vrf, int source);
+extern int sharp_zebra_srv6_manager_get_locator_chunk(const char *lname);
+extern int sharp_zebra_srv6_manager_release_locator_chunk(const char *lname);
+extern void sharp_install_seg6local_route_helper(struct prefix *p,
+ uint8_t instance,
+ enum seg6local_action_t act,
+ struct seg6local_context *ctx);
+
#endif
diff --git a/tests/topotests/bgp_prefix_sid2/peer1/exabgp.cfg b/tests/topotests/bgp_prefix_sid2/peer1/exabgp.cfg
new file mode 100644
index 0000000000..ad1b15a26c
--- /dev/null
+++ b/tests/topotests/bgp_prefix_sid2/peer1/exabgp.cfg
@@ -0,0 +1,29 @@
+group controller {
+ neighbor 10.0.0.1 {
+ router-id 10.0.0.101;
+ local-address 10.0.0.101;
+ local-as 2;
+ peer-as 1;
+
+ family {
+ ipv6 mpls-vpn;
+ }
+
+ static {
+ route 2001:1::/64 {
+ rd 2:10;
+ next-hop 2001::2;
+ extended-community [ target:2:10 ];
+ label 3;
+ attribute [0x28 0xc0 0x0500150020010db800010001000000000000000100ffff00 ];
+ }
+ route 2001:2::/64 {
+ rd 2:10;
+ next-hop 2001::2;
+ extended-community [ target:2:10 ];
+ label 3;
+ attribute [0x28 0xc0 0x0500150020010db800010001000000000000000100ffff00 ];
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_prefix_sid2/peer1/exabgp.env b/tests/topotests/bgp_prefix_sid2/peer1/exabgp.env
new file mode 100644
index 0000000000..6c554f5fa8
--- /dev/null
+++ b/tests/topotests/bgp_prefix_sid2/peer1/exabgp.env
@@ -0,0 +1,53 @@
+
+[exabgp.api]
+encoder = text
+highres = false
+respawn = false
+socket = ''
+
+[exabgp.bgp]
+openwait = 60
+
+[exabgp.cache]
+attributes = true
+nexthops = true
+
+[exabgp.daemon]
+daemonize = true
+pid = '/var/run/exabgp/exabgp.pid'
+user = 'exabgp'
+
+[exabgp.log]
+all = false
+configuration = true
+daemon = true
+destination = '/var/log/exabgp.log'
+enable = true
+level = INFO
+message = false
+network = true
+packets = false
+parser = false
+processes = true
+reactor = true
+rib = false
+routes = false
+short = false
+timers = false
+
+[exabgp.pdb]
+enable = false
+
+[exabgp.profile]
+enable = false
+file = ''
+
+[exabgp.reactor]
+speed = 1.0
+
+[exabgp.tcp]
+acl = false
+bind = ''
+delay = 0
+once = false
+port = 179
diff --git a/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf b/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf
new file mode 100644
index 0000000000..ddc1f07e42
--- /dev/null
+++ b/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf
@@ -0,0 +1,26 @@
+log stdout notifications
+log monitor notifications
+!log commands
+!
+!debug bgp zebra
+!debug bgp neighbor-events
+!debug bgp vnc verbose
+!debug bgp update-groups
+!debug bgp updates in
+!debug bgp updates out
+!debug bgp vpn label
+!debug bgp vpn leak-from-vrf
+!debug bgp vpn leak-to-vrf
+!debug bgp vpn rmap-event
+!
+router bgp 1
+ bgp router-id 10.0.0.1
+ no bgp default ipv4-unicast
+ no bgp ebgp-requires-policy
+ neighbor 10.0.0.101 remote-as 2
+ neighbor 10.0.0.101 timers 3 10
+ !
+ address-family ipv6 vpn
+ neighbor 10.0.0.101 activate
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_prefix_sid2/r1/vpnv6_rib_entry1.json b/tests/topotests/bgp_prefix_sid2/r1/vpnv6_rib_entry1.json
new file mode 100644
index 0000000000..42293b1fc7
--- /dev/null
+++ b/tests/topotests/bgp_prefix_sid2/r1/vpnv6_rib_entry1.json
@@ -0,0 +1,50 @@
+{
+ "2:10":{
+ "prefix":"2001:1::\/64",
+ "advertisedTo":{
+ "10.0.0.101":{
+ }
+ },
+ "paths":[
+ {
+ "aspath":{
+ "string":"2",
+ "segments":[
+ {
+ "type":"as-sequence",
+ "list":[
+ 2
+ ]
+ }
+ ],
+ "length":1
+ },
+ "origin":"IGP",
+ "valid":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"RT:2:10"
+ },
+ "remoteLabel":3,
+ "remoteSid":"2001:db8:1:1::1",
+ "nexthops":[
+ {
+ "ip":"2001::2",
+ "afi":"ipv6",
+ "scope":"global",
+ "metric":0,
+ "accessible":true,
+ "used":true
+ }
+ ],
+ "peer":{
+ "peerId":"10.0.0.101",
+ "routerId":"10.0.0.101",
+ "type":"external"
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/bgp_prefix_sid2/r1/vpnv6_rib_entry2.json b/tests/topotests/bgp_prefix_sid2/r1/vpnv6_rib_entry2.json
new file mode 100644
index 0000000000..c9ad8714c1
--- /dev/null
+++ b/tests/topotests/bgp_prefix_sid2/r1/vpnv6_rib_entry2.json
@@ -0,0 +1,50 @@
+{
+ "2:10":{
+ "prefix":"2001:2::\/64",
+ "advertisedTo":{
+ "10.0.0.101":{
+ }
+ },
+ "paths":[
+ {
+ "aspath":{
+ "string":"2",
+ "segments":[
+ {
+ "type":"as-sequence",
+ "list":[
+ 2
+ ]
+ }
+ ],
+ "length":1
+ },
+ "origin":"IGP",
+ "valid":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"RT:2:10"
+ },
+ "remoteLabel":3,
+ "remoteSid":"2001:db8:1:1::1",
+ "nexthops":[
+ {
+ "ip":"2001::2",
+ "afi":"ipv6",
+ "scope":"global",
+ "metric":0,
+ "accessible":true,
+ "used":true
+ }
+ ],
+ "peer":{
+ "peerId":"10.0.0.101",
+ "routerId":"10.0.0.101",
+ "type":"external"
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/bgp_prefix_sid2/r1/zebra.conf b/tests/topotests/bgp_prefix_sid2/r1/zebra.conf
new file mode 100644
index 0000000000..0cd26052f2
--- /dev/null
+++ b/tests/topotests/bgp_prefix_sid2/r1/zebra.conf
@@ -0,0 +1,7 @@
+hostname r1
+!
+interface r1-eth0
+ ip address 10.0.0.1/24
+ no shutdown
+!
+line vty
diff --git a/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py b/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py
new file mode 100755
index 0000000000..25362530d4
--- /dev/null
+++ b/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+
+#
+# test_bgp_prefix_sid2.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2020 by LINE Corporation
+# Copyright (c) 2020 by Hiroki Shirokura <slank.dev@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_bgp_prefix_sid2.py: Test BGP topology with EBGP on prefix-sid
+"""
+
+import json
+import os
+import sys
+import functools
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+
+
+class TemplateTopo(Topo):
+ def build(self, **_opts):
+ tgen = get_topogen(self)
+ router = tgen.add_router("r1")
+ switch = tgen.add_switch("s1")
+ switch.add_link(router)
+
+ switch = tgen.gears["s1"]
+ peer1 = tgen.add_exabgp_peer(
+ "peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1"
+ )
+ switch.add_link(peer1)
+
+
+def setup_module(module):
+ tgen = Topogen(TemplateTopo, module.__name__)
+ tgen.start_topology()
+
+ router = tgen.gears["r1"]
+ router.load_config(
+ TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, "{}/zebra.conf".format("r1"))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP,
+ os.path.join(CWD, "{}/bgpd.conf".format("r1"))
+ )
+ router.start()
+
+ logger.info("starting exaBGP")
+ peer_list = tgen.exabgp_peers()
+ for pname, peer in peer_list.items():
+ logger.info("starting exaBGP on {}".format(pname))
+ peer_dir = os.path.join(CWD, pname)
+ env_file = os.path.join(CWD, pname, "exabgp.env")
+ logger.info("Running ExaBGP peer on {}".format(pname))
+ peer.start(peer_dir, env_file)
+ logger.info(pname)
+
+
+def teardown_module(module):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+def test_r1_rib():
+ def _check(name, cmd, expected_file):
+ logger.info("polling")
+ tgen = get_topogen()
+ router = tgen.gears[name]
+ output = json.loads(router.vtysh_cmd(cmd))
+ expected = open_json_file("{}/{}".format(CWD, expected_file))
+ return topotest.json_cmp(output, expected)
+
+ def check(name, cmd, expected_file):
+ logger.info("[+] check {} \"{}\" {}".format(name, cmd, expected_file))
+ tgen = get_topogen()
+ func = functools.partial(_check, name, cmd, expected_file)
+ success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
+ assert result is None, 'Failed'
+
+ check("r1", "show bgp ipv6 vpn 2001:1::/64 json", "r1/vpnv6_rib_entry1.json")
+ check("r1", "show bgp ipv6 vpn 2001:2::/64 json", "r1/vpnv6_rib_entry2.json")
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ ret = pytest.main(args)
+ sys.exit(ret)
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/bgpd.conf
new file mode 100644
index 0000000000..3459796629
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/bgpd.conf
@@ -0,0 +1,8 @@
+frr defaults traditional
+!
+hostname ce1
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/ipv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/ipv6_rib.json
new file mode 100644
index 0000000000..d19e315772
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/ipv6_rib.json
@@ -0,0 +1,58 @@
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:1::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:1::/64": [
+ {
+ "prefix": "2001:1::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/zebra.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/zebra.conf
new file mode 100644
index 0000000000..665808a0e7
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce1/zebra.conf
@@ -0,0 +1,14 @@
+log file zebra.log
+!
+hostname ce1
+!
+interface eth0
+ ipv6 address 2001:1::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:1::1
+!
+line vty
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/bgpd.conf
new file mode 100644
index 0000000000..8ed9978749
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/bgpd.conf
@@ -0,0 +1,8 @@
+frr defaults traditional
+!
+hostname ce2
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/ipv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/ipv6_rib.json
new file mode 100644
index 0000000000..35ff14efad
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/ipv6_rib.json
@@ -0,0 +1,58 @@
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:2::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:2::/64": [
+ {
+ "prefix": "2001:2::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/zebra.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/zebra.conf
new file mode 100644
index 0000000000..cc9b90a3b0
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce2/zebra.conf
@@ -0,0 +1,14 @@
+log file zebra.log
+!
+hostname ce2
+!
+interface eth0
+ ipv6 address 2001:2::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:2::1
+!
+line vty
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/bgpd.conf
new file mode 100644
index 0000000000..a85d9701c7
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/bgpd.conf
@@ -0,0 +1,8 @@
+frr defaults traditional
+!
+hostname ce3
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/ipv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/ipv6_rib.json
new file mode 100644
index 0000000000..2f2931f80f
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/ipv6_rib.json
@@ -0,0 +1,58 @@
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:3::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:3::/64": [
+ {
+ "prefix": "2001:3::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/zebra.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/zebra.conf
new file mode 100644
index 0000000000..beca0b1211
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce3/zebra.conf
@@ -0,0 +1,14 @@
+log file zebra.log
+!
+hostname ce3
+!
+interface eth0
+ ipv6 address 2001:3::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:3::1
+!
+line vty
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/bgpd.conf
new file mode 100644
index 0000000000..93fb32fd1b
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/bgpd.conf
@@ -0,0 +1,8 @@
+frr defaults traditional
+!
+hostname ce4
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/ipv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/ipv6_rib.json
new file mode 100644
index 0000000000..8a98768e0d
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/ipv6_rib.json
@@ -0,0 +1,58 @@
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:4::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:4::/64": [
+ {
+ "prefix": "2001:4::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/zebra.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/zebra.conf
new file mode 100644
index 0000000000..7b21074df0
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce4/zebra.conf
@@ -0,0 +1,14 @@
+log file zebra.log
+!
+hostname ce4
+!
+interface eth0
+ ipv6 address 2001:4::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:4::1
+!
+line vty
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/bgpd.conf
new file mode 100644
index 0000000000..2ab6f2d2a7
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/bgpd.conf
@@ -0,0 +1,8 @@
+frr defaults traditional
+!
+hostname ce5
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/ipv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/ipv6_rib.json
new file mode 100644
index 0000000000..80ff52ad6e
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/ipv6_rib.json
@@ -0,0 +1,58 @@
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:5::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:5::/64": [
+ {
+ "prefix": "2001:5::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/zebra.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/zebra.conf
new file mode 100644
index 0000000000..b5ad48e709
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce5/zebra.conf
@@ -0,0 +1,14 @@
+log file zebra.log
+!
+hostname ce5
+!
+interface eth0
+ ipv6 address 2001:5::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:5::1
+!
+line vty
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/bgpd.conf
new file mode 100644
index 0000000000..e0b6540514
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/bgpd.conf
@@ -0,0 +1,8 @@
+frr defaults traditional
+!
+hostname ce6
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/ipv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/ipv6_rib.json
new file mode 100644
index 0000000000..ace6136f06
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/ipv6_rib.json
@@ -0,0 +1,58 @@
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:6::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:6::/64": [
+ {
+ "prefix": "2001:6::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/zebra.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/zebra.conf
new file mode 100644
index 0000000000..7d19d9880b
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/ce6/zebra.conf
@@ -0,0 +1,14 @@
+log file zebra.log
+!
+hostname ce6
+!
+interface eth0
+ ipv6 address 2001:6::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:6::1
+!
+line vty
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/bgpd.conf
new file mode 100644
index 0000000000..d07d0532e3
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/bgpd.conf
@@ -0,0 +1,64 @@
+frr defaults traditional
+!
+hostname r1
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+!debug bgp neighbor-events
+!debug bgp zebra
+!debug bgp vnc verbose
+!debug bgp update-groups
+!debug bgp updates in
+!debug bgp updates out
+!debug bgp vpn label
+!debug bgp vpn leak-from-vrf
+!debug bgp vpn leak-to-vrf
+!debug bgp vpn rmap-event
+!
+router bgp 1
+ bgp router-id 1.1.1.1
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ neighbor 2001::2 remote-as 2
+ neighbor 2001::2 timers 3 10
+ neighbor 2001::2 timers connect 1
+ !
+ address-family ipv6 vpn
+ neighbor 2001::2 activate
+ exit-address-family
+ !
+ segment-routing srv6
+ locator loc1
+ !
+!
+router bgp 1 vrf vrf10
+ bgp router-id 1.1.1.1
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ !
+ address-family ipv6 unicast
+ sid vpn export auto
+ rd vpn export 1:10
+ rt vpn both 99:99
+ import vpn
+ export vpn
+ redistribute connected
+ exit-address-family
+!
+router bgp 1 vrf vrf20
+ bgp router-id 1.1.1.1
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ !
+ address-family ipv6 unicast
+ sid vpn export auto
+ rd vpn export 1:20
+ rt vpn both 88:88
+ import vpn
+ export vpn
+ redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vpnv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vpnv6_rib.json
new file mode 100644
index 0000000000..25b7a8616f
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vpnv6_rib.json
@@ -0,0 +1,170 @@
+{
+ "vrfId": 0,
+ "vrfName": "default",
+ "tableVersion": 2,
+ "routerId": "1.1.1.1",
+ "defaultLocPrf": 100,
+ "localAS": 1,
+ "routes": {
+ "routeDistinguishers": {
+ "1:10": {
+ "2001:1::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:1::",
+ "prefixLen": 64,
+ "network": "2001:1::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf10",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "2001:3::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:3::",
+ "prefixLen": 64,
+ "network": "2001:3::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf10",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "1:20": {
+ "2001:5::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:5::",
+ "prefixLen": 64,
+ "network": "2001:5::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf20",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "2:10": {
+ "2001:2::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:2::",
+ "prefixLen": 64,
+ "network": "2001:2::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::2",
+ "path": "2",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::2",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "2:20": {
+ "2001:4::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:4::",
+ "prefixLen": 64,
+ "network": "2001:4::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::2",
+ "path": "2",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::2",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "2001:6::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:6::",
+ "prefixLen": 64,
+ "network": "2001:6::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::2",
+ "path": "2",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::2",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vrf10_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vrf10_rib.json
new file mode 100644
index 0000000000..fa05972a35
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vrf10_rib.json
@@ -0,0 +1,89 @@
+{
+ "2001:1::/64": [
+ {
+ "prefix": "2001:1::/64",
+ "protocol": "connected",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth1",
+ "active": true
+ }
+ ]
+ }
+ ],
+ "2001:2::/64": [
+ {
+ "prefix": "2001:2::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:2:2::100"
+ }
+ }
+ ],
+ "asPath": "2"
+ }
+ ],
+ "2001:3::/64": [
+ {
+ "prefix": "2001:3::/64",
+ "protocol": "connected",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth2",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vrf20_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vrf20_rib.json
new file mode 100644
index 0000000000..0155557242
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/vrf20_rib.json
@@ -0,0 +1,98 @@
+{
+ "2001:4::/64": [
+ {
+ "prefix": "2001:4::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:2:2::200"
+ }
+ }
+ ],
+ "asPath": "2"
+ }
+ ],
+ "2001:5::/64": [
+ {
+ "prefix": "2001:5::/64",
+ "protocol": "connected",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth3",
+ "active": true
+ }
+ ]
+ }
+ ],
+ "2001:6::/64": [
+ {
+ "prefix": "2001:6::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:2:2::200"
+ }
+ }
+ ],
+ "asPath": "2"
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/zebra.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/zebra.conf
new file mode 100644
index 0000000000..ec36870369
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r1/zebra.conf
@@ -0,0 +1,40 @@
+log file zebra.log
+!
+hostname r1
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+debug zebra packet
+debug zebra dplane
+debug zebra kernel
+!
+interface eth0
+ ipv6 address 2001::1/64
+!
+interface eth1 vrf vrf10
+ ipv6 address 2001:1::1/64
+!
+interface eth2 vrf vrf10
+ ipv6 address 2001:3::1/64
+!
+interface eth3 vrf vrf20
+ ipv6 address 2001:5::1/64
+!
+segment-routing
+ srv6
+ locators
+ locator loc1
+ prefix 2001:db8:1:1::/64
+ !
+ !
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route 2001:db8:2:2::/64 2001::2
+!
+line vty
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/bgpd.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/bgpd.conf
new file mode 100644
index 0000000000..d0b3ea8ada
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/bgpd.conf
@@ -0,0 +1,65 @@
+frr defaults traditional
+!
+hostname r2
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+!debug bgp neighbor-events
+!debug bgp zebra
+!debug bgp vnc verbose
+!debug bgp update-groups
+!debug bgp updates in
+!debug bgp updates out
+!debug bgp updates
+!debug bgp vpn label
+!debug bgp vpn leak-from-vrf
+!debug bgp vpn leak-to-vrf
+!debug bgp vpn rmap-event
+!
+router bgp 2
+ bgp router-id 2.2.2.2
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ neighbor 2001::1 remote-as 1
+ neighbor 2001::1 timers 3 10
+ neighbor 2001::1 timers connect 1
+ !
+ address-family ipv6 vpn
+ neighbor 2001::1 activate
+ exit-address-family
+ !
+ segment-routing srv6
+ locator loc1
+ !
+!
+router bgp 2 vrf vrf10
+ bgp router-id 2.2.2.2
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ !
+ address-family ipv6 unicast
+ sid vpn export auto
+ rd vpn export 2:10
+ rt vpn both 99:99
+ import vpn
+ export vpn
+ redistribute connected
+ exit-address-family
+!
+router bgp 2 vrf vrf20
+ bgp router-id 2.2.2.2
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ !
+ address-family ipv6 unicast
+ sid vpn export auto
+ rd vpn export 2:20
+ rt vpn both 88:88
+ import vpn
+ export vpn
+ redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vpnv6_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vpnv6_rib.json
new file mode 100644
index 0000000000..2cd47b9ce5
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vpnv6_rib.json
@@ -0,0 +1,170 @@
+{
+ "vrfId": 0,
+ "vrfName": "default",
+ "tableVersion": 2,
+ "routerId": "2.2.2.2",
+ "defaultLocPrf": 100,
+ "localAS": 2,
+ "routes": {
+ "routeDistinguishers": {
+ "1:10": {
+ "2001:1::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:1::",
+ "prefixLen": 64,
+ "network": "2001:1::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::1",
+ "path": "1",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::1",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "2001:3::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:3::",
+ "prefixLen": 64,
+ "network": "2001:3::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::1",
+ "path": "1",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::1",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "1:20": {
+ "2001:5::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:5::",
+ "prefixLen": 64,
+ "network": "2001:5::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::1",
+ "path": "1",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::1",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "2:10": {
+ "2001:2::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:2::",
+ "prefixLen": 64,
+ "network": "2001:2::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf10",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "2:20": {
+ "2001:4::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:4::",
+ "prefixLen": 64,
+ "network": "2001:4::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf20",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "2001:6::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:6::",
+ "prefixLen": 64,
+ "network": "2001:6::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf20",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vrf10_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vrf10_rib.json
new file mode 100644
index 0000000000..887eb24386
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vrf10_rib.json
@@ -0,0 +1,98 @@
+{
+ "2001:1::/64": [
+ {
+ "prefix": "2001:1::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:1:1::100"
+ }
+ }
+ ],
+ "asPath": "1"
+ }
+ ],
+ "2001:2::/64": [
+ {
+ "prefix": "2001:2::/64",
+ "protocol": "connected",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth1",
+ "active": true
+ }
+ ]
+ }
+ ],
+ "2001:3::/64": [
+ {
+ "prefix": "2001:3::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:1:1::100"
+ }
+ }
+ ],
+ "asPath": "1"
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vrf20_rib.json b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vrf20_rib.json
new file mode 100644
index 0000000000..c118518423
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/vrf20_rib.json
@@ -0,0 +1,89 @@
+{
+ "2001:4::/64": [
+ {
+ "prefix": "2001:4::/64",
+ "protocol": "connected",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth2",
+ "active": true
+ }
+ ]
+ }
+ ],
+ "2001:5::/64": [
+ {
+ "prefix": "2001:5::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:1:1::200"
+ }
+ }
+ ],
+ "asPath": "1"
+ }
+ ],
+ "2001:6::/64": [
+ {
+ "prefix": "2001:6::/64",
+ "protocol": "connected",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth3",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/zebra.conf b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/zebra.conf
new file mode 100644
index 0000000000..f3e025d23a
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/r2/zebra.conf
@@ -0,0 +1,40 @@
+log file zebra.log
+!
+hostname r2
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+debug zebra packet
+debug zebra dplane
+debug zebra kernel
+!
+interface eth0
+ ipv6 address 2001::2/64
+!
+interface eth1 vrf vrf10
+ ipv6 address 2001:2::1/64
+!
+interface eth2 vrf vrf20
+ ipv6 address 2001:4::1/64
+!
+interface eth3 vrf vrf20
+ ipv6 address 2001:6::1/64
+!
+segment-routing
+ srv6
+ locators
+ locator loc1
+ prefix 2001:db8:2:2::/64
+ !
+ !
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route 2001:db8:1:1::/64 2001::1
+!
+line vty
+!
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py
new file mode 100755
index 0000000000..2d80c66b0b
--- /dev/null
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python
+
+#
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2018, LabN Consulting, L.L.C.
+# Authored by Lou Berger <lberger@labn.net>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+import os
+import re
+import sys
+import json
+import functools
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.common_config import required_linux_kernel_version
+from mininet.topo import Topo
+
+
+class Topology(Topo):
+ """
+ CE1 CE3 CE5
+ (eth0) (eth0) (eth0)
+ :2 :2 :2
+ | | |
+ 2001: 2001: 2001:
+ 1::/64 3::/64 5::/64
+ | | |
+ :1 :1 :1
+ +-(eth1)--(eth2)---(eth3)-+
+ | \ / | |
+ | (vrf10) (vrf20) |
+ | R1 |
+ +----------(eth0)---------+
+ :1
+ |
+ 2001::/64
+ |
+ :2
+ (eth0)
+ +----------(eth0)--------------+
+ | R2 |
+ | (vrf10) (vrf20) |
+ | / / \ |
+ +-(eth1)-----(eth2)-----(eth3)-+
+ :1 :1 :1
+ | | |
+ +------+ +------+ +------+
+ / 2001: \ / 2001: \ / 2001: \
+ \ 2::/64 / \ 4::/64 / \ 6::/64 /
+ +------+ +------+ +------+
+ | | |
+ :2 :2 :2
+ (eth0) (eth0) (eth0)
+ CE2 CE4 CE6
+ """
+ def build(self, *_args, **_opts):
+ tgen = get_topogen(self)
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("ce1")
+ tgen.add_router("ce2")
+ tgen.add_router("ce3")
+ tgen.add_router("ce4")
+ tgen.add_router("ce5")
+ tgen.add_router("ce6")
+
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "eth0", "eth0")
+ tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "eth0", "eth1")
+ tgen.add_link(tgen.gears["ce2"], tgen.gears["r2"], "eth0", "eth1")
+ tgen.add_link(tgen.gears["ce3"], tgen.gears["r1"], "eth0", "eth2")
+ tgen.add_link(tgen.gears["ce4"], tgen.gears["r2"], "eth0", "eth2")
+ tgen.add_link(tgen.gears["ce5"], tgen.gears["r1"], "eth0", "eth3")
+ tgen.add_link(tgen.gears["ce6"], tgen.gears["r2"], "eth0", "eth3")
+
+
+def setup_module(mod):
+ result = required_linux_kernel_version("4.15")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ tgen = Topogen(Topology, mod.__name__)
+ tgen.start_topology()
+ router_list = tgen.routers()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
+ router.load_config(TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, '{}/zebra.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_BGP,
+ os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
+
+ tgen.gears["r1"].run("ip link add vrf10 type vrf table 10")
+ tgen.gears["r1"].run("ip link set vrf10 up")
+ tgen.gears["r1"].run("ip link add vrf20 type vrf table 20")
+ tgen.gears["r1"].run("ip link set vrf20 up")
+ tgen.gears["r1"].run("ip link set eth1 master vrf10")
+ tgen.gears["r1"].run("ip link set eth2 master vrf10")
+ tgen.gears["r1"].run("ip link set eth3 master vrf20")
+
+ tgen.gears["r2"].run("ip link add vrf10 type vrf table 10")
+ tgen.gears["r2"].run("ip link set vrf10 up")
+ tgen.gears["r2"].run("ip link add vrf20 type vrf table 20")
+ tgen.gears["r2"].run("ip link set vrf20 up")
+ tgen.gears["r2"].run("ip link set eth1 master vrf10")
+ tgen.gears["r2"].run("ip link set eth2 master vrf20")
+ tgen.gears["r2"].run("ip link set eth3 master vrf20")
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+def test_rib():
+ def _check(name, cmd, expected_file):
+ logger.info("polling")
+ tgen = get_topogen()
+ router = tgen.gears[name]
+ output = json.loads(router.vtysh_cmd(cmd))
+ expected = open_json_file("{}/{}".format(CWD, expected_file))
+ return topotest.json_cmp(output, expected)
+
+ def check(name, cmd, expected_file):
+ logger.info("[+] check {} \"{}\" {}".format(name, cmd, expected_file))
+ tgen = get_topogen()
+ func = functools.partial(_check, name, cmd, expected_file)
+ success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
+ assert result is None, 'Failed'
+
+ check("r1", "show bgp ipv6 vpn json", "r1/vpnv6_rib.json")
+ check("r2", "show bgp ipv6 vpn json", "r2/vpnv6_rib.json")
+ check("r1", "show ipv6 route vrf vrf10 json", "r1/vrf10_rib.json")
+ check("r1", "show ipv6 route vrf vrf20 json", "r1/vrf20_rib.json")
+ check("r2", "show ipv6 route vrf vrf10 json", "r2/vrf10_rib.json")
+ check("r2", "show ipv6 route vrf vrf20 json", "r2/vrf20_rib.json")
+ check("ce1", "show ipv6 route json", "ce1/ipv6_rib.json")
+ check("ce2", "show ipv6 route json", "ce2/ipv6_rib.json")
+ check("ce3", "show ipv6 route json", "ce3/ipv6_rib.json")
+ check("ce4", "show ipv6 route json", "ce4/ipv6_rib.json")
+ check("ce5", "show ipv6 route json", "ce5/ipv6_rib.json")
+ check("ce6", "show ipv6 route json", "ce6/ipv6_rib.json")
+
+
+def test_ping():
+ def _check(name, dest_addr, match):
+ tgen = get_topogen()
+ output = tgen.gears[name].run("ping6 {} -c 1 -w 1".format(dest_addr))
+ logger.info(output)
+ assert match in output, "ping fail"
+
+ def check(name, dest_addr, match):
+ logger.info("[+] check {} {} {}".format(name, dest_addr, match))
+ tgen = get_topogen()
+ func = functools.partial(_check, name, dest_addr, match)
+ success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
+ assert result is None, 'Failed'
+
+ check("ce1", "2001:2::2", " 0% packet loss")
+ check("ce1", "2001:3::2", " 0% packet loss")
+ check("ce1", "2001:4::2", " 100% packet loss")
+ check("ce1", "2001:5::2", " 100% packet loss")
+ check("ce1", "2001:6::2", " 100% packet loss")
+ check("ce4", "2001:1::2", " 100% packet loss")
+ check("ce4", "2001:2::2", " 100% packet loss")
+ check("ce4", "2001:3::2", " 100% packet loss")
+ check("ce4", "2001:5::2", " 0% packet loss")
+ check("ce4", "2001:6::2", " 0% packet loss")
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/srv6_locator/__init__.py b/tests/topotests/srv6_locator/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/srv6_locator/__init__.py
diff --git a/tests/topotests/srv6_locator/expected_chunks1.json b/tests/topotests/srv6_locator/expected_chunks1.json
new file mode 100644
index 0000000000..fe51488c70
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_chunks1.json
@@ -0,0 +1 @@
+[]
diff --git a/tests/topotests/srv6_locator/expected_chunks2.json b/tests/topotests/srv6_locator/expected_chunks2.json
new file mode 100644
index 0000000000..8707384777
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_chunks2.json
@@ -0,0 +1,8 @@
+[
+ {
+ "name": "loc1",
+ "chunks": [
+ "2001:db8:1:1::/64"
+ ]
+ }
+]
diff --git a/tests/topotests/srv6_locator/expected_chunks3.json b/tests/topotests/srv6_locator/expected_chunks3.json
new file mode 100644
index 0000000000..fe51488c70
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_chunks3.json
@@ -0,0 +1 @@
+[]
diff --git a/tests/topotests/srv6_locator/expected_chunks4.json b/tests/topotests/srv6_locator/expected_chunks4.json
new file mode 100644
index 0000000000..6e49738f37
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_chunks4.json
@@ -0,0 +1,6 @@
+[
+ {
+ "name": "loc3",
+ "chunks": []
+ }
+]
diff --git a/tests/topotests/srv6_locator/expected_chunks5.json b/tests/topotests/srv6_locator/expected_chunks5.json
new file mode 100644
index 0000000000..a18221859e
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_chunks5.json
@@ -0,0 +1,8 @@
+[
+ {
+ "name": "loc3",
+ "chunks": [
+ "2001:db8:3:3::/64"
+ ]
+ }
+]
diff --git a/tests/topotests/srv6_locator/expected_ipv6_routes.json b/tests/topotests/srv6_locator/expected_ipv6_routes.json
new file mode 100644
index 0000000000..fb92f25b73
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_ipv6_routes.json
@@ -0,0 +1,29 @@
+{
+ "2001:db8:1:1:1::/80":[
+ {
+ "prefix":"2001:db8:1:1:1::/80",
+ "protocol":"static",
+ "selected":true,
+ "installed":true,
+ "nexthops":[{
+ "fib":true,
+ "active":true,
+ "seg6local":{ "action":"End" }
+ }]
+ }
+ ],
+ "2001:db8:2:2:1::/80":[
+ {
+ "prefix":"2001:db8:2:2:1::/80",
+ "protocol":"static",
+ "selected":true,
+ "installed":true,
+ "nexthops":[{
+ "fib":true,
+ "active":true,
+ "seg6local":{ "action":"End" }
+
+ }]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator/expected_locators1.json b/tests/topotests/srv6_locator/expected_locators1.json
new file mode 100644
index 0000000000..3953bb0723
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_locators1.json
@@ -0,0 +1,26 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "2001:db8:1:1::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:1:1::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "2001:db8:2:2::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:2:2::/64",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator/expected_locators2.json b/tests/topotests/srv6_locator/expected_locators2.json
new file mode 100644
index 0000000000..ce3a4045d7
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_locators2.json
@@ -0,0 +1,26 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "2001:db8:1:1::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:1:1::/64",
+ "proto": "sharp"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "2001:db8:2:2::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:2:2::/64",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator/expected_locators3.json b/tests/topotests/srv6_locator/expected_locators3.json
new file mode 100644
index 0000000000..3953bb0723
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_locators3.json
@@ -0,0 +1,26 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "2001:db8:1:1::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:1:1::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "2001:db8:2:2::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:2:2::/64",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_locator/expected_locators4.json b/tests/topotests/srv6_locator/expected_locators4.json
new file mode 100644
index 0000000000..7989f9021b
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_locators4.json
@@ -0,0 +1,36 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "2001:db8:1:1::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:1:1::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "2001:db8:2:2::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:2:2::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name":"loc3",
+ "statusUp":false,
+ "chunks":[
+ {
+ "proto":"sharp"
+ }
+ ]
+ }
+ ]
+}
+
diff --git a/tests/topotests/srv6_locator/expected_locators5.json b/tests/topotests/srv6_locator/expected_locators5.json
new file mode 100644
index 0000000000..8c512ebc46
--- /dev/null
+++ b/tests/topotests/srv6_locator/expected_locators5.json
@@ -0,0 +1,38 @@
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "2001:db8:1:1::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:1:1::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "2001:db8:2:2::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:2:2::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc3",
+ "prefix": "2001:db8:3:3::/64",
+ "statusUp": true,
+ "chunks":[
+ {
+ "prefix": "2001:db8:3:3::/64",
+ "proto": "sharp"
+ }
+ ]
+ }
+ ]
+}
+
diff --git a/tests/topotests/srv6_locator/r1/setup.sh b/tests/topotests/srv6_locator/r1/setup.sh
new file mode 100644
index 0000000000..36ed713f24
--- /dev/null
+++ b/tests/topotests/srv6_locator/r1/setup.sh
@@ -0,0 +1,2 @@
+ip link add dummy0 type dummy
+ip link set dummy0 up
diff --git a/tests/topotests/srv6_locator/r1/sharpd.conf b/tests/topotests/srv6_locator/r1/sharpd.conf
new file mode 100644
index 0000000000..d46085935c
--- /dev/null
+++ b/tests/topotests/srv6_locator/r1/sharpd.conf
@@ -0,0 +1,7 @@
+hostname r1
+!
+log stdout notifications
+log monitor notifications
+log commands
+log file sharpd.log debugging
+!
diff --git a/tests/topotests/srv6_locator/r1/zebra.conf b/tests/topotests/srv6_locator/r1/zebra.conf
new file mode 100644
index 0000000000..d0c0232073
--- /dev/null
+++ b/tests/topotests/srv6_locator/r1/zebra.conf
@@ -0,0 +1,22 @@
+hostname r1
+!
+debug zebra events
+debug zebra rib detailed
+!
+log stdout notifications
+log monitor notifications
+log commands
+log file zebra.log debugging
+!
+segment-routing
+ srv6
+ locators
+ locator loc1
+ prefix 2001:db8:1:1::/64
+ !
+ locator loc2
+ prefix 2001:db8:2:2::/64
+ !
+ !
+ !
+!
diff --git a/tests/topotests/srv6_locator/test_srv6_locator.py b/tests/topotests/srv6_locator/test_srv6_locator.py
new file mode 100755
index 0000000000..a7416ce085
--- /dev/null
+++ b/tests/topotests/srv6_locator/test_srv6_locator.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+
+#
+# test_srv6_locator.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2020 by
+# LINE Corporation, Hiroki Shirokura <slank.dev@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_srv6_locator.py:
+Test for SRv6 manager on zebra
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+class TemplateTopo(Topo):
+ def build(self, *_args, **_opts):
+ tgen = get_topogen(self)
+ tgen.add_router('r1')
+
+
+def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+ router_list = tgen.routers()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
+ router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, '{}/sharpd.conf'.format(rname)))
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_srv6():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears['r1']
+
+ def _check_srv6_locator(router, expected_locator_file):
+ logger.info("checking zebra locator status")
+ output = json.loads(router.vtysh_cmd("show segment-routing srv6 locator json"))
+ expected = open_json_file("{}/{}".format(CWD, expected_locator_file))
+ return topotest.json_cmp(output, expected)
+
+ def _check_sharpd_chunk(router, expected_chunk_file):
+ logger.info("checking sharpd locator chunk status")
+ output = json.loads(router.vtysh_cmd("show sharp segment-routing srv6 json"))
+ expected = open_json_file("{}/{}".format(CWD, expected_chunk_file))
+ return topotest.json_cmp(output, expected)
+
+ def check_srv6_locator(router, expected_file):
+ func = functools.partial(_check_srv6_locator, router, expected_file)
+ success, result = topotest.run_and_expect(func, None, count=5, wait=0.5)
+ assert result is None, 'Failed'
+
+ def check_sharpd_chunk(router, expected_file):
+ func = functools.partial(_check_sharpd_chunk, router, expected_file)
+ success, result = topotest.run_and_expect(func, None, count=5, wait=0.5)
+ assert result is None, 'Failed'
+
+ logger.info("Test1 for Locator Configuration")
+ check_srv6_locator(router, "expected_locators1.json")
+ check_sharpd_chunk(router, "expected_chunks1.json")
+
+ logger.info("Test2 get chunk for locator loc1")
+ router.vtysh_cmd("sharp srv6-manager get-locator-chunk loc1")
+ check_srv6_locator(router, "expected_locators2.json")
+ check_sharpd_chunk(router, "expected_chunks2.json")
+
+ logger.info("Test3 release chunk for locator loc1")
+ router.vtysh_cmd("sharp srv6-manager release-locator-chunk loc1")
+ check_srv6_locator(router, "expected_locators3.json")
+ check_sharpd_chunk(router, "expected_chunks3.json")
+
+ logger.info("Test4 get chunk for non-exist locator by zclient")
+ router.vtysh_cmd("sharp srv6-manager get-locator-chunk loc3")
+ check_srv6_locator(router, "expected_locators4.json")
+ check_sharpd_chunk(router, "expected_chunks4.json")
+
+ logger.info("Test5 Test for Zclient. after locator loc3 was configured")
+ router.vtysh_cmd(
+ """
+ configure terminal
+ segment-routing
+ srv6
+ locators
+ locator loc3
+ prefix 2001:db8:3:3::/64
+ """
+ )
+ check_srv6_locator(router, "expected_locators5.json")
+ check_sharpd_chunk(router, "expected_chunks5.json")
+
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/zebra_seg6_route/r1/routes.json b/tests/topotests/zebra_seg6_route/r1/routes.json
new file mode 100644
index 0000000000..a0c15b8fe4
--- /dev/null
+++ b/tests/topotests/zebra_seg6_route/r1/routes.json
@@ -0,0 +1,25 @@
+[
+ {
+ "in": {
+ "dest": "1::1",
+ "nh": "2001::1",
+ "sid": "a::"
+ },
+ "out":[{
+ "prefix":"1::1/128",
+ "protocol":"sharp",
+ "selected":true,
+ "destSelected":true,
+ "distance":150,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "nexthops":[{
+ "flags":3,
+ "fib":true,
+ "active":true,
+ "seg6": { "segs": "a::" }
+ }]
+ }]
+ }
+]
diff --git a/tests/topotests/zebra_seg6_route/r1/setup.sh b/tests/topotests/zebra_seg6_route/r1/setup.sh
new file mode 100644
index 0000000000..2cb5c4a4ec
--- /dev/null
+++ b/tests/topotests/zebra_seg6_route/r1/setup.sh
@@ -0,0 +1,5 @@
+ip link add vrf10 type vrf table 10
+ip link set vrf10 up
+ip link add dum0 type dummy
+ip link set dum0 up
+sysctl -w net.ipv6.conf.dum0.disable_ipv6=0
diff --git a/tests/topotests/zebra_seg6_route/r1/sharpd.conf b/tests/topotests/zebra_seg6_route/r1/sharpd.conf
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/zebra_seg6_route/r1/sharpd.conf
diff --git a/tests/topotests/zebra_seg6_route/r1/zebra.conf b/tests/topotests/zebra_seg6_route/r1/zebra.conf
new file mode 100644
index 0000000000..ad661e116b
--- /dev/null
+++ b/tests/topotests/zebra_seg6_route/r1/zebra.conf
@@ -0,0 +1,13 @@
+log file zebra.log
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+debug zebra packet
+debug zebra dplane
+debug zebra kernel msgdump
+!
+interface dum0
+ ipv6 address 2001::1/64
+!
diff --git a/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py b/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py
new file mode 100755
index 0000000000..e83b2c1007
--- /dev/null
+++ b/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+#
+# test_zebra_seg6_route.py
+#
+# Copyright (c) 2020 by
+# LINE Corporation, Hiroki Shirokura <slank.dev@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_zebra_seg6_route.py: Test seg6 route addition with zapi.
+"""
+
+import os
+import re
+import sys
+import pytest
+import json
+import platform
+from functools import partial
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.common_config import shutdown_bringup_interface
+from mininet.topo import Topo
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+class TemplateTopo(Topo):
+ def build(self, **_opts):
+ tgen = get_topogen(self)
+ tgen.add_router("r1")
+
+
+def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+ router_list = tgen.routers()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))))
+ router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_zebra_seg6_routes():
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+    logger.info("Test for seg6 route install via ZAPI has started.")
+    r1 = tgen.gears["r1"]
+
+ def check(router, dest, nh, sid, expected):
+ router.vtysh_cmd("sharp install seg6-routes {} "\
+ "nexthop-seg6 {} encap {} 1".format(dest, nh, sid))
+ output = json.loads(router.vtysh_cmd("show ipv6 route {} json".format(dest)))
+ output = output.get('{}/128'.format(dest))
+ if output is None:
+ return False
+ return topotest.json_cmp(output, expected)
+
+ manifests = open_json_file(os.path.join(CWD, "{}/routes.json".format("r1")))
+ for manifest in manifests:
+ logger.info("CHECK {} {} {}".format(manifest['in']['dest'],
+ manifest['in']['nh'],
+ manifest['in']['sid']))
+ test_func = partial(check, r1,
+ manifest['in']['dest'],
+ manifest['in']['nh'],
+ manifest['in']['sid'],
+ manifest['out'])
+ success, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
+ assert result is None, 'Failed'
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/zebra_seg6local_route/r1/routes.json b/tests/topotests/zebra_seg6local_route/r1/routes.json
new file mode 100644
index 0000000000..4cb1c4ae13
--- /dev/null
+++ b/tests/topotests/zebra_seg6local_route/r1/routes.json
@@ -0,0 +1,98 @@
+[
+ {
+ "in": {
+ "dest": "1::1",
+ "context": "End"
+ },
+ "out":[{
+ "prefix":"1::1/128",
+ "protocol":"sharp",
+ "selected":true,
+ "destSelected":true,
+ "distance":150,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "nexthops":[{
+ "flags":3,
+ "fib":true,
+ "active":true,
+ "directlyConnected":true,
+ "interfaceName": "dum0",
+ "seg6local": { "action": "End" }
+ }]
+ }]
+ },
+ {
+ "in": {
+ "dest": "2::1",
+ "context": "End_X 2001::1"
+ },
+ "out":[{
+ "prefix":"2::1/128",
+ "protocol":"sharp",
+ "selected":true,
+ "destSelected":true,
+ "distance":150,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "nexthops":[{
+ "flags":3,
+ "fib":true,
+ "active":true,
+ "directlyConnected":true,
+ "interfaceName": "dum0",
+ "seg6local": { "action": "End.X" }
+ }]
+ }]
+ },
+ {
+ "in": {
+ "dest": "3::1",
+ "context": "End_T 10"
+ },
+ "out":[{
+ "prefix":"3::1/128",
+ "protocol":"sharp",
+ "selected":true,
+ "destSelected":true,
+ "distance":150,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "nexthops":[{
+ "flags":3,
+ "fib":true,
+ "active":true,
+ "directlyConnected":true,
+ "interfaceName": "dum0",
+ "seg6local": { "action": "End.T" }
+ }]
+ }]
+ },
+ {
+ "in": {
+ "dest": "4::1",
+ "context": "End_DX4 10.0.0.1"
+ },
+ "out":[{
+ "prefix":"4::1/128",
+ "protocol":"sharp",
+ "selected":true,
+ "destSelected":true,
+ "distance":150,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "nexthops":[{
+ "flags":3,
+ "fib":true,
+ "active":true,
+ "directlyConnected":true,
+ "interfaceName": "dum0",
+ "seg6local": { "action": "End.DX4" }
+ }]
+ }]
+ }
+]
diff --git a/tests/topotests/zebra_seg6local_route/r1/setup.sh b/tests/topotests/zebra_seg6local_route/r1/setup.sh
new file mode 100644
index 0000000000..691adb0a19
--- /dev/null
+++ b/tests/topotests/zebra_seg6local_route/r1/setup.sh
@@ -0,0 +1,3 @@
+ip link add dum0 type dummy
+ip link set dum0 up
+sysctl -w net.ipv6.conf.dum0.disable_ipv6=0
diff --git a/tests/topotests/zebra_seg6local_route/r1/sharpd.conf b/tests/topotests/zebra_seg6local_route/r1/sharpd.conf
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/zebra_seg6local_route/r1/sharpd.conf
diff --git a/tests/topotests/zebra_seg6local_route/r1/zebra.conf b/tests/topotests/zebra_seg6local_route/r1/zebra.conf
new file mode 100644
index 0000000000..22eb88098b
--- /dev/null
+++ b/tests/topotests/zebra_seg6local_route/r1/zebra.conf
@@ -0,0 +1,9 @@
+log file zebra.log
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+debug zebra packet
+debug zebra dplane
+debug zebra kernel msgdump
diff --git a/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py b/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py
new file mode 100755
index 0000000000..1c9d208fef
--- /dev/null
+++ b/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+#
+# test_zebra_seg6local_route.py
+#
+# Copyright (c) 2020 by
+# LINE Corporation, Hiroki Shirokura <slank.dev@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_zebra_seg6local_route.py: Test seg6local route addition with zapi.
+"""
+
+import os
+import re
+import sys
+import pytest
+import json
+import platform
+from functools import partial
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.common_config import shutdown_bringup_interface
+from mininet.topo import Topo
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+class TemplateTopo(Topo):
+ def build(self, **_opts):
+ tgen = get_topogen(self)
+ tgen.add_router("r1")
+
+
+def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+ router_list = tgen.routers()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))))
+ router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_zebra_seg6local_routes():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+    logger.info("Test for seg6local route install via ZAPI has started.")
+ r1 = tgen.gears["r1"]
+
+ def check(router, dest, context, expected):
+ router.vtysh_cmd("sharp install seg6local-routes {} "\
+ "nexthop-seg6local dum0 {} 1".format(dest, context))
+ output = json.loads(router.vtysh_cmd("show ipv6 route {} json".format(dest)))
+ output = output.get('{}/128'.format(dest))
+ if output is None:
+ return False
+ return topotest.json_cmp(output, expected)
+
+ manifests = open_json_file(os.path.join(CWD, "{}/routes.json".format("r1")))
+ for manifest in manifests:
+ logger.info("CHECK {} {}".format(manifest['in']['dest'],
+ manifest['in']['context']))
+ test_func = partial(check, r1,
+ manifest['in']['dest'],
+ manifest['in']['context'],
+ manifest['out'])
+ success, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
+ assert result is None, 'Failed'
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index 111c2dbc03..e8184c2dc8 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -1349,6 +1349,27 @@ static struct cmd_node rmap_node = {
.prompt = "%s(config-route-map)# ",
};
+static struct cmd_node srv6_node = {
+ .name = "srv6",
+ .node = SRV6_NODE,
+ .parent_node = SEGMENT_ROUTING_NODE,
+ .prompt = "%s(config-srv6)# ",
+};
+
+static struct cmd_node srv6_locs_node = {
+ .name = "srv6-locators",
+ .node = SRV6_LOCS_NODE,
+ .parent_node = SRV6_NODE,
+ .prompt = "%s(config-srv6-locators)# ",
+};
+
+static struct cmd_node srv6_loc_node = {
+ .name = "srv6-locator",
+ .node = SRV6_LOC_NODE,
+ .parent_node = SRV6_LOCS_NODE,
+ .prompt = "%s(config-srv6-locator)# ",
+};
+
#ifdef HAVE_PBRD
static struct cmd_node pbr_map_node = {
.name = "pbr-map",
@@ -1486,6 +1507,13 @@ static struct cmd_node bmp_node = {
.parent_node = BGP_NODE,
.prompt = "%s(config-bgp-bmp)# "
};
+
+static struct cmd_node bgp_srv6_node = {
+ .name = "bgp srv6",
+ .node = BGP_SRV6_NODE,
+ .parent_node = BGP_NODE,
+ .prompt = "%s(config-router-srv6)# ",
+};
#endif /* HAVE_BGPD */
#ifdef HAVE_OSPFD
@@ -1659,6 +1687,31 @@ DEFUNSH(VTYSH_REALLYALL, vtysh_end_all, vtysh_end_all_cmd, "end",
return vtysh_end();
}
+DEFUNSH(VTYSH_SR, srv6, srv6_cmd,
+ "srv6",
+ "Segment-Routing SRv6 configuration\n")
+{
+ vty->node = SRV6_NODE;
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_SR, srv6_locators, srv6_locators_cmd,
+ "locators",
+ "Segment-Routing SRv6 locators configuration\n")
+{
+ vty->node = SRV6_LOCS_NODE;
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_SR, srv6_locator, srv6_locator_cmd,
+ "locator WORD",
+ "Segment Routing SRv6 locator\n"
+ "Specify locator-name\n")
+{
+ vty->node = SRV6_LOC_NODE;
+ return CMD_SUCCESS;
+}
+
#ifdef HAVE_BGPD
DEFUNSH(VTYSH_BGPD, router_bgp, router_bgp_cmd,
"router bgp [(1-4294967295) [<view|vrf> WORD]]",
@@ -1816,6 +1869,39 @@ DEFUNSH(VTYSH_BGPD,
return CMD_SUCCESS;
}
+DEFUNSH(VTYSH_BGPD,
+ bgp_srv6,
+ bgp_srv6_cmd,
+ "segment-routing srv6",
+ "Segment-Routing configuration\n"
+ "Segment-Routing SRv6 configuration\n")
+{
+ vty->node = BGP_SRV6_NODE;
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_BGPD,
+ exit_bgp_srv6,
+ exit_bgp_srv6_cmd,
+ "exit",
+ "exit Segment-Routing SRv6 configuration\n")
+{
+ if (vty->node == BGP_SRV6_NODE)
+ vty->node = BGP_NODE;
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_BGPD,
+ quit_bgp_srv6,
+ quit_bgp_srv6_cmd,
+ "quit",
+ "quit Segment-Routing SRv6 configuration\n")
+{
+ if (vty->node == BGP_SRV6_NODE)
+ vty->node = BGP_NODE;
+ return CMD_SUCCESS;
+}
+
DEFUNSH(VTYSH_BGPD, address_family_evpn, address_family_evpn_cmd,
"address-family <l2vpn evpn>",
"Enter Address Family command mode\n"
@@ -2084,7 +2170,7 @@ DEFUNSH(VTYSH_FABRICD, router_openfabric, router_openfabric_cmd, "router openfab
#endif /* HAVE_FABRICD */
#if defined(HAVE_PATHD)
-DEFUNSH(VTYSH_PATHD, segment_routing, segment_routing_cmd,
+DEFUNSH(VTYSH_SR, segment_routing, segment_routing_cmd,
"segment-routing",
"Configure segment routing\n")
{
@@ -2366,6 +2452,30 @@ DEFUNSH(VTYSH_VRF, exit_vrf_config, exit_vrf_config_cmd, "exit-vrf",
return CMD_SUCCESS;
}
+DEFUNSH(VTYSH_SR, exit_srv6_config, exit_srv6_config_cmd, "exit",
+ "Exit from SRv6 configuration mode\n")
+{
+ if (vty->node == SRV6_NODE)
+ vty->node = SEGMENT_ROUTING_NODE;
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_SR, exit_srv6_locs_config, exit_srv6_locs_config_cmd, "exit",
+ "Exit from SRv6-locators configuration mode\n")
+{
+ if (vty->node == SRV6_LOCS_NODE)
+ vty->node = SRV6_NODE;
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_SR, exit_srv6_loc_config, exit_srv6_loc_config_cmd, "exit",
+ "Exit from SRv6-locator configuration mode\n")
+{
+ if (vty->node == SRV6_LOC_NODE)
+ vty->node = SRV6_LOCS_NODE;
+ return CMD_SUCCESS;
+}
+
#ifdef HAVE_RIPD
DEFUNSH(VTYSH_RIPD, vtysh_exit_ripd, vtysh_exit_ripd_cmd, "exit",
"Exit current mode and down to previous mode\n")
@@ -4131,6 +4241,12 @@ void vtysh_init_vty(void)
install_element(BMP_NODE, &bmp_exit_cmd);
install_element(BMP_NODE, &bmp_quit_cmd);
install_element(BMP_NODE, &vtysh_end_all_cmd);
+
+ install_node(&bgp_srv6_node);
+ install_element(BGP_NODE, &bgp_srv6_cmd);
+ install_element(BGP_SRV6_NODE, &exit_bgp_srv6_cmd);
+ install_element(BGP_SRV6_NODE, &quit_bgp_srv6_cmd);
+ install_element(BGP_SRV6_NODE, &vtysh_end_all_cmd);
#endif /* HAVE_BGPD */
/* ripd */
@@ -4431,6 +4547,22 @@ void vtysh_init_vty(void)
install_element(CONFIG_NODE, &vtysh_end_all_cmd);
install_element(ENABLE_NODE, &vtysh_end_all_cmd);
+ /* SRv6 Data-plane */
+ install_node(&srv6_node);
+ install_element(SEGMENT_ROUTING_NODE, &srv6_cmd);
+ install_element(SRV6_NODE, &srv6_locators_cmd);
+ install_element(SRV6_NODE, &exit_srv6_config_cmd);
+ install_element(SRV6_NODE, &vtysh_end_all_cmd);
+
+ install_node(&srv6_locs_node);
+ install_element(SRV6_LOCS_NODE, &srv6_locator_cmd);
+ install_element(SRV6_LOCS_NODE, &exit_srv6_locs_config_cmd);
+ install_element(SRV6_LOCS_NODE, &vtysh_end_all_cmd);
+
+ install_node(&srv6_loc_node);
+ install_element(SRV6_LOC_NODE, &exit_srv6_loc_config_cmd);
+ install_element(SRV6_LOC_NODE, &vtysh_end_all_cmd);
+
install_element(ENABLE_NODE, &vtysh_show_running_config_cmd);
install_element(ENABLE_NODE, &vtysh_copy_running_config_cmd);
install_element(ENABLE_NODE, &vtysh_copy_to_running_cmd);
diff --git a/vtysh/vtysh.h b/vtysh/vtysh.h
index 7c8d9315e1..87f1f67443 100644
--- a/vtysh/vtysh.h
+++ b/vtysh/vtysh.h
@@ -60,6 +60,7 @@ DECLARE_MGROUP(MVTYSH);
#define VTYSH_KEYS VTYSH_RIPD|VTYSH_EIGRPD
/* Daemons who can process nexthop-group configs */
#define VTYSH_NH_GROUP VTYSH_PBRD|VTYSH_SHARPD
+#define VTYSH_SR VTYSH_ZEBRA|VTYSH_PATHD
enum vtysh_write_integrated {
WRITE_INTEGRATED_UNSPECIFIED,
diff --git a/vtysh/vtysh_config.c b/vtysh/vtysh_config.c
index f92b0e920b..6d80cf9d96 100644
--- a/vtysh/vtysh_config.c
+++ b/vtysh/vtysh_config.c
@@ -430,6 +430,10 @@ void vtysh_config_parse_line(void *arg, const char *line)
config = config_get(PROTOCOL_NODE, line);
else if (strncmp(line, "mpls", strlen("mpls")) == 0)
config = config_get(MPLS_NODE, line);
+ else if (strncmp(line, "segment-routing",
+ strlen("segment-routing"))
+ == 0)
+ config = config_get(SEGMENT_ROUTING_NODE, line);
else if (strncmp(line, "bfd", strlen("bfd")) == 0)
config = config_get(BFD_NODE, line);
else {
diff --git a/zebra/main.c b/zebra/main.c
index 3f75b222ba..e36af51005 100644
--- a/zebra/main.c
+++ b/zebra/main.c
@@ -57,6 +57,8 @@
#include "zebra/zebra_nb.h"
#include "zebra/zebra_opaque.h"
#include "zebra/zebra_srte.h"
+#include "zebra/zebra_srv6.h"
+#include "zebra/zebra_srv6_vty.h"
#define ZEBRA_PTM_SUPPORT
@@ -418,6 +420,8 @@ int main(int argc, char **argv)
zebra_pbr_init();
zebra_opaque_init();
zebra_srte_init();
+ zebra_srv6_init();
+ zebra_srv6_vty_init();
/* For debug purpose. */
/* SET_FLAG (zebra_debug_event, ZEBRA_DEBUG_EVENT); */
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index fbf37230c7..38f8140db2 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -22,9 +22,18 @@
#ifdef HAVE_NETLINK
+/* The following definition is to workaround an issue in the Linux kernel
+ * header files with redefinition of 'struct in6_addr' in both
+ * netinet/in.h and linux/in6.h.
+ * Reference - https://sourceware.org/ml/libc-alpha/2013-01/msg00599.html
+ */
+#define _LINUX_IN6_H
+
#include <net/if_arp.h>
#include <linux/lwtunnel.h>
#include <linux/mpls_iptunnel.h>
+#include <linux/seg6_iptunnel.h>
+#include <linux/seg6_local.h>
#include <linux/neighbour.h>
#include <linux/rtnetlink.h>
#include <linux/nexthop.h>
@@ -38,6 +47,8 @@
#include "if.h"
#include "log.h"
#include "prefix.h"
+#include "plist.h"
+#include "plist_int.h"
#include "connected.h"
#include "table.h"
#include "memory.h"
@@ -404,6 +415,55 @@ static int parse_encap_mpls(struct rtattr *tb, mpls_label_t *labels)
return num_labels;
}
+static enum seg6local_action_t
+parse_encap_seg6local(struct rtattr *tb,
+ struct seg6local_context *ctx)
+{
+ struct rtattr *tb_encap[256] = {};
+ enum seg6local_action_t act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
+
+ netlink_parse_rtattr_nested(tb_encap, 256, tb);
+
+ if (tb_encap[SEG6_LOCAL_ACTION])
+ act = *(uint32_t *)RTA_DATA(tb_encap[SEG6_LOCAL_ACTION]);
+
+ if (tb_encap[SEG6_LOCAL_NH4])
+ ctx->nh4 = *(struct in_addr *)RTA_DATA(
+ tb_encap[SEG6_LOCAL_NH4]);
+
+ if (tb_encap[SEG6_LOCAL_NH6])
+ ctx->nh6 = *(struct in6_addr *)RTA_DATA(
+ tb_encap[SEG6_LOCAL_NH6]);
+
+ if (tb_encap[SEG6_LOCAL_TABLE])
+ ctx->table = *(uint32_t *)RTA_DATA(tb_encap[SEG6_LOCAL_TABLE]);
+
+ return act;
+}
+
+static int parse_encap_seg6(struct rtattr *tb, struct in6_addr *segs)
+{
+ struct rtattr *tb_encap[256] = {};
+ struct seg6_iptunnel_encap *ipt = NULL;
+ struct in6_addr *segments = NULL;
+
+ netlink_parse_rtattr_nested(tb_encap, 256, tb);
+
+ /*
+ * TODO: Multiple SID lists are not yet supported.
+ */
+ if (tb_encap[SEG6_IPTUNNEL_SRH]) {
+ ipt = (struct seg6_iptunnel_encap *)
+ RTA_DATA(tb_encap[SEG6_IPTUNNEL_SRH]);
+ segments = ipt->srh[0].segments;
+ *segs = segments[0];
+ return 1;
+ }
+
+ return 0;
+}
+
+
static struct nexthop
parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb,
enum blackhole_type bh_type, int index, void *prefsrc,
@@ -413,6 +473,10 @@ parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb,
struct nexthop nh = {0};
mpls_label_t labels[MPLS_MAX_LABELS] = {0};
int num_labels = 0;
+ enum seg6local_action_t seg6l_act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
+ struct seg6local_context seg6l_ctx = {};
+ struct in6_addr seg6_segs = {};
+ int num_segs = 0;
vrf_id_t nh_vrf_id = vrf_id;
size_t sz = (afi == AFI_IP) ? 4 : 16;
@@ -452,6 +516,16 @@ parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb,
== LWTUNNEL_ENCAP_MPLS) {
num_labels = parse_encap_mpls(tb[RTA_ENCAP], labels);
}
+ if (tb[RTA_ENCAP] && tb[RTA_ENCAP_TYPE]
+ && *(uint16_t *)RTA_DATA(tb[RTA_ENCAP_TYPE])
+ == LWTUNNEL_ENCAP_SEG6_LOCAL) {
+ seg6l_act = parse_encap_seg6local(tb[RTA_ENCAP], &seg6l_ctx);
+ }
+ if (tb[RTA_ENCAP] && tb[RTA_ENCAP_TYPE]
+ && *(uint16_t *)RTA_DATA(tb[RTA_ENCAP_TYPE])
+ == LWTUNNEL_ENCAP_SEG6) {
+ num_segs = parse_encap_seg6(tb[RTA_ENCAP], &seg6_segs);
+ }
if (rtm->rtm_flags & RTNH_F_ONLINK)
SET_FLAG(nh.flags, NEXTHOP_FLAG_ONLINK);
@@ -459,6 +533,12 @@ parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb,
if (num_labels)
nexthop_add_labels(&nh, ZEBRA_LSP_STATIC, num_labels, labels);
+ if (seg6l_act != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ nexthop_add_srv6_seg6local(&nh, seg6l_act, &seg6l_ctx);
+
+ if (num_segs)
+ nexthop_add_srv6_seg6(&nh, &seg6_segs);
+
return nh;
}
@@ -475,6 +555,10 @@ static uint8_t parse_multipath_nexthops_unicast(ns_id_t ns_id,
/* MPLS labels */
mpls_label_t labels[MPLS_MAX_LABELS] = {0};
int num_labels = 0;
+ enum seg6local_action_t seg6l_act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
+ struct seg6local_context seg6l_ctx = {};
+ struct in6_addr seg6_segs = {};
+ int num_segs = 0;
struct rtattr *rtnh_tb[RTA_MAX + 1] = {};
int len = RTA_PAYLOAD(tb[RTA_MULTIPATH]);
@@ -519,6 +603,18 @@ static uint8_t parse_multipath_nexthops_unicast(ns_id_t ns_id,
num_labels = parse_encap_mpls(
rtnh_tb[RTA_ENCAP], labels);
}
+ if (rtnh_tb[RTA_ENCAP] && rtnh_tb[RTA_ENCAP_TYPE]
+ && *(uint16_t *)RTA_DATA(rtnh_tb[RTA_ENCAP_TYPE])
+ == LWTUNNEL_ENCAP_SEG6_LOCAL) {
+ seg6l_act = parse_encap_seg6local(
+ rtnh_tb[RTA_ENCAP], &seg6l_ctx);
+ }
+ if (rtnh_tb[RTA_ENCAP] && rtnh_tb[RTA_ENCAP_TYPE]
+ && *(uint16_t *)RTA_DATA(rtnh_tb[RTA_ENCAP_TYPE])
+ == LWTUNNEL_ENCAP_SEG6) {
+ num_segs = parse_encap_seg6(rtnh_tb[RTA_ENCAP],
+ &seg6_segs);
+ }
}
if (gate && rtm->rtm_family == AF_INET) {
@@ -544,6 +640,13 @@ static uint8_t parse_multipath_nexthops_unicast(ns_id_t ns_id,
nexthop_add_labels(nh, ZEBRA_LSP_STATIC,
num_labels, labels);
+ if (seg6l_act != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ nexthop_add_srv6_seg6local(nh, seg6l_act,
+ &seg6l_ctx);
+
+ if (num_segs)
+ nexthop_add_srv6_seg6(nh, &seg6_segs);
+
if (rtnh->rtnh_flags & RTNH_F_ONLINK)
SET_FLAG(nh->flags, NEXTHOP_FLAG_ONLINK);
@@ -1227,6 +1330,40 @@ static bool _netlink_route_encode_nexthop_src(const struct nexthop *nexthop,
return true;
}
+static ssize_t fill_seg6ipt_encap(char *buffer, size_t buflen,
+ const struct in6_addr *seg)
+{
+ struct seg6_iptunnel_encap *ipt;
+ struct ipv6_sr_hdr *srh;
+ const size_t srhlen = 24;
+
+ /*
+ * Caution: Support only SINGLE-SID, not MULTI-SID
+ * This function only supports the case where segs represents
+ * a single SID. If you want to extend the SRv6 functionality,
+ * you should improve the Boundary Check.
+ * Ex. In case of set a SID-List include multiple-SIDs as an
+ * argument of the Transit Behavior, we must support variable
+ * boundary check for buflen.
+ */
+ if (buflen < (sizeof(struct seg6_iptunnel_encap) +
+ sizeof(struct ipv6_sr_hdr) + 16))
+ return -1;
+
+ memset(buffer, 0, buflen);
+
+ ipt = (struct seg6_iptunnel_encap *)buffer;
+ ipt->mode = SEG6_IPTUN_MODE_ENCAP;
+ srh = ipt->srh;
+ srh->hdrlen = (srhlen >> 3) - 1;
+ srh->type = 4;
+ srh->segments_left = 0;
+ srh->first_segment = 0;
+ memcpy(&srh->segments[0], seg, sizeof(struct in6_addr));
+
+ return srhlen + 4;
+}
+
/* This function takes a nexthop as argument and adds
* the appropriate netlink attributes to an existing
* netlink message.
@@ -1262,6 +1399,99 @@ static bool _netlink_route_build_singlepath(const struct prefix *p,
sizeof(label_buf)))
return false;
+ if (nexthop->nh_srv6) {
+ if (nexthop->nh_srv6->seg6local_action !=
+ ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) {
+ struct rtattr *nest;
+ const struct seg6local_context *ctx;
+
+ ctx = &nexthop->nh_srv6->seg6local_ctx;
+ if (!nl_attr_put16(nlmsg, req_size, RTA_ENCAP_TYPE,
+ LWTUNNEL_ENCAP_SEG6_LOCAL))
+ return false;
+
+ nest = nl_attr_nest(nlmsg, req_size, RTA_ENCAP);
+ if (!nest)
+ return false;
+
+ switch (nexthop->nh_srv6->seg6local_action) {
+ case ZEBRA_SEG6_LOCAL_ACTION_END:
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END))
+ return false;
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_X:
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_X))
+ return false;
+ if (!nl_attr_put(nlmsg, req_size,
+ SEG6_LOCAL_NH6, &ctx->nh6,
+ sizeof(struct in6_addr)))
+ return false;
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_T:
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_T))
+ return false;
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_TABLE,
+ ctx->table))
+ return false;
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_DX4:
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_DX4))
+ return false;
+ if (!nl_attr_put(nlmsg, req_size,
+ SEG6_LOCAL_NH4, &ctx->nh4,
+ sizeof(struct in_addr)))
+ return false;
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_DT6:
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_DT6))
+ return false;
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_TABLE,
+ ctx->table))
+ return false;
+ break;
+ default:
+ zlog_err("%s: unsupported seg6local behaviour action=%u",
+ __func__,
+ nexthop->nh_srv6->seg6local_action);
+ return false;
+ }
+ nl_attr_nest_end(nlmsg, nest);
+ }
+
+ if (!sid_zero(&nexthop->nh_srv6->seg6_segs)) {
+ char tun_buf[4096];
+ ssize_t tun_len;
+ struct rtattr *nest;
+
+ if (!nl_attr_put16(nlmsg, req_size, RTA_ENCAP_TYPE,
+ LWTUNNEL_ENCAP_SEG6))
+ return false;
+ nest = nl_attr_nest(nlmsg, req_size, RTA_ENCAP);
+ if (!nest)
+ return false;
+ tun_len = fill_seg6ipt_encap(tun_buf, sizeof(tun_buf),
+ &nexthop->nh_srv6->seg6_segs);
+ if (tun_len < 0)
+ return false;
+ if (!nl_attr_put(nlmsg, req_size, SEG6_IPTUNNEL_SRH,
+ tun_buf, tun_len))
+ return false;
+ nl_attr_nest_end(nlmsg, nest);
+ }
+ }
+
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
rtmsg->rtm_flags |= RTNH_F_ONLINK;
@@ -2256,6 +2486,119 @@ ssize_t netlink_nexthop_msg_encode(uint16_t cmd,
nl_attr_nest_end(&req->n, nest);
}
+ if (nh->nh_srv6) {
+ if (nh->nh_srv6->seg6local_action !=
+ ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) {
+ uint32_t action;
+ uint16_t encap;
+ struct rtattr *nest;
+ const struct seg6local_context *ctx;
+
+ req->nhm.nh_family = AF_INET6;
+ action = nh->nh_srv6->seg6local_action;
+ ctx = &nh->nh_srv6->seg6local_ctx;
+ encap = LWTUNNEL_ENCAP_SEG6_LOCAL;
+ if (!nl_attr_put(&req->n, buflen,
+ NHA_ENCAP_TYPE,
+ &encap,
+ sizeof(uint16_t)))
+ return 0;
+
+ nest = nl_attr_nest(&req->n, buflen,
+ NHA_ENCAP | NLA_F_NESTED);
+ if (!nest)
+ return 0;
+
+ switch (action) {
+ case SEG6_LOCAL_ACTION_END:
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END))
+ return 0;
+ break;
+ case SEG6_LOCAL_ACTION_END_X:
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_X))
+ return 0;
+ if (!nl_attr_put(
+ &req->n, buflen,
+ SEG6_LOCAL_NH6, &ctx->nh6,
+ sizeof(struct in6_addr)))
+ return 0;
+ break;
+ case SEG6_LOCAL_ACTION_END_T:
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_T))
+ return 0;
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_TABLE,
+ ctx->table))
+ return 0;
+ break;
+ case SEG6_LOCAL_ACTION_END_DX4:
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_DX4))
+ return 0;
+ if (!nl_attr_put(
+ &req->n, buflen,
+ SEG6_LOCAL_NH4, &ctx->nh4,
+ sizeof(struct in_addr)))
+ return 0;
+ break;
+ case SEG6_LOCAL_ACTION_END_DT6:
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_DT6))
+ return 0;
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_TABLE,
+ ctx->table))
+ return 0;
+ break;
+ default:
+ zlog_err("%s: unsupported seg6local behaviour action=%u",
+ __func__, action);
+ return 0;
+ }
+ nl_attr_nest_end(&req->n, nest);
+ }
+
+ if (!sid_zero(&nh->nh_srv6->seg6_segs)) {
+ char tun_buf[4096];
+ ssize_t tun_len;
+ struct rtattr *nest;
+
+ if (!nl_attr_put16(&req->n, buflen,
+ NHA_ENCAP_TYPE,
+ LWTUNNEL_ENCAP_SEG6))
+ return 0;
+ nest = nl_attr_nest(&req->n, buflen,
+ NHA_ENCAP | NLA_F_NESTED);
+ if (!nest)
+ return 0;
+ tun_len = fill_seg6ipt_encap(tun_buf,
+ sizeof(tun_buf),
+ &nh->nh_srv6->seg6_segs);
+ if (tun_len < 0)
+ return 0;
+ if (!nl_attr_put(&req->n, buflen,
+ SEG6_IPTUNNEL_SRH,
+ tun_buf, tun_len))
+ return 0;
+ nl_attr_nest_end(&req->n, nest);
+ }
+ }
+
nexthop_done:
if (IS_ZEBRA_DEBUG_KERNEL)
diff --git a/zebra/subdir.am b/zebra/subdir.am
index 6fc8ef0df5..70d8c4005d 100644
--- a/zebra/subdir.am
+++ b/zebra/subdir.am
@@ -13,6 +13,7 @@ vtysh_scan += \
zebra/zebra_mlag_vty.c \
zebra/zebra_evpn_mh.c \
zebra/zebra_mpls_vty.c \
+ zebra/zebra_srv6_vty.c \
zebra/zebra_ptm.c \
zebra/zebra_pw.c \
zebra/zebra_routemap.c \
@@ -92,6 +93,8 @@ zebra_zebra_SOURCES = \
zebra/zebra_mpls_openbsd.c \
zebra/zebra_mpls_null.c \
zebra/zebra_mpls_vty.c \
+ zebra/zebra_srv6.c \
+ zebra/zebra_srv6_vty.c \
zebra/zebra_mroute.c \
zebra/zebra_nb.c \
zebra/zebra_nb_config.c \
@@ -128,6 +131,7 @@ clippy_scan += \
zebra/zebra_mlag_vty.c \
zebra/zebra_routemap.c \
zebra/zebra_vty.c \
+ zebra/zebra_srv6_vty.c \
# end
noinst_HEADERS += \
@@ -161,6 +165,8 @@ noinst_HEADERS += \
zebra/zebra_mlag.h \
zebra/zebra_mlag_vty.h \
zebra/zebra_mpls.h \
+ zebra/zebra_srv6.h \
+ zebra/zebra_srv6_vty.h \
zebra/zebra_mroute.h \
zebra/zebra_nb.h \
zebra/zebra_netns_id.h \
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 544bb07fbe..06aaa706dc 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -60,6 +60,7 @@
#include "zebra/connected.h"
#include "zebra/zebra_opaque.h"
#include "zebra/zebra_srte.h"
+#include "zebra/zebra_srv6.h"
DEFINE_MTYPE_STATIC(ZEBRA, OPAQUE, "Opaque Data");
@@ -1747,6 +1748,27 @@ static bool zapi_read_nexthops(struct zserv *client, struct prefix *p,
&api_nh->labels[0]);
}
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL)
+ && api_nh->type != NEXTHOP_TYPE_BLACKHOLE) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: adding seg6local action %s",
+ __func__,
+ seg6local_action2str(
+ api_nh->seg6local_action));
+
+ nexthop_add_srv6_seg6local(nexthop,
+ api_nh->seg6local_action,
+ &api_nh->seg6local_ctx);
+ }
+
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6)
+ && api_nh->type != NEXTHOP_TYPE_BLACKHOLE) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: adding seg6", __func__);
+
+ nexthop_add_srv6_seg6(nexthop, &api_nh->seg6_segs);
+ }
+
if (IS_ZEBRA_DEBUG_RECV) {
labelbuf[0] = '\0';
nhbuf[0] = '\0';
@@ -2612,6 +2634,29 @@ int zsend_client_close_notify(struct zserv *client, struct zserv *closed_client)
return zserv_send_message(client, s);
}
+int zsend_srv6_manager_get_locator_chunk_response(struct zserv *client,
+ vrf_id_t vrf_id,
+ struct srv6_locator *loc)
+{
+ struct srv6_locator_chunk chunk = {};
+ struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+ strlcpy(chunk.locator_name, loc->name, sizeof(chunk.locator_name));
+ chunk.prefix = loc->prefix;
+ chunk.block_bits_length = loc->block_bits_length;
+ chunk.node_bits_length = loc->node_bits_length;
+ chunk.function_bits_length = loc->function_bits_length;
+ chunk.argument_bits_length = loc->argument_bits_length;
+ chunk.keep = 0;
+ chunk.proto = client->proto;
+ chunk.instance = client->instance;
+
+ zclient_create_header(s, ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK, vrf_id);
+ zapi_srv6_locator_chunk_encode(s, &chunk);
+ stream_putw_at(s, 0, stream_get_endp(s));
+ return zserv_send_message(client, s);
+}
+
/* Send response to a table manager connect request to client */
static void zread_table_manager_connect(struct zserv *client,
struct stream *msg, vrf_id_t vrf_id)
@@ -2821,6 +2866,62 @@ static void zread_table_manager_request(ZAPI_HANDLER_ARGS)
}
}
+static void zread_srv6_manager_get_locator_chunk(struct zserv *client,
+ struct stream *msg,
+ vrf_id_t vrf_id)
+{
+ struct stream *s = msg;
+ uint16_t len;
+ char locator_name[SRV6_LOCNAME_SIZE] = {0};
+
+ /* Get data. NOTE(review): len comes from the client and is not bounded by sizeof(locator_name) — confirm STREAM_GET cannot overflow the buffer. */
+ STREAM_GETW(s, len);
+ STREAM_GET(locator_name, s, len);
+
+ /* call hook to get a chunk using wrapper */
+ struct srv6_locator *loc = NULL;
+ srv6_manager_get_locator_chunk_call(&loc, client, locator_name, vrf_id);
+
+stream_failure:
+ return;
+}
+
+static void zread_srv6_manager_release_locator_chunk(struct zserv *client,
+ struct stream *msg,
+ vrf_id_t vrf_id)
+{
+ struct stream *s = msg;
+ uint16_t len;
+ char locator_name[SRV6_LOCNAME_SIZE] = {0};
+
+ /* Get data. NOTE(review): len comes from the client and is not bounded by sizeof(locator_name) — confirm STREAM_GET cannot overflow the buffer. */
+ STREAM_GETW(s, len);
+ STREAM_GET(locator_name, s, len);
+
+ /* call hook to release a chunk using wrapper */
+ srv6_manager_release_locator_chunk_call(client, locator_name, vrf_id);
+
+stream_failure:
+ return;
+}
+
+static void zread_srv6_manager_request(ZAPI_HANDLER_ARGS)
+{
+ switch (hdr->command) {
+ case ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK:
+ zread_srv6_manager_get_locator_chunk(client, msg,
+ zvrf_id(zvrf));
+ break;
+ case ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK:
+ zread_srv6_manager_release_locator_chunk(client, msg,
+ zvrf_id(zvrf));
+ break;
+ default:
+ zlog_err("%s: unknown SRv6 Manager command", __func__);
+ break;
+ }
+}
+
static void zread_pseudowire(ZAPI_HANDLER_ARGS)
{
struct stream *s;
@@ -3580,6 +3681,8 @@ void (*const zserv_handlers[])(ZAPI_HANDLER_ARGS) = {
[ZEBRA_MLAG_CLIENT_REGISTER] = zebra_mlag_client_register,
[ZEBRA_MLAG_CLIENT_UNREGISTER] = zebra_mlag_client_unregister,
[ZEBRA_MLAG_FORWARD_MSG] = zebra_mlag_forward_client_msg,
+ [ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK] = zread_srv6_manager_request,
+ [ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK] = zread_srv6_manager_request,
[ZEBRA_CLIENT_CAPABILITIES] = zread_client_capabilities,
[ZEBRA_NEIGH_DISCOVER] = zread_neigh_discover,
[ZEBRA_NHG_ADD] = zread_nhg_add,
diff --git a/zebra/zapi_msg.h b/zebra/zapi_msg.h
index 0beb3cc100..e991dca4f3 100644
--- a/zebra/zapi_msg.h
+++ b/zebra/zapi_msg.h
@@ -30,6 +30,7 @@
#include "zebra/zebra_pbr.h"
#include "zebra/zebra_errors.h"
#include "zebra/label_manager.h"
+#include "zebra/zebra_srv6.h"
#ifdef __cplusplus
@@ -116,6 +117,13 @@ int zsend_nhg_notify(uint16_t type, uint16_t instance, uint32_t session_id,
extern void zapi_opaque_free(struct opaque *opaque);
+extern int zsend_zebra_srv6_locator_add(struct zserv *client,
+ struct srv6_locator *loc);
+extern int zsend_zebra_srv6_locator_delete(struct zserv *client,
+ struct srv6_locator *loc);
+extern int zsend_srv6_manager_get_locator_chunk_response(struct zserv *client,
+ vrf_id_t vrf_id, struct srv6_locator *loc);
+
#ifdef __cplusplus
}
#endif
diff --git a/zebra/zebra_errors.c b/zebra/zebra_errors.c
index 29b271425d..c3890f7220 100644
--- a/zebra/zebra_errors.c
+++ b/zebra/zebra_errors.c
@@ -786,6 +786,12 @@ static struct log_ref ferr_zebra_err[] = {
.suggestion = "Use different table id's for the VRF's in question"
},
{
+ .code = EC_ZEBRA_SRV6M_UNRELEASED_LOCATOR_CHUNK,
+ .title = "Zebra did not free any srv6 locator chunks",
+ .description = "Zebra's srv6-locator chunk cleanup procedure ran, but no srv6 locator chunks were released.",
+ .suggestion = "Ignore this error.",
+ },
+ {
.code = END_FERR,
}
};
diff --git a/zebra/zebra_errors.h b/zebra/zebra_errors.h
index 200a977a69..540c6dd7d0 100644
--- a/zebra/zebra_errors.h
+++ b/zebra/zebra_errors.h
@@ -135,6 +135,7 @@ enum zebra_log_refs {
EC_ZEBRA_VRF_MISCONFIGURED,
EC_ZEBRA_ES_CREATE,
EC_ZEBRA_GRE_SET_UPDATE,
+ EC_ZEBRA_SRV6M_UNRELEASED_LOCATOR_CHUNK,
};
void zebra_error_init(void);
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index 6b40eae5b7..face0ef3bc 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -1007,6 +1007,8 @@ void nhg_ctx_free(struct nhg_ctx **ctx)
nh = nhg_ctx_get_nh(*ctx);
nexthop_del_labels(nh);
+ nexthop_del_srv6_seg6local(nh);
+ nexthop_del_srv6_seg6(nh);
done:
XFREE(MTYPE_NHG_CTX, *ctx);
@@ -1377,6 +1379,8 @@ static struct nhg_hash_entry *depends_find_singleton(const struct nexthop *nh,
/* The copy may have allocated labels; free them if necessary. */
nexthop_del_labels(&lookup);
+ nexthop_del_srv6_seg6local(&lookup);
+ nexthop_del_srv6_seg6(&lookup);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nh %pNHv => %p (%u)",
diff --git a/zebra/zebra_srv6.c b/zebra/zebra_srv6.c
new file mode 100644
index 0000000000..5664a29682
--- /dev/null
+++ b/zebra/zebra_srv6.c
@@ -0,0 +1,350 @@
+/*
+ * Zebra SRv6 definitions
+ * Copyright (C) 2020 Hiroki Shirokura, LINE Corporation
+ * Copyright (C) 2020 Masakazu Asama
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "network.h"
+#include "prefix.h"
+#include "stream.h"
+#include "srv6.h"
+#include "zebra/debug.h"
+#include "zebra/zapi_msg.h"
+#include "zebra/zserv.h"
+#include "zebra/zebra_router.h"
+#include "zebra/zebra_srv6.h"
+#include "zebra/zebra_errors.h"
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+
+
+DEFINE_MGROUP(SRV6_MGR, "SRv6 Manager");
+DEFINE_MTYPE_STATIC(SRV6_MGR, SRV6M_CHUNK, "SRv6 Manager Chunk");
+
+/* define hooks for the basic API, so that it can be specialized or served
+ * externally
+ */
+
+DEFINE_HOOK(srv6_manager_client_connect,
+ (struct zserv *client, vrf_id_t vrf_id),
+ (client, vrf_id));
+DEFINE_HOOK(srv6_manager_client_disconnect,
+ (struct zserv *client), (client));
+DEFINE_HOOK(srv6_manager_get_chunk,
+ (struct srv6_locator **loc,
+ struct zserv *client,
+ const char *locator_name,
+ vrf_id_t vrf_id),
+ (loc, client, locator_name, vrf_id));
+DEFINE_HOOK(srv6_manager_release_chunk,
+ (struct zserv *client,
+ const char *locator_name,
+ vrf_id_t vrf_id),
+ (client, locator_name, vrf_id));
+
+/* define wrappers to be called in zapi_msg.c (as hooks must be called in
+ * source file where they were defined)
+ */
+
+void srv6_manager_client_connect_call(struct zserv *client, vrf_id_t vrf_id)
+{
+ hook_call(srv6_manager_client_connect, client, vrf_id);
+}
+
+void srv6_manager_get_locator_chunk_call(struct srv6_locator **loc,
+ struct zserv *client,
+ const char *locator_name,
+ vrf_id_t vrf_id)
+{
+ hook_call(srv6_manager_get_chunk, loc, client, locator_name, vrf_id);
+}
+
+void srv6_manager_release_locator_chunk_call(struct zserv *client,
+ const char *locator_name,
+ vrf_id_t vrf_id)
+{
+ hook_call(srv6_manager_release_chunk, client, locator_name, vrf_id);
+}
+
+int srv6_manager_client_disconnect_cb(struct zserv *client)
+{
+ hook_call(srv6_manager_client_disconnect, client);
+ return 0;
+}
+
+static int zebra_srv6_cleanup(struct zserv *client)
+{
+ return 0;
+}
+
+void zebra_srv6_locator_add(struct srv6_locator *locator)
+{
+ struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+ struct srv6_locator *tmp;
+
+ tmp = zebra_srv6_locator_lookup(locator->name);
+ if (!tmp)
+ listnode_add(srv6->locators, locator);
+}
+
+void zebra_srv6_locator_delete(struct srv6_locator *locator)
+{
+ struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+
+ listnode_delete(srv6->locators, locator);
+}
+
+struct srv6_locator *zebra_srv6_locator_lookup(const char *name)
+{
+ struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+ struct srv6_locator *locator;
+ struct listnode *node;
+
+ for (ALL_LIST_ELEMENTS_RO(srv6->locators, node, locator))
+ if (!strncmp(name, locator->name, SRV6_LOCNAME_SIZE))
+ return locator;
+ return NULL;
+}
+
+struct zebra_srv6 *zebra_srv6_get_default(void)
+{
+ static struct zebra_srv6 srv6;
+ static bool first_execution = true;
+
+ if (first_execution) {
+ first_execution = false;
+ srv6.locators = list_new();
+ }
+ return &srv6;
+}
+
+/**
+ * Core function, assigns srv6-locator chunks
+ *
+ * It first searches through the list to check if there's one available
+ * (previously released). Otherwise it creates and assigns a new one
+ *
+ * @param proto Daemon protocol of client, to identify the owner
+ * @param instance Instance, to identify the owner
+ * @param session_id SessionID of client
+ * @param locator_name Name of SRv6-locator
+ * @return Pointer to the assigned srv6-locator chunk,
+ * or NULL if the request could not be satisfied
+ */
+static struct srv6_locator *
+assign_srv6_locator_chunk(uint8_t proto,
+ uint16_t instance,
+ uint32_t session_id,
+ const char *locator_name)
+{
+ bool chunk_found = false;
+ struct listnode *node = NULL;
+ struct srv6_locator *loc = NULL;
+ struct srv6_locator_chunk *chunk = NULL;
+
+ loc = zebra_srv6_locator_lookup(locator_name);
+ if (!loc) {
+ zlog_info("%s: locator %s was not found",
+ __func__, locator_name);
+
+ loc = srv6_locator_alloc(locator_name);
+ if (!loc) {
+ zlog_info("%s: locator %s could not be allocated",
+ __func__, locator_name);
+ return NULL;
+ }
+
+ loc->status_up = false;
+ chunk = srv6_locator_chunk_alloc();
+ chunk->proto = 0;
+ listnode_add(loc->chunks, chunk);
+ zebra_srv6_locator_add(loc);
+ }
+
+ for (ALL_LIST_ELEMENTS_RO((struct list *)loc->chunks, node, chunk)) {
+ if (chunk->proto != 0 && chunk->proto != proto)
+ continue;
+ chunk_found = true;
+ break;
+ }
+
+ if (!chunk_found) {
+ zlog_info("%s: locator is already owned", __func__);
+ return NULL;
+ }
+
+ chunk->proto = proto;
+ return loc;
+}
+
+static int zebra_srv6_manager_get_locator_chunk(struct srv6_locator **loc,
+ struct zserv *client,
+ const char *locator_name,
+ vrf_id_t vrf_id)
+{
+ int ret = 0;
+
+ *loc = assign_srv6_locator_chunk(client->proto, client->instance,
+ client->session_id, locator_name);
+
+ if (!*loc)
+ zlog_err("Unable to assign locator chunk to %s instance %u",
+ zebra_route_string(client->proto), client->instance);
+ else if (IS_ZEBRA_DEBUG_PACKET)
+ zlog_info("Assigned locator chunk %s to %s instance %u",
+ (*loc)->name, zebra_route_string(client->proto),
+ client->instance);
+
+ if (*loc && (*loc)->status_up)
+ ret = zsend_srv6_manager_get_locator_chunk_response(client,
+ vrf_id,
+ *loc);
+ return ret;
+}
+
+/**
+ * Core function, release no longer used srv6-locator chunks
+ *
+ * @param proto Daemon protocol of client, to identify the owner
+ * @param instance Instance, to identify the owner
+ * @param session_id Zclient session ID, to identify the zclient session
+ * @param locator_name SRv6-locator name, to identify the actual locator
+ * @return 0 on success, -1 otherwise
+ */
+static int release_srv6_locator_chunk(uint8_t proto, uint16_t instance,
+ uint32_t session_id,
+ const char *locator_name)
+{
+ int ret = -1;
+ struct listnode *node;
+ struct srv6_locator_chunk *chunk;
+ struct srv6_locator *loc = NULL;
+
+ loc = zebra_srv6_locator_lookup(locator_name);
+ if (!loc)
+ return -1;
+
+ if (IS_ZEBRA_DEBUG_PACKET)
+ zlog_debug("%s: Releasing srv6-locator on %s", __func__,
+ locator_name);
+
+ for (ALL_LIST_ELEMENTS_RO((struct list *)loc->chunks, node, chunk)) {
+ if (chunk->proto != proto ||
+ chunk->instance != instance ||
+ chunk->session_id != session_id)
+ continue;
+ chunk->proto = NO_PROTO;
+ chunk->instance = 0;
+ chunk->session_id = 0;
+ chunk->keep = 0;
+ ret = 0;
+ break;
+ }
+
+ if (ret != 0)
+ flog_err(EC_ZEBRA_SRV6M_UNRELEASED_LOCATOR_CHUNK,
+ "%s: SRv6 locator chunk not released", __func__);
+
+ return ret;
+}
+
+static int zebra_srv6_manager_release_locator_chunk(struct zserv *client,
+ const char *locator_name,
+ vrf_id_t vrf_id)
+{
+ if (vrf_id != VRF_DEFAULT) {
+ zlog_err("SRv6 locator doesn't support vrf");
+ return -1;
+ }
+
+ return release_srv6_locator_chunk(client->proto, client->instance,
+ client->session_id, locator_name);
+}
+
+/**
+ * Release srv6-locator chunks from a client.
+ *
+ * Called on client disconnection or reconnection. It only releases chunks
+ * with empty keep value.
+ *
+ * @param client Zserv client (identified by proto, instance and
+ * session_id) whose unkept chunks are released
+ * @return Number of chunks released
+ */
+int release_daemon_srv6_locator_chunks(struct zserv *client)
+{
+ int ret;
+ int count = 0;
+ struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+ struct listnode *loc_node;
+ struct listnode *chunk_node;
+ struct srv6_locator *loc;
+ struct srv6_locator_chunk *chunk;
+
+ if (IS_ZEBRA_DEBUG_PACKET)
+ zlog_debug("%s: Releasing chunks for client proto %s, instance %d, session %u",
+ __func__, zebra_route_string(client->proto),
+ client->instance, client->session_id);
+
+ for (ALL_LIST_ELEMENTS_RO(srv6->locators, loc_node, loc)) {
+ for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node, chunk)) {
+ if (chunk->proto == client->proto &&
+ chunk->instance == client->instance &&
+ chunk->session_id == client->session_id &&
+ chunk->keep == 0) {
+ ret = release_srv6_locator_chunk(
+ chunk->proto, chunk->instance,
+ chunk->session_id, loc->name);
+ if (ret == 0)
+ count++;
+ }
+ }
+ }
+
+ if (IS_ZEBRA_DEBUG_PACKET)
+ zlog_debug("%s: Released %d srv6-locator chunks",
+ __func__, count);
+
+ return count;
+}
+
+void zebra_srv6_init(void)
+{
+ hook_register(zserv_client_close, zebra_srv6_cleanup);
+ hook_register(srv6_manager_get_chunk,
+ zebra_srv6_manager_get_locator_chunk);
+ hook_register(srv6_manager_release_chunk,
+ zebra_srv6_manager_release_locator_chunk);
+}
+
+bool zebra_srv6_is_enable(void)
+{
+ struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+
+ return listcount(srv6->locators);
+}
diff --git a/zebra/zebra_srv6.h b/zebra/zebra_srv6.h
new file mode 100644
index 0000000000..84fcc305bc
--- /dev/null
+++ b/zebra/zebra_srv6.h
@@ -0,0 +1,80 @@
+/*
+ * Zebra SRv6 definitions
+ * Copyright (C) 2020 Hiroki Shirokura, LINE Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ZEBRA_SRV6_H
+#define _ZEBRA_SRV6_H
+
+#include <zebra.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+
+#include "qobj.h"
+#include "prefix.h"
+#include <pthread.h>
+#include <plist.h>
+
+/* SRv6 instance structure (one global instance, see
+ * zebra_srv6_get_default()). */
+struct zebra_srv6 {
+	struct list *locators;	/* list of struct srv6_locator * */
+};
+
+/* declare hooks for the basic API, so that it can be specialized or served
+ * externally. Also declare a hook when those functions have been registered,
+ * so that any external module wanting to replace those can react
+ */
+
+DECLARE_HOOK(srv6_manager_client_connect,
+	     (struct zserv *client, vrf_id_t vrf_id),
+	     (client, vrf_id));
+DECLARE_HOOK(srv6_manager_client_disconnect,
+	     (struct zserv *client), (client));
+/* NOTE: the argument-name list passed to DECLARE_HOOK must mirror the
+ * parameter list above; the original passed (mc, client, keep, size, base,
+ * vrf_id), which does not correspond to these parameters and cannot expand
+ * correctly. */
+DECLARE_HOOK(srv6_manager_get_chunk,
+	     (struct srv6_locator **loc,
+	      struct zserv *client,
+	      const char *locator_name,
+	      vrf_id_t vrf_id),
+	     (loc, client, locator_name, vrf_id));
+DECLARE_HOOK(srv6_manager_release_chunk,
+	     (struct zserv *client,
+	      const char *locator_name,
+	      vrf_id_t vrf_id),
+	     (client, locator_name, vrf_id));
+
+
+/* Locator registry kept by zebra: add/delete/lookup a locator by name. */
+extern void zebra_srv6_locator_add(struct srv6_locator *locator);
+extern void zebra_srv6_locator_delete(struct srv6_locator *locator);
+extern struct srv6_locator *zebra_srv6_locator_lookup(const char *name);
+
+/* SRv6 manager lifecycle and state accessors. */
+extern void zebra_srv6_init(void);
+extern struct zebra_srv6 *zebra_srv6_get_default(void);
+extern bool zebra_srv6_is_enable(void);
+
+/* Callers of the hooks declared above (presumably defined in zebra_srv6.c
+ * — confirm against the implementation). */
+extern void srv6_manager_client_connect_call(struct zserv *client,
+					     vrf_id_t vrf_id);
+extern void srv6_manager_get_locator_chunk_call(struct srv6_locator **loc,
+						struct zserv *client,
+						const char *locator_name,
+						vrf_id_t vrf_id);
+extern void srv6_manager_release_locator_chunk_call(struct zserv *client,
+						    const char *locator_name,
+						    vrf_id_t vrf_id);
+extern int srv6_manager_client_disconnect_cb(struct zserv *client);
+extern int release_daemon_srv6_locator_chunks(struct zserv *client);
+
+#endif /* _ZEBRA_SRV6_H */
diff --git a/zebra/zebra_srv6_vty.c b/zebra/zebra_srv6_vty.c
new file mode 100644
index 0000000000..97935f126e
--- /dev/null
+++ b/zebra/zebra_srv6_vty.c
@@ -0,0 +1,356 @@
+/*
+ * Zebra SRv6 VTY functions
+ * Copyright (C) 2020 Hiroki Shirokura, LINE Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "memory.h"
+#include "if.h"
+#include "prefix.h"
+#include "command.h"
+#include "table.h"
+#include "rib.h"
+#include "nexthop.h"
+#include "vrf.h"
+#include "srv6.h"
+#include "lib/json.h"
+
+#include "zebra/zserv.h"
+#include "zebra/zebra_router.h"
+#include "zebra/zebra_vrf.h"
+#include "zebra/zebra_srv6.h"
+#include "zebra/zebra_srv6_vty.h"
+#include "zebra/zebra_rnh.h"
+#include "zebra/redistribute.h"
+#include "zebra/zebra_routemap.h"
+#include "zebra/zebra_dplane.h"
+
+#ifndef VTYSH_EXTRACT_PL
+#include "zebra/zebra_srv6_vty_clippy.c"
+#endif
+
+static int zebra_sr_config(struct vty *vty);
+
+/* Top-level "segment-routing" config node; owns the config writer for
+ * the whole SR section. */
+static struct cmd_node sr_node = {
+	.name = "sr",
+	.node = SEGMENT_ROUTING_NODE,
+	.parent_node = CONFIG_NODE,
+	.prompt = "%s(config-sr)# ",
+	.config_write = zebra_sr_config,
+};
+
+/* "srv6" subnode; no config_write of its own — the parent sr_node's
+ * zebra_sr_config() emits the srv6 lines. */
+static struct cmd_node srv6_node = {
+	.name = "srv6",
+	.node = SRV6_NODE,
+	.parent_node = SEGMENT_ROUTING_NODE,
+	.prompt = "%s(config-srv6)# ",
+
+};
+
+/* "locators" container subnode under srv6. */
+static struct cmd_node srv6_locs_node = {
+	.name = "srv6-locators",
+	.node = SRV6_LOCS_NODE,
+	.parent_node = SRV6_NODE,
+	.prompt = "%s(config-srv6-locators)# ",
+};
+
+/* Per-locator subnode ("locator WORD"). */
+static struct cmd_node srv6_loc_node = {
+	.name = "srv6-locator",
+	.node = SRV6_LOC_NODE,
+	.parent_node = SRV6_LOCS_NODE,
+	.prompt = "%s(config-srv6-locator)# "
+};
+
+/* "show segment-routing srv6 locator [json]": summary table (or JSON
+ * array) of all configured locators. */
+DEFUN (show_srv6_locator,
+       show_srv6_locator_cmd,
+       "show segment-routing srv6 locator [json]",
+       SHOW_STR
+       "Segment Routing\n"
+       "Segment Routing SRv6\n"
+       "Locator Information\n"
+       JSON_STR)
+{
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+	struct srv6_locator *loc;
+	struct listnode *n;
+	char prefix_str[256];
+	int idx;
+
+	if (use_json(argc, argv)) {
+		json_object *json_root = json_object_new_object();
+		json_object *json_locs = json_object_new_array();
+
+		/* json_locs is owned by json_root from here on. */
+		json_object_object_add(json_root, "locators", json_locs);
+		for (ALL_LIST_ELEMENTS_RO(srv6->locators, n, loc)) {
+			json_object *jloc = srv6_locator_json(loc);
+
+			if (jloc)
+				json_object_array_add(json_locs, jloc);
+		}
+
+		vty_out(vty, "%s\n", json_object_to_json_string_ext(json_root,
+			JSON_C_TO_STRING_PRETTY));
+		json_object_free(json_root);
+		return CMD_SUCCESS;
+	}
+
+	vty_out(vty, "Locator:\n");
+	vty_out(vty, "Name ID Prefix Status\n");
+	vty_out(vty, "-------------------- ------- ------------------------ -------\n");
+
+	idx = 1;
+	for (ALL_LIST_ELEMENTS_RO(srv6->locators, n, loc)) {
+		prefix2str(&loc->prefix, prefix_str, sizeof(prefix_str));
+		vty_out(vty, "%-20s %7d %-24s %s\n", loc->name, idx,
+			prefix_str, loc->status_up ? "Up" : "Down");
+		idx++;
+	}
+	vty_out(vty, "\n");
+
+	return CMD_SUCCESS;
+}
+
+/* "show segment-routing srv6 locator NAME detail [json]": dump one
+ * locator's prefix, function length, and allocated chunks.  A name that
+ * matches no locator prints nothing and still returns success. */
+DEFUN (show_srv6_locator_detail,
+       show_srv6_locator_detail_cmd,
+       "show segment-routing srv6 locator NAME detail [json]",
+       SHOW_STR
+       "Segment Routing\n"
+       "Segment Routing SRv6\n"
+       "Locator Information\n"
+       "Locator Name\n"
+       "Detailed information\n"
+       JSON_STR)
+{
+	const bool uj = use_json(argc, argv);
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+	struct srv6_locator *locator;
+	struct listnode *node;
+	char str[256];
+	const char *locator_name = argv[4]->arg;
+
+	if (uj) {
+		/* TODO: JSON output is not implemented for the detail view */
+		vty_out(vty, "JSON format isn't supported\n");
+		return CMD_WARNING;
+	}
+
+	for (ALL_LIST_ELEMENTS_RO(srv6->locators, node, locator)) {
+		/* distinct cursor name: the original shadowed 'node' here */
+		struct listnode *chunk_node;
+		struct srv6_locator_chunk *chunk;
+
+		if (strcmp(locator->name, locator_name) != 0)
+			continue;
+
+		prefix2str(&locator->prefix, str, sizeof(str));
+		vty_out(vty, "Name: %s\n", locator->name);
+		vty_out(vty, "Prefix: %s\n", str);
+		vty_out(vty, "Function-Bit-Len: %u\n",
+			locator->function_bits_length);
+
+		vty_out(vty, "Chunks:\n");
+		for (ALL_LIST_ELEMENTS_RO(locator->chunks, chunk_node,
+					  chunk)) {
+			prefix2str(&chunk->prefix, str, sizeof(str));
+			vty_out(vty, "- prefix: %s, owner: %s\n", str,
+				zebra_route_string(chunk->proto));
+		}
+	}
+
+	return CMD_SUCCESS;
+}
+
+/* "segment-routing": enter the top-level SR configuration node. */
+DEFUN_NOSH (segment_routing,
+       segment_routing_cmd,
+       "segment-routing",
+       "Segment Routing\n")
+{
+	vty->node = SEGMENT_ROUTING_NODE;
+	return CMD_SUCCESS;
+}
+
+/* "srv6": enter the SRv6 configuration node under segment-routing. */
+DEFUN_NOSH (srv6,
+       srv6_cmd,
+       "srv6",
+       "Segment Routing SRv6\n")
+{
+	vty->node = SRV6_NODE;
+	return CMD_SUCCESS;
+}
+
+/* "locators": enter the locator-list configuration node under srv6. */
+DEFUN_NOSH (srv6_locators,
+       srv6_locators_cmd,
+       "locators",
+       "Segment Routing SRv6 locators\n")
+{
+	vty->node = SRV6_LOCS_NODE;
+	return CMD_SUCCESS;
+}
+
+/* "locator WORD": enter (creating on first use) a named SRv6 locator
+ * context; an existing locator is re-entered and marked up again. */
+DEFUN_NOSH (srv6_locator,
+       srv6_locator_cmd,
+       "locator WORD",
+       "Segment Routing SRv6 locator\n"
+       "Specify locator-name\n")
+{
+	struct srv6_locator *locator = NULL;
+
+	locator = zebra_srv6_locator_lookup(argv[1]->arg);
+	if (locator) {
+		VTY_PUSH_CONTEXT(SRV6_LOC_NODE, locator);
+		locator->status_up = true;
+		return CMD_SUCCESS;
+	}
+
+	locator = srv6_locator_alloc(argv[1]->arg);
+	if (!locator) {
+		vty_out(vty, "%% Alloc failed\n");
+		return CMD_WARNING_CONFIG_FAILED;
+	}
+	locator->status_up = true;
+
+	/* VTY_PUSH_CONTEXT already switches vty->node to SRV6_LOC_NODE;
+	 * the explicit vty->node assignment the original carried after it
+	 * was redundant and has been dropped. */
+	VTY_PUSH_CONTEXT(SRV6_LOC_NODE, locator);
+	return CMD_SUCCESS;
+}
+
+/*
+ * "prefix X:X::X:X/M [func-bits (16-64)]"
+ * Set the locator's prefix and derive the SID structure from it.
+ * func_bit_len is 0 when the optional argument is omitted (DEFPY default).
+ */
+DEFPY (locator_prefix,
+       locator_prefix_cmd,
+       "prefix X:X::X:X/M$prefix [func-bits (16-64)$func_bit_len]",
+       "Configure SRv6 locator prefix\n"
+       "Specify SRv6 locator prefix\n"
+       "Configure SRv6 locator function length in bits\n"
+       "Specify SRv6 locator function length in bits\n")
+{
+	VTY_DECLVAR_CONTEXT(srv6_locator, locator);
+	struct srv6_locator_chunk *chunk = NULL;
+	struct listnode *node = NULL;
+
+	/*
+	 * TODO(slankdev): please support variable node-bit-length.
+	 * In draft-ietf-bess-srv6-services-05#section-3.2.1.
+	 * Locator block length and Locator node length are defined.
+	 * Which are defined as "locator-len == block-len + node-len".
+	 * In current implementation, node bits length is hardcoded as 24.
+	 * It should be supported various val.
+	 *
+	 * Cisco IOS-XR support only following pattern.
+	 *  (1) The locator length should be 64-bits long.
+	 *  (2) The SID block portion (MSBs) cannot exceed 40 bits.
+	 *      If this value is less than 40 bits,
+	 *      user should use a pattern of zeros as a filler.
+	 *  (3) The Node Id portion (LSBs) cannot exceed 24 bits.
+	 */
+
+	/* Node bits are hardcoded to 24 below; a shorter prefix would make
+	 * the block_bits_length subtraction underflow, so reject it. */
+	if (prefix->prefixlen < 24) {
+		vty_out(vty, "%% Invalid prefix-length, expected >= 24\n");
+		return CMD_WARNING_CONFIG_FAILED;
+	}
+
+	locator->prefix = *prefix;
+	locator->block_bits_length = prefix->prefixlen - 24;
+	locator->node_bits_length = 24;
+	locator->function_bits_length = func_bit_len;
+	locator->argument_bits_length = 0;
+
+	if (list_isempty(locator->chunks)) {
+		/* First configuration: seed one unowned chunk covering the
+		 * whole locator prefix. */
+		chunk = srv6_locator_chunk_alloc();
+		chunk->prefix = *prefix;
+		chunk->proto = 0;
+		listnode_add(locator->chunks, chunk);
+	} else {
+		/* Re-configuration: fill in chunks whose prefix is still
+		 * all-zero and notify the owning daemons. */
+		for (ALL_LIST_ELEMENTS_RO(locator->chunks, node, chunk)) {
+			uint8_t zero[16] = {0};
+
+			if (memcmp(&chunk->prefix.prefix, zero, 16) == 0) {
+				struct zserv *client;
+				struct listnode *client_node;
+
+				chunk->prefix = *prefix;
+				for (ALL_LIST_ELEMENTS_RO(zrouter.client_list,
+							  client_node,
+							  client)) {
+					struct srv6_locator *tmp;
+
+					if (client->proto != chunk->proto)
+						continue;
+
+					srv6_manager_get_locator_chunk_call(
+						&tmp, client,
+						locator->name,
+						VRF_DEFAULT);
+				}
+			}
+		}
+	}
+
+	zebra_srv6_locator_add(locator);
+	return CMD_SUCCESS;
+}
+
+/* Emit the "segment-routing" section of the running configuration;
+ * writes nothing beyond the leading separator when SRv6 is disabled. */
+static int zebra_sr_config(struct vty *vty)
+{
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+	struct srv6_locator *loc;
+	struct listnode *ln;
+	char addr[256];
+
+	vty_out(vty, "!\n");
+	if (!zebra_srv6_is_enable())
+		return 0;
+
+	vty_out(vty, "segment-routing\n");
+	vty_out(vty, " srv6\n");
+	vty_out(vty, " locators\n");
+	for (ALL_LIST_ELEMENTS_RO(srv6->locators, ln, loc)) {
+		inet_ntop(AF_INET6, &loc->prefix.prefix, addr, sizeof(addr));
+		vty_out(vty, " locator %s\n", loc->name);
+		vty_out(vty, " prefix %s/%u\n", addr, loc->prefix.prefixlen);
+		vty_out(vty, " !\n");
+	}
+	vty_out(vty, " !\n");
+	vty_out(vty, " !\n");
+	vty_out(vty, "!\n");
+
+	return 0;
+}
+
+/* Register all SRv6 CLI nodes and commands with the command subsystem. */
+void zebra_srv6_vty_init(void)
+{
+	/* Install nodes and its default commands */
+	install_node(&sr_node);
+	install_node(&srv6_node);
+	install_node(&srv6_locs_node);
+	install_node(&srv6_loc_node);
+	install_default(SEGMENT_ROUTING_NODE);
+	install_default(SRV6_NODE);
+	install_default(SRV6_LOCS_NODE);
+	install_default(SRV6_LOC_NODE);
+
+	/* Command for change node */
+	install_element(CONFIG_NODE, &segment_routing_cmd);
+	install_element(SEGMENT_ROUTING_NODE, &srv6_cmd);
+	install_element(SRV6_NODE, &srv6_locators_cmd);
+	install_element(SRV6_LOCS_NODE, &srv6_locator_cmd);
+
+	/* Command for configuration */
+	install_element(SRV6_LOC_NODE, &locator_prefix_cmd);
+
+	/* Command for operation */
+	install_element(VIEW_NODE, &show_srv6_locator_cmd);
+	install_element(VIEW_NODE, &show_srv6_locator_detail_cmd);
+}
diff --git a/zebra/zebra_srv6_vty.h b/zebra/zebra_srv6_vty.h
new file mode 100644
index 0000000000..42d6aefa9a
--- /dev/null
+++ b/zebra/zebra_srv6_vty.h
@@ -0,0 +1,25 @@
+/*
+ * Zebra SRv6 VTY functions
+ * Copyright (C) 2020 Hiroki Shirokura, LINE Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ZEBRA_SRV6_VTY_H
+#define _ZEBRA_SRV6_VTY_H
+
+extern void zebra_srv6_vty_init(void);
+
+#endif /* _ZEBRA_SRV6_VTY_H */
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 2c2c75c419..51f19a3c03 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -653,6 +653,18 @@ static void show_route_nexthop_helper(struct vty *vty,
sizeof(buf), 1));
}
+ if (nexthop->nh_srv6) {
+ seg6local_context2str(buf, sizeof(buf),
+ &nexthop->nh_srv6->seg6local_ctx,
+ nexthop->nh_srv6->seg6local_action);
+ vty_out(vty, ", seg6local %s %s", seg6local_action2str(
+ nexthop->nh_srv6->seg6local_action), buf);
+
+ inet_ntop(AF_INET6, &nexthop->nh_srv6->seg6_segs, buf,
+ sizeof(buf));
+ vty_out(vty, ", seg6 %s", buf);
+ }
+
if (nexthop->weight)
vty_out(vty, ", weight %u", nexthop->weight);
@@ -675,6 +687,8 @@ static void show_nexthop_json_helper(json_object *json_nexthop,
char buf[SRCDEST2STR_BUFFER];
json_object *json_labels = NULL;
json_object *json_backups = NULL;
+ json_object *json_seg6local = NULL;
+ json_object *json_seg6 = NULL;
int i;
json_object_int_add(json_nexthop, "flags",
@@ -852,6 +866,21 @@ static void show_nexthop_json_helper(json_object *json_nexthop,
if (nexthop->srte_color)
json_object_int_add(json_nexthop, "srteColor",
nexthop->srte_color);
+
+ if (nexthop->nh_srv6) {
+ json_seg6local = json_object_new_object();
+ json_object_string_add(
+ json_seg6local, "action", seg6local_action2str(
+ nexthop->nh_srv6->seg6local_action));
+ json_object_object_add(json_nexthop, "seg6local",
+ json_seg6local);
+
+ json_seg6 = json_object_new_object();
+ inet_ntop(AF_INET6, &nexthop->nh_srv6->seg6_segs, buf,
+ sizeof(buf));
+ json_object_string_add(json_seg6, "segs", buf);
+ json_object_object_add(json_nexthop, "seg6", json_seg6);
+ }
}
static void vty_show_ip_route(struct vty *vty, struct route_node *rn,