summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--babeld/babel_main.c5
-rw-r--r--bgpd/Makefile.am4
-rw-r--r--bgpd/bgp_attr.c99
-rw-r--r--bgpd/bgp_attr.h6
-rw-r--r--bgpd/bgp_attr_evpn.c14
-rw-r--r--bgpd/bgp_debug.c53
-rw-r--r--bgpd/bgp_debug.h4
-rw-r--r--bgpd/bgp_ecommunity.c86
-rw-r--r--bgpd/bgp_ecommunity.h6
-rw-r--r--bgpd/bgp_evpn.c185
-rw-r--r--bgpd/bgp_evpn_private.h48
-rw-r--r--bgpd/bgp_flowspec_util.c334
-rw-r--r--bgpd/bgp_flowspec_util.h3
-rw-r--r--bgpd/bgp_label.h1
-rw-r--r--bgpd/bgp_mplsvpn.c310
-rw-r--r--bgpd/bgp_mplsvpn.h12
-rw-r--r--bgpd/bgp_nexthop.h1
-rw-r--r--bgpd/bgp_pbr.c1140
-rw-r--r--bgpd/bgp_pbr.h256
-rw-r--r--bgpd/bgp_rd.c12
-rw-r--r--bgpd/bgp_rd.h2
-rw-r--r--bgpd/bgp_route.c205
-rw-r--r--bgpd/bgp_route.h10
-rw-r--r--bgpd/bgp_routemap.c2
-rw-r--r--bgpd/bgp_vty.c645
-rw-r--r--bgpd/bgp_zebra.c746
-rw-r--r--bgpd/bgp_zebra.h17
-rw-r--r--bgpd/bgpd.c152
-rw-r--r--bgpd/bgpd.h78
-rw-r--r--bgpd/rfapi/rfapi_vty.c10
-rwxr-xr-xconfigure.ac8
-rw-r--r--doc/Makefile.am6
-rw-r--r--doc/manpages/conf.py1
-rw-r--r--doc/manpages/index.rst1
-rw-r--r--doc/manpages/mtracebis.rst19
-rw-r--r--doc/manpages/sharpd.rst38
-rw-r--r--doc/user/bgp.rst25
-rw-r--r--doc/user/index.rst1
-rw-r--r--doc/user/installation.rst8
-rw-r--r--doc/user/ospf6d.rst2
-rw-r--r--doc/user/sharp.rst67
-rw-r--r--doc/user/snmp.rst9
-rw-r--r--doc/user/zebra.rst152
-rw-r--r--eigrpd/eigrp_hello.c2
-rw-r--r--eigrpd/eigrp_macros.h2
-rw-r--r--eigrpd/eigrp_packet.c3
-rw-r--r--eigrpd/eigrp_query.c5
-rw-r--r--eigrpd/eigrp_reply.c2
-rw-r--r--eigrpd/eigrp_siaquery.c2
-rw-r--r--eigrpd/eigrp_siareply.c2
-rw-r--r--eigrpd/eigrp_topology.c29
-rw-r--r--eigrpd/eigrp_update.c20
-rw-r--r--isisd/dict.c5
-rw-r--r--isisd/isis_redist.c14
-rw-r--r--lib/command.c42
-rw-r--r--lib/command.h1
-rw-r--r--lib/command_graph.c92
-rw-r--r--lib/command_graph.h16
-rw-r--r--lib/frr_zmq.c1
-rw-r--r--lib/grammar_sandbox.c120
-rw-r--r--lib/graph.c71
-rw-r--r--lib/graph.h53
-rw-r--r--lib/nexthop_group.c7
-rw-r--r--lib/plist.c48
-rw-r--r--lib/plist_int.h2
-rw-r--r--lib/prefix.c128
-rw-r--r--lib/prefix.h91
-rw-r--r--lib/routemap.c14
-rw-r--r--lib/routemap.h4
-rw-r--r--lib/stream.c4
-rw-r--r--lib/subdir.am3
-rw-r--r--lib/table.h1
-rw-r--r--lib/thread.c112
-rw-r--r--lib/thread.h15
-rw-r--r--lib/zclient.c36
-rw-r--r--lib/zclient.h7
-rw-r--r--ospf6d/ospf6_abr.c74
-rw-r--r--ospf6d/ospf6_area.h3
-rw-r--r--ospf6d/ospf6_asbr.c46
-rw-r--r--ospf6d/ospf6_intra.c495
-rw-r--r--ospf6d/ospf6_route.c16
-rw-r--r--ospf6d/ospf6_route.h3
-rw-r--r--ospf6d/ospf6_top.c11
-rw-r--r--ospf6d/ospf6d.c2
-rw-r--r--ospfd/ospf_zebra.c52
-rw-r--r--pbrd/pbr_nht.c14
-rw-r--r--pbrd/pbr_vty.c9
-rw-r--r--pbrd/pbr_zebra.c14
-rw-r--r--pimd/mtracebis.c43
-rw-r--r--pimd/pim_iface.c23
-rw-r--r--pimd/pim_iface.h4
-rw-r--r--pimd/pim_igmp_mtrace.c225
-rw-r--r--pimd/pim_igmp_mtrace.h2
-rw-r--r--pimd/pim_msdp.c11
-rw-r--r--pimd/pim_pim.c4
-rw-r--r--redhat/frr.service2
-rw-r--r--snapcraft/defaults/babeld.conf.default0
-rw-r--r--snapcraft/defaults/eigrpd.conf.default0
-rw-r--r--snapcraft/defaults/pbrd.conf.default0
-rw-r--r--snapcraft/scripts/Makefile3
-rw-r--r--snapcraft/scripts/babeld-service13
-rw-r--r--snapcraft/scripts/eigrpd-service13
-rw-r--r--snapcraft/scripts/pbrd-service13
-rw-r--r--snapcraft/snapcraft.yaml.in50
-rw-r--r--tests/.gitignore2
-rw-r--r--tests/Makefile.am4
-rw-r--r--tests/bgpd/test_mp_attr.c42
-rw-r--r--tests/lib/test_graph.c77
-rw-r--r--tests/lib/test_graph.py4
-rw-r--r--tests/lib/test_graph.refout64
-rw-r--r--tools/etc/iproute2/rt_protos.d/frr.conf3
-rwxr-xr-xtools/frr13
-rw-r--r--tools/frr.service2
-rw-r--r--vtysh/Makefile.am1
-rw-r--r--vtysh/vtysh.c49
-rw-r--r--zebra/kernel_socket.c2
-rw-r--r--zebra/label_manager.c67
-rw-r--r--zebra/label_manager.h4
-rw-r--r--zebra/main.c5
-rw-r--r--zebra/redistribute.c26
-rw-r--r--zebra/redistribute.h4
-rw-r--r--zebra/router-id.c1
-rw-r--r--zebra/rt_netlink.c31
-rw-r--r--zebra/rt_netlink.h1
-rw-r--r--zebra/rtadv.c2
-rw-r--r--zebra/subdir.am2
-rw-r--r--zebra/table_manager.c19
-rw-r--r--zebra/table_manager.h5
-rw-r--r--zebra/zapi_msg.c2961
-rw-r--r--zebra/zapi_msg.h88
-rw-r--r--zebra/zebra_mpls.c6
-rw-r--r--zebra/zebra_mpls.h6
-rw-r--r--zebra/zebra_ns.h3
-rw-r--r--zebra/zebra_pbr.c1
-rw-r--r--zebra/zebra_ptm.c19
-rw-r--r--zebra/zebra_ptm.h2
-rw-r--r--zebra/zebra_ptm_redistribute.c1
-rw-r--r--zebra/zebra_pw.c7
-rw-r--r--zebra/zebra_pw.h8
-rw-r--r--zebra/zebra_rib.c2
-rw-r--r--zebra/zebra_rnh.c92
-rw-r--r--zebra/zebra_rnh.h4
-rw-r--r--zebra/zebra_routemap.h2
-rw-r--r--zebra/zebra_vrf.c2
-rw-r--r--zebra/zebra_vty.c7
-rw-r--r--zebra/zserv.c3062
-rw-r--r--zebra/zserv.h100
147 files changed, 9169 insertions, 4516 deletions
diff --git a/babeld/babel_main.c b/babeld/babel_main.c
index 48f6994d82..9ea123c8f9 100644
--- a/babeld/babel_main.c
+++ b/babeld/babel_main.c
@@ -73,7 +73,6 @@ int protocol_port; /* babel's port */
int protocol_socket = -1; /* socket: communicate with others babeld */
static char babel_config_default[] = SYSCONFDIR BABEL_DEFAULT_CONFIG;
-static char *babel_config_file = NULL;
static char *babel_vty_addr = NULL;
static int babel_vty_port = BABEL_VTY_PORT;
@@ -198,7 +197,7 @@ main(int argc, char **argv)
babelz_zebra_init ();
/* Get zebra configuration file. */
- vty_read_config (babel_config_file, babel_config_default);
+ vty_read_config (babeld_di.config_file, babel_config_default);
/* init buffer */
rc = resize_receive_buffer(1500);
@@ -389,7 +388,7 @@ show_babel_main_configuration (struct vty *vty)
"id = %s\n"
"kernel_metric = %d\n",
state_file,
- babel_config_file ? babel_config_file : babel_config_default,
+ babeld_di.config_file ? babeld_di.config_file : babel_config_default,
format_address(protocol_group),
protocol_port,
babel_vty_addr ? babel_vty_addr : "None",
diff --git a/bgpd/Makefile.am b/bgpd/Makefile.am
index a2880b7b94..8a410adca1 100644
--- a/bgpd/Makefile.am
+++ b/bgpd/Makefile.am
@@ -87,7 +87,7 @@ libbgp_a_SOURCES = \
bgp_encap_tlv.c $(BGP_VNC_RFAPI_SRC) bgp_attr_evpn.c \
bgp_evpn.c bgp_evpn_vty.c bgp_vpn.c bgp_label.c bgp_rd.c \
bgp_keepalives.c bgp_io.c bgp_flowspec.c bgp_flowspec_util.c \
- bgp_flowspec_vty.c bgp_labelpool.c
+ bgp_flowspec_vty.c bgp_labelpool.c bgp_pbr.c
noinst_HEADERS = \
bgp_memory.h \
@@ -101,7 +101,7 @@ noinst_HEADERS = \
$(BGP_VNC_RFAPI_HD) bgp_attr_evpn.h bgp_evpn.h bgp_evpn_vty.h \
bgp_vpn.h bgp_label.h bgp_rd.h bgp_evpn_private.h bgp_keepalives.h \
bgp_io.h bgp_flowspec.h bgp_flowspec_private.h bgp_flowspec_util.h \
- bgp_labelpool.h
+ bgp_labelpool.h bgp_pbr.h
bgpd_SOURCES = bgp_main.c
bgpd_LDADD = libbgp.a $(BGP_VNC_RFP_LIB) ../lib/libfrr.la @LIBCAP@ @LIBM@
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index ef839dba6a..276a7054e3 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -520,6 +520,8 @@ unsigned int attrhash_key_make(void *p)
MIX(attr->mp_nexthop_len);
key = jhash(attr->mp_nexthop_global.s6_addr, IPV6_MAX_BYTELEN, key);
key = jhash(attr->mp_nexthop_local.s6_addr, IPV6_MAX_BYTELEN, key);
+ MIX(attr->nh_ifindex);
+ MIX(attr->nh_lla_ifindex);
return key;
}
@@ -559,7 +561,9 @@ int attrhash_cmp(const void *p1, const void *p2)
&attr2->mp_nexthop_global_in)
&& IPV4_ADDR_SAME(&attr1->originator_id,
&attr2->originator_id)
- && overlay_index_same(attr1, attr2))
+ && overlay_index_same(attr1, attr2)
+ && attr1->nh_ifindex == attr2->nh_ifindex
+ && attr1->nh_lla_ifindex == attr2->nh_lla_ifindex)
return 1;
}
@@ -593,6 +597,9 @@ static void attr_show_all_iterator(struct hash_backet *backet, struct vty *vty)
vty_out(vty, "attr[%ld] nexthop %s\n", attr->refcnt,
inet_ntoa(attr->nexthop));
+ vty_out(vty, "\tflags: %" PRIu64 " med: %u local_pref: %u origin: %u weight: %u label: %u\n",
+ attr->flag, attr->med, attr->local_pref, attr->origin,
+ attr->weight, attr->label);
}
void attr_show_all(struct vty *vty)
@@ -1680,6 +1687,8 @@ int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,
stream_getl(s); /* RD low */
}
stream_get(&attr->mp_nexthop_global, s, IPV6_MAX_BYTELEN);
+ if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global))
+ attr->nh_ifindex = peer->nexthop.ifp->ifindex;
break;
case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
case BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL:
@@ -1689,6 +1698,8 @@ int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,
stream_getl(s); /* RD low */
}
stream_get(&attr->mp_nexthop_global, s, IPV6_MAX_BYTELEN);
+ if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global))
+ attr->nh_ifindex = peer->nexthop.ifp->ifindex;
if (attr->mp_nexthop_len
== BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL) {
stream_getl(s); /* RD high */
@@ -1712,6 +1723,7 @@ int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,
attr->mp_nexthop_len = IPV6_MAX_BYTELEN;
}
+ attr->nh_lla_ifindex = peer->nexthop.ifp->ifindex;
break;
default:
zlog_info("%s: (%s) Wrong multiprotocol next hop length: %d",
@@ -2009,36 +2021,32 @@ static int bgp_attr_encap(uint8_t type, struct peer *peer, /* IN */
return 0;
}
-/* Prefix SID attribute
- * draft-ietf-idr-bgp-prefix-sid-05
+/*
+ * Read an individual SID value returning how much data we have read
+ * Returns 0 if there was an error that needs to be passed up the stack
*/
-static bgp_attr_parse_ret_t
-bgp_attr_prefix_sid(struct bgp_attr_parser_args *args,
- struct bgp_nlri *mp_update)
+static bgp_attr_parse_ret_t bgp_attr_psid_sub(int32_t type,
+ int32_t length,
+ struct bgp_attr_parser_args *args,
+ struct bgp_nlri *mp_update)
{
struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
- int type;
- int length;
uint32_t label_index;
struct in6_addr ipv6_sid;
uint32_t srgb_base;
uint32_t srgb_range;
int srgb_count;
- attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID);
-
- type = stream_getc(peer->curr);
- length = stream_getw(peer->curr);
-
if (type == BGP_PREFIX_SID_LABEL_INDEX) {
if (length != BGP_PREFIX_SID_LABEL_INDEX_LENGTH) {
zlog_err(
- "Prefix SID label index length is %d instead of %d",
- length, BGP_PREFIX_SID_LABEL_INDEX_LENGTH);
- return bgp_attr_malformed(
- args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
- args->total);
+ "Prefix SID label index length is %d instead of %d",
+ length,
+ BGP_PREFIX_SID_LABEL_INDEX_LENGTH);
+ return bgp_attr_malformed(args,
+ BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
+ args->total);
}
/* Ignore flags and reserved */
@@ -2048,9 +2056,8 @@ bgp_attr_prefix_sid(struct bgp_attr_parser_args *args,
/* Fetch the label index and see if it is valid. */
label_index = stream_getl(peer->curr);
if (label_index == BGP_INVALID_LABEL_INDEX)
- return bgp_attr_malformed(
- args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
- args->total);
+ return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
+ args->total);
/* Store label index; subsequently, we'll check on
* address-family */
@@ -2071,9 +2078,9 @@ bgp_attr_prefix_sid(struct bgp_attr_parser_args *args,
if (length != BGP_PREFIX_SID_IPV6_LENGTH) {
zlog_err("Prefix SID IPv6 length is %d instead of %d",
length, BGP_PREFIX_SID_IPV6_LENGTH);
- return bgp_attr_malformed(
- args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
- args->total);
+ return bgp_attr_malformed(args,
+ BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
+ args->total);
}
/* Ignore reserved */
@@ -2110,6 +2117,47 @@ bgp_attr_prefix_sid(struct bgp_attr_parser_args *args,
return BGP_ATTR_PARSE_PROCEED;
}
+/* Prefix SID attribute
+ * draft-ietf-idr-bgp-prefix-sid-05
+ */
+bgp_attr_parse_ret_t
+bgp_attr_prefix_sid(int32_t tlength, struct bgp_attr_parser_args *args,
+ struct bgp_nlri *mp_update)
+{
+ struct peer *const peer = args->peer;
+ struct attr *const attr = args->attr;
+ bgp_attr_parse_ret_t ret;
+
+ attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID);
+
+ while (tlength) {
+ int32_t type, length;
+
+ type = stream_getc(peer->curr);
+ length = stream_getw(peer->curr);
+
+ ret = bgp_attr_psid_sub(type, length, args, mp_update);
+
+ if (ret != BGP_ATTR_PARSE_PROCEED)
+ return ret;
+ /*
+ * Subtract length + the T and the L
+ * since length is the Vector portion
+ */
+ tlength -= length + 3;
+
+ if (tlength < 0) {
+ zlog_err("Prefix SID internal length %d causes us to read beyond the total Prefix SID length",
+ length);
+ return bgp_attr_malformed(args,
+ BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
+ args->total);
+ }
+ }
+
+ return BGP_ATTR_PARSE_PROCEED;
+}
+
/* PMSI tunnel attribute (RFC 6514)
* Basic validation checks done here.
*/
@@ -2486,7 +2534,8 @@ bgp_attr_parse_ret_t bgp_attr_parse(struct peer *peer, struct attr *attr,
startp);
break;
case BGP_ATTR_PREFIX_SID:
- ret = bgp_attr_prefix_sid(&attr_args, mp_update);
+ ret = bgp_attr_prefix_sid(length,
+ &attr_args, mp_update);
break;
case BGP_ATTR_PMSI_TUNNEL:
ret = bgp_attr_pmsi_tunnel(&attr_args);
diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h
index 758db4a447..f17c2a68e4 100644
--- a/bgpd/bgp_attr.h
+++ b/bgpd/bgp_attr.h
@@ -144,6 +144,9 @@ struct attr {
struct in6_addr mp_nexthop_global;
struct in6_addr mp_nexthop_local;
+ /* ifIndex corresponding to mp_nexthop_local. */
+ ifindex_t nh_lla_ifindex;
+
/* Extended Communities attribute. */
struct ecommunity *ecommunity;
@@ -305,6 +308,9 @@ extern int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,
struct bgp_nlri *);
extern int bgp_mp_unreach_parse(struct bgp_attr_parser_args *args,
struct bgp_nlri *);
+extern bgp_attr_parse_ret_t
+bgp_attr_prefix_sid(int32_t tlength, struct bgp_attr_parser_args *args,
+ struct bgp_nlri *mp_update);
extern struct bgp_attr_encap_subtlv *
encap_tlv_dup(struct bgp_attr_encap_subtlv *orig);
diff --git a/bgpd/bgp_attr_evpn.c b/bgpd/bgp_attr_evpn.c
index d2a61b93fe..14ff01ada5 100644
--- a/bgpd/bgp_attr_evpn.c
+++ b/bgpd/bgp_attr_evpn.c
@@ -227,16 +227,18 @@ extern int bgp_build_evpn_prefix(int evpn_type, uint32_t eth_tag,
dst->family = AF_EVPN;
p_evpn_p->route_type = evpn_type;
if (evpn_type == BGP_EVPN_IP_PREFIX_ROUTE) {
- p_evpn_p->eth_tag = eth_tag;
- p_evpn_p->ip_prefix_length = p2.prefixlen;
+ p_evpn_p->prefix_addr.eth_tag = eth_tag;
+ p_evpn_p->prefix_addr.ip_prefix_length = p2.prefixlen;
if (src->family == AF_INET) {
- SET_IPADDR_V4(&p_evpn_p->ip);
- memcpy(&p_evpn_p->ip.ipaddr_v4, &src->u.prefix4,
+ SET_IPADDR_V4(&p_evpn_p->prefix_addr.ip);
+ memcpy(&p_evpn_p->prefix_addr.ip.ipaddr_v4,
+ &src->u.prefix4,
sizeof(struct in_addr));
dst->prefixlen = (uint8_t)PREFIX_LEN_ROUTE_TYPE_5_IPV4;
} else {
- SET_IPADDR_V6(&p_evpn_p->ip);
- memcpy(&p_evpn_p->ip.ipaddr_v6, &src->u.prefix6,
+ SET_IPADDR_V6(&p_evpn_p->prefix_addr.ip);
+ memcpy(&p_evpn_p->prefix_addr.ip.ipaddr_v6,
+ &src->u.prefix6,
sizeof(struct in6_addr));
dst->prefixlen = (uint8_t)PREFIX_LEN_ROUTE_TYPE_5_IPV6;
}
diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c
index 29ac5f520d..3e3fbcbfe8 100644
--- a/bgpd/bgp_debug.c
+++ b/bgpd/bgp_debug.c
@@ -59,6 +59,7 @@ unsigned long conf_bgp_debug_update_groups;
unsigned long conf_bgp_debug_vpn;
unsigned long conf_bgp_debug_flowspec;
unsigned long conf_bgp_debug_labelpool;
+unsigned long conf_bgp_debug_pbr;
unsigned long term_bgp_debug_as4;
unsigned long term_bgp_debug_neighbor_events;
@@ -75,6 +76,7 @@ unsigned long term_bgp_debug_update_groups;
unsigned long term_bgp_debug_vpn;
unsigned long term_bgp_debug_flowspec;
unsigned long term_bgp_debug_labelpool;
+unsigned long term_bgp_debug_pbr;
struct list *bgp_debug_neighbor_events_peers = NULL;
struct list *bgp_debug_keepalive_peers = NULL;
@@ -1653,7 +1655,40 @@ DEFUN (no_debug_bgp_vpn,
if (vty->node != CONFIG_NODE)
vty_out(vty, "disabled debug bgp vpn %s\n", argv[idx]->text);
+ return CMD_SUCCESS;
+}
+/* debug bgp pbr */
+DEFUN (debug_bgp_pbr,
+ debug_bgp_pbr_cmd,
+ "debug bgp pbr",
+ DEBUG_STR
+ BGP_STR
+ "BGP policy based routing\n")
+{
+ if (vty->node == CONFIG_NODE)
+ DEBUG_ON(pbr, PBR);
+ else {
+ TERM_DEBUG_ON(pbr, PBR);
+ vty_out(vty, "BGP policy based routing is on\n");
+ }
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_bgp_pbr,
+ no_debug_bgp_pbr_cmd,
+ "no debug bgp pbr",
+ NO_STR
+ DEBUG_STR
+ BGP_STR
+ "BGP policy based routing\n")
+{
+ if (vty->node == CONFIG_NODE)
+ DEBUG_OFF(pbr, PBR);
+ else {
+ TERM_DEBUG_OFF(pbr, PBR);
+ vty_out(vty, "BGP policy based routing is off\n");
+ }
return CMD_SUCCESS;
}
@@ -1733,6 +1768,7 @@ DEFUN (no_debug_bgp,
TERM_DEBUG_OFF(vpn, VPN_LEAK_LABEL);
TERM_DEBUG_OFF(flowspec, FLOWSPEC);
TERM_DEBUG_OFF(labelpool, LABELPOOL);
+ TERM_DEBUG_OFF(pbr, PBR);
vty_out(vty, "All possible debugging has been turned off\n");
return CMD_SUCCESS;
@@ -1808,6 +1844,9 @@ DEFUN_NOSH (show_debugging_bgp,
if (BGP_DEBUG(labelpool, LABELPOOL))
vty_out(vty, " BGP labelpool debugging is on\n");
+ if (BGP_DEBUG(pbr, PBR))
+ vty_out(vty, " BGP policy based routing debugging is on\n");
+
vty_out(vty, "\n");
return CMD_SUCCESS;
}
@@ -1865,6 +1904,9 @@ int bgp_debug_count(void)
if (BGP_DEBUG(labelpool, LABELPOOL))
ret++;
+ if (BGP_DEBUG(pbr, PBR))
+ ret++;
+
return ret;
}
@@ -1966,6 +2008,10 @@ static int bgp_config_write_debug(struct vty *vty)
write++;
}
+ if (CONF_BGP_DEBUG(pbr, PBR)) {
+ vty_out(vty, "debug bgp pbr\n");
+ write++;
+ }
return write;
}
@@ -2069,6 +2115,13 @@ void bgp_debug_init(void)
install_element(CONFIG_NODE, &debug_bgp_labelpool_cmd);
install_element(ENABLE_NODE, &no_debug_bgp_labelpool_cmd);
install_element(CONFIG_NODE, &no_debug_bgp_labelpool_cmd);
+
+ /* debug bgp pbr */
+ install_element(ENABLE_NODE, &debug_bgp_pbr_cmd);
+ install_element(CONFIG_NODE, &debug_bgp_pbr_cmd);
+ install_element(ENABLE_NODE, &no_debug_bgp_pbr_cmd);
+ install_element(CONFIG_NODE, &no_debug_bgp_pbr_cmd);
+
}
/* Return true if this prefix is on the per_prefix_list of prefixes to debug
diff --git a/bgpd/bgp_debug.h b/bgpd/bgp_debug.h
index ad476ee918..d5d8fbb505 100644
--- a/bgpd/bgp_debug.h
+++ b/bgpd/bgp_debug.h
@@ -75,6 +75,7 @@ extern unsigned long conf_bgp_debug_update_groups;
extern unsigned long conf_bgp_debug_vpn;
extern unsigned long conf_bgp_debug_flowspec;
extern unsigned long conf_bgp_debug_labelpool;
+extern unsigned long conf_bgp_debug_pbr;
extern unsigned long term_bgp_debug_as4;
extern unsigned long term_bgp_debug_neighbor_events;
@@ -89,6 +90,7 @@ extern unsigned long term_bgp_debug_update_groups;
extern unsigned long term_bgp_debug_vpn;
extern unsigned long term_bgp_debug_flowspec;
extern unsigned long term_bgp_debug_labelpool;
+extern unsigned long term_bgp_debug_pbr;
extern struct list *bgp_debug_neighbor_events_peers;
extern struct list *bgp_debug_keepalive_peers;
@@ -123,6 +125,8 @@ struct bgp_debug_filter {
#define BGP_DEBUG_VPN_LEAK_LABEL 0x08
#define BGP_DEBUG_FLOWSPEC 0x01
#define BGP_DEBUG_LABELPOOL 0x01
+#define BGP_DEBUG_PBR 0x01
+#define BGP_DEBUG_PBR_ERROR 0x02
#define BGP_DEBUG_PACKET_SEND 0x01
#define BGP_DEBUG_PACKET_SEND_DETAIL 0x02
diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c
index 80166dd32b..85b9ffd8ca 100644
--- a/bgpd/bgp_ecommunity.c
+++ b/bgpd/bgp_ecommunity.c
@@ -34,6 +34,7 @@
#include "bgpd/bgp_lcommunity.h"
#include "bgpd/bgp_aspath.h"
#include "bgpd/bgp_flowspec_private.h"
+#include "bgpd/bgp_pbr.h"
/* struct used to dump the rate contained in FS set traffic-rate EC */
union traffic_rate {
@@ -895,3 +896,88 @@ extern int ecommunity_strip(struct ecommunity *ecom, uint8_t type,
ecom->val = p;
return 1;
}
+
+/*
+ * Remove specified extended community value from extended community.
+ * Returns 1 if value was present (and hence, removed), 0 otherwise.
+ */
+int ecommunity_del_val(struct ecommunity *ecom, struct ecommunity_val *eval)
+{
+ uint8_t *p;
+ int c, found = 0;
+
+ /* Make sure specified value exists. */
+ if (ecom == NULL || ecom->val == NULL)
+ return 0;
+ c = 0;
+ for (p = ecom->val; c < ecom->size; p += ECOMMUNITY_SIZE, c++) {
+ if (!memcmp(p, eval->val, ECOMMUNITY_SIZE)) {
+ found = 1;
+ break;
+ }
+ }
+ if (found == 0)
+ return 0;
+
+ /* Delete the selected value */
+ ecom->size--;
+ p = XMALLOC(MTYPE_ECOMMUNITY_VAL, ecom->size * ECOMMUNITY_SIZE);
+ if (c != 0)
+ memcpy(p, ecom->val, c * ECOMMUNITY_SIZE);
+ if ((ecom->size - c) != 0)
+ memcpy(p + (c)*ECOMMUNITY_SIZE,
+ ecom->val + (c + 1) * ECOMMUNITY_SIZE,
+ (ecom->size - c) * ECOMMUNITY_SIZE);
+ XFREE(MTYPE_ECOMMUNITY_VAL, ecom->val);
+ ecom->val = p;
+ return 1;
+}
+
+int ecommunity_fill_pbr_action(struct ecommunity_val *ecom_eval,
+ struct bgp_pbr_entry_action *api)
+{
+ if (ecom_eval->val[1] == ECOMMUNITY_TRAFFIC_RATE) {
+ api->action = ACTION_TRAFFICRATE;
+ api->u.r.rate_info[3] = ecom_eval->val[4];
+ api->u.r.rate_info[2] = ecom_eval->val[5];
+ api->u.r.rate_info[1] = ecom_eval->val[6];
+ api->u.r.rate_info[0] = ecom_eval->val[7];
+ } else if (ecom_eval->val[1] == ECOMMUNITY_TRAFFIC_ACTION) {
+ api->action = ACTION_TRAFFIC_ACTION;
+ /* else distribute code is set by default */
+ if (ecom_eval->val[5] & (1 << FLOWSPEC_TRAFFIC_ACTION_TERMINAL))
+ api->u.za.filter |= TRAFFIC_ACTION_TERMINATE;
+ else
+ api->u.za.filter |= TRAFFIC_ACTION_DISTRIBUTE;
+ if (ecom_eval->val[5] == 1 << FLOWSPEC_TRAFFIC_ACTION_SAMPLE)
+ api->u.za.filter |= TRAFFIC_ACTION_SAMPLE;
+
+ } else if (ecom_eval->val[1] == ECOMMUNITY_TRAFFIC_MARKING) {
+ api->action = ACTION_MARKING;
+ api->u.marking_dscp = ecom_eval->val[7];
+ } else if (ecom_eval->val[1] == ECOMMUNITY_REDIRECT_VRF) {
+ /* must use external function */
+ return 0;
+ } else if (ecom_eval->val[1] == ECOMMUNITY_REDIRECT_IP_NH) {
+ /* see draft-ietf-idr-flowspec-redirect-ip-02
+ * Q1: how come a ext. community can host ipv6 address
+ * Q2 : from cisco documentation:
+ * Announces the reachability of one or more flowspec NLRI.
+ * When a BGP speaker receives an UPDATE message with the
+ * redirect-to-IP extended community, it is expected to
+ * create a traffic filtering rule for every flow-spec
+ * NLRI in the message that has this path as its best
+ * path. The filter entry matches the IP packets
+ * described in the NLRI field and redirects them or
+ * copies them towards the IPv4 or IPv6 address specified
+ * in the 'Network Address of Next- Hop'
+ * field of the associated MP_REACH_NLRI.
+ */
+ struct ecommunity_ip *ip_ecom = (struct ecommunity_ip *)
+ ecom_eval + 2;
+
+ api->u.zr.redirect_ip_v4 = ip_ecom->ip;
+ } else
+ return -1;
+ return 0;
+}
diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h
index 0c22c5a149..88bdb5e2ae 100644
--- a/bgpd/bgp_ecommunity.h
+++ b/bgpd/bgp_ecommunity.h
@@ -170,4 +170,10 @@ extern int ecommunity_add_val(struct ecommunity *, struct ecommunity_val *);
extern int ecommunity_strip(struct ecommunity *ecom, uint8_t type,
uint8_t subtype);
extern struct ecommunity *ecommunity_new(void);
+extern int ecommunity_del_val(struct ecommunity *ecom,
+ struct ecommunity_val *eval);
+struct bgp_pbr_entry_action;
+extern int ecommunity_fill_pbr_action(struct ecommunity_val *ecom_eval,
+ struct bgp_pbr_entry_action *api);
+
#endif /* _QUAGGA_BGP_ECOMMUNITY_H */
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index 483d65be71..ad45da84eb 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -29,7 +29,6 @@
#include "stream.h"
#include "hash.h"
#include "jhash.h"
-#include "bitfield.h"
#include "zclient.h"
#include "bgpd/bgp_attr_evpn.h"
@@ -511,15 +510,15 @@ static int bgp_zebra_send_remote_macip(struct bgp *bgp, struct bgpevpn *vpn,
s, add ? ZEBRA_REMOTE_MACIP_ADD : ZEBRA_REMOTE_MACIP_DEL,
bgp->vrf_id);
stream_putl(s, vpn->vni);
- stream_put(s, &p->prefix.mac.octet, ETH_ALEN); /* Mac Addr */
+ stream_put(s, &p->prefix.macip_addr.mac.octet, ETH_ALEN); /* Mac Addr */
/* IP address length and IP address, if any. */
- if (IS_EVPN_PREFIX_IPADDR_NONE(p))
+ if (is_evpn_prefix_ipaddr_none(p))
stream_putl(s, 0);
else {
- ipa_len = IS_EVPN_PREFIX_IPADDR_V4(p) ? IPV4_MAX_BYTELEN
+ ipa_len = is_evpn_prefix_ipaddr_v4(p) ? IPV4_MAX_BYTELEN
: IPV6_MAX_BYTELEN;
stream_putl(s, ipa_len);
- stream_put(s, &p->prefix.ip.ip.addr, ipa_len);
+ stream_put(s, &p->prefix.macip_addr.ip.ip.addr, ipa_len);
}
stream_put_in_addr(s, &remote_vtep_ip);
@@ -533,8 +532,10 @@ static int bgp_zebra_send_remote_macip(struct bgp *bgp, struct bgpevpn *vpn,
zlog_debug(
"Tx %s MACIP, VNI %u MAC %s IP %s (flags: 0x%x) remote VTEP %s",
add ? "ADD" : "DEL", vpn->vni,
- prefix_mac2str(&p->prefix.mac, buf1, sizeof(buf1)),
- ipaddr2str(&p->prefix.ip, buf3, sizeof(buf3)), flags,
+ prefix_mac2str(&p->prefix.macip_addr.mac,
+ buf1, sizeof(buf1)),
+ ipaddr2str(&p->prefix.macip_addr.ip,
+ buf3, sizeof(buf3)), flags,
inet_ntop(AF_INET, &remote_vtep_ip, buf2,
sizeof(buf2)));
@@ -564,9 +565,9 @@ static int bgp_zebra_send_remote_vtep(struct bgp *bgp, struct bgpevpn *vpn,
s, add ? ZEBRA_REMOTE_VTEP_ADD : ZEBRA_REMOTE_VTEP_DEL,
bgp->vrf_id);
stream_putl(s, vpn->vni);
- if (IS_EVPN_PREFIX_IPADDR_V4(p))
- stream_put_in_addr(s, &p->prefix.ip.ipaddr_v4);
- else if (IS_EVPN_PREFIX_IPADDR_V6(p)) {
+ if (is_evpn_prefix_ipaddr_v4(p))
+ stream_put_in_addr(s, &p->prefix.imet_addr.ip.ipaddr_v4);
+ else if (is_evpn_prefix_ipaddr_v6(p)) {
zlog_err(
"Bad remote IP when trying to %s remote VTEP for VNI %u",
add ? "ADD" : "DEL", vpn->vni);
@@ -578,7 +579,7 @@ static int bgp_zebra_send_remote_vtep(struct bgp *bgp, struct bgpevpn *vpn,
if (bgp_debug_zebra(NULL))
zlog_debug("Tx %s Remote VTEP, VNI %u remote VTEP %s",
add ? "ADD" : "DEL", vpn->vni,
- inet_ntoa(p->prefix.ip.ipaddr_v4));
+ inet_ntoa(p->prefix.imet_addr.ip.ipaddr_v4));
return zclient_send_message(zclient);
}
@@ -1294,8 +1295,8 @@ static int update_evpn_route(struct bgp *bgp, struct bgpevpn *vpn,
* these routes.
*/
if (p->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE &&
- (IS_EVPN_PREFIX_IPADDR_V4(p) ||
- !IN6_IS_ADDR_LINKLOCAL(&p->prefix.ip.ipaddr_v6)) &&
+ (is_evpn_prefix_ipaddr_v4(p) ||
+ !IN6_IS_ADDR_LINKLOCAL(&p->prefix.macip_addr.ip.ipaddr_v6)) &&
CHECK_FLAG(vpn->flags, VNI_FLAG_USE_TWO_LABELS))
add_l3_ecomm = 1;
@@ -1540,8 +1541,8 @@ static int update_all_type2_routes(struct bgp *bgp, struct bgpevpn *vpn)
if (evp->prefix.route_type != BGP_EVPN_MAC_IP_ROUTE)
continue;
- if (IS_EVPN_PREFIX_IPADDR_V6(evp) &&
- IN6_IS_ADDR_LINKLOCAL(&evp->prefix.ip.ipaddr_v6))
+ if (is_evpn_prefix_ipaddr_v6(evp) &&
+ IN6_IS_ADDR_LINKLOCAL(&evp->prefix.macip_addr.ip.ipaddr_v6))
update_evpn_route_entry(bgp, vpn, afi, safi, rn,
&attr_ip6_ll, 0, 1, &ri, 0);
else {
@@ -1793,10 +1794,7 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
char buf1[PREFIX_STRLEN];
memset(pp, 0, sizeof(struct prefix));
- if (evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE)
- ip_prefix_from_type2_prefix(evp, pp);
- else if (evp->prefix.route_type == BGP_EVPN_IP_PREFIX_ROUTE)
- ip_prefix_from_type5_prefix(evp, pp);
+ ip_prefix_from_evpn_prefix(evp, pp);
if (bgp_debug_zebra(NULL)) {
zlog_debug(
@@ -1808,11 +1806,11 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
/* Create (or fetch) route within the VRF. */
/* NOTE: There is no RD here. */
- if (IS_EVPN_PREFIX_IPADDR_V4(evp)) {
+ if (is_evpn_prefix_ipaddr_v4(evp)) {
afi = AFI_IP;
safi = SAFI_UNICAST;
rn = bgp_node_get(bgp_vrf->rib[afi][safi], pp);
- } else if (IS_EVPN_PREFIX_IPADDR_V6(evp)) {
+ } else if (is_evpn_prefix_ipaddr_v6(evp)) {
afi = AFI_IP6;
safi = SAFI_UNICAST;
rn = bgp_node_get(bgp_vrf->rib[afi][safi], pp);
@@ -1971,10 +1969,7 @@ static int uninstall_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
char buf1[PREFIX_STRLEN];
memset(pp, 0, sizeof(struct prefix));
- if (evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE)
- ip_prefix_from_type2_prefix(evp, pp);
- else if (evp->prefix.route_type == BGP_EVPN_IP_PREFIX_ROUTE)
- ip_prefix_from_type5_prefix(evp, pp);
+ ip_prefix_from_evpn_prefix(evp, pp);
if (bgp_debug_zebra(NULL)) {
zlog_debug(
@@ -1986,7 +1981,7 @@ static int uninstall_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
/* Locate route within the VRF. */
/* NOTE: There is no RD here. */
- if (IS_EVPN_PREFIX_IPADDR_V4(evp)) {
+ if (is_evpn_prefix_ipaddr_v4(evp)) {
afi = AFI_IP;
safi = SAFI_UNICAST;
rn = bgp_node_lookup(bgp_vrf->rib[afi][safi], pp);
@@ -2233,8 +2228,8 @@ static int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, int install)
continue;
/* if not a mac+ip route skip this route */
- if (!(IS_EVPN_PREFIX_IPADDR_V4(evp)
- || IS_EVPN_PREFIX_IPADDR_V6(evp)))
+ if (!(is_evpn_prefix_ipaddr_v4(evp)
+ || is_evpn_prefix_ipaddr_v6(evp)))
continue;
for (ri = rn->info; ri; ri = ri->next) {
@@ -2424,8 +2419,8 @@ static int install_uninstall_route_in_vrfs(struct bgp *bgp_def, afi_t afi,
/* if it is type-2 route and not a mac+ip route skip this route */
if ((evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE)
- && !(IS_EVPN_PREFIX_IPADDR_V4(evp)
- || IS_EVPN_PREFIX_IPADDR_V6(evp)))
+ && !(is_evpn_prefix_ipaddr_v4(evp)
+ || is_evpn_prefix_ipaddr_v6(evp)))
return 0;
for (ALL_LIST_ELEMENTS(vrfs, node, nnode, bgp_vrf)) {
@@ -2851,7 +2846,7 @@ static int process_type2_route(struct peer *peer, afi_t afi, safi_t safi,
/* Copy Ethernet Tag */
memcpy(&eth_tag, pfx, 4);
- p.prefix.eth_tag = ntohl(eth_tag);
+ p.prefix.macip_addr.eth_tag = ntohl(eth_tag);
pfx += 4;
/* Get the MAC Addr len */
@@ -2859,7 +2854,7 @@ static int process_type2_route(struct peer *peer, afi_t afi, safi_t safi,
/* Get the MAC Addr */
if (macaddr_len == (ETH_ALEN * 8)) {
- memcpy(&p.prefix.mac.octet, pfx, ETH_ALEN);
+ memcpy(&p.prefix.macip_addr.mac.octet, pfx, ETH_ALEN);
pfx += ETH_ALEN;
} else {
zlog_err(
@@ -2881,10 +2876,10 @@ static int process_type2_route(struct peer *peer, afi_t afi, safi_t safi,
if (ipaddr_len) {
ipaddr_len /= 8; /* Convert to bytes. */
- p.prefix.ip.ipa_type = (ipaddr_len == IPV4_MAX_BYTELEN)
+ p.prefix.macip_addr.ip.ipa_type = (ipaddr_len == IPV4_MAX_BYTELEN)
? IPADDR_V4
: IPADDR_V6;
- memcpy(&p.prefix.ip.ip.addr, pfx, ipaddr_len);
+ memcpy(&p.prefix.macip_addr.ip.ip.addr, pfx, ipaddr_len);
}
pfx += ipaddr_len;
@@ -2966,14 +2961,14 @@ static int process_type3_route(struct peer *peer, afi_t afi, safi_t safi,
/* Copy Ethernet Tag */
memcpy(&eth_tag, pfx, 4);
- p.prefix.eth_tag = ntohl(eth_tag);
+ p.prefix.imet_addr.eth_tag = ntohl(eth_tag);
pfx += 4;
/* Get the IP. */
ipaddr_len = *pfx++;
if (ipaddr_len == IPV4_MAX_BITLEN) {
- p.prefix.ip.ipa_type = IPADDR_V4;
- memcpy(&p.prefix.ip.ip.addr, pfx, IPV4_MAX_BYTELEN);
+ p.prefix.imet_addr.ip.ipa_type = IPADDR_V4;
+ memcpy(&p.prefix.imet_addr.ip.ip.addr, pfx, IPV4_MAX_BYTELEN);
} else {
zlog_err(
"%u:%s - Rx EVPN Type-3 NLRI with unsupported IP address length %d",
@@ -3040,7 +3035,7 @@ static int process_type5_route(struct peer *peer, afi_t afi, safi_t safi,
/* Fetch Ethernet Tag. */
memcpy(&eth_tag, pfx, 4);
- p.prefix.eth_tag = ntohl(eth_tag);
+ p.prefix.prefix_addr.eth_tag = ntohl(eth_tag);
pfx += 4;
/* Fetch IP prefix length. */
@@ -3051,21 +3046,21 @@ static int process_type5_route(struct peer *peer, afi_t afi, safi_t safi,
peer->bgp->vrf_id, peer->host, ippfx_len);
return -1;
}
- p.prefix.ip_prefix_length = ippfx_len;
+ p.prefix.prefix_addr.ip_prefix_length = ippfx_len;
/* Determine IPv4 or IPv6 prefix */
/* Since the address and GW are from the same family, this just becomes
* a simple check on the total size.
*/
if (psize == 34) {
- SET_IPADDR_V4(&p.prefix.ip);
- memcpy(&p.prefix.ip.ipaddr_v4, pfx, 4);
+ SET_IPADDR_V4(&p.prefix.prefix_addr.ip);
+ memcpy(&p.prefix.prefix_addr.ip.ipaddr_v4, pfx, 4);
pfx += 4;
memcpy(&evpn.gw_ip.ipv4, pfx, 4);
pfx += 4;
} else {
- SET_IPADDR_V6(&p.prefix.ip);
- memcpy(&p.prefix.ip.ipaddr_v6, pfx, 16);
+ SET_IPADDR_V6(&p.prefix.prefix_addr.ip);
+ memcpy(&p.prefix.prefix_addr.ip.ipaddr_v6, pfx, 16);
pfx += 16;
memcpy(&evpn.gw_ip.ipv6, pfx, 16);
pfx += 16;
@@ -3110,7 +3105,7 @@ static void evpn_mpattr_encode_type5(struct stream *s, struct prefix *p,
/* len denites the total len of IP and GW-IP in the route
IP and GW-IP have to be both ipv4 or ipv6
*/
- if (IS_IPADDR_V4(&p_evpn_p->ip))
+ if (IS_IPADDR_V4(&p_evpn_p->prefix_addr.ip))
len = 8; /* IP and GWIP are both ipv4 */
else
len = 32; /* IP and GWIP are both ipv6 */
@@ -3121,20 +3116,20 @@ static void evpn_mpattr_encode_type5(struct stream *s, struct prefix *p,
stream_put(s, &(attr->evpn_overlay.eth_s_id), 10);
else
stream_put(s, &temp, 10);
- stream_putl(s, p_evpn_p->eth_tag);
- stream_putc(s, p_evpn_p->ip_prefix_length);
- if (IS_IPADDR_V4(&p_evpn_p->ip))
- stream_put_ipv4(s, p_evpn_p->ip.ipaddr_v4.s_addr);
+ stream_putl(s, p_evpn_p->prefix_addr.eth_tag);
+ stream_putc(s, p_evpn_p->prefix_addr.ip_prefix_length);
+ if (IS_IPADDR_V4(&p_evpn_p->prefix_addr.ip))
+ stream_put_ipv4(s, p_evpn_p->prefix_addr.ip.ipaddr_v4.s_addr);
else
- stream_put(s, &p_evpn_p->ip.ipaddr_v6, 16);
+ stream_put(s, &p_evpn_p->prefix_addr.ip.ipaddr_v6, 16);
if (attr) {
- if (IS_IPADDR_V4(&p_evpn_p->ip))
+ if (IS_IPADDR_V4(&p_evpn_p->prefix_addr.ip))
stream_put_ipv4(s,
attr->evpn_overlay.gw_ip.ipv4.s_addr);
else
stream_put(s, &(attr->evpn_overlay.gw_ip.ipv6), 16);
} else {
- if (IS_IPADDR_V4(&p_evpn_p->ip))
+ if (IS_IPADDR_V4(&p_evpn_p->prefix_addr.ip))
stream_put_ipv4(s, 0);
else
stream_put(s, &temp, 16);
@@ -3584,44 +3579,49 @@ void bgp_evpn_route2json(struct prefix_evpn *p, json_object *json)
if (p->prefix.route_type == BGP_EVPN_IMET_ROUTE) {
json_object_int_add(json, "routeType", p->prefix.route_type);
- json_object_int_add(json, "ethTag", p->prefix.eth_tag);
+ json_object_int_add(json, "ethTag",
+ p->prefix.imet_addr.eth_tag);
json_object_int_add(json, "ipLen",
- IS_EVPN_PREFIX_IPADDR_V4(p)
+ is_evpn_prefix_ipaddr_v4(p)
? IPV4_MAX_BITLEN
: IPV6_MAX_BITLEN);
json_object_string_add(json, "ip",
- inet_ntoa(p->prefix.ip.ipaddr_v4));
+ inet_ntoa(p->prefix.imet_addr.ip.ipaddr_v4));
} else if (p->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE) {
- if (IS_EVPN_PREFIX_IPADDR_NONE(p)) {
+ if (is_evpn_prefix_ipaddr_none(p)) {
json_object_int_add(json, "routeType",
p->prefix.route_type);
- json_object_int_add(json, "ethTag", p->prefix.eth_tag);
+ json_object_int_add(json, "ethTag",
+ p->prefix.macip_addr.eth_tag);
json_object_int_add(json, "macLen", 8 * ETH_ALEN);
json_object_string_add(json, "mac",
- prefix_mac2str(&p->prefix.mac,
+ prefix_mac2str(&p->prefix.macip_addr.mac,
buf1,
sizeof(buf1)));
} else {
uint8_t family;
- family = IS_EVPN_PREFIX_IPADDR_V4(p) ? AF_INET
+ family = is_evpn_prefix_ipaddr_v4(p) ? AF_INET
: AF_INET6;
json_object_int_add(json, "routeType",
p->prefix.route_type);
- json_object_int_add(json, "ethTag", p->prefix.eth_tag);
+ json_object_int_add(json, "ethTag",
+ p->prefix.macip_addr.eth_tag);
json_object_int_add(json, "macLen", 8 * ETH_ALEN);
json_object_string_add(json, "mac",
- prefix_mac2str(&p->prefix.mac,
+ prefix_mac2str(&p->prefix.macip_addr.mac,
buf1,
sizeof(buf1)));
json_object_int_add(json, "ipLen",
- IS_EVPN_PREFIX_IPADDR_V4(p)
+ is_evpn_prefix_ipaddr_v4(p)
? IPV4_MAX_BITLEN
: IPV6_MAX_BITLEN);
json_object_string_add(
json, "ip",
- inet_ntop(family, &p->prefix.ip.ip.addr, buf2,
+ inet_ntop(family,
+ &p->prefix.macip_addr.ip.ip.addr,
+ buf2,
PREFIX2STR_BUFFER));
}
} else {
@@ -3640,42 +3640,44 @@ char *bgp_evpn_route2str(struct prefix_evpn *p, char *buf, int len)
if (p->prefix.route_type == BGP_EVPN_IMET_ROUTE) {
snprintf(buf, len, "[%d]:[%d]:[%d]:[%s]", p->prefix.route_type,
- p->prefix.eth_tag,
- IS_EVPN_PREFIX_IPADDR_V4(p) ? IPV4_MAX_BITLEN
+ p->prefix.imet_addr.eth_tag,
+ is_evpn_prefix_ipaddr_v4(p) ? IPV4_MAX_BITLEN
: IPV6_MAX_BITLEN,
- inet_ntoa(p->prefix.ip.ipaddr_v4));
+ inet_ntoa(p->prefix.imet_addr.ip.ipaddr_v4));
} else if (p->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE) {
- if (IS_EVPN_PREFIX_IPADDR_NONE(p))
+ if (is_evpn_prefix_ipaddr_none(p))
snprintf(buf, len, "[%d]:[%d]:[%d]:[%s]",
p->prefix.route_type,
- p->prefix.eth_tag,
+ p->prefix.macip_addr.eth_tag,
8 * ETH_ALEN,
- prefix_mac2str(&p->prefix.mac, buf1,
+ prefix_mac2str(&p->prefix.macip_addr.mac, buf1,
sizeof(buf1)));
else {
uint8_t family;
- family = IS_EVPN_PREFIX_IPADDR_V4(p) ? AF_INET
+ family = is_evpn_prefix_ipaddr_v4(p) ? AF_INET
: AF_INET6;
snprintf(buf, len, "[%d]:[%d]:[%d]:[%s]:[%d]:[%s]",
p->prefix.route_type,
- p->prefix.eth_tag,
+ p->prefix.macip_addr.eth_tag,
8 * ETH_ALEN,
- prefix_mac2str(&p->prefix.mac, buf1,
+ prefix_mac2str(&p->prefix.macip_addr.mac, buf1,
sizeof(buf1)),
family == AF_INET ? IPV4_MAX_BITLEN
: IPV6_MAX_BITLEN,
- inet_ntop(family, &p->prefix.ip.ip.addr, buf2,
+ inet_ntop(family,
+ &p->prefix.macip_addr.ip.ip.addr,
+ buf2,
PREFIX2STR_BUFFER));
}
} else if (p->prefix.route_type == BGP_EVPN_IP_PREFIX_ROUTE) {
snprintf(buf, len, "[%d]:[%d]:[%d]:[%s]",
p->prefix.route_type,
- p->prefix.eth_tag,
- p->prefix.ip_prefix_length,
- IS_EVPN_PREFIX_IPADDR_V4(p)
- ? inet_ntoa(p->prefix.ip.ipaddr_v4)
- : inet6_ntoa(p->prefix.ip.ipaddr_v6));
+ p->prefix.prefix_addr.eth_tag,
+ p->prefix.prefix_addr.ip_prefix_length,
+ is_evpn_prefix_ipaddr_v4(p)
+ ? inet_ntoa(p->prefix.prefix_addr.ip.ipaddr_v4)
+ : inet6_ntoa(p->prefix.prefix_addr.ip.ipaddr_v6));
} else {
/* For EVPN route types not supported yet. */
snprintf(buf, len, "(unsupported route type %d)",
@@ -3704,9 +3706,9 @@ void bgp_evpn_encode_prefix(struct stream *s, struct prefix *p,
switch (evp->prefix.route_type) {
case BGP_EVPN_MAC_IP_ROUTE:
- if (IS_EVPN_PREFIX_IPADDR_V4(evp))
+ if (is_evpn_prefix_ipaddr_v4(evp))
ipa_len = IPV4_MAX_BYTELEN;
- else if (IS_EVPN_PREFIX_IPADDR_V6(evp))
+ else if (is_evpn_prefix_ipaddr_v6(evp))
ipa_len = IPV6_MAX_BYTELEN;
/* RD, ESI, EthTag, MAC+len, IP len, [IP], 1 VNI */
len = 8 + 10 + 4 + 1 + 6 + 1 + ipa_len + 3;
@@ -3718,12 +3720,13 @@ void bgp_evpn_encode_prefix(struct stream *s, struct prefix *p,
stream_put(s, &attr->evpn_overlay.eth_s_id, ESI_LEN);
else
stream_put(s, 0, 10);
- stream_putl(s, evp->prefix.eth_tag); /* Ethernet Tag ID */
+ stream_putl(s, evp->prefix.macip_addr.eth_tag); /* Ethernet Tag ID */
stream_putc(s, 8 * ETH_ALEN); /* Mac Addr Len - bits */
- stream_put(s, evp->prefix.mac.octet, 6); /* Mac Addr */
- stream_putc(s, 8 * ipa_len); /* IP address Length */
- if (ipa_len) /* IP */
- stream_put(s, &evp->prefix.ip.ip.addr, ipa_len);
+ stream_put(s, evp->prefix.macip_addr.mac.octet, 6); /* Mac Addr */
+ stream_putc(s, 8 * ipa_len); /* IP address Length */
+ if (ipa_len) /* IP */
+ stream_put(s, &evp->prefix.macip_addr.ip.ip.addr,
+ ipa_len);
/* 1st label is the L2 VNI */
stream_put(s, label, BGP_LABEL_BYTES);
/* Include 2nd label (L3 VNI) if advertising MAC+IP */
@@ -3734,10 +3737,10 @@ void bgp_evpn_encode_prefix(struct stream *s, struct prefix *p,
case BGP_EVPN_IMET_ROUTE:
stream_putc(s, 17); // TODO: length - assumes IPv4 address
stream_put(s, prd->val, 8); /* RD */
- stream_putl(s, evp->prefix.eth_tag); /* Ethernet Tag ID */
+ stream_putl(s, evp->prefix.imet_addr.eth_tag); /* Ethernet Tag ID */
stream_putc(s, IPV4_MAX_BITLEN); /* IP address Length - bits */
/* Originating Router's IP Addr */
- stream_put_in_addr(s, &evp->prefix.ip.ipaddr_v4);
+ stream_put_in_addr(s, &evp->prefix.imet_addr.ip.ipaddr_v4);
break;
case BGP_EVPN_IP_PREFIX_ROUTE:
@@ -3988,12 +3991,7 @@ void bgp_evpn_derive_auto_rt_export(struct bgp *bgp, struct bgpevpn *vpn)
*/
void bgp_evpn_derive_auto_rd_for_vrf(struct bgp *bgp)
{
- char buf[100];
-
- bgp->vrf_prd.family = AF_UNSPEC;
- bgp->vrf_prd.prefixlen = 64;
- sprintf(buf, "%s:%hu", inet_ntoa(bgp->router_id), bgp->vrf_rd_id);
- (void)str2prefix_rd(buf, &bgp->vrf_prd);
+ form_auto_rd(bgp->router_id, bgp->vrf_rd_id, &bgp->vrf_prd);
}
/*
@@ -4577,7 +4575,6 @@ void bgp_evpn_cleanup(struct bgp *bgp)
list_delete_and_null(&bgp->vrf_export_rtl);
if (bgp->l2vnis)
list_delete_and_null(&bgp->l2vnis);
- bf_release_index(bm->rd_idspace, bgp->vrf_rd_id);
}
/*
@@ -4585,7 +4582,6 @@ void bgp_evpn_cleanup(struct bgp *bgp)
* Create
* VNI hash table
* hash for RT to VNI
- * assign a unique rd id for auto derivation of vrf_prd
*/
void bgp_evpn_init(struct bgp *bgp)
{
@@ -4606,7 +4602,6 @@ void bgp_evpn_init(struct bgp *bgp)
(int (*)(void *, void *))evpn_route_target_cmp;
bgp->l2vnis = list_new();
bgp->l2vnis->cmp = (int (*)(void *, void *))vni_hash_cmp;
- bf_assign_index(bm->rd_idspace, bgp->vrf_rd_id);
}
void bgp_evpn_vrf_delete(struct bgp *bgp_vrf)
diff --git a/bgpd/bgp_evpn_private.h b/bgpd/bgp_evpn_private.h
index 1eecb9ecf7..1efde3a719 100644
--- a/bgpd/bgp_evpn_private.h
+++ b/bgpd/bgp_evpn_private.h
@@ -272,15 +272,15 @@ static inline void ip_prefix_from_type5_prefix(struct prefix_evpn *evp,
struct prefix *ip)
{
memset(ip, 0, sizeof(struct prefix));
- if (IS_EVPN_PREFIX_IPADDR_V4(evp)) {
+ if (is_evpn_prefix_ipaddr_v4(evp)) {
ip->family = AF_INET;
- ip->prefixlen = evp->prefix.ip_prefix_length;
- memcpy(&(ip->u.prefix4), &(evp->prefix.ip.ip),
+ ip->prefixlen = evp->prefix.prefix_addr.ip_prefix_length;
+ memcpy(&(ip->u.prefix4), &(evp->prefix.prefix_addr.ip.ip),
IPV4_MAX_BYTELEN);
- } else if (IS_EVPN_PREFIX_IPADDR_V6(evp)) {
+ } else if (is_evpn_prefix_ipaddr_v6(evp)) {
ip->family = AF_INET6;
- ip->prefixlen = evp->prefix.ip_prefix_length;
- memcpy(&(ip->u.prefix6), &(evp->prefix.ip.ip),
+ ip->prefixlen = evp->prefix.prefix_addr.ip_prefix_length;
+ memcpy(&(ip->u.prefix6), &(evp->prefix.prefix_addr.ip.ip),
IPV6_MAX_BYTELEN);
}
}
@@ -290,26 +290,36 @@ static inline int is_evpn_prefix_default(struct prefix *evp)
if (evp->family != AF_EVPN)
return 0;
- return ((evp->u.prefix_evpn.ip_prefix_length == 0) ? 1 : 0);
+ return ((evp->u.prefix_evpn.prefix_addr.ip_prefix_length == 0) ?
+ 1 : 0);
}
static inline void ip_prefix_from_type2_prefix(struct prefix_evpn *evp,
struct prefix *ip)
{
memset(ip, 0, sizeof(struct prefix));
- if (IS_EVPN_PREFIX_IPADDR_V4(evp)) {
+ if (is_evpn_prefix_ipaddr_v4(evp)) {
ip->family = AF_INET;
ip->prefixlen = IPV4_MAX_BITLEN;
- memcpy(&(ip->u.prefix4), &(evp->prefix.ip.ip),
+ memcpy(&(ip->u.prefix4), &(evp->prefix.macip_addr.ip.ip),
IPV4_MAX_BYTELEN);
- } else if (IS_EVPN_PREFIX_IPADDR_V6(evp)) {
+ } else if (is_evpn_prefix_ipaddr_v6(evp)) {
ip->family = AF_INET6;
ip->prefixlen = IPV6_MAX_BITLEN;
- memcpy(&(ip->u.prefix6), &(evp->prefix.ip.ip),
+ memcpy(&(ip->u.prefix6), &(evp->prefix.macip_addr.ip.ip),
IPV6_MAX_BYTELEN);
}
}
+static inline void ip_prefix_from_evpn_prefix(struct prefix_evpn *evp,
+ struct prefix *ip)
+{
+ if (evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE)
+ ip_prefix_from_type2_prefix(evp, ip);
+ else if (evp->prefix.route_type == BGP_EVPN_IP_PREFIX_ROUTE)
+ ip_prefix_from_type5_prefix(evp, ip);
+}
+
static inline void build_evpn_type2_prefix(struct prefix_evpn *p,
struct ethaddr *mac,
struct ipaddr *ip)
@@ -318,10 +328,10 @@ static inline void build_evpn_type2_prefix(struct prefix_evpn *p,
p->family = AF_EVPN;
p->prefixlen = EVPN_TYPE_2_ROUTE_PREFIXLEN;
p->prefix.route_type = BGP_EVPN_MAC_IP_ROUTE;
- memcpy(&p->prefix.mac.octet, mac->octet, ETH_ALEN);
- p->prefix.ip.ipa_type = IPADDR_NONE;
+ memcpy(&p->prefix.macip_addr.mac.octet, mac->octet, ETH_ALEN);
+ p->prefix.macip_addr.ip.ipa_type = IPADDR_NONE;
if (ip)
- memcpy(&p->prefix.ip, ip, sizeof(*ip));
+ memcpy(&p->prefix.macip_addr.ip, ip, sizeof(*ip));
}
static inline void build_type5_prefix_from_ip_prefix(struct prefix_evpn *evp,
@@ -343,10 +353,10 @@ static inline void build_type5_prefix_from_ip_prefix(struct prefix_evpn *evp,
memset(evp, 0, sizeof(struct prefix_evpn));
evp->family = AF_EVPN;
evp->prefixlen = EVPN_TYPE_5_ROUTE_PREFIXLEN;
- evp->prefix.ip_prefix_length = ip_prefix->prefixlen;
evp->prefix.route_type = BGP_EVPN_IP_PREFIX_ROUTE;
- evp->prefix.ip.ipa_type = ip.ipa_type;
- memcpy(&evp->prefix.ip, &ip, sizeof(struct ipaddr));
+ evp->prefix.prefix_addr.ip_prefix_length = ip_prefix->prefixlen;
+ evp->prefix.prefix_addr.ip.ipa_type = ip.ipa_type;
+ memcpy(&evp->prefix.prefix_addr.ip, &ip, sizeof(struct ipaddr));
}
static inline void build_evpn_type3_prefix(struct prefix_evpn *p,
@@ -356,8 +366,8 @@ static inline void build_evpn_type3_prefix(struct prefix_evpn *p,
p->family = AF_EVPN;
p->prefixlen = EVPN_TYPE_3_ROUTE_PREFIXLEN;
p->prefix.route_type = BGP_EVPN_IMET_ROUTE;
- p->prefix.ip.ipa_type = IPADDR_V4;
- p->prefix.ip.ipaddr_v4 = originator_ip;
+ p->prefix.imet_addr.ip.ipa_type = IPADDR_V4;
+ p->prefix.imet_addr.ip.ipaddr_v4 = originator_ip;
}
static inline int evpn_default_originate_set(struct bgp *bgp, afi_t afi,
diff --git a/bgpd/bgp_flowspec_util.c b/bgpd/bgp_flowspec_util.c
index 007b27f17e..956cf28c21 100644
--- a/bgpd/bgp_flowspec_util.c
+++ b/bgpd/bgp_flowspec_util.c
@@ -25,6 +25,7 @@
#include "bgp_table.h"
#include "bgp_flowspec_util.h"
#include "bgp_flowspec_private.h"
+#include "bgp_pbr.h"
static void hex2bin(uint8_t *hex, int *bin)
{
@@ -50,6 +51,109 @@ static int hexstr2num(uint8_t *hexstr, int len)
return num;
}
+/* call bgp_flowspec_op_decode
+ * returns offset
+ */
+static int bgp_flowspec_call_non_opaque_decode(uint8_t *nlri_content, int len,
+ struct bgp_pbr_match_val *mval,
+ uint8_t *match_num, int *error)
+{
+ int ret;
+
+ ret = bgp_flowspec_op_decode(
+ BGP_FLOWSPEC_CONVERT_TO_NON_OPAQUE,
+ nlri_content,
+ len,
+ mval, error);
+ if (*error < 0)
+ zlog_err("%s: flowspec_op_decode error %d",
+ __func__, *error);
+ else
+ *match_num = *error;
+ return ret;
+}
+
+static bool bgp_flowspec_contains_prefix(struct prefix *pfs,
+ struct prefix *input,
+ int prefix_check)
+{
+ uint32_t offset = 0;
+ int type;
+ int ret = 0, error = 0;
+ uint8_t *nlri_content = (uint8_t *)pfs->u.prefix_flowspec.ptr;
+ size_t len = pfs->u.prefix_flowspec.prefixlen;
+ struct prefix compare;
+
+ error = 0;
+ while (offset < len-1 && error >= 0) {
+ type = nlri_content[offset];
+ offset++;
+ switch (type) {
+ case FLOWSPEC_DEST_PREFIX:
+ case FLOWSPEC_SRC_PREFIX:
+ memset(&compare, 0, sizeof(struct prefix));
+ ret = bgp_flowspec_ip_address(
+ BGP_FLOWSPEC_CONVERT_TO_NON_OPAQUE,
+ nlri_content+offset,
+ len - offset,
+ &compare, &error);
+ if (ret <= 0)
+ break;
+ if (prefix_check &&
+ compare.prefixlen != input->prefixlen)
+ break;
+ if (compare.family != input->family)
+ break;
+ if ((input->family == AF_INET) &&
+ IPV4_ADDR_SAME(&input->u.prefix4,
+ &compare.u.prefix4))
+ return true;
+ if ((input->family == AF_INET6) &&
+ IPV6_ADDR_SAME(&input->u.prefix6.s6_addr,
+ &compare.u.prefix6.s6_addr))
+ return true;
+ break;
+ case FLOWSPEC_IP_PROTOCOL:
+ case FLOWSPEC_PORT:
+ case FLOWSPEC_DEST_PORT:
+ case FLOWSPEC_SRC_PORT:
+ case FLOWSPEC_ICMP_TYPE:
+ case FLOWSPEC_ICMP_CODE:
+ ret = bgp_flowspec_op_decode(BGP_FLOWSPEC_VALIDATE_ONLY,
+ nlri_content+offset,
+ len - offset,
+ NULL, &error);
+ break;
+ case FLOWSPEC_TCP_FLAGS:
+ ret = bgp_flowspec_tcpflags_decode(
+ BGP_FLOWSPEC_VALIDATE_ONLY,
+ nlri_content+offset,
+ len - offset,
+ NULL, &error);
+ break;
+ case FLOWSPEC_PKT_LEN:
+ case FLOWSPEC_DSCP:
+ ret = bgp_flowspec_op_decode(
+ BGP_FLOWSPEC_VALIDATE_ONLY,
+ nlri_content + offset,
+ len - offset, NULL,
+ &error);
+ break;
+ case FLOWSPEC_FRAGMENT:
+ ret = bgp_flowspec_fragment_type_decode(
+ BGP_FLOWSPEC_VALIDATE_ONLY,
+ nlri_content + offset,
+ len - offset, NULL,
+ &error);
+ break;
+ default:
+ error = -1;
+ break;
+ }
+ offset += ret;
+ }
+ return false;
+}
/*
* handle the flowspec address src/dst or generic address NLRI
@@ -122,9 +226,12 @@ int bgp_flowspec_op_decode(enum bgp_flowspec_util_nlri_t type,
uint32_t offset = 0;
int len_string = BGP_FLOWSPEC_STRING_DISPLAY_MAX;
int len_written;
+ struct bgp_pbr_match_val *mval = (struct bgp_pbr_match_val *)result;
*error = 0;
do {
+ if (loop > BGP_PBR_MATCH_VAL_MAX)
+ *error = -2;
hex2bin(&nlri_ptr[offset], op);
offset++;
len = 2*op[2]+op[3];
@@ -168,7 +275,24 @@ int bgp_flowspec_op_decode(enum bgp_flowspec_util_nlri_t type,
ptr += len_written;
break;
case BGP_FLOWSPEC_CONVERT_TO_NON_OPAQUE:
- /* TODO : FS OPAQUE */
+ /* limitation: stop converting */
+ if (*error == -2)
+ break;
+ mval->value = value;
+ if (op[5] == 1)
+ mval->compare_operator |=
+ OPERATOR_COMPARE_LESS_THAN;
+ if (op[6] == 1)
+ mval->compare_operator |=
+ OPERATOR_COMPARE_GREATER_THAN;
+ if (op[7] == 1)
+ mval->compare_operator |=
+ OPERATOR_COMPARE_EQUAL_TO;
+ if (op[1] == 1)
+ mval->unary_operator = OPERATOR_UNARY_AND;
+ else
+ mval->unary_operator = OPERATOR_UNARY_OR;
+ mval++;
break;
case BGP_FLOWSPEC_VALIDATE_ONLY:
default:
@@ -203,12 +327,15 @@ int bgp_flowspec_tcpflags_decode(enum bgp_flowspec_util_nlri_t type,
int op[8];
int len, value_size, loop = 0, value;
char *ptr = (char *)result; /* for return_string */
+ struct bgp_pbr_match_val *mval = (struct bgp_pbr_match_val *)result;
uint32_t offset = 0;
int len_string = BGP_FLOWSPEC_STRING_DISPLAY_MAX;
int len_written;
*error = 0;
do {
+ if (loop > BGP_PBR_MATCH_VAL_MAX)
+ *error = -2;
hex2bin(&nlri_ptr[offset], op);
/* if first element, AND bit can not be set */
if (op[1] == 1 && loop == 0)
@@ -252,7 +379,29 @@ int bgp_flowspec_tcpflags_decode(enum bgp_flowspec_util_nlri_t type,
ptr += len_written;
break;
case BGP_FLOWSPEC_CONVERT_TO_NON_OPAQUE:
- /* TODO : FS OPAQUE */
+ /* limitation: stop converting */
+ if (*error == -2)
+ break;
+ mval->value = value;
+ if (op[6] == 1) {
+ /* different from */
+ mval->compare_operator |=
+ OPERATOR_COMPARE_LESS_THAN;
+ mval->compare_operator |=
+ OPERATOR_COMPARE_GREATER_THAN;
+ } else
+ mval->compare_operator |=
+ OPERATOR_COMPARE_EQUAL_TO;
+ if (op[7] == 1)
+ mval->compare_operator |=
+ OPERATOR_COMPARE_EXACT_MATCH;
+ if (op[1] == 1)
+ mval->unary_operator =
+ OPERATOR_UNARY_AND;
+ else
+ mval->unary_operator =
+ OPERATOR_UNARY_OR;
+ mval++;
break;
case BGP_FLOWSPEC_VALIDATE_ONLY:
default:
@@ -284,6 +433,8 @@ int bgp_flowspec_fragment_type_decode(enum bgp_flowspec_util_nlri_t type,
int op[8];
int len, value, value_size, loop = 0;
char *ptr = (char *)result; /* for return_string */
+ struct bgp_pbr_fragment_val *mval =
+ (struct bgp_pbr_fragment_val *)result;
uint32_t offset = 0;
int len_string = BGP_FLOWSPEC_STRING_DISPLAY_MAX;
int len_written;
@@ -340,7 +491,7 @@ int bgp_flowspec_fragment_type_decode(enum bgp_flowspec_util_nlri_t type,
}
break;
case BGP_FLOWSPEC_CONVERT_TO_NON_OPAQUE:
- /* TODO : FS OPAQUE */
+ mval->bitmask = (uint8_t)value;
break;
case BGP_FLOWSPEC_VALIDATE_ONLY:
default:
@@ -354,89 +505,158 @@ int bgp_flowspec_fragment_type_decode(enum bgp_flowspec_util_nlri_t type,
return offset;
}
-
-static bool bgp_flowspec_contains_prefix(struct prefix *pfs,
- struct prefix *input,
- int prefix_check)
+int bgp_flowspec_match_rules_fill(uint8_t *nlri_content, int len,
+ struct bgp_pbr_entry_main *bpem)
{
- uint32_t offset = 0;
- int type;
- int ret = 0, error = 0;
- uint8_t *nlri_content = (uint8_t *)pfs->u.prefix_flowspec.ptr;
- size_t len = pfs->u.prefix_flowspec.prefixlen;
- struct prefix compare;
+ int offset = 0, error = 0;
+ struct prefix *prefix;
+ struct bgp_pbr_match_val *mval;
+ uint8_t *match_num;
+ uint8_t bitmask = 0;
+ int ret = 0, type;
- error = 0;
- while (offset < len-1 && error >= 0) {
+ while (offset < len - 1 && error >= 0) {
type = nlri_content[offset];
offset++;
switch (type) {
case FLOWSPEC_DEST_PREFIX:
case FLOWSPEC_SRC_PREFIX:
- memset(&compare, 0, sizeof(struct prefix));
+ bitmask = 0;
+ if (type == FLOWSPEC_DEST_PREFIX) {
+ bitmask |= PREFIX_DST_PRESENT;
+ prefix = &bpem->dst_prefix;
+ } else {
+ bitmask |= PREFIX_SRC_PRESENT;
+ prefix = &bpem->src_prefix;
+ }
ret = bgp_flowspec_ip_address(
BGP_FLOWSPEC_CONVERT_TO_NON_OPAQUE,
- nlri_content+offset,
+ nlri_content + offset,
len - offset,
- &compare, &error);
- if (ret <= 0)
- break;
- if (prefix_check &&
- compare.prefixlen != input->prefixlen)
- break;
- if (compare.family != input->family)
- break;
- if ((input->family == AF_INET) &&
- IPV4_ADDR_SAME(&input->u.prefix4,
- &compare.u.prefix4))
- return true;
- if ((input->family == AF_INET6) &&
- IPV6_ADDR_SAME(&input->u.prefix6.s6_addr,
- &compare.u.prefix6.s6_addr))
- return true;
+ prefix, &error);
+ if (error < 0)
+ zlog_err("%s: flowspec_ip_address error %d",
+ __func__, error);
+ else
+ bpem->match_bitmask |= bitmask;
+ offset += ret;
break;
case FLOWSPEC_IP_PROTOCOL:
+ match_num = &(bpem->match_protocol_num);
+ mval = (struct bgp_pbr_match_val *)
+ &(bpem->protocol);
+ offset += bgp_flowspec_call_non_opaque_decode(
+ nlri_content + offset,
+ len - offset,
+ mval, match_num,
+ &error);
+ break;
case FLOWSPEC_PORT:
+ match_num = &(bpem->match_port_num);
+ mval = (struct bgp_pbr_match_val *)
+ &(bpem->port);
+ offset += bgp_flowspec_call_non_opaque_decode(
+ nlri_content + offset,
+ len - offset,
+ mval, match_num,
+ &error);
+ break;
case FLOWSPEC_DEST_PORT:
+ match_num = &(bpem->match_dst_port_num);
+ mval = (struct bgp_pbr_match_val *)
+ &(bpem->dst_port);
+ offset += bgp_flowspec_call_non_opaque_decode(
+ nlri_content + offset,
+ len - offset,
+ mval, match_num,
+ &error);
+ break;
case FLOWSPEC_SRC_PORT:
+ match_num = &(bpem->match_src_port_num);
+ mval = (struct bgp_pbr_match_val *)
+ &(bpem->src_port);
+ offset += bgp_flowspec_call_non_opaque_decode(
+ nlri_content + offset,
+ len - offset,
+ mval, match_num,
+ &error);
+ break;
case FLOWSPEC_ICMP_TYPE:
- case FLOWSPEC_ICMP_CODE:
- ret = bgp_flowspec_op_decode(BGP_FLOWSPEC_VALIDATE_ONLY,
- nlri_content+offset,
- len - offset,
- NULL, &error);
+ match_num = &(bpem->match_icmp_type_num);
+ mval = (struct bgp_pbr_match_val *)
+ &(bpem->icmp_type);
+ offset += bgp_flowspec_call_non_opaque_decode(
+ nlri_content + offset,
+ len - offset,
+ mval, match_num,
+ &error);
break;
- case FLOWSPEC_TCP_FLAGS:
- ret = bgp_flowspec_tcpflags_decode(
- BGP_FLOWSPEC_VALIDATE_ONLY,
- nlri_content+offset,
- len - offset,
- NULL, &error);
+ case FLOWSPEC_ICMP_CODE:
+ match_num = &(bpem->match_icmp_code_num);
+ mval = (struct bgp_pbr_match_val *)
+ &(bpem->icmp_code);
+ offset += bgp_flowspec_call_non_opaque_decode(
+ nlri_content + offset,
+ len - offset,
+ mval, match_num,
+ &error);
break;
case FLOWSPEC_PKT_LEN:
+ match_num =
+ &(bpem->match_packet_length_num);
+ mval = (struct bgp_pbr_match_val *)
+ &(bpem->packet_length);
+ offset += bgp_flowspec_call_non_opaque_decode(
+ nlri_content + offset,
+ len - offset,
+ mval, match_num,
+ &error);
+ break;
case FLOWSPEC_DSCP:
- ret = bgp_flowspec_op_decode(
- BGP_FLOWSPEC_VALIDATE_ONLY,
- nlri_content + offset,
- len - offset, NULL,
- &error);
+ match_num = &(bpem->match_dscp_num);
+ mval = (struct bgp_pbr_match_val *)
+ &(bpem->dscp);
+ offset += bgp_flowspec_call_non_opaque_decode(
+ nlri_content + offset,
+ len - offset,
+ mval, match_num,
+ &error);
+ break;
+ case FLOWSPEC_TCP_FLAGS:
+ ret = bgp_flowspec_tcpflags_decode(
+ BGP_FLOWSPEC_CONVERT_TO_NON_OPAQUE,
+ nlri_content + offset,
+ len - offset,
+ &bpem->tcpflags, &error);
+ if (error < 0)
+ zlog_err("%s: flowspec_tcpflags_decode error %d",
+ __func__, error);
+ else
+ bpem->match_tcpflags_num = error;
+ /* contains the number of slots used */
+ offset += ret;
break;
case FLOWSPEC_FRAGMENT:
ret = bgp_flowspec_fragment_type_decode(
- BGP_FLOWSPEC_VALIDATE_ONLY,
- nlri_content + offset,
- len - offset, NULL,
- &error);
+ BGP_FLOWSPEC_CONVERT_TO_NON_OPAQUE,
+ nlri_content + offset,
+ len - offset, &bpem->fragment,
+ &error);
+ if (error < 0)
+ zlog_err("%s: flowspec_fragment_type_decode error %d",
+ __func__, error);
+ else
+ bpem->match_bitmask |= FRAGMENT_PRESENT;
+ offset += ret;
break;
default:
- error = -1;
- break;
+ zlog_err("%s: unknown type %d\n", __func__, type);
}
- offset += ret;
}
- return false;
+ return error;
}
+
struct bgp_node *bgp_flowspec_get_match_per_ip(afi_t afi,
struct bgp_table *rib,
struct prefix *match,
diff --git a/bgpd/bgp_flowspec_util.h b/bgpd/bgp_flowspec_util.h
index aa21461102..e4454ab4db 100644
--- a/bgpd/bgp_flowspec_util.h
+++ b/bgpd/bgp_flowspec_util.h
@@ -50,6 +50,9 @@ extern int bgp_flowspec_fragment_type_decode(enum bgp_flowspec_util_nlri_t type,
uint8_t *nlri_ptr,
uint32_t max_len,
void *result, int *error);
+struct bgp_pbr_entry_main;
+extern int bgp_flowspec_match_rules_fill(uint8_t *nlri_content, int len,
+ struct bgp_pbr_entry_main *bpem);
extern struct bgp_node *bgp_flowspec_get_match_per_ip(afi_t afi,
struct bgp_table *rib,
diff --git a/bgpd/bgp_label.h b/bgpd/bgp_label.h
index 01bf8b372b..2b2525dd0e 100644
--- a/bgpd/bgp_label.h
+++ b/bgpd/bgp_label.h
@@ -24,6 +24,7 @@
#define BGP_LABEL_BYTES 3
#define BGP_LABEL_BITS 24
#define BGP_WITHDRAW_LABEL 0x800000
+#define BGP_PREVENT_VRF_2_VRF_LEAK 0xFFFFFFFE
struct bgp_node;
struct bgp_info;
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index d0e881f854..64d12cf607 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -87,6 +87,10 @@ void encode_label(mpls_label_t label, mpls_label_t *label_pnt)
uint8_t *pnt = (uint8_t *)label_pnt;
if (pnt == NULL)
return;
+ if (label == BGP_PREVENT_VRF_2_VRF_LEAK) {
+ *label_pnt = label;
+ return;
+ }
*pnt++ = (label >> 12) & 0xff;
*pnt++ = (label >> 4) & 0xff;
*pnt++ = ((label << 4) + 1) & 0xff; /* S=1 */
@@ -640,8 +644,8 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
char *s = ecommunity_ecom2str(info_vrf->attr->ecommunity,
ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
- zlog_debug("%s: info_vrf->type=%d, EC{%s}", __func__,
- info_vrf->type, s);
+ zlog_debug("%s: %s info_vrf->type=%d, EC{%s}", __func__,
+ bgp_vrf->name, info_vrf->type, s);
XFREE(MTYPE_ECOMMUNITY_STR, s);
}
@@ -654,14 +658,15 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
return;
}
- /* loop check */
- if (info_vrf->extra && info_vrf->extra->bgp_orig == bgp_vpn)
+ /* loop check - should not be an imported route. */
+ if (info_vrf->extra && info_vrf->extra->bgp_orig)
return;
if (!vpn_leak_to_vpn_active(bgp_vrf, afi, &debugmsg)) {
if (debug)
- zlog_debug("%s: skipping: %s", __func__, debugmsg);
+ zlog_debug("%s: %s skipping: %s", __func__,
+ bgp_vrf->name, debugmsg);
return;
}
@@ -737,9 +742,9 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
/* if policy nexthop not set, use 0 */
if (CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_NEXTHOP_SET)) {
-
struct prefix *nexthop =
&bgp_vrf->vpn_policy[afi].tovpn_nexthop;
+
switch (nexthop->family) {
case AF_INET:
/* prevent mp_nexthop_global_in <- self in bgp_route.c
@@ -759,12 +764,38 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
assert(0);
}
} else {
- if (afi == AFI_IP) {
- /* For ipv4, copy to multiprotocol nexthop field */
- static_attr.mp_nexthop_global_in = static_attr.nexthop;
- static_attr.mp_nexthop_len = 4;
- /* XXX Leave static_attr.nexthop intact for NHT */
- static_attr.flag &= ~ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
+ if (!CHECK_FLAG(bgp_vrf->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_VRF_TO_VRF_EXPORT)) {
+ if (afi == AFI_IP) {
+ /*
+ * For ipv4, copy to multiprotocol
+ * nexthop field
+ */
+ static_attr.mp_nexthop_global_in =
+ static_attr.nexthop;
+ static_attr.mp_nexthop_len = 4;
+ /*
+ * XXX Leave static_attr.nexthop
+ * intact for NHT
+ */
+ static_attr.flag &=
+ ~ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
+ }
+ } else {
+ /* Update based on next-hop family to account for
+ * RFC 5549 (BGP unnumbered) scenario. Note that
+ * specific action is only needed for the case of
+ * IPv4 nexthops as the attr has been copied
+ * otherwise.
+ */
+ if (afi == AFI_IP &&
+ !BGP_ATTR_NEXTHOP_AFI_IP6(info_vrf->attr)) {
+ static_attr.mp_nexthop_global_in.s_addr =
+ static_attr.nexthop.s_addr;
+ static_attr.mp_nexthop_len = 4;
+ static_attr.flag |=
+ ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
+ }
}
nexthop_self_flag = 1;
}
@@ -838,14 +869,9 @@ void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn, /* to */
info_vrf->type, info_vrf->sub_type);
}
- if (info_vrf->type != ZEBRA_ROUTE_BGP) {
- if (debug)
- zlog_debug("%s: wrong type %d", __func__,
- info_vrf->type);
- return;
- }
if (info_vrf->sub_type != BGP_ROUTE_NORMAL
- && info_vrf->sub_type != BGP_ROUTE_STATIC) {
+ && info_vrf->sub_type != BGP_ROUTE_STATIC
+ && info_vrf->sub_type != BGP_ROUTE_REDISTRIBUTE) {
if (debug)
zlog_debug("%s: wrong sub_type %d", __func__,
@@ -999,6 +1025,7 @@ static void vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
int nexthop_self_flag = 1;
struct bgp_info *bi_ultimate = NULL;
int origin_local = 0;
+ struct bgp *src_vrf;
int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
@@ -1035,22 +1062,35 @@ static void vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
nexthop_orig.family = nhfamily;
switch (nhfamily) {
-
case AF_INET:
/* save */
nexthop_orig.u.prefix4 = info_vpn->attr->mp_nexthop_global_in;
nexthop_orig.prefixlen = 32;
+
+ if (CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
+ BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
+ static_attr.nexthop.s_addr =
+ nexthop_orig.u.prefix4.s_addr;
+
+ static_attr.mp_nexthop_global_in =
+ info_vpn->attr->mp_nexthop_global_in;
+ static_attr.mp_nexthop_len =
+ info_vpn->attr->mp_nexthop_len;
+ }
static_attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
break;
-
case AF_INET6:
/* save */
nexthop_orig.u.prefix6 = info_vpn->attr->mp_nexthop_global;
nexthop_orig.prefixlen = 128;
+
+ if (CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
+ BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
+ static_attr.mp_nexthop_global = nexthop_orig.u.prefix6;
+ }
break;
}
-
/*
* route map handling
*/
@@ -1100,28 +1140,34 @@ static void vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
* labels for these routes enables the non-labeled nexthops
* from the originating VRF to be considered valid for this route.
*/
+ if (!CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
+ BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
+ /* work back to original route */
+ for (bi_ultimate = info_vpn;
+ bi_ultimate->extra && bi_ultimate->extra->parent;
+ bi_ultimate = bi_ultimate->extra->parent)
+ ;
- /* work back to original route */
- for (bi_ultimate = info_vpn;
- bi_ultimate->extra && bi_ultimate->extra->parent;
- bi_ultimate = bi_ultimate->extra->parent)
- ;
-
- /* if original route was unicast, then it did not arrive over vpn */
- if (bi_ultimate->net) {
- struct bgp_table *table;
+ /*
+ * if original route was unicast,
+ * then it did not arrive over vpn
+ */
+ if (bi_ultimate->net) {
+ struct bgp_table *table;
- table = bgp_node_table(bi_ultimate->net);
- if (table && (table->safi == SAFI_UNICAST))
- origin_local = 1;
- }
+ table = bgp_node_table(bi_ultimate->net);
+ if (table && (table->safi == SAFI_UNICAST))
+ origin_local = 1;
+ }
- /* copy labels */
- if (!origin_local && info_vpn->extra && info_vpn->extra->num_labels) {
- num_labels = info_vpn->extra->num_labels;
- if (num_labels > BGP_MAX_LABELS)
- num_labels = BGP_MAX_LABELS;
- pLabels = info_vpn->extra->label;
+ /* copy labels */
+ if (!origin_local &&
+ info_vpn->extra && info_vpn->extra->num_labels) {
+ num_labels = info_vpn->extra->num_labels;
+ if (num_labels > BGP_MAX_LABELS)
+ num_labels = BGP_MAX_LABELS;
+ pLabels = info_vpn->extra->label;
+ }
}
if (debug) {
@@ -1131,10 +1177,19 @@ static void vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
num_labels);
}
+ /*
+ * For VRF-2-VRF route-leaking,
+ * the source will be the originating VRF.
+ */
+ if (info_vpn->extra && info_vpn->extra->bgp_orig)
+ src_vrf = info_vpn->extra->bgp_orig;
+ else
+ src_vrf = bgp_vpn;
+
leak_update(bgp_vrf, bn, new_attr, afi, safi, info_vpn,
pLabels, num_labels,
info_vpn, /* parent */
- bgp_vpn, &nexthop_orig, nexthop_self_flag, debug);
+ src_vrf, &nexthop_orig, nexthop_self_flag, debug);
}
void vpn_leak_to_vrf_update(struct bgp *bgp_vpn, /* from */
@@ -1249,18 +1304,17 @@ void vpn_leak_to_vrf_withdraw_all(struct bgp *bgp_vrf, /* to */
struct bgp_info *bi;
safi_t safi = SAFI_UNICAST;
int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
- struct bgp *bgp_vpn = bgp_get_default();
if (debug)
zlog_debug("%s: entry", __func__);
/*
- * Walk vrf table, delete bi with bgp_orig == bgp_vpn
+ * Walk vrf table, delete bi with bgp_orig in a different vrf
*/
for (bn = bgp_table_top(bgp_vrf->rib[afi][safi]); bn;
bn = bgp_route_next(bn)) {
for (bi = bn->info; bi; bi = bi->next) {
- if (bi->extra && bi->extra->bgp_orig == bgp_vpn) {
+ if (bi->extra && bi->extra->bgp_orig != bgp_vrf) {
/* delete route */
bgp_aggregate_decrement(bgp_vrf, &bn->p, bi,
@@ -1403,6 +1457,172 @@ void vpn_policy_routemap_event(const char *rmap_name)
vpn_policy_routemap_update(bgp, rmap_name);
}
+/*
+ * Enable VRF-to-VRF route leaking: have "to_bgp" import the unicast
+ * routes that "from_bgp" exports for the given afi/safi.
+ * Cross-references both instances' import/export name lists and, on the
+ * first export from "from_bgp", auto-derives its RD/RT and marks it as a
+ * VRF-to-VRF exporter.
+ */
+void vrf_import_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
+			 afi_t afi, safi_t safi)
+{
+	const char *export_name;
+	vpn_policy_direction_t idir, edir;
+	char *vname;
+	char buf[1000];
+	struct ecommunity *ecom;
+	bool first_export = false;
+
+	export_name = to_bgp->name ? to_bgp->name : BGP_DEFAULT_NAME;
+	idir = BGP_VPN_POLICY_DIR_FROMVPN;
+	edir = BGP_VPN_POLICY_DIR_TOVPN;
+
+	/*
+	 * Cross-ref both VRFs. Also, note if this is the first time
+	 * any VRF is importing from "import_vrf".
+	 */
+	vname = (from_bgp->name ? XSTRDUP(MTYPE_TMP, from_bgp->name)
+			       : XSTRDUP(MTYPE_TMP, BGP_DEFAULT_NAME));
+
+	listnode_add(to_bgp->vpn_policy[afi].import_vrf, vname);
+
+	/* empty export list => nobody imported from "from_bgp" yet */
+	if (!listcount(from_bgp->vpn_policy[afi].export_vrf))
+		first_export = true;
+	vname = XSTRDUP(MTYPE_TMP, export_name);
+	listnode_add(from_bgp->vpn_policy[afi].export_vrf, vname);
+
+	/* Update import RT for current VRF using export RT of the VRF we're
+	 * importing from. First though, make sure "import_vrf" has that
+	 * set.
+	 */
+	if (first_export) {
+		form_auto_rd(from_bgp->router_id, from_bgp->vrf_rd_id,
+			     &from_bgp->vrf_prd_auto);
+		from_bgp->vpn_policy[afi].tovpn_rd = from_bgp->vrf_prd_auto;
+		SET_FLAG(from_bgp->vpn_policy[afi].flags,
+			 BGP_VPN_POLICY_TOVPN_RD_SET);
+		/* export RT is derived from the auto RD string */
+		prefix_rd2str(&from_bgp->vpn_policy[afi].tovpn_rd,
+			      buf, sizeof(buf));
+		from_bgp->vpn_policy[afi].rtlist[edir] =
+			ecommunity_str2com(buf, ECOMMUNITY_ROUTE_TARGET, 0);
+		SET_FLAG(from_bgp->af_flags[afi][safi],
+			 BGP_CONFIG_VRF_TO_VRF_EXPORT);
+		/* sentinel label: block re-leaking of leaked routes */
+		from_bgp->vpn_policy[afi].tovpn_label =
+			BGP_PREVENT_VRF_2_VRF_LEAK;
+	}
+	/* merge "from_bgp"'s export RT into our import RT list */
+	ecom = from_bgp->vpn_policy[afi].rtlist[edir];
+	if (to_bgp->vpn_policy[afi].rtlist[idir])
+		to_bgp->vpn_policy[afi].rtlist[idir] =
+			ecommunity_merge(to_bgp->vpn_policy[afi]
+					 .rtlist[idir], ecom);
+	else
+		to_bgp->vpn_policy[afi].rtlist[idir] = ecommunity_dup(ecom);
+	SET_FLAG(to_bgp->af_flags[afi][safi], BGP_CONFIG_VRF_TO_VRF_IMPORT);
+
+	/* Does "import_vrf" first need to export its routes or that
+	 * is already done and we just need to import those routes
+	 * from the global table?
+	 */
+	if (first_export)
+		vpn_leak_postchange(edir, afi, bgp_get_default(), from_bgp);
+	else
+		vpn_leak_postchange(idir, afi, bgp_get_default(), to_bgp);
+}
+
+/*
+ * Undo vrf_import_from_vrf(): stop "to_bgp" importing from "from_bgp".
+ * Removes the cross-references, withdraws the leaked routes, and tears
+ * down "from_bgp"'s auto RD/RT once no VRF imports from it any more.
+ */
+void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
+			   afi_t afi, safi_t safi)
+{
+	const char *export_name, *tmp_name;
+	vpn_policy_direction_t idir, edir;
+	char *vname;
+	struct ecommunity *ecom;
+	struct listnode *node;
+
+	export_name = to_bgp->name ? to_bgp->name : BGP_DEFAULT_NAME;
+	tmp_name = from_bgp->name ? from_bgp->name : BGP_DEFAULT_NAME;
+	idir = BGP_VPN_POLICY_DIR_FROMVPN;
+	edir = BGP_VPN_POLICY_DIR_TOVPN;
+
+	/* Were we importing from "import_vrf"?  Note that
+	 * ALL_LIST_ELEMENTS_RO leaves vname NULL when the walk completes
+	 * without a break, which the check below relies on.
+	 */
+	for (ALL_LIST_ELEMENTS_RO(to_bgp->vpn_policy[afi].import_vrf, node,
+				  vname)) {
+		if (strcmp(vname, tmp_name) == 0)
+			break;
+	}
+
+	/*
+	 * We do not check in the cli if the passed in bgp
+	 * instance is actually imported into us before
+	 * we call this function. As such if we do not
+	 * find this in the import_vrf list than
+	 * we just need to return safely.
+	 */
+	if (!vname)
+		return;
+
+	/* Remove "import_vrf" from our import list. */
+	listnode_delete(to_bgp->vpn_policy[afi].import_vrf, vname);
+	XFREE(MTYPE_TMP, vname);
+
+	/* Remove routes imported from "import_vrf". */
+	/* TODO: In the current logic, we have to first remove all
+	 * imported routes and then (if needed) import back routes
+	 */
+	vpn_leak_prechange(idir, afi, bgp_get_default(), to_bgp);
+
+	if (to_bgp->vpn_policy[afi].import_vrf->count == 0) {
+		/* last import source gone: drop import state entirely */
+		UNSET_FLAG(to_bgp->af_flags[afi][safi],
+			   BGP_CONFIG_VRF_TO_VRF_IMPORT);
+		ecommunity_free(&to_bgp->vpn_policy[afi].rtlist[idir]);
+	} else {
+		/* still importing from other VRFs: only drop the RT
+		 * contributed by "from_bgp", then re-import the rest
+		 */
+		ecom = from_bgp->vpn_policy[afi].rtlist[edir];
+		ecommunity_del_val(to_bgp->vpn_policy[afi].rtlist[idir],
+				   (struct ecommunity_val *)ecom->val);
+		vpn_leak_postchange(idir, afi, bgp_get_default(), to_bgp);
+	}
+
+	/*
+	 * Static analysis worries that export_vrf may be NULL because the
+	 * ALL_LIST_ELEMENTS_RO walk below tolerates it; assert so any
+	 * broken invariant fails loudly instead of crashing in
+	 * listcount() further down.
+	 */
+	assert(from_bgp->vpn_policy[afi].export_vrf);
+
+	/* Remove us from "import_vrf's" export list. If no other VRF
+	 * is importing from "import_vrf", cleanup appropriately.
+	 */
+	for (ALL_LIST_ELEMENTS_RO(from_bgp->vpn_policy[afi].export_vrf,
+				  node, vname)) {
+		if (strcmp(vname, export_name) == 0)
+			break;
+	}
+
+	/*
+	 * If we have gotten to this point then the vname must
+	 * exist. If not, we are in a world of trouble and
+	 * have slag sitting around.
+	 *
+	 * import_vrf and export_vrf must match in having
+	 * the in/out names as appropriate.
+	 */
+	assert(vname);
+
+	listnode_delete(from_bgp->vpn_policy[afi].export_vrf, vname);
+	XFREE(MTYPE_TMP, vname);
+
+	/* nobody imports from "from_bgp" any more: undo first_export */
+	if (!listcount(from_bgp->vpn_policy[afi].export_vrf)) {
+		vpn_leak_prechange(edir, afi, bgp_get_default(), from_bgp);
+		ecommunity_free(&from_bgp->vpn_policy[afi].rtlist[edir]);
+		UNSET_FLAG(from_bgp->af_flags[afi][safi],
+			   BGP_CONFIG_VRF_TO_VRF_EXPORT);
+		memset(&from_bgp->vpn_policy[afi].tovpn_rd, 0,
+		       sizeof(struct prefix_rd));
+		UNSET_FLAG(from_bgp->vpn_policy[afi].flags,
+			   BGP_VPN_POLICY_TOVPN_RD_SET);
+		from_bgp->vpn_policy[afi].tovpn_label = MPLS_LABEL_NONE;
+
+	}
+}
+
/* For testing purpose, static route of MPLS-VPN. */
DEFUN (vpnv4_network,
vpnv4_network_cmd,
diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index c13030c6c8..384108dc0c 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -78,6 +78,10 @@ extern void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn,
extern void vpn_leak_zebra_vrf_label_update(struct bgp *bgp, afi_t afi);
extern void vpn_leak_zebra_vrf_label_withdraw(struct bgp *bgp, afi_t afi);
extern int vpn_leak_label_callback(mpls_label_t label, void *lblid, bool alloc);
+extern void vrf_import_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
+ afi_t afi, safi_t safi);
+void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
+ afi_t afi, safi_t safi);
static inline int vpn_leak_to_vpn_active(struct bgp *bgp_vrf, afi_t afi,
const char **pmsg)
@@ -92,7 +96,9 @@ static inline int vpn_leak_to_vpn_active(struct bgp *bgp_vrf, afi_t afi,
/* Is vrf configured to export to vpn? */
if (!CHECK_FLAG(bgp_vrf->af_flags[afi][SAFI_UNICAST],
- BGP_CONFIG_VRF_TO_MPLSVPN_EXPORT)) {
+ BGP_CONFIG_VRF_TO_MPLSVPN_EXPORT)
+ && !CHECK_FLAG(bgp_vrf->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_VRF_TO_VRF_EXPORT)) {
if (pmsg)
*pmsg = "export not set";
return 0;
@@ -147,7 +153,9 @@ static inline int vpn_leak_from_vpn_active(struct bgp *bgp_vrf, afi_t afi,
/* Is vrf configured to import from vpn? */
if (!CHECK_FLAG(bgp_vrf->af_flags[afi][SAFI_UNICAST],
- BGP_CONFIG_MPLSVPN_TO_VRF_IMPORT)) {
+ BGP_CONFIG_MPLSVPN_TO_VRF_IMPORT)
+ && !CHECK_FLAG(bgp_vrf->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
if (pmsg)
*pmsg = "import not set";
return 0;
diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h
index 519f092762..a771bead23 100644
--- a/bgpd/bgp_nexthop.h
+++ b/bgpd/bgp_nexthop.h
@@ -29,6 +29,7 @@
(((nexthop_len) == 4 || (nexthop_len) == 12 \
? AF_INET \
: ((nexthop_len) == 16 || (nexthop_len) == 24 \
+ || (nexthop_len) == 32 \
|| (nexthop_len) == 48 \
? AF_INET6 \
: AF_UNSPEC)))
diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c
new file mode 100644
index 0000000000..04d6314fd7
--- /dev/null
+++ b/bgpd/bgp_pbr.c
@@ -0,0 +1,1140 @@
+/*
+ * BGP pbr
+ * Copyright (C) 6WIND
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "zebra.h"
+#include "prefix.h"
+#include "zclient.h"
+#include "jhash.h"
+
+#include "bgpd/bgpd.h"
+#include "bgpd/bgp_pbr.h"
+#include "bgpd/bgp_debug.h"
+#include "bgpd/bgp_flowspec_util.h"
+#include "bgpd/bgp_ecommunity.h"
+#include "bgpd/bgp_route.h"
+#include "bgpd/bgp_attr.h"
+#include "bgpd/bgp_zebra.h"
+#include "bgpd/bgp_mplsvpn.h"
+
+DEFINE_MTYPE_STATIC(BGPD, PBR_MATCH_ENTRY, "PBR match entry")
+DEFINE_MTYPE_STATIC(BGPD, PBR_MATCH, "PBR match")
+DEFINE_MTYPE_STATIC(BGPD, PBR_ACTION, "PBR action")
+
+/* monotonically increasing generators for non-zero "unique" ids;
+ * 0 is reserved to mean "not yet assigned"
+ */
+static int bgp_pbr_match_counter_unique;
+static int bgp_pbr_match_entry_counter_unique;
+static int bgp_pbr_action_counter_unique;
+static int bgp_pbr_match_iptable_counter_unique;
+
+/* context structs passed to hash_walk() callbacks below:
+ * input is the unique id to search, output the entry found (or NULL)
+ */
+struct bgp_pbr_match_iptable_unique {
+	uint32_t unique;
+	struct bgp_pbr_match *bpm_found;
+};
+
+struct bgp_pbr_match_entry_unique {
+	uint32_t unique;
+	struct bgp_pbr_match_entry *bpme_found;
+};
+
+struct bgp_pbr_action_unique {
+	uint32_t unique;
+	struct bgp_pbr_action *bpa_found;
+};
+
+/* hash_walk cb: find the action whose unique id matches the context */
+static int bgp_pbr_action_walkcb(struct hash_backet *backet, void *arg)
+{
+	struct bgp_pbr_action *bpa = (struct bgp_pbr_action *)backet->data;
+	struct bgp_pbr_action_unique *bpau = (struct bgp_pbr_action_unique *)
+		arg;
+	uint32_t unique = bpau->unique;
+
+	if (bpa->unique == unique) {
+		bpau->bpa_found = bpa;
+		return HASHWALK_ABORT;
+	}
+	return HASHWALK_CONTINUE;
+}
+
+/* hash_walk cb: find the ipset entry whose unique id matches */
+static int bgp_pbr_match_entry_walkcb(struct hash_backet *backet, void *arg)
+{
+	struct bgp_pbr_match_entry *bpme =
+		(struct bgp_pbr_match_entry *)backet->data;
+	struct bgp_pbr_match_entry_unique *bpmeu =
+		(struct bgp_pbr_match_entry_unique *)arg;
+	uint32_t unique = bpmeu->unique;
+
+	if (bpme->unique == unique) {
+		bpmeu->bpme_found = bpme;
+		return HASHWALK_ABORT;
+	}
+	return HASHWALK_CONTINUE;
+}
+
+/* walk context: look a match structure up by its ipset name */
+struct bgp_pbr_match_ipsetname {
+	char *ipsetname;
+	struct bgp_pbr_match *bpm_found;
+};
+
+/* hash_walk cb: find the match whose ipset name matches the context */
+static int bgp_pbr_match_pername_walkcb(struct hash_backet *backet, void *arg)
+{
+	struct bgp_pbr_match *bpm = (struct bgp_pbr_match *)backet->data;
+	struct bgp_pbr_match_ipsetname *bpmi =
+		(struct bgp_pbr_match_ipsetname *)arg;
+	char *ipset_name = bpmi->ipsetname;
+
+	if (!strncmp(ipset_name, bpm->ipset_name,
+		     ZEBRA_IPSET_NAME_SIZE)) {
+		bpmi->bpm_found = bpm;
+		return HASHWALK_ABORT;
+	}
+	return HASHWALK_CONTINUE;
+}
+
+/* hash_walk cb: find the match whose iptable id (unique2) matches */
+static int bgp_pbr_match_iptable_walkcb(struct hash_backet *backet, void *arg)
+{
+	struct bgp_pbr_match *bpm = (struct bgp_pbr_match *)backet->data;
+	struct bgp_pbr_match_iptable_unique *bpmiu =
+		(struct bgp_pbr_match_iptable_unique *)arg;
+	uint32_t unique = bpmiu->unique;
+
+	if (bpm->unique2 == unique) {
+		bpmiu->bpm_found = bpm;
+		return HASHWALK_ABORT;
+	}
+	return HASHWALK_CONTINUE;
+}
+
+/* walk context: look a match structure up by its ipset unique id */
+struct bgp_pbr_match_unique {
+	uint32_t unique;
+	struct bgp_pbr_match *bpm_found;
+};
+
+/* hash_walk cb: find the match whose ipset id (unique) matches */
+static int bgp_pbr_match_walkcb(struct hash_backet *backet, void *arg)
+{
+	struct bgp_pbr_match *bpm = (struct bgp_pbr_match *)backet->data;
+	struct bgp_pbr_match_unique *bpmu = (struct bgp_pbr_match_unique *)
+		arg;
+	uint32_t unique = bpmu->unique;
+
+	if (bpm->unique == unique) {
+		bpmu->bpm_found = bpm;
+		return HASHWALK_ABORT;
+	}
+	return HASHWALK_CONTINUE;
+}
+
+/*
+ * Format one match value (unary/compare operators + value) into str.
+ * prepend, when non-NULL, is emitted instead of the unary-operator
+ * prefix.  Returns the number of characters written.
+ * NOTE(review): writes are unbounded sprintf() into the caller's
+ * buffer -- callers must size their buffers generously; confirm.
+ */
+static int sprintf_bgp_pbr_match_val(char *str, struct bgp_pbr_match_val *mval,
+				     const char *prepend)
+{
+	char *ptr = str;
+
+	if (prepend)
+		ptr += sprintf(ptr, "%s", prepend);
+	else {
+		if (mval->unary_operator & OPERATOR_UNARY_OR)
+			ptr += sprintf(ptr, ", or ");
+		if (mval->unary_operator & OPERATOR_UNARY_AND)
+			ptr += sprintf(ptr, ", and ");
+	}
+	if (mval->compare_operator & OPERATOR_COMPARE_LESS_THAN)
+		ptr += sprintf(ptr, "<");
+	if (mval->compare_operator & OPERATOR_COMPARE_GREATER_THAN)
+		ptr += sprintf(ptr, ">");
+	if (mval->compare_operator & OPERATOR_COMPARE_EQUAL_TO)
+		ptr += sprintf(ptr, "=");
+	if (mval->compare_operator & OPERATOR_COMPARE_EXACT_MATCH)
+		ptr += sprintf(ptr, "match");
+	ptr += sprintf(ptr, " %u", mval->value);
+	return (int)(ptr - str);
+}
+
+/* emit a "; " separator before every item except the first */
+#define INCREMENT_DISPLAY(_ptr, _cnt) do { \
+		if (_cnt) \
+			(_ptr) += sprintf((_ptr), "; "); \
+		_cnt++; \
+	} while (0)
+
+/* return 1 if OK, 0 if validation should stop */
+static int bgp_pbr_validate_policy_route(struct bgp_pbr_entry_main *api)
+{
+	/* because bgp pbr entry may contain unsupported
+	 * combinations, a message will be displayed here if
+	 * not supported.
+	 * for now, only match/set supported is
+	 * - combination src/dst => redirect nexthop [ + rate]
+	 * - combination src/dst => redirect VRF [ + rate]
+	 * - combination src/dst => drop
+	 */
+	/* reject any match criterion beyond src/dst prefix
+	 * (icmp_code was previously checked as icmp_type twice,
+	 * leaving icmp-code-only rules unvalidated)
+	 */
+	if (api->match_src_port_num || api->match_dst_port_num
+	    || api->match_port_num || api->match_protocol_num
+	    || api->match_icmp_type_num || api->match_icmp_code_num
+	    || api->match_packet_length_num || api->match_dscp_num
+	    || api->match_tcpflags_num) {
+		if (BGP_DEBUG(pbr, PBR)) {
+			bgp_pbr_print_policy_route(api);
+			zlog_debug("BGP: some SET actions not supported by Zebra. ignoring.");
+		}
+		return 0;
+	}
+	/* at least one of src/dst prefix must be present */
+	if (!(api->match_bitmask & PREFIX_SRC_PRESENT) &&
+	    !(api->match_bitmask & PREFIX_DST_PRESENT)) {
+		if (BGP_DEBUG(pbr, PBR)) {
+			bgp_pbr_print_policy_route(api);
+			zlog_debug("BGP: match actions without src"
+				 " or dst address can not operate."
+				 " ignoring.");
+		}
+		return 0;
+	}
+	return 1;
+}
+
+/*
+ * Build a PBR entry (api) from a flowspec prefix and its path info:
+ * match rules come from the flowspec NLRI, actions from the extended
+ * community list attached to the route.
+ * return -1 if build or validation failed, 0 otherwise
+ */
+static int bgp_pbr_build_and_validate_entry(struct prefix *p,
+					    struct bgp_info *info,
+					    struct bgp_pbr_entry_main *api)
+{
+	int ret;
+	int i, action_count = 0;
+	struct ecommunity *ecom;
+	struct ecommunity_val *ecom_eval;
+	struct bgp_pbr_entry_action *api_action;
+	struct prefix *src = NULL, *dst = NULL;
+	int valid_prefix = 0;
+	afi_t afi = AFI_IP;
+
+	/* extract match from flowspec entries */
+	ret = bgp_flowspec_match_rules_fill((uint8_t *)p->u.prefix_flowspec.ptr,
+					    p->u.prefix_flowspec.prefixlen, api);
+	if (ret < 0)
+		return -1;
+	/* extract actions from flowspec ecom list */
+	if (info && info->attr && info->attr->ecommunity) {
+		ecom = info->attr->ecommunity;
+		for (i = 0; i < ecom->size; i++) {
+			/* ecom->val is a flat byte array: advance by
+			 * ECOMMUNITY_SIZE bytes *before* casting; casting
+			 * first would scale the offset by
+			 * sizeof(struct ecommunity_val)
+			 */
+			ecom_eval = (struct ecommunity_val *)
+				(ecom->val + (i * ECOMMUNITY_SIZE));
+
+			/* bounds check before writing actions[action_count] */
+			if (action_count >= ACTIONS_MAX_NUM) {
+				if (BGP_DEBUG(pbr, PBR_ERROR))
+					zlog_err("%s: flowspec actions exceeds limit (max %u)",
+						 __func__, ACTIONS_MAX_NUM);
+				break;
+			}
+			api_action = &api->actions[action_count];
+
+			/* redirect-to-VRF: resolve the route-target key
+			 * to the first VRF importing it
+			 */
+			if ((ecom_eval->val[1] ==
+			     (char)ECOMMUNITY_REDIRECT_VRF) &&
+			    (ecom_eval->val[0] ==
+			     (char)ECOMMUNITY_ENCODE_TRANS_EXP ||
+			     ecom_eval->val[0] ==
+			     (char)ECOMMUNITY_EXTENDED_COMMUNITY_PART_2 ||
+			     ecom_eval->val[0] ==
+			     (char)ECOMMUNITY_EXTENDED_COMMUNITY_PART_3)) {
+				struct ecommunity *eckey = ecommunity_new();
+				struct ecommunity_val ecom_copy;
+
+				memcpy(&ecom_copy, ecom_eval,
+				       sizeof(struct ecommunity_val));
+				ecom_copy.val[0] &=
+					~ECOMMUNITY_ENCODE_TRANS_EXP;
+				ecom_copy.val[1] = ECOMMUNITY_ROUTE_TARGET;
+				ecommunity_add_val(eckey, &ecom_copy);
+
+				api_action->action = ACTION_REDIRECT;
+				api_action->u.redirect_vrf =
+					get_first_vrf_for_redirect_with_rt(
+								eckey);
+				ecommunity_free(&eckey);
+			} else if ((ecom_eval->val[0] ==
+				    (char)ECOMMUNITY_ENCODE_REDIRECT_IP_NH) &&
+				   (ecom_eval->val[1] ==
+				    (char)ECOMMUNITY_REDIRECT_IP_NH)) {
+				/* redirect-to-IP: nexthop comes from the
+				 * route's nexthop attribute
+				 */
+				api_action->action = ACTION_REDIRECT_IP;
+				api_action->u.zr.redirect_ip_v4.s_addr =
+					info->attr->nexthop.s_addr;
+				api_action->u.zr.duplicate = ecom_eval->val[7];
+			} else {
+				if (ecom_eval->val[0] !=
+				    (char)ECOMMUNITY_ENCODE_TRANS_EXP)
+					continue;
+				ret = ecommunity_fill_pbr_action(ecom_eval,
+								 api_action);
+				if (ret != 0)
+					continue;
+			}
+			/* advance to the next free action slot; previously
+			 * action_count was never incremented, so every
+			 * action overwrote actions[0] while action_num grew
+			 */
+			action_count++;
+			api->action_num = action_count;
+		}
+	}
+
+	/* validate if incoming match/action is compatible
+	 * with our policy routing engine
+	 */
+	if (!bgp_pbr_validate_policy_route(api))
+		return -1;
+
+	/* check inconsistency in the match rule */
+	if (api->match_bitmask & PREFIX_SRC_PRESENT) {
+		src = &api->src_prefix;
+		afi = family2afi(src->family);
+		valid_prefix = 1;
+	}
+	if (api->match_bitmask & PREFIX_DST_PRESENT) {
+		dst = &api->dst_prefix;
+		/* src and dst prefixes must share one address family */
+		if (valid_prefix && afi != family2afi(dst->family)) {
+			if (BGP_DEBUG(pbr, PBR)) {
+				bgp_pbr_print_policy_route(api);
+				zlog_debug("%s: inconsistency:"
+				     " no match for afi src and dst (%u/%u)",
+				     __func__, afi, family2afi(dst->family));
+			}
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/* hash_clean cb: uninstall an ipset entry from zebra and free it */
+static void bgp_pbr_match_entry_free(void *arg)
+{
+	struct bgp_pbr_match_entry *bpme;
+
+	bpme = (struct bgp_pbr_match_entry *)arg;
+
+	if (bpme->installed) {
+		bgp_send_pbr_ipset_entry_match(bpme, false);
+		bpme->installed = false;
+		bpme->backpointer = NULL;
+	}
+	XFREE(MTYPE_PBR_MATCH_ENTRY, bpme);
+}
+
+/* hash_clean cb: uninstall an ipset match (and its iptable rule)
+ * from zebra and free it together with its entry hash
+ */
+static void bgp_pbr_match_free(void *arg)
+{
+	struct bgp_pbr_match *bpm;
+
+	bpm = (struct bgp_pbr_match *)arg;
+
+	hash_clean(bpm->entry_hash, bgp_pbr_match_entry_free);
+
+	/* NOTE(review): entry_hash was just cleaned, so this count check
+	 * appears to always be true -- confirm intent
+	 */
+	if (hashcount(bpm->entry_hash) == 0) {
+		/* delete iptable entry first */
+		/* then delete ipset match */
+		if (bpm->installed) {
+			if (bpm->installed_in_iptable) {
+				bgp_send_pbr_iptable(bpm->action,
+						     bpm, false);
+				bpm->installed_in_iptable = false;
+				bpm->action->refcnt--;
+			}
+			bgp_send_pbr_ipset_match(bpm, false);
+			bpm->installed = false;
+			bpm->action = NULL;
+		}
+	}
+	hash_free(bpm->entry_hash);
+
+	XFREE(MTYPE_PBR_MATCH, bpm);
+}
+
+/* hash_get allocator: heap-copy the stack-built template match */
+static void *bgp_pbr_match_alloc_intern(void *arg)
+{
+	struct bgp_pbr_match *bpm, *new;
+
+	bpm = (struct bgp_pbr_match *)arg;
+
+	new = XCALLOC(MTYPE_PBR_MATCH, sizeof(*new));
+	memcpy(new, bpm, sizeof(*bpm));
+
+	return new;
+}
+
+/* hash_clean cb: withdraw an unreferenced action (ip rule + default
+ * route in its table) from zebra and free it
+ */
+static void bgp_pbr_action_free(void *arg)
+{
+	struct bgp_pbr_action *bpa;
+
+	bpa = (struct bgp_pbr_action *)arg;
+
+	if (bpa->refcnt == 0) {
+		/* table_id == 0 means a drop action: nothing installed */
+		if (bpa->installed && bpa->table_id != 0) {
+			bgp_send_pbr_rule_action(bpa, false);
+			bgp_zebra_announce_default(bpa->bgp, &(bpa->nh),
+						   AFI_IP,
+						   bpa->table_id,
+						   false);
+		}
+	}
+	XFREE(MTYPE_PBR_ACTION, bpa);
+}
+
+/* hash_get allocator: heap-copy the stack-built template action */
+static void *bgp_pbr_action_alloc_intern(void *arg)
+{
+	struct bgp_pbr_action *bpa, *new;
+
+	bpa = (struct bgp_pbr_action *)arg;
+
+	new = XCALLOC(MTYPE_PBR_ACTION, sizeof(*new));
+
+	memcpy(new, bpa, sizeof(*bpa));
+
+	return new;
+}
+
+/* hash_get allocator: heap-copy the stack-built template entry */
+static void *bgp_pbr_match_entry_alloc_intern(void *arg)
+{
+	struct bgp_pbr_match_entry *bpme, *new;
+
+	bpme = (struct bgp_pbr_match_entry *)arg;
+
+	new = XCALLOC(MTYPE_PBR_MATCH_ENTRY, sizeof(*new));
+
+	memcpy(new, bpme, sizeof(*bpme));
+
+	return new;
+}
+
+/* hash key for a match: vrf_id + flags + ipset type */
+uint32_t bgp_pbr_match_hash_key(void *arg)
+{
+	struct bgp_pbr_match *pbm = (struct bgp_pbr_match *)arg;
+	uint32_t key;
+
+	key = jhash_1word(pbm->vrf_id, 0x4312abde);
+	key = jhash_1word(pbm->flags, key);
+	return jhash_1word(pbm->type, key);
+}
+
+/* matches are equal when vrf, type, flags and action all agree
+ * (action is compared here although it is not part of the key)
+ */
+int bgp_pbr_match_hash_equal(const void *arg1, const void *arg2)
+{
+	const struct bgp_pbr_match *r1, *r2;
+
+	r1 = (const struct bgp_pbr_match *)arg1;
+	r2 = (const struct bgp_pbr_match *)arg2;
+
+	if (r1->vrf_id != r2->vrf_id)
+		return 0;
+
+	if (r1->type != r2->type)
+		return 0;
+
+	if (r1->flags != r2->flags)
+		return 0;
+
+	if (r1->action != r2->action)
+		return 0;
+
+	return 1;
+}
+
+/* hash key for an ipset entry: src + dst prefixes */
+uint32_t bgp_pbr_match_entry_hash_key(void *arg)
+{
+	struct bgp_pbr_match_entry *pbme;
+	uint32_t key;
+
+	pbme = (struct bgp_pbr_match_entry *)arg;
+	key = prefix_hash_key(&pbme->src);
+	key = jhash_1word(prefix_hash_key(&pbme->dst), key);
+
+	return key;
+}
+
+/* entries are equal when both prefixes match exactly */
+int bgp_pbr_match_entry_hash_equal(const void *arg1, const void *arg2)
+{
+	const struct bgp_pbr_match_entry *r1, *r2;
+
+	r1 = (const struct bgp_pbr_match_entry *)arg1;
+	r2 = (const struct bgp_pbr_match_entry *)arg2;
+
+	/* on updates, comparing
+	 * backpointer is not necessary
+	 */
+
+	/* unique value is self calculated
+	 */
+
+	/* rate is ignored for now
+	 */
+
+	if (!prefix_same(&r1->src, &r2->src))
+		return 0;
+
+	if (!prefix_same(&r1->dst, &r2->dst))
+		return 0;
+
+	return 1;
+}
+
+/* hash key for an action: table id + fwmark */
+uint32_t bgp_pbr_action_hash_key(void *arg)
+{
+	struct bgp_pbr_action *pbra;
+	uint32_t key;
+
+	pbra = (struct bgp_pbr_action *)arg;
+	key = jhash_1word(pbra->table_id, 0x4312abde);
+	key = jhash_1word(pbra->fwmark, key);
+	return key;
+}
+
+/* actions are equal when rate, vrf and nexthop all agree */
+int bgp_pbr_action_hash_equal(const void *arg1, const void *arg2)
+{
+	const struct bgp_pbr_action *r1, *r2;
+
+	r1 = (const struct bgp_pbr_action *)arg1;
+	r2 = (const struct bgp_pbr_action *)arg2;
+
+	/* unique value is self calculated
+	 * table and fwmark is self calculated
+	 */
+	if (r1->rate != r2->rate)
+		return 0;
+
+	if (r1->vrf_id != r2->vrf_id)
+		return 0;
+
+	if (memcmp(&r1->nh, &r2->nh, sizeof(struct nexthop)))
+		return 0;
+	return 1;
+}
+
+/* find the action with the given unique id in the vrf's BGP instance;
+ * NULL if the instance or the id (0 is invalid) is unknown
+ */
+struct bgp_pbr_action *bgp_pbr_action_rule_lookup(vrf_id_t vrf_id,
+						  uint32_t unique)
+{
+	struct bgp *bgp = bgp_lookup_by_vrf_id(vrf_id);
+	struct bgp_pbr_action_unique bpau;
+
+	if (!bgp || unique == 0)
+		return NULL;
+	bpau.unique = unique;
+	bpau.bpa_found = NULL;
+	hash_walk(bgp->pbr_action_hash, bgp_pbr_action_walkcb, &bpau);
+	return bpau.bpa_found;
+}
+
+/* find the ipset match with the given unique id; NULL if unknown */
+struct bgp_pbr_match *bgp_pbr_match_ipset_lookup(vrf_id_t vrf_id,
+						 uint32_t unique)
+{
+	struct bgp *bgp = bgp_lookup_by_vrf_id(vrf_id);
+	struct bgp_pbr_match_unique bpmu;
+
+	if (!bgp || unique == 0)
+		return NULL;
+	bpmu.unique = unique;
+	bpmu.bpm_found = NULL;
+	hash_walk(bgp->pbr_match_hash, bgp_pbr_match_walkcb, &bpmu);
+	return bpmu.bpm_found;
+}
+
+/* find an ipset entry: first locate the match by ipset name, then the
+ * entry by unique id within that match; NULL if either step fails
+ */
+struct bgp_pbr_match_entry *bgp_pbr_match_ipset_entry_lookup(vrf_id_t vrf_id,
+				 char *ipset_name,
+				 uint32_t unique)
+{
+	struct bgp *bgp = bgp_lookup_by_vrf_id(vrf_id);
+	struct bgp_pbr_match_entry_unique bpmeu;
+	struct bgp_pbr_match_ipsetname bpmi;
+
+	if (!bgp || unique == 0)
+		return NULL;
+	bpmi.ipsetname = XCALLOC(MTYPE_TMP, ZEBRA_IPSET_NAME_SIZE);
+	snprintf(bpmi.ipsetname, ZEBRA_IPSET_NAME_SIZE, "%s", ipset_name);
+	bpmi.bpm_found = NULL;
+	hash_walk(bgp->pbr_match_hash, bgp_pbr_match_pername_walkcb, &bpmi);
+	XFREE(MTYPE_TMP, bpmi.ipsetname);
+	if (!bpmi.bpm_found)
+		return NULL;
+	bpmeu.bpme_found = NULL;
+	bpmeu.unique = unique;
+	hash_walk(bpmi.bpm_found->entry_hash,
+		  bgp_pbr_match_entry_walkcb, &bpmeu);
+	return bpmeu.bpme_found;
+}
+
+/* find the match owning the iptable rule with this unique2 id */
+struct bgp_pbr_match *bgp_pbr_match_iptable_lookup(vrf_id_t vrf_id,
+						   uint32_t unique)
+{
+	struct bgp *bgp = bgp_lookup_by_vrf_id(vrf_id);
+	struct bgp_pbr_match_iptable_unique bpmiu;
+
+	if (!bgp || unique == 0)
+		return NULL;
+	bpmiu.unique = unique;
+	bpmiu.bpm_found = NULL;
+	hash_walk(bgp->pbr_match_hash, bgp_pbr_match_iptable_walkcb, &bpmiu);
+	return bpmiu.bpm_found;
+}
+
+/* tear down both PBR hashes of a BGP instance (uninstalling entries
+ * via the per-element free callbacks); safe to call twice
+ */
+void bgp_pbr_cleanup(struct bgp *bgp)
+{
+	if (bgp->pbr_match_hash) {
+		hash_clean(bgp->pbr_match_hash, bgp_pbr_match_free);
+		hash_free(bgp->pbr_match_hash);
+		bgp->pbr_match_hash = NULL;
+	}
+	if (bgp->pbr_action_hash) {
+		hash_clean(bgp->pbr_action_hash, bgp_pbr_action_free);
+		hash_free(bgp->pbr_action_hash);
+		bgp->pbr_action_hash = NULL;
+	}
+}
+
+/* create the per-instance match and action hashes */
+void bgp_pbr_init(struct bgp *bgp)
+{
+	bgp->pbr_match_hash =
+		hash_create_size(8, bgp_pbr_match_hash_key,
+				 bgp_pbr_match_hash_equal,
+				 "Match Hash");
+	bgp->pbr_action_hash =
+		hash_create_size(8, bgp_pbr_action_hash_key,
+				 bgp_pbr_action_hash_equal,
+				 /* NOTE(review): label says "Match" but
+				  * this is the action hash -- confirm
+				  */
+				 "Match Hash Entry");
+}
+
+/*
+ * Log a human-readable one-line summary of a PBR entry: the match
+ * criteria ("MATCH : ...") followed by the actions ("SET : ...").
+ * NOTE(review): output accumulates via unbounded sprintf into a fixed
+ * 512-byte buffer -- large entries could overflow; confirm limits.
+ */
+void bgp_pbr_print_policy_route(struct bgp_pbr_entry_main *api)
+{
+	int i = 0;
+	char return_string[512];
+	char *ptr = return_string;
+	char buff[64];
+	int nb_items = 0;
+
+	ptr += sprintf(ptr, "MATCH : ");
+	if (api->match_bitmask & PREFIX_SRC_PRESENT) {
+		struct prefix *p = &(api->src_prefix);
+
+		/* separator bookkeeping done after printing here (first
+		 * item never needs one); before printing for all others
+		 */
+		ptr += sprintf(ptr, "@src %s", prefix2str(p, buff, 64));
+		INCREMENT_DISPLAY(ptr, nb_items);
+	}
+	if (api->match_bitmask & PREFIX_DST_PRESENT) {
+		struct prefix *p = &(api->dst_prefix);
+
+		INCREMENT_DISPLAY(ptr, nb_items);
+		ptr += sprintf(ptr, "@dst %s", prefix2str(p, buff, 64));
+	}
+
+	if (api->match_protocol_num)
+		INCREMENT_DISPLAY(ptr, nb_items);
+	for (i = 0; i < api->match_protocol_num; i++)
+		ptr += sprintf_bgp_pbr_match_val(ptr, &api->protocol[i],
+					 i > 0 ? NULL : "@proto ");
+
+	if (api->match_src_port_num)
+		INCREMENT_DISPLAY(ptr, nb_items);
+	for (i = 0; i < api->match_src_port_num; i++)
+		ptr += sprintf_bgp_pbr_match_val(ptr, &api->src_port[i],
+					 i > 0 ? NULL : "@srcport ");
+
+	if (api->match_dst_port_num)
+		INCREMENT_DISPLAY(ptr, nb_items);
+	for (i = 0; i < api->match_dst_port_num; i++)
+		ptr += sprintf_bgp_pbr_match_val(ptr, &api->dst_port[i],
+					 i > 0 ? NULL : "@dstport ");
+
+	if (api->match_port_num)
+		INCREMENT_DISPLAY(ptr, nb_items);
+	for (i = 0; i < api->match_port_num; i++)
+		ptr += sprintf_bgp_pbr_match_val(ptr, &api->port[i],
+					 i > 0 ? NULL : "@port ");
+
+	if (api->match_icmp_type_num)
+		INCREMENT_DISPLAY(ptr, nb_items);
+	for (i = 0; i < api->match_icmp_type_num; i++)
+		ptr += sprintf_bgp_pbr_match_val(ptr, &api->icmp_type[i],
+					 i > 0 ? NULL : "@icmptype ");
+
+	if (api->match_icmp_code_num)
+		INCREMENT_DISPLAY(ptr, nb_items);
+	for (i = 0; i < api->match_icmp_code_num; i++)
+		ptr += sprintf_bgp_pbr_match_val(ptr, &api->icmp_code[i],
+					 i > 0 ? NULL : "@icmpcode ");
+
+	if (api->match_packet_length_num)
+		INCREMENT_DISPLAY(ptr, nb_items);
+	for (i = 0; i < api->match_packet_length_num; i++)
+		ptr += sprintf_bgp_pbr_match_val(ptr, &api->packet_length[i],
+					 i > 0 ? NULL : "@plen ");
+
+	if (api->match_dscp_num)
+		INCREMENT_DISPLAY(ptr, nb_items);
+	for (i = 0; i < api->match_dscp_num; i++)
+		ptr += sprintf_bgp_pbr_match_val(ptr, &api->dscp[i],
+					i > 0 ? NULL : "@dscp ");
+
+	if (api->match_tcpflags_num)
+		INCREMENT_DISPLAY(ptr, nb_items);
+	for (i = 0; i < api->match_tcpflags_num; i++)
+		ptr += sprintf_bgp_pbr_match_val(ptr, &api->tcpflags[i],
+					 i > 0 ? NULL : "@tcpflags ");
+
+	if (api->match_bitmask & FRAGMENT_PRESENT) {
+		INCREMENT_DISPLAY(ptr, nb_items);
+		ptr += sprintf(ptr, "@fragment %u", api->fragment.bitmask);
+	}
+	/* no match printed at all: rewind over "MATCH : " */
+	if (!nb_items)
+		ptr = return_string;
+	else
+		ptr += sprintf(ptr, "; ");
+	if (api->action_num)
+		ptr += sprintf(ptr, "SET : ");
+	nb_items = 0;
+	for (i = 0; i < api->action_num; i++) {
+		switch (api->actions[i].action) {
+		case ACTION_TRAFFICRATE:
+			INCREMENT_DISPLAY(ptr, nb_items);
+			ptr += sprintf(ptr, "@set rate %f",
+				       api->actions[i].u.r.rate);
+			break;
+		case ACTION_TRAFFIC_ACTION:
+			INCREMENT_DISPLAY(ptr, nb_items);
+			ptr += sprintf(ptr, "@action ");
+			if (api->actions[i].u.za.filter
+			    & TRAFFIC_ACTION_TERMINATE)
+				ptr += sprintf(ptr,
+					       " terminate (apply filter(s))");
+			if (api->actions[i].u.za.filter
+			    & TRAFFIC_ACTION_DISTRIBUTE)
+				ptr += sprintf(ptr, " distribute");
+			if (api->actions[i].u.za.filter
+			    & TRAFFIC_ACTION_SAMPLE)
+				ptr += sprintf(ptr, " sample");
+			break;
+		case ACTION_REDIRECT_IP:
+			INCREMENT_DISPLAY(ptr, nb_items);
+			char local_buff[INET_ADDRSTRLEN];
+
+			if (inet_ntop(AF_INET,
+				      &api->actions[i].u.zr.redirect_ip_v4,
+				      local_buff, INET_ADDRSTRLEN) != NULL)
+				ptr += sprintf(ptr,
+					  "@redirect ip nh %s", local_buff);
+			break;
+		case ACTION_REDIRECT:
+			INCREMENT_DISPLAY(ptr, nb_items);
+			ptr += sprintf(ptr, "@redirect vrf %u",
+				       api->actions[i].u.redirect_vrf);
+			break;
+		case ACTION_MARKING:
+			INCREMENT_DISPLAY(ptr, nb_items);
+			ptr += sprintf(ptr, "@set dscp %u",
+				  api->actions[i].u.marking_dscp);
+			break;
+		default:
+			break;
+		}
+	}
+	zlog_info("%s", return_string);
+}
+
+/*
+ * Remove one installed PBR entry from zebra and from the local hashes,
+ * cascading upward: entry -> (if the match is now empty) iptable rule
+ * and ipset match -> (if the action is now unreferenced) ip rule and
+ * default route in the action's table.
+ */
+static void bgp_pbr_flush_entry(struct bgp *bgp, struct bgp_pbr_action *bpa,
+				struct bgp_pbr_match *bpm,
+				struct bgp_pbr_match_entry *bpme)
+{
+	/* if bpme is null, bpm is also null
+	 */
+	if (bpme == NULL)
+		return;
+	/* ipset del entry */
+	if (bpme->installed) {
+		bgp_send_pbr_ipset_entry_match(bpme, false);
+		bpme->installed = false;
+		bpme->backpointer = NULL;
+	}
+	hash_release(bpm->entry_hash, bpme);
+	if (hashcount(bpm->entry_hash) == 0) {
+		/* delete iptable entry first */
+		/* then delete ipset match */
+		if (bpm->installed) {
+			if (bpm->installed_in_iptable) {
+				bgp_send_pbr_iptable(bpm->action,
+						     bpm, false);
+				bpm->installed_in_iptable = false;
+				bpm->action->refcnt--;
+			}
+			bgp_send_pbr_ipset_match(bpm, false);
+			bpm->installed = false;
+			bpm->action = NULL;
+		}
+		hash_release(bgp->pbr_match_hash, bpm);
+		/* XXX release pbr_match_action if not used
+		 * note that drop does not need to call send_pbr_action
+		 */
+	}
+	if (bpa->refcnt == 0) {
+		/* table_id == 0 is the drop action: nothing to withdraw */
+		if (bpa->installed && bpa->table_id != 0) {
+			bgp_send_pbr_rule_action(bpa, false);
+			bgp_zebra_announce_default(bpa->bgp, &(bpa->nh),
+						   AFI_IP,
+						   bpa->table_id,
+						   false);
+		}
+	}
+}
+
+/* walk context: find an equivalent entry under a *different* match
+ * (same vrf/type/flags, different action) than the template's
+ */
+struct bgp_pbr_match_entry_remain {
+	struct bgp_pbr_match_entry *bpme_to_match;
+	struct bgp_pbr_match_entry *bpme_found;
+};
+
+/* hash_walk cb over the match hash implementing the lookup above */
+static int bgp_pbr_get_remaining_entry(struct hash_backet *backet, void *arg)
+{
+	struct bgp_pbr_match *bpm = (struct bgp_pbr_match *)backet->data;
+	struct bgp_pbr_match_entry_remain *bpmer =
+		(struct bgp_pbr_match_entry_remain *)arg;
+	struct bgp_pbr_match *bpm_temp;
+	struct bgp_pbr_match_entry *bpme = bpmer->bpme_to_match;
+
+	/* skip the template's own match and matches sharing its action */
+	if (!bpme->backpointer ||
+	    bpm == bpme->backpointer ||
+	    bpme->backpointer->action == bpm->action)
+		return HASHWALK_CONTINUE;
+	/* ensure bpm other characteristics are equal */
+	bpm_temp = bpme->backpointer;
+	if (bpm_temp->vrf_id != bpm->vrf_id ||
+	    bpm_temp->type != bpm->type ||
+	    bpm_temp->flags != bpm->flags)
+		return HASHWALK_CONTINUE;
+
+	/* look for remaining bpme */
+	bpmer->bpme_found = hash_lookup(bpm->entry_hash, bpme);
+	if (!bpmer->bpme_found)
+		return HASHWALK_CONTINUE;
+	return HASHWALK_ABORT;
+}
+
+/*
+ * Withdraw the policy route matching the given src/dst prefixes:
+ * rebuild the match/entry templates (the extended-community data is no
+ * longer available at withdraw time), locate the installed entry via a
+ * hash walk, then flush it.
+ * NOTE(review): binfo is currently unused here -- presumably kept for
+ * signature symmetry with the add path; confirm.
+ */
+static void bgp_pbr_policyroute_remove_from_zebra(struct bgp *bgp,
+					  struct bgp_info *binfo,
+					  vrf_id_t vrf_id,
+					  struct prefix *src,
+					  struct prefix *dst)
+{
+	struct bgp_pbr_match temp;
+	struct bgp_pbr_match_entry temp2;
+	struct bgp_pbr_match *bpm;
+	struct bgp_pbr_match_entry *bpme;
+	struct bgp_pbr_match_entry_remain bpmer;
+
+	/* as we don't know information from EC
+	 * look for bpm that have the bpm
+	 * with vrf_id characteristics
+	 */
+	memset(&temp2, 0, sizeof(temp2));
+	memset(&temp, 0, sizeof(temp));
+	if (src) {
+		temp.flags |= MATCH_IP_SRC_SET;
+		prefix_copy(&temp2.src, src);
+	} else
+		temp2.src.family = AF_INET;
+	if (dst) {
+		temp.flags |= MATCH_IP_DST_SET;
+		prefix_copy(&temp2.dst, dst);
+	} else
+		temp2.dst.family = AF_INET;
+
+	if (src == NULL || dst == NULL)
+		temp.type = IPSET_NET;
+	else
+		temp.type = IPSET_NET_NET;
+	if (vrf_id == VRF_UNKNOWN) /* XXX case BGP destroy */
+		temp.vrf_id = 0;
+	else
+		temp.vrf_id = vrf_id;
+	bpme = &temp2;
+	bpm = &temp;
+	bpme->backpointer = bpm;
+	/* right now, a previous entry may already exist
+	 * flush previous entry if necessary
+	 */
+	bpmer.bpme_to_match = bpme;
+	bpmer.bpme_found = NULL;
+	hash_walk(bgp->pbr_match_hash, bgp_pbr_get_remaining_entry, &bpmer);
+	if (bpmer.bpme_found) {
+		/* NOTE(review): function-scope statics look unnecessary
+		 * here (plain locals would do) -- confirm intent
+		 */
+		static struct bgp_pbr_match *local_bpm;
+		static struct bgp_pbr_action *local_bpa;
+
+		local_bpm = bpmer.bpme_found->backpointer;
+		local_bpa = local_bpm->action;
+		bgp_pbr_flush_entry(bgp, local_bpa,
+				    local_bpm, bpmer.bpme_found);
+	}
+}
+
+static void bgp_pbr_policyroute_add_to_zebra(struct bgp *bgp,
+ struct bgp_info *binfo,
+ vrf_id_t vrf_id,
+ struct prefix *src,
+ struct prefix *dst,
+ struct nexthop *nh,
+ float *rate)
+{
+ struct bgp_pbr_match temp;
+ struct bgp_pbr_match_entry temp2;
+ struct bgp_pbr_match *bpm;
+ struct bgp_pbr_match_entry *bpme = NULL;
+ struct bgp_pbr_action temp3;
+ struct bgp_pbr_action *bpa = NULL;
+ struct bgp_pbr_match_entry_remain bpmer;
+
+ /* look for bpa first */
+ memset(&temp3, 0, sizeof(temp3));
+ if (rate)
+ temp3.rate = *rate;
+ if (nh)
+ memcpy(&temp3.nh, nh, sizeof(struct nexthop));
+ temp3.vrf_id = vrf_id;
+ bpa = hash_get(bgp->pbr_action_hash, &temp3,
+ bgp_pbr_action_alloc_intern);
+
+ if (bpa->fwmark == 0) {
+ /* drop is handled by iptable */
+ if (nh && nh->type == NEXTHOP_TYPE_BLACKHOLE) {
+ bpa->table_id = 0;
+ bpa->installed = true;
+ } else {
+ bpa->fwmark = bgp_zebra_tm_get_id();
+ bpa->table_id = bpa->fwmark;
+ bpa->installed = false;
+ }
+ bpa->bgp = bgp;
+ bpa->unique = ++bgp_pbr_action_counter_unique;
+ /* 0 value is forbidden */
+ bpa->install_in_progress = false;
+ }
+
+ /* then look for bpm */
+ memset(&temp, 0, sizeof(temp));
+ if (src == NULL || dst == NULL)
+ temp.type = IPSET_NET;
+ else
+ temp.type = IPSET_NET_NET;
+ temp.vrf_id = vrf_id;
+ if (src)
+ temp.flags |= MATCH_IP_SRC_SET;
+ if (dst)
+ temp.flags |= MATCH_IP_DST_SET;
+ temp.action = bpa;
+ bpm = hash_get(bgp->pbr_match_hash, &temp,
+ bgp_pbr_match_alloc_intern);
+
+ /* new, then self allocate ipset_name and unique */
+ if (bpm && bpm->unique == 0) {
+ bpm->unique = ++bgp_pbr_match_counter_unique;
+ /* 0 value is forbidden */
+ sprintf(bpm->ipset_name, "match%p", bpm);
+ bpm->entry_hash = hash_create_size(8,
+ bgp_pbr_match_entry_hash_key,
+ bgp_pbr_match_entry_hash_equal,
+ "Match Entry Hash");
+ bpm->installed = false;
+
+ /* unique2 should be updated too */
+ bpm->unique2 = ++bgp_pbr_match_iptable_counter_unique;
+ bpm->installed_in_iptable = false;
+ bpm->install_in_progress = false;
+ bpm->install_iptable_in_progress = false;
+ }
+
+ memset(&temp2, 0, sizeof(temp2));
+ if (src)
+ prefix_copy(&temp2.src, src);
+ else
+ temp2.src.family = AF_INET;
+ if (dst)
+ prefix_copy(&temp2.dst, dst);
+ else
+ temp2.dst.family = AF_INET;
+ if (bpm)
+ bpme = hash_get(bpm->entry_hash, &temp2,
+ bgp_pbr_match_entry_alloc_intern);
+ if (bpme && bpme->unique == 0) {
+ bpme->unique = ++bgp_pbr_match_entry_counter_unique;
+ /* 0 value is forbidden */
+ bpme->backpointer = bpm;
+ bpme->installed = false;
+ bpme->install_in_progress = false;
+ }
+
+ /* BGP FS: append entry to zebra
+ * - policies are not routing entries and as such
+ * route replace semantics don't necessarily follow
+ * through to policy entries
+ * - because of that, not all policing information will be stored
+ * into zebra. and non selected policies will be suppressed from zebra
+ * - as consequence, in order to bring consistency
+ * a policy will be added, then ifan ecmp policy exists,
+ * it will be suppressed subsequently
+ */
+ /* ip rule add */
+ if (!bpa->installed) {
+ bgp_send_pbr_rule_action(bpa, true);
+ bgp_zebra_announce_default(bgp, nh,
+ AFI_IP, bpa->table_id, true);
+ }
+
+ /* ipset create */
+ if (bpm && !bpm->installed)
+ bgp_send_pbr_ipset_match(bpm, true);
+ /* ipset add */
+ if (bpme && !bpme->installed)
+ bgp_send_pbr_ipset_entry_match(bpme, true);
+
+ /* iptables */
+ if (bpm && !bpm->installed_in_iptable)
+ bgp_send_pbr_iptable(bpa, bpm, true);
+
+ /* A previous entry may already exist
+ * flush previous entry if necessary
+ */
+ bpmer.bpme_to_match = bpme;
+ bpmer.bpme_found = NULL;
+ hash_walk(bgp->pbr_match_hash, bgp_pbr_get_remaining_entry, &bpmer);
+ if (bpmer.bpme_found) {
+ static struct bgp_pbr_match *local_bpm;
+ static struct bgp_pbr_action *local_bpa;
+
+ local_bpm = bpmer.bpme_found->backpointer;
+ local_bpa = local_bpm->action;
+ bgp_pbr_flush_entry(bgp, local_bpa,
+ local_bpm, bpmer.bpme_found);
+ }
+
+
+}
+
/* Apply one validated flowspec entry.
 * When add is false, the matching policy is removed from zebra.
 * When add is true, walk the entry's actions and install the
 * resulting policy route(s); some action types are only logged
 * and ignored for now (sample, DSCP marking, non-zero rate).
 */
static void bgp_pbr_handle_entry(struct bgp *bgp,
				 struct bgp_info *binfo,
				 struct bgp_pbr_entry_main *api,
				 bool add)
{
	struct nexthop nh;
	int i = 0;
	int continue_loop = 1;
	float rate = 0;
	struct prefix *src = NULL, *dst = NULL;

	/* pick up the optional src/dst prefixes advertised in the entry */
	if (api->match_bitmask & PREFIX_SRC_PRESENT)
		src = &api->src_prefix;
	if (api->match_bitmask & PREFIX_DST_PRESENT)
		dst = &api->dst_prefix;
	memset(&nh, 0, sizeof(struct nexthop));
	nh.vrf_id = VRF_UNKNOWN;

	if (!add)
		return bgp_pbr_policyroute_remove_from_zebra(bgp, binfo,
					api->vrf_id, src, dst);
	/* add == true: derive the policy from the entry's action list */
	for (i = 0; i < api->action_num; i++) {
		switch (api->actions[i].action) {
		case ACTION_TRAFFICRATE:
			/* rate 0 means drop the packet: install a
			 * blackhole nexthop
			 */
			if (api->actions[i].u.r.rate == 0) {
				nh.vrf_id = api->vrf_id;
				nh.type = NEXTHOP_TYPE_BLACKHOLE;
				bgp_pbr_policyroute_add_to_zebra(bgp, binfo,
						api->vrf_id, src, dst,
						&nh, &rate);
			} else {
				/* update rate. can be reentrant;
				 * a later redirect action in this same
				 * entry will install using this rate
				 */
				rate = api->actions[i].u.r.rate;
				if (BGP_DEBUG(pbr, PBR)) {
					bgp_pbr_print_policy_route(api);
					zlog_warn("PBR: ignoring Set action rate %f",
						  api->actions[i].u.r.rate);
				}
			}
			break;
		case ACTION_TRAFFIC_ACTION:
			/* sample sub-action is not implemented; log only */
			if (api->actions[i].u.za.filter
			    & TRAFFIC_ACTION_SAMPLE) {
				if (BGP_DEBUG(pbr, PBR)) {
					bgp_pbr_print_policy_route(api);
					zlog_warn("PBR: Sample action Ignored");
				}
			}
#if 0
			if (api->actions[i].u.za.filter
			    & TRAFFIC_ACTION_DISTRIBUTE) {
				if (BGP_DEBUG(pbr, PBR)) {
					bgp_pbr_print_policy_route(api);
					zlog_warn("PBR: Distribute action Applies");
				}
				continue_loop = 0;
				/* continue forwarding entry as before
				 * no action
				 */
			}
#endif /* XXX to confirm behaviour of traffic action. for now , ignore */
			/* terminate action: run other filters
			 */
			break;
		case ACTION_REDIRECT_IP:
			/* redirect to an IPv4 nexthop in the entry's vrf */
			nh.type = NEXTHOP_TYPE_IPV4;
			nh.gate.ipv4.s_addr =
				api->actions[i].u.zr.redirect_ip_v4.s_addr;
			nh.vrf_id = api->vrf_id;
			bgp_pbr_policyroute_add_to_zebra(bgp, binfo,
							 api->vrf_id,
							 src, dst,
							 &nh, &rate);
			/* XXX combination with REDIRECT_VRF
			 * + REDIRECT_NH_IP not done
			 */
			continue_loop = 0;
			break;
		case ACTION_REDIRECT:
			/* redirect into another vrf: nexthop carries the
			 * destination vrf id
			 */
			nh.vrf_id = api->actions[i].u.redirect_vrf;
			nh.type = NEXTHOP_TYPE_IPV4;
			bgp_pbr_policyroute_add_to_zebra(bgp, binfo,
							 api->vrf_id,
							 src, dst,
							 &nh, &rate);
			continue_loop = 0;
			break;
		case ACTION_MARKING:
			/* DSCP marking is not implemented; log only */
			if (BGP_DEBUG(pbr, PBR)) {
				bgp_pbr_print_policy_route(api);
				zlog_warn("PBR: Set DSCP %u Ignored",
					  api->actions[i].u.marking_dscp);
			}
			break;
		default:
			break;
		}
		/* redirect actions are terminal: stop after the first one */
		if (continue_loop == 0)
			break;
	}
}
+
/* Entry point from route selection: translate a flowspec prefix into
 * a bgp_pbr_entry_main and install (nlri_update == true) or remove
 * (nlri_update == false) the corresponding policy.
 * Only IPv4 SAFI_FLOWSPEC is handled; anything else is a no-op.
 */
void bgp_pbr_update_entry(struct bgp *bgp, struct prefix *p,
			  struct bgp_info *info, afi_t afi, safi_t safi,
			  bool nlri_update)
{
	struct bgp_pbr_entry_main api;

	if (afi == AFI_IP6)
		return; /* IPv6 not supported */
	if (safi != SAFI_FLOWSPEC)
		return; /* not supported */
	/* Make Zebra API structure. */
	memset(&api, 0, sizeof(api));
	api.vrf_id = bgp->vrf_id;
	api.afi = afi;

	/* decode the NLRI; bail out (leaving zebra untouched) on error */
	if (bgp_pbr_build_and_validate_entry(p, info, &api) < 0) {
		if (BGP_DEBUG(pbr, PBR_ERROR))
			zlog_err("%s: cancel updating entry in bgp pbr",
				 __func__);
		return;
	}
	bgp_pbr_handle_entry(bgp, info, &api, nlri_update);
}
diff --git a/bgpd/bgp_pbr.h b/bgpd/bgp_pbr.h
new file mode 100644
index 0000000000..5129ada37b
--- /dev/null
+++ b/bgpd/bgp_pbr.h
@@ -0,0 +1,256 @@
+/*
+ * BGP pbr
+ * Copyright (C) 6WIND
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __BGP_PBR_H__
+#define __BGP_PBR_H__
+
+#include "nexthop.h"
+#include "zclient.h"
+
+/* flowspec case: 0 to 3 actions maximum:
+ * 1 redirect
+ * 1 set dscp
+ * 1 set traffic rate
+ */
+#define ACTIONS_MAX_NUM 4
/* Flowspec action types, stored in bgp_pbr_entry_action.action. */
enum bgp_pbr_action_enum {
	ACTION_TRAFFICRATE = 1,
	ACTION_TRAFFIC_ACTION = 2,
	ACTION_REDIRECT = 3,
	ACTION_MARKING = 4,
	ACTION_REDIRECT_IP = 5
};
+
+#define TRAFFIC_ACTION_SAMPLE (1 << 0)
+#define TRAFFIC_ACTION_TERMINATE (1 << 1)
+#define TRAFFIC_ACTION_DISTRIBUTE (1 << 2)
+
+#define OPERATOR_COMPARE_LESS_THAN (1<<1)
+#define OPERATOR_COMPARE_GREATER_THAN (1<<2)
+#define OPERATOR_COMPARE_EQUAL_TO (1<<3)
+#define OPERATOR_COMPARE_EXACT_MATCH (1<<4)
+
+#define OPERATOR_UNARY_OR (1<<1)
+#define OPERATOR_UNARY_AND (1<<2)
+
+/* struct used to store values in range [0;65535]
+ * this can be used for a port number or a protocol value
+ */
+#define BGP_PBR_MATCH_VAL_MAX 5
+
/* One match value in range [0;65535] with its flowspec operators;
 * used for port numbers, protocol values, ICMP type/code, DSCP, etc.
 * compare_operator holds OPERATOR_COMPARE_* bits, unary_operator
 * holds OPERATOR_UNARY_* bits.
 *
 * Note: the original declaration ended with "} bgp_pbr_value_t;",
 * which (lacking a typedef keyword) defined a tentative global
 * variable in this header — a duplicate-symbol hazard for every
 * translation unit including it (an error under -fno-common).  The
 * unused identifier has been dropped.
 */
struct bgp_pbr_match_val {
	uint16_t value;
	uint8_t compare_operator;
	uint8_t unary_operator;
};
+
+#define FRAGMENT_DONT 1
+#define FRAGMENT_IS 2
+#define FRAGMENT_FIRST 4
+#define FRAGMENT_LAST 8
+
/* IP fragment match: bitmask of the FRAGMENT_* flags above. */
struct bgp_pbr_fragment_val {
	uint8_t bitmask;
};
+
/* One flowspec action to apply on matching traffic; the union arm in
 * use is selected by the 'action' field.
 */
struct bgp_pbr_entry_action {
	/* used to store enum bgp_pbr_action_enum enumerate */
	uint8_t action;
	union {
		/* ACTION_TRAFFICRATE: 0 means drop */
		union {
			uint8_t rate_info[4]; /* IEEE.754.1985 */
			float rate;
		} r __attribute__((aligned(8)));
		/* ACTION_TRAFFIC_ACTION: filter is a TRAFFIC_ACTION_*
		 * bitmask
		 */
		struct _pbr_action {
			uint8_t do_sample;
			uint8_t filter;
		} za;
		/* ACTION_REDIRECT: destination vrf */
		vrf_id_t redirect_vrf;
		/* ACTION_REDIRECT_IP: IPv4 nexthop to redirect to */
		struct _pbr_redirect_ip {
			struct in_addr redirect_ip_v4;
			uint8_t duplicate;
		} zr;
		/* ACTION_MARKING: DSCP value (logged, not applied) */
		uint8_t marking_dscp;
	} u __attribute__((aligned(8)));
};
+
+/* BGP Policy Route structure */
+struct bgp_pbr_entry_main {
+ uint8_t type;
+ uint16_t instance;
+
+ uint32_t flags;
+
+ uint8_t message;
+
+ /*
+ * This is an enum but we are going to treat it as a uint8_t
+ * for purpose of encoding/decoding
+ */
+ afi_t afi;
+ safi_t safi;
+
+#define PREFIX_SRC_PRESENT (1 << 0)
+#define PREFIX_DST_PRESENT (1 << 1)
+#define FRAGMENT_PRESENT (1 << 2)
+ uint8_t match_bitmask;
+
+ uint8_t match_src_port_num;
+ uint8_t match_dst_port_num;
+ uint8_t match_port_num;
+ uint8_t match_protocol_num;
+ uint8_t match_icmp_type_num;
+ uint8_t match_icmp_code_num;
+ uint8_t match_packet_length_num;
+ uint8_t match_dscp_num;
+ uint8_t match_tcpflags_num;
+
+ struct prefix src_prefix;
+ struct prefix dst_prefix;
+
+ struct bgp_pbr_match_val protocol[BGP_PBR_MATCH_VAL_MAX];
+ struct bgp_pbr_match_val src_port[BGP_PBR_MATCH_VAL_MAX];
+ struct bgp_pbr_match_val dst_port[BGP_PBR_MATCH_VAL_MAX];
+ struct bgp_pbr_match_val port[BGP_PBR_MATCH_VAL_MAX];
+ struct bgp_pbr_match_val icmp_type[BGP_PBR_MATCH_VAL_MAX];
+ struct bgp_pbr_match_val icmp_code[BGP_PBR_MATCH_VAL_MAX];
+ struct bgp_pbr_match_val packet_length[BGP_PBR_MATCH_VAL_MAX];
+ struct bgp_pbr_match_val dscp[BGP_PBR_MATCH_VAL_MAX];
+ struct bgp_pbr_match_val tcpflags[BGP_PBR_MATCH_VAL_MAX];
+ struct bgp_pbr_fragment_val fragment;
+
+ uint16_t action_num;
+ struct bgp_pbr_entry_action actions[ACTIONS_MAX_NUM];
+
+ uint8_t distance;
+
+ uint32_t metric;
+
+ route_tag_t tag;
+
+ uint32_t mtu;
+
+ vrf_id_t vrf_id;
+};
+
/* An ipset match: a named set of src/dst prefix entries, tied to one
 * bgp_pbr_action through an iptable rule (see the add/remove paths in
 * bgp_pbr.c).  Hashed in bgp->pbr_match_hash.
 */
struct bgp_pbr_match {
	/* name under which the ipset is created in zebra */
	char ipset_name[ZEBRA_IPSET_NAME_SIZE];

	/* mapped on enum ipset_type
	 */
	uint32_t type;

#define MATCH_IP_SRC_SET (1 << 0)
#define MATCH_IP_DST_SET (1 << 1)
	uint32_t flags;

	vrf_id_t vrf_id;

	/* unique identifier for ipset create transaction
	 */
	uint32_t unique;

	/* unique identifier for iptable add transaction
	 */
	uint32_t unique2;

	/* ipset installation state in zebra */
	bool installed;
	bool install_in_progress;

	/* iptable installation state in zebra */
	bool installed_in_iptable;
	bool install_iptable_in_progress;

	/* hash of struct bgp_pbr_match_entry belonging to this set */
	struct hash *entry_hash;

	/* action applied to traffic matching this set */
	struct bgp_pbr_action *action;

};
+
/* One src/dst prefix entry inside a bgp_pbr_match ipset. */
struct bgp_pbr_match_entry {
	/* owning ipset match */
	struct bgp_pbr_match *backpointer;

	/* unique identifier for the ipset entry transaction */
	uint32_t unique;

	struct prefix src;
	struct prefix dst;

	/* installation state in zebra */
	bool installed;
	bool install_in_progress;
};
+
/* A policy action: the fwmark/table used for the ip rule, plus the
 * nexthop (or blackhole) traffic is steered to.  Hashed in
 * bgp->pbr_action_hash.
 */
struct bgp_pbr_action {

	/*
	 * The Unique identifier of this specific pbrms
	 */
	uint32_t unique;

	/* fwmark for the ip rule; 0 until allocated (drop case keeps 0) */
	uint32_t fwmark;

	/* routing table the rule points at (equals fwmark once set) */
	uint32_t table_id;

	/* traffic rate from ACTION_TRAFFICRATE; 0 means drop */
	float rate;

	/*
	 * nexthop information, or drop information
	 * contains src vrf_id and nh contains dest vrf_id
	 */
	vrf_id_t vrf_id;
	struct nexthop nh;

	/* installation state in zebra */
	bool installed;
	bool install_in_progress;
	uint32_t refcnt;
	/* owning bgp instance */
	struct bgp *bgp;
};
+
+extern struct bgp_pbr_action *bgp_pbr_action_rule_lookup(vrf_id_t vrf_id,
+ uint32_t unique);
+
+extern struct bgp_pbr_match *bgp_pbr_match_ipset_lookup(vrf_id_t vrf_id,
+ uint32_t unique);
+
+extern struct bgp_pbr_match_entry *bgp_pbr_match_ipset_entry_lookup(
+ vrf_id_t vrf_id, char *name,
+ uint32_t unique);
+extern struct bgp_pbr_match *bgp_pbr_match_iptable_lookup(vrf_id_t vrf_id,
+ uint32_t unique);
+
+extern void bgp_pbr_cleanup(struct bgp *bgp);
+extern void bgp_pbr_init(struct bgp *bgp);
+
+extern uint32_t bgp_pbr_action_hash_key(void *arg);
+extern int bgp_pbr_action_hash_equal(const void *arg1,
+ const void *arg2);
+extern uint32_t bgp_pbr_match_entry_hash_key(void *arg);
+extern int bgp_pbr_match_entry_hash_equal(const void *arg1,
+ const void *arg2);
+extern uint32_t bgp_pbr_match_hash_key(void *arg);
+extern int bgp_pbr_match_hash_equal(const void *arg1,
+ const void *arg2);
+
+void bgp_pbr_print_policy_route(struct bgp_pbr_entry_main *api);
+
+struct bgp_node;
+struct bgp_info;
+extern void bgp_pbr_update_entry(struct bgp *bgp, struct prefix *p,
+ struct bgp_info *new_select,
+ afi_t afi, safi_t safi,
+ bool nlri_update);
+
+#endif /* __BGP_PBR_H__ */
diff --git a/bgpd/bgp_rd.c b/bgpd/bgp_rd.c
index 64e083d1ef..3f7ea16045 100644
--- a/bgpd/bgp_rd.c
+++ b/bgpd/bgp_rd.c
@@ -200,3 +200,15 @@ char *prefix_rd2str(struct prefix_rd *prd, char *buf, size_t size)
snprintf(buf, size, "Unknown Type: %d", type);
return buf;
}
+
+void form_auto_rd(struct in_addr router_id,
+ uint16_t rd_id,
+ struct prefix_rd *prd)
+{
+ char buf[100];
+
+ prd->family = AF_UNSPEC;
+ prd->prefixlen = 64;
+ sprintf(buf, "%s:%hu", inet_ntoa(router_id), rd_id);
+ str2prefix_rd(buf, prd);
+}
diff --git a/bgpd/bgp_rd.h b/bgpd/bgp_rd.h
index a8ea83a4a7..c5ea34103f 100644
--- a/bgpd/bgp_rd.h
+++ b/bgpd/bgp_rd.h
@@ -66,5 +66,7 @@ extern void decode_rd_vnc_eth(uint8_t *pnt, struct rd_vnc_eth *rd_vnc_eth);
extern int str2prefix_rd(const char *, struct prefix_rd *);
extern char *prefix_rd2str(struct prefix_rd *, char *, size_t);
+extern void form_auto_rd(struct in_addr router_id, uint16_t rd_id,
+ struct prefix_rd *prd);
#endif /* _QUAGGA_BGP_RD_H */
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index a71f5ac956..cfaa04a8c9 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -75,6 +75,7 @@
#include "bgpd/bgp_evpn_vty.h"
#include "bgpd/bgp_flowspec.h"
#include "bgpd/bgp_flowspec_util.h"
+#include "bgpd/bgp_pbr.h"
#ifndef VTYSH_EXTRACT_PL
#include "bgpd/bgp_route_clippy.c"
@@ -99,6 +100,8 @@ static const struct message bgp_pmsi_tnltype_str[] = {
{0}
};
+#define VRFID_NONE_STR "-"
+
struct bgp_node *bgp_afi_node_get(struct bgp_table *table, afi_t afi,
safi_t safi, struct prefix *p,
struct prefix_rd *prd)
@@ -1418,6 +1421,16 @@ int subgroup_announce_check(struct bgp_node *rn, struct bgp_info *ri,
return 0;
}
+	/*
+	 * If we are doing VRF-to-VRF leaking via the import
+	 * statement, we want to prevent the route from going
+	 * off box, as the RT and RD created are locally
+	 * significant and globally useless.
+	 */
+ if (safi == SAFI_MPLS_VPN && ri->extra && ri->extra->num_labels
+ && ri->extra->label[0] == BGP_PREVENT_VRF_2_VRF_LEAK)
+ return 0;
+
/* If it's labeled safi, make sure the route has a valid label. */
if (safi == SAFI_LABELED_UNICAST) {
mpls_label_t label = bgp_adv_label(rn, ri, peer, afi, safi);
@@ -2219,7 +2232,6 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_node *rn,
/* If best route remains the same and this is not due to user-initiated
* clear, see exactly what needs to be done.
*/
-
if (old_select && old_select == new_select
&& !CHECK_FLAG(rn->flags, BGP_NODE_USER_CLEAR)
&& !CHECK_FLAG(old_select->flags, BGP_INFO_ATTR_CHANGED)
@@ -4614,7 +4626,7 @@ static void bgp_static_update_safi(struct bgp *bgp, struct prefix *p,
if (bgp_static->encap_tunneltype == BGP_ENCAP_TYPE_VXLAN) {
struct bgp_encap_type_vxlan bet;
memset(&bet, 0, sizeof(struct bgp_encap_type_vxlan));
- bet.vnid = p->u.prefix_evpn.eth_tag;
+ bet.vnid = p->u.prefix_evpn.prefix_addr.eth_tag;
bgp_encap_type_vxlan_to_tlv(&bet, &attr);
}
if (bgp_static->router_mac) {
@@ -5089,10 +5101,10 @@ int bgp_static_set_safi(afi_t afi, safi_t safi, struct vty *vty,
return CMD_WARNING_CONFIG_FAILED;
}
if ((gw_ip.family == AF_INET
- && IS_EVPN_PREFIX_IPADDR_V6(
+ && is_evpn_prefix_ipaddr_v6(
(struct prefix_evpn *)&p))
|| (gw_ip.family == AF_INET6
- && IS_EVPN_PREFIX_IPADDR_V4(
+ && is_evpn_prefix_ipaddr_v4(
(struct prefix_evpn *)&p))) {
vty_out(vty,
"%% GatewayIp family differs with IP prefix\n");
@@ -6461,6 +6473,13 @@ void route_vty_out(struct vty *vty, struct prefix *p, struct bgp_info *binfo,
json_object *json_nexthops = NULL;
json_object *json_nexthop_global = NULL;
json_object *json_nexthop_ll = NULL;
+ char vrf_id_str[VRF_NAMSIZ] = {0};
+ bool nexthop_self = CHECK_FLAG(binfo->flags, BGP_INFO_ANNC_NH_SELF)
+ ? true
+ : false;
+ bool nexthop_othervrf = false;
+ vrf_id_t nexthop_vrfid;
+ const char *nexthop_vrfname = "Default";
if (json_paths)
json_path = json_object_new_object();
@@ -6490,6 +6509,39 @@ void route_vty_out(struct vty *vty, struct prefix *p, struct bgp_info *binfo,
}
/*
+ * If vrf id of nexthop is different from that of prefix,
+ * set up printable string to append
+ */
+ if (binfo->extra && binfo->extra->bgp_orig) {
+ const char *self = "";
+
+ if (nexthop_self)
+ self = "<";
+
+ nexthop_othervrf = true;
+ nexthop_vrfid = binfo->extra->bgp_orig->vrf_id;
+
+ if (binfo->extra->bgp_orig->vrf_id == VRF_UNKNOWN)
+ snprintf(vrf_id_str, sizeof(vrf_id_str),
+ "@%s%s", VRFID_NONE_STR, self);
+ else
+ snprintf(vrf_id_str, sizeof(vrf_id_str), "@%u%s",
+ binfo->extra->bgp_orig->vrf_id, self);
+
+ if (binfo->extra->bgp_orig->inst_type !=
+ BGP_INSTANCE_TYPE_DEFAULT)
+
+ nexthop_vrfname = binfo->extra->bgp_orig->name;
+ } else {
+ const char *self = "";
+
+ if (nexthop_self)
+ self = "<";
+
+ snprintf(vrf_id_str, sizeof(vrf_id_str), "%s", self);
+ }
+
+ /*
* For ENCAP and EVPN routes, nexthop address family is not
* neccessarily the same as the prefix address family.
* Both SAFI_MPLS_VPN and SAFI_ENCAP use the MP nexthop field
@@ -6531,7 +6583,7 @@ void route_vty_out(struct vty *vty, struct prefix *p, struct bgp_info *binfo,
json_object_boolean_true_add(json_nexthop_global,
"used");
} else
- vty_out(vty, "%s", nexthop);
+ vty_out(vty, "%s%s", nexthop, vrf_id_str);
} else if (safi == SAFI_EVPN) {
if (json_paths) {
json_nexthop_global = json_object_new_object();
@@ -6543,7 +6595,8 @@ void route_vty_out(struct vty *vty, struct prefix *p, struct bgp_info *binfo,
json_object_boolean_true_add(json_nexthop_global,
"used");
} else
- vty_out(vty, "%-16s", inet_ntoa(attr->nexthop));
+ vty_out(vty, "%-16s%s", inet_ntoa(attr->nexthop),
+ vrf_id_str);
} else if (safi == SAFI_FLOWSPEC) {
if (attr->nexthop.s_addr != 0) {
if (json_paths) {
@@ -6577,11 +6630,17 @@ void route_vty_out(struct vty *vty, struct prefix *p, struct bgp_info *binfo,
json_object_boolean_true_add(json_nexthop_global,
"used");
} else {
+ char buf[BUFSIZ];
+
if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_EVPN))
- vty_out(vty, "%-16s",
- inet_ntoa(attr->mp_nexthop_global_in));
+ snprintf(buf, sizeof(buf), "%s%s",
+ inet_ntoa(attr->mp_nexthop_global_in),
+ vrf_id_str);
else
- vty_out(vty, "%-16s", inet_ntoa(attr->nexthop));
+ snprintf(buf, sizeof(buf), "%s%s",
+ inet_ntoa(attr->nexthop),
+ vrf_id_str);
+ vty_out(vty, "%-16s", buf);
}
}
@@ -6648,11 +6707,12 @@ void route_vty_out(struct vty *vty, struct prefix *p, struct bgp_info *binfo,
vty_out(vty, "%*s", len, " ");
} else {
len = vty_out(
- vty, "%s",
+ vty, "%s%s",
inet_ntop(
AF_INET6,
&attr->mp_nexthop_local,
- buf, BUFSIZ));
+ buf, BUFSIZ),
+ vrf_id_str);
len = 16 - len;
if (len < 1)
@@ -6662,10 +6722,11 @@ void route_vty_out(struct vty *vty, struct prefix *p, struct bgp_info *binfo,
}
} else {
len = vty_out(
- vty, "%s",
+ vty, "%s%s",
inet_ntop(AF_INET6,
&attr->mp_nexthop_global, buf,
- BUFSIZ));
+ BUFSIZ),
+ vrf_id_str);
len = 16 - len;
if (len < 1)
@@ -6724,6 +6785,21 @@ void route_vty_out(struct vty *vty, struct prefix *p, struct bgp_info *binfo,
vty_out(vty, "%s", bgp_origin_str[attr->origin]);
if (json_paths) {
+ if (nexthop_self)
+ json_object_boolean_true_add(json_path,
+ "announceNexthopSelf");
+ if (nexthop_othervrf) {
+ json_object_string_add(json_path, "nhVrfName",
+ nexthop_vrfname);
+
+ json_object_int_add(json_path, "nhVrfId",
+ ((nexthop_vrfid == VRF_UNKNOWN)
+ ? -1
+ : (int)nexthop_vrfid));
+ }
+ }
+
+ if (json_paths) {
if (json_nexthop_global || json_nexthop_ll) {
json_nexthops = json_object_new_array();
@@ -7053,10 +7129,10 @@ void route_vty_out_overlay(struct vty *vty, struct prefix *p,
vty_out(vty, "%s", str);
XFREE(MTYPE_TMP, str);
- if (IS_EVPN_PREFIX_IPADDR_V4((struct prefix_evpn *)p)) {
+ if (is_evpn_prefix_ipaddr_v4((struct prefix_evpn *)p)) {
vty_out(vty, "/%s",
inet_ntoa(attr->evpn_overlay.gw_ip.ipv4));
- } else if (IS_EVPN_PREFIX_IPADDR_V6((struct prefix_evpn *)p)) {
+ } else if (is_evpn_prefix_ipaddr_v6((struct prefix_evpn *)p)) {
vty_out(vty, "/%s",
inet_ntop(AF_INET6,
&(attr->evpn_overlay.gw_ip.ipv6), buf,
@@ -7323,6 +7399,9 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct prefix *p,
int addpath_capable;
int has_adj;
unsigned int first_as;
+ bool nexthop_self = CHECK_FLAG(binfo->flags, BGP_INFO_ANNC_NH_SELF)
+ ? true
+ : false;
if (json_paths) {
json_path = json_object_new_object();
@@ -7626,6 +7705,49 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct prefix *p,
}
}
+ /*
+ * Note when vrfid of nexthop is different from that of prefix
+ */
+ if (binfo->extra && binfo->extra->bgp_orig) {
+ vrf_id_t nexthop_vrfid = binfo->extra->bgp_orig->vrf_id;
+
+ if (json_paths) {
+ const char *vn;
+
+ if (binfo->extra->bgp_orig->inst_type ==
+ BGP_INSTANCE_TYPE_DEFAULT)
+
+ vn = "Default";
+ else
+ vn = binfo->extra->bgp_orig->name;
+
+ json_object_string_add(json_path, "nhVrfName",
+ vn);
+
+ if (nexthop_vrfid == VRF_UNKNOWN) {
+ json_object_int_add(json_path,
+ "nhVrfId", -1);
+ } else {
+ json_object_int_add(json_path,
+ "nhVrfId", (int)nexthop_vrfid);
+ }
+ } else {
+ if (nexthop_vrfid == VRF_UNKNOWN)
+ vty_out(vty, " vrf ?");
+ else
+ vty_out(vty, " vrf %u", nexthop_vrfid);
+ }
+ }
+
+ if (nexthop_self) {
+ if (json_paths) {
+ json_object_boolean_true_add(json_path,
+ "announceNexthopSelf");
+ } else {
+ vty_out(vty, " announce-nh-self");
+ }
+ }
+
if (!json_paths)
vty_out(vty, "\n");
@@ -8323,10 +8445,16 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi,
if (!use_json && header) {
vty_out(vty, "BGP table version is %" PRIu64
- ", local router ID is %s\n",
+ ", local router ID is %s, vrf id ",
table->version,
inet_ntoa(bgp->router_id));
+ if (bgp->vrf_id == VRF_UNKNOWN)
+ vty_out(vty, "%s", VRFID_NONE_STR);
+ else
+ vty_out(vty, "%u", bgp->vrf_id);
+ vty_out(vty, "\n");
vty_out(vty, BGP_SHOW_SCODE_HEADER);
+ vty_out(vty, BGP_SHOW_NCODE_HEADER);
vty_out(vty, BGP_SHOW_OCODE_HEADER);
if (type == bgp_show_type_dampend_paths
|| type == bgp_show_type_damp_neighbor)
@@ -10136,9 +10264,15 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
"0.0.0.0");
} else {
vty_out(vty, "BGP table version is %" PRIu64
- ", local router ID is %s\n",
+ ", local router ID is %s, vrf id ",
table->version, inet_ntoa(bgp->router_id));
+ if (bgp->vrf_id == VRF_UNKNOWN)
+ vty_out(vty, "%s", VRFID_NONE_STR);
+ else
+ vty_out(vty, "%u", bgp->vrf_id);
+ vty_out(vty, "\n");
vty_out(vty, BGP_SHOW_SCODE_HEADER);
+ vty_out(vty, BGP_SHOW_NCODE_HEADER);
vty_out(vty, BGP_SHOW_OCODE_HEADER);
vty_out(vty, "Originating default network 0.0.0.0\n\n");
@@ -10169,12 +10303,21 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
json_ocode);
} else {
vty_out(vty,
- "BGP table version is 0, local router ID is %s\n",
+ "BGP table version is 0, local router ID is %s, vrf id ",
inet_ntoa(
- bgp->router_id));
+ bgp->router_id));
+ if (bgp->vrf_id == VRF_UNKNOWN)
+ vty_out(vty, "%s",
+ VRFID_NONE_STR);
+ else
+ vty_out(vty, "%u",
+ bgp->vrf_id);
+ vty_out(vty, "\n");
vty_out(vty,
BGP_SHOW_SCODE_HEADER);
vty_out(vty,
+ BGP_SHOW_NCODE_HEADER);
+ vty_out(vty,
BGP_SHOW_OCODE_HEADER);
}
header1 = 0;
@@ -10227,13 +10370,25 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
} else {
vty_out(vty,
"BGP table version is %" PRIu64
- ", local router ID is %s\n",
+ ", local router ID is %s, vrf id ",
table->version,
inet_ntoa(
bgp->router_id));
+ if (bgp->vrf_id ==
+ VRF_UNKNOWN)
+ vty_out(vty,
+ "%s",
+ VRFID_NONE_STR);
+ else
+ vty_out(vty,
+ "%u",
+ bgp->vrf_id);
+ vty_out(vty, "\n");
vty_out(vty,
BGP_SHOW_SCODE_HEADER);
vty_out(vty,
+ BGP_SHOW_NCODE_HEADER);
+ vty_out(vty,
BGP_SHOW_OCODE_HEADER);
}
header1 = 0;
@@ -11243,14 +11398,15 @@ static void bgp_config_write_network_evpn(struct vty *vty, struct bgp *bgp,
prefix_rd2str(prd, rdbuf, sizeof(rdbuf));
if (p->u.prefix_evpn.route_type == 5) {
char local_buf[PREFIX_STRLEN];
- uint8_t family = IS_EVPN_PREFIX_IPADDR_V4((
+ uint8_t family = is_evpn_prefix_ipaddr_v4((
struct prefix_evpn *)p)
? AF_INET
: AF_INET6;
- inet_ntop(family, &p->u.prefix_evpn.ip.ip.addr,
+ inet_ntop(family,
+ &p->u.prefix_evpn.prefix_addr.ip.ip.addr,
local_buf, PREFIX_STRLEN);
sprintf(buf, "%s/%u", local_buf,
- p->u.prefix_evpn.ip_prefix_length);
+ p->u.prefix_evpn.prefix_addr.ip_prefix_length);
} else {
prefix2str(p, buf, sizeof(buf));
}
@@ -11262,7 +11418,8 @@ static void bgp_config_write_network_evpn(struct vty *vty, struct bgp *bgp,
sizeof(buf2));
vty_out(vty,
" network %s rd %s ethtag %u label %u esi %s gwip %s routermac %s\n",
- buf, rdbuf, p->u.prefix_evpn.eth_tag,
+ buf, rdbuf,
+ p->u.prefix_evpn.prefix_addr.eth_tag,
decode_label(&bgp_static->label), esi, buf2,
macrouter);
diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h
index 1e788b00f1..00e5677fe0 100644
--- a/bgpd/bgp_route.h
+++ b/bgpd/bgp_route.h
@@ -54,10 +54,11 @@ enum bgp_show_type {
#define BGP_SHOW_SCODE_HEADER \
- "Status codes: s suppressed, d damped, " \
+ "Status codes: s suppressed, d damped, " \
"h history, * valid, > best, = multipath,\n" \
- " i internal, r RIB-failure, S Stale, R Removed\n"
-#define BGP_SHOW_OCODE_HEADER "Origin codes: i - IGP, e - EGP, ? - incomplete\n\n"
+ " i internal, r RIB-failure, S Stale, R Removed\n"
+#define BGP_SHOW_OCODE_HEADER "Origin codes: i - IGP, e - EGP, ? - incomplete\n\n"
+#define BGP_SHOW_NCODE_HEADER "Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self\n"
#define BGP_SHOW_HEADER " Network Next Hop Metric LocPrf Weight Path\n"
/* Maximum number of labels we can process or send with a prefix. We
@@ -319,7 +320,8 @@ static inline void bgp_bump_version(struct bgp_node *node)
static inline int bgp_fibupd_safi(safi_t safi)
{
if (safi == SAFI_UNICAST || safi == SAFI_MULTICAST
- || safi == SAFI_LABELED_UNICAST)
+ || safi == SAFI_LABELED_UNICAST
+ || safi == SAFI_FLOWSPEC)
return 1;
return 0;
}
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index 4cc889286e..63400f7d31 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -635,7 +635,7 @@ static route_map_result_t route_match_mac_address(void *rule,
p.family = AF_ETHERNET;
p.prefixlen = ETH_ALEN * 8;
- p.u.prefix_eth = prefix->u.prefix_evpn.mac;
+ p.u.prefix_eth = prefix->u.prefix_evpn.macip_addr.mac;
return (access_list_apply(alist, &p) == FILTER_DENY
? RMAP_NOMATCH
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 6bc50fb77e..e1b050bf59 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -6159,21 +6159,46 @@ static int set_ecom_list(struct vty *vty, int argc, struct cmd_token **argv,
return CMD_SUCCESS;
}
-static int vpn_policy_getafi(struct vty *vty, int *doafi)
+/*
+ * v2vimport is true if we are handling a `import vrf ...` command
+ */
+static afi_t vpn_policy_getafi(struct vty *vty, struct bgp *bgp, bool v2vimport)
{
+ afi_t afi;
+
switch (vty->node) {
case BGP_IPV4_NODE:
- doafi[AFI_IP] = 1;
+ afi = AFI_IP;
break;
case BGP_IPV6_NODE:
- doafi[AFI_IP6] = 1;
+ afi = AFI_IP6;
break;
default:
vty_out(vty,
"%% context error: valid only in address-family <ipv4|ipv6> unicast block\n");
- return CMD_WARNING_CONFIG_FAILED;
+ return AFI_MAX;
}
- return CMD_SUCCESS;
+
+ if (!v2vimport) {
+ if (CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_VRF_TO_VRF_IMPORT)
+ || CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_VRF_TO_VRF_EXPORT)) {
+ vty_out(vty,
+ "%% error: Please unconfigure import vrf commands before using vpn commands\n");
+ return AFI_MAX;
+ }
+ } else {
+ if (CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_VRF_TO_MPLSVPN_EXPORT)
+ || CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_MPLSVPN_TO_VRF_IMPORT)) {
+ vty_out(vty,
+ "%% error: Please unconfigure vpn to vrf commands before using import vrf commands\n");
+ return AFI_MAX;
+ }
+ }
+ return afi;
}
DEFPY (af_rd_vpn_export,
@@ -6188,7 +6213,6 @@ DEFPY (af_rd_vpn_export,
VTY_DECLVAR_CONTEXT(bgp, bgp);
struct prefix_rd prd;
int ret;
- int doafi[AFI_MAX] = {0};
afi_t afi;
int idx = 0;
int yes = 1;
@@ -6204,34 +6228,29 @@ DEFPY (af_rd_vpn_export,
}
}
- ret = vpn_policy_getafi(vty, doafi);
- if (ret != CMD_SUCCESS)
- return ret;
-
-
- for (afi = 0; afi < AFI_MAX; ++afi) {
- if (!doafi[afi])
- continue;
-
- /* pre-change: un-export vpn routes (vpn->vrf routes unaffected)
- */
- vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi,
- bgp_get_default(), bgp);
+ afi = vpn_policy_getafi(vty, bgp, false);
+ if (afi == AFI_MAX)
+ return CMD_WARNING_CONFIG_FAILED;
- if (yes) {
- bgp->vpn_policy[afi].tovpn_rd = prd;
- SET_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_RD_SET);
- } else {
- UNSET_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_RD_SET);
- }
+ /*
+ * pre-change: un-export vpn routes (vpn->vrf routes unaffected)
+ */
+ vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+ bgp_get_default(), bgp);
- /* post-change: re-export vpn routes */
- vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
- bgp_get_default(), bgp);
+ if (yes) {
+ bgp->vpn_policy[afi].tovpn_rd = prd;
+ SET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_RD_SET);
+ } else {
+ UNSET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_RD_SET);
}
+ /* post-change: re-export vpn routes */
+ vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+ bgp_get_default(), bgp);
+
return CMD_SUCCESS;
}
@@ -6255,9 +6274,7 @@ DEFPY (af_label_vpn_export,
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
mpls_label_t label = MPLS_LABEL_NONE;
- int doafi[AFI_MAX] = {0};
afi_t afi;
- int ret;
int idx = 0;
int yes = 1;
@@ -6269,62 +6286,57 @@ DEFPY (af_label_vpn_export,
label = label_val; /* parser should force unsigned */
}
- ret = vpn_policy_getafi(vty, doafi);
- if (ret != CMD_SUCCESS)
- return ret;
+ afi = vpn_policy_getafi(vty, bgp, false);
+ if (afi == AFI_MAX)
+ return CMD_WARNING_CONFIG_FAILED;
- for (afi = 0; afi < AFI_MAX; ++afi) {
- if (!doafi[afi])
- continue;
- if (label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_LABEL_AUTO))
+ if (label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO))
+ /* no change */
+ return CMD_SUCCESS;
- continue; /* no change */
+ /*
+ * pre-change: un-export vpn routes (vpn->vrf routes unaffected)
+ */
+ vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+ bgp_get_default(), bgp);
- /*
- * pre-change: un-export vpn routes (vpn->vrf routes unaffected)
- */
- vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi,
- bgp_get_default(), bgp);
-
- if (!label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) {
-
- if (bgp->vpn_policy[afi].tovpn_label !=
- MPLS_LABEL_NONE) {
-
- /*
- * label has previously been automatically
- * assigned by labelpool: release it
- *
- * NB if tovpn_label == MPLS_LABEL_NONE it
- * means the automatic assignment is in flight
- * and therefore the labelpool callback must
- * detect that the auto label is not needed.
- */
-
- bgp_lp_release(LP_TYPE_VRF,
- &bgp->vpn_policy[afi],
- bgp->vpn_policy[afi].tovpn_label);
- }
- UNSET_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
- }
+ if (!label_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) {
+
+ if (bgp->vpn_policy[afi].tovpn_label != MPLS_LABEL_NONE) {
- bgp->vpn_policy[afi].tovpn_label = label;
- if (label_auto) {
- SET_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
- bgp_lp_get(LP_TYPE_VRF, &bgp->vpn_policy[afi],
- vpn_leak_label_callback);
+ /*
+ * label has previously been automatically
+ * assigned by labelpool: release it
+ *
+ * NB if tovpn_label == MPLS_LABEL_NONE it
+ * means the automatic assignment is in flight
+ * and therefore the labelpool callback must
+ * detect that the auto label is not needed.
+ */
+
+ bgp_lp_release(LP_TYPE_VRF,
+ &bgp->vpn_policy[afi],
+ bgp->vpn_policy[afi].tovpn_label);
}
+ UNSET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
+ }
- /* post-change: re-export vpn routes */
- vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
- bgp_get_default(), bgp);
+ bgp->vpn_policy[afi].tovpn_label = label;
+ if (label_auto) {
+ SET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
+ bgp_lp_get(LP_TYPE_VRF, &bgp->vpn_policy[afi],
+ vpn_leak_label_callback);
}
+ /* post-change: re-export vpn routes */
+ vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+ bgp_get_default(), bgp);
+
return CMD_SUCCESS;
}
@@ -6347,9 +6359,7 @@ DEFPY (af_nexthop_vpn_export,
"IPv6 prefix\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
- int doafi[AFI_MAX] = {0};
afi_t afi;
- int ret;
struct prefix p;
int idx = 0;
int yes = 1;
@@ -6362,34 +6372,29 @@ DEFPY (af_nexthop_vpn_export,
return CMD_WARNING_CONFIG_FAILED;
}
- ret = vpn_policy_getafi(vty, doafi);
- if (ret != CMD_SUCCESS)
- return ret;
-
- for (afi = 0; afi < AFI_MAX; ++afi) {
- if (!doafi[afi])
- continue;
-
- /*
- * pre-change: un-export vpn routes (vpn->vrf routes unaffected)
- */
- vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi,
- bgp_get_default(), bgp);
+ afi = vpn_policy_getafi(vty, bgp, false);
+ if (afi == AFI_MAX)
+ return CMD_WARNING_CONFIG_FAILED;
- if (yes) {
- bgp->vpn_policy[afi].tovpn_nexthop = p;
- SET_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_NEXTHOP_SET);
- } else {
- UNSET_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_NEXTHOP_SET);
- }
+ /*
+ * pre-change: un-export vpn routes (vpn->vrf routes unaffected)
+ */
+ vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+ bgp_get_default(), bgp);
- /* post-change: re-export vpn routes */
- vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
- bgp_get_default(), bgp);
+ if (yes) {
+ bgp->vpn_policy[afi].tovpn_nexthop = p;
+ SET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_NEXTHOP_SET);
+ } else {
+ UNSET_FLAG(bgp->vpn_policy[afi].flags,
+ BGP_VPN_POLICY_TOVPN_NEXTHOP_SET);
}
+ /* post-change: re-export vpn routes */
+ vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+ bgp_get_default(), bgp);
+
return CMD_SUCCESS;
}
@@ -6433,7 +6438,6 @@ DEFPY (af_rt_vpn_imexport,
int ret;
struct ecommunity *ecom = NULL;
int dodir[BGP_VPN_POLICY_DIR_MAX] = {0};
- int doafi[AFI_MAX] = {0};
vpn_policy_direction_t dir;
afi_t afi;
int idx = 0;
@@ -6442,9 +6446,9 @@ DEFPY (af_rt_vpn_imexport,
if (argv_find(argv, argc, "no", &idx))
yes = 0;
- ret = vpn_policy_getafi(vty, doafi);
- if (ret != CMD_SUCCESS)
- return ret;
+ afi = vpn_policy_getafi(vty, bgp, false);
+ if (afi == AFI_MAX)
+ return CMD_WARNING_CONFIG_FAILED;
ret = vpn_policy_getdirs(vty, direction_str, dodir);
if (ret != CMD_SUCCESS)
@@ -6461,31 +6465,28 @@ DEFPY (af_rt_vpn_imexport,
}
}
- for (afi = 0; afi < AFI_MAX; ++afi) {
- if (!doafi[afi])
+ for (dir = 0; dir < BGP_VPN_POLICY_DIR_MAX; ++dir) {
+ if (!dodir[dir])
continue;
- for (dir = 0; dir < BGP_VPN_POLICY_DIR_MAX; ++dir) {
- if (!dodir[dir])
- continue;
- vpn_leak_prechange(dir, afi, bgp_get_default(), bgp);
-
- if (yes) {
- if (bgp->vpn_policy[afi].rtlist[dir])
- ecommunity_free(
- &bgp->vpn_policy[afi].rtlist[dir]);
- bgp->vpn_policy[afi].rtlist[dir] =
- ecommunity_dup(ecom);
- } else {
- if (bgp->vpn_policy[afi].rtlist[dir])
- ecommunity_free(
- &bgp->vpn_policy[afi].rtlist[dir]);
- bgp->vpn_policy[afi].rtlist[dir] = NULL;
- }
+ vpn_leak_prechange(dir, afi, bgp_get_default(), bgp);
- vpn_leak_postchange(dir, afi, bgp_get_default(), bgp);
+ if (yes) {
+ if (bgp->vpn_policy[afi].rtlist[dir])
+ ecommunity_free(
+ &bgp->vpn_policy[afi].rtlist[dir]);
+ bgp->vpn_policy[afi].rtlist[dir] =
+ ecommunity_dup(ecom);
+ } else {
+ if (bgp->vpn_policy[afi].rtlist[dir])
+ ecommunity_free(
+ &bgp->vpn_policy[afi].rtlist[dir]);
+ bgp->vpn_policy[afi].rtlist[dir] = NULL;
}
+
+ vpn_leak_postchange(dir, afi, bgp_get_default(), bgp);
}
+
if (ecom)
ecommunity_free(&ecom);
@@ -6517,7 +6518,6 @@ DEFPY (af_route_map_vpn_imexport,
VTY_DECLVAR_CONTEXT(bgp, bgp);
int ret;
int dodir[BGP_VPN_POLICY_DIR_MAX] = {0};
- int doafi[AFI_MAX] = {0};
vpn_policy_direction_t dir;
afi_t afi;
int idx = 0;
@@ -6526,41 +6526,39 @@ DEFPY (af_route_map_vpn_imexport,
if (argv_find(argv, argc, "no", &idx))
yes = 0;
- ret = vpn_policy_getafi(vty, doafi);
- if (ret != CMD_SUCCESS)
- return ret;
+ afi = vpn_policy_getafi(vty, bgp, false);
+ if (afi == AFI_MAX)
+ return CMD_WARNING_CONFIG_FAILED;
ret = vpn_policy_getdirs(vty, direction_str, dodir);
if (ret != CMD_SUCCESS)
return ret;
- for (afi = 0; afi < AFI_MAX; ++afi) {
- if (!doafi[afi])
+ for (dir = 0; dir < BGP_VPN_POLICY_DIR_MAX; ++dir) {
+ if (!dodir[dir])
continue;
- for (dir = 0; dir < BGP_VPN_POLICY_DIR_MAX; ++dir) {
- if (!dodir[dir])
- continue;
- vpn_leak_prechange(dir, afi, bgp_get_default(), bgp);
-
- if (yes) {
- if (bgp->vpn_policy[afi].rmap_name[dir])
- XFREE(MTYPE_ROUTE_MAP_NAME,
- bgp->vpn_policy[afi].rmap_name[dir]);
- bgp->vpn_policy[afi].rmap_name[dir] = XSTRDUP(
- MTYPE_ROUTE_MAP_NAME, rmap_str);
- bgp->vpn_policy[afi].rmap[dir] =
- route_map_lookup_by_name(rmap_str);
- } else {
- if (bgp->vpn_policy[afi].rmap_name[dir])
- XFREE(MTYPE_ROUTE_MAP_NAME,
- bgp->vpn_policy[afi].rmap_name[dir]);
- bgp->vpn_policy[afi].rmap_name[dir] = NULL;
- bgp->vpn_policy[afi].rmap[dir] = NULL;
- }
+ vpn_leak_prechange(dir, afi, bgp_get_default(), bgp);
- vpn_leak_postchange(dir, afi, bgp_get_default(), bgp);
+ if (yes) {
+ if (bgp->vpn_policy[afi].rmap_name[dir])
+ XFREE(MTYPE_ROUTE_MAP_NAME,
+ bgp->vpn_policy[afi].rmap_name[dir]);
+ bgp->vpn_policy[afi].rmap_name[dir] = XSTRDUP(
+ MTYPE_ROUTE_MAP_NAME, rmap_str);
+ bgp->vpn_policy[afi].rmap[dir] =
+ route_map_lookup_by_name(rmap_str);
+ if (!bgp->vpn_policy[afi].rmap[dir])
+ return CMD_SUCCESS;
+ } else {
+ if (bgp->vpn_policy[afi].rmap_name[dir])
+ XFREE(MTYPE_ROUTE_MAP_NAME,
+ bgp->vpn_policy[afi].rmap_name[dir]);
+ bgp->vpn_policy[afi].rmap_name[dir] = NULL;
+ bgp->vpn_policy[afi].rmap[dir] = NULL;
}
+
+ vpn_leak_postchange(dir, afi, bgp_get_default(), bgp);
}
return CMD_SUCCESS;
@@ -6575,6 +6573,158 @@ ALIAS (af_route_map_vpn_imexport,
"For routes leaked from vpn to current address-family\n"
"For routes leaked from current address-family to vpn\n")
+DEFPY(af_import_vrf_route_map, af_import_vrf_route_map_cmd,
+ "[no] import vrf route-map RMAP$rmap_str",
+ NO_STR
+ "Import routes from another VRF\n"
+ "Vrf routes being filtered\n"
+ "Specify route map\n"
+ "name of route-map\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ vpn_policy_direction_t dir = BGP_VPN_POLICY_DIR_FROMVPN;
+ afi_t afi;
+ int idx = 0;
+ int yes = 1;
+ struct bgp *bgp_default;
+
+ if (argv_find(argv, argc, "no", &idx))
+ yes = 0;
+
+ afi = vpn_policy_getafi(vty, bgp, true);
+ if (afi == AFI_MAX)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ bgp_default = bgp_get_default();
+ if (!bgp_default) {
+ int32_t ret;
+ as_t as = bgp->as;
+
+ /* Auto-create assuming the same AS */
+ ret = bgp_get(&bgp_default, &as, NULL,
+ BGP_INSTANCE_TYPE_DEFAULT);
+
+ if (ret) {
+ vty_out(vty,
+ "VRF default is not configured as a bgp instance\n");
+ return CMD_WARNING;
+ }
+ }
+
+ vpn_leak_prechange(dir, afi, bgp_get_default(), bgp);
+
+ if (yes) {
+ if (bgp->vpn_policy[afi].rmap_name[dir])
+ XFREE(MTYPE_ROUTE_MAP_NAME,
+ bgp->vpn_policy[afi].rmap_name[dir]);
+ bgp->vpn_policy[afi].rmap_name[dir] =
+ XSTRDUP(MTYPE_ROUTE_MAP_NAME, rmap_str);
+ bgp->vpn_policy[afi].rmap[dir] =
+ route_map_lookup_by_name(rmap_str);
+ if (!bgp->vpn_policy[afi].rmap[dir])
+ return CMD_SUCCESS;
+ } else {
+ if (bgp->vpn_policy[afi].rmap_name[dir])
+ XFREE(MTYPE_ROUTE_MAP_NAME,
+ bgp->vpn_policy[afi].rmap_name[dir]);
+ bgp->vpn_policy[afi].rmap_name[dir] = NULL;
+ bgp->vpn_policy[afi].rmap[dir] = NULL;
+ }
+
+ vpn_leak_postchange(dir, afi, bgp_get_default(), bgp);
+
+ return CMD_SUCCESS;
+}
+
+ALIAS(af_import_vrf_route_map, af_no_import_vrf_route_map_cmd,
+ "no import vrf route-map",
+ NO_STR
+ "Import routes from another VRF\n"
+ "Vrf routes being filtered\n"
+ "Specify route map\n")
+
+DEFPY (bgp_imexport_vrf,
+ bgp_imexport_vrf_cmd,
+ "[no] import vrf NAME$import_name",
+ NO_STR
+ "Import routes from another VRF\n"
+ "VRF to import from\n"
+ "The name of the VRF\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct listnode *node;
+ struct bgp *vrf_bgp, *bgp_default;
+ int32_t ret = 0;
+ as_t as = bgp->as;
+ bool remove = false;
+ int32_t idx = 0;
+ char *vname;
+ enum bgp_instance_type bgp_type = BGP_INSTANCE_TYPE_VRF;
+ safi_t safi;
+ afi_t afi;
+
+ if (argv_find(argv, argc, "no", &idx))
+ remove = true;
+
+ afi = vpn_policy_getafi(vty, bgp, true);
+ if (afi == AFI_MAX)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ safi = bgp_node_safi(vty);
+
+ if (((BGP_INSTANCE_TYPE_DEFAULT == bgp->inst_type)
+ && (strcmp(import_name, BGP_DEFAULT_NAME) == 0))
+ || (bgp->name && (strcmp(import_name, bgp->name) == 0))) {
+ vty_out(vty, "%% Cannot %s vrf %s into itself\n",
+ remove ? "unimport" : "import", import_name);
+ return CMD_WARNING;
+ }
+
+ bgp_default = bgp_get_default();
+ if (!bgp_default) {
+ /* Auto-create assuming the same AS */
+ ret = bgp_get(&bgp_default, &as, NULL,
+ BGP_INSTANCE_TYPE_DEFAULT);
+
+ if (ret) {
+ vty_out(vty,
+ "VRF default is not configured as a bgp instance\n");
+ return CMD_WARNING;
+ }
+ }
+
+ vrf_bgp = bgp_lookup_by_name(import_name);
+ if (!vrf_bgp) {
+ if (strcmp(import_name, BGP_DEFAULT_NAME) == 0)
+ vrf_bgp = bgp_default;
+ else
+ /* Auto-create assuming the same AS */
+ ret = bgp_get(&vrf_bgp, &as, import_name, bgp_type);
+
+ if (ret) {
+ vty_out(vty,
+ "VRF %s is not configured as a bgp instance\n",
+ import_name);
+ return CMD_WARNING;
+ }
+ }
+
+ if (remove) {
+ vrf_unimport_from_vrf(bgp, vrf_bgp, afi, safi);
+ } else {
+ /* Already importing from "import_vrf"? */
+ for (ALL_LIST_ELEMENTS_RO(bgp->vpn_policy[afi].import_vrf, node,
+ vname)) {
+ if (strcmp(vname, import_name) == 0)
+ return CMD_WARNING;
+ }
+
+ vrf_import_from_vrf(bgp, vrf_bgp, afi, safi);
+ }
+
+ return CMD_SUCCESS;
+}
+
/* This command is valid only in a bgp vrf instance or the default instance */
DEFPY (bgp_imexport_vpn,
bgp_imexport_vpn_cmd,
@@ -6653,7 +6803,6 @@ DEFPY (af_routetarget_import,
VTY_DECLVAR_CONTEXT(bgp, bgp);
int ret;
struct ecommunity *ecom = NULL;
- int doafi[AFI_MAX] = {0};
afi_t afi;
int idx = 0;
int yes = 1;
@@ -6661,9 +6810,10 @@ DEFPY (af_routetarget_import,
if (argv_find(argv, argc, "no", &idx))
yes = 0;
- ret = vpn_policy_getafi(vty, doafi);
- if (ret != CMD_SUCCESS)
- return ret;
+ afi = vpn_policy_getafi(vty, bgp, false);
+ if (afi == AFI_MAX)
+ return CMD_WARNING_CONFIG_FAILED;
+
if (yes) {
if (!argv_find(argv, argc, "RTLIST", &idx)) {
vty_out(vty, "%% Missing RTLIST\n");
@@ -6673,24 +6823,20 @@ DEFPY (af_routetarget_import,
if (ret != CMD_SUCCESS)
return ret;
}
- for (afi = 0; afi < AFI_MAX; ++afi) {
- if (!doafi[afi])
- continue;
- if (yes) {
- if (bgp->vpn_policy[afi].import_redirect_rtlist)
- ecommunity_free(
- &bgp->vpn_policy[afi]
+
+ if (yes) {
+ if (bgp->vpn_policy[afi].import_redirect_rtlist)
+ ecommunity_free(&bgp->vpn_policy[afi]
.import_redirect_rtlist);
- bgp->vpn_policy[afi].import_redirect_rtlist =
- ecommunity_dup(ecom);
- } else {
- if (bgp->vpn_policy[afi].import_redirect_rtlist)
- ecommunity_free(
- &bgp->vpn_policy[afi]
+ bgp->vpn_policy[afi].import_redirect_rtlist =
+ ecommunity_dup(ecom);
+ } else {
+ if (bgp->vpn_policy[afi].import_redirect_rtlist)
+ ecommunity_free(&bgp->vpn_policy[afi]
.import_redirect_rtlist);
- bgp->vpn_policy[afi].import_redirect_rtlist = NULL;
- }
+ bgp->vpn_policy[afi].import_redirect_rtlist = NULL;
}
+
if (ecom)
ecommunity_free(&ecom);
@@ -10748,6 +10894,125 @@ DEFUN (show_ip_bgp_attr_info,
return CMD_SUCCESS;
}
+static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
+ afi_t afi, safi_t safi)
+{
+ struct bgp *bgp;
+ struct listnode *node;
+ char *vname;
+ char buf1[INET6_ADDRSTRLEN];
+ char *ecom_str;
+ vpn_policy_direction_t dir;
+
+ if (name) {
+ bgp = bgp_lookup_by_name(name);
+ if (!bgp) {
+ vty_out(vty, "%% No such BGP instance exist\n");
+ return CMD_WARNING;
+ }
+ } else {
+ bgp = bgp_get_default();
+ if (!bgp) {
+ vty_out(vty,
+ "%% Default BGP instance does not exist\n");
+ return CMD_WARNING;
+ }
+ }
+
+ if (!CHECK_FLAG(bgp->af_flags[afi][safi],
+ BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
+ vty_out(vty,
+ "This VRF is not importing %s routes from any other VRF\n",
+ afi_safi_print(afi, safi));
+ } else {
+ vty_out(vty,
+ "This VRF is importing %s routes from the following VRFs:\n",
+ afi_safi_print(afi, safi));
+ for (ALL_LIST_ELEMENTS_RO(bgp->vpn_policy[afi].import_vrf, node,
+ vname)) {
+ vty_out(vty, " %s\n", vname);
+ }
+ dir = BGP_VPN_POLICY_DIR_FROMVPN;
+ ecom_str = ecommunity_ecom2str(
+ bgp->vpn_policy[afi].rtlist[dir],
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ vty_out(vty, "Import RT(s): %s\n", ecom_str);
+ XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ }
+
+ if (!CHECK_FLAG(bgp->af_flags[afi][safi],
+ BGP_CONFIG_VRF_TO_VRF_EXPORT)) {
+ vty_out(vty,
+ "This VRF is not exporting %s routes to any other VRF\n",
+ afi_safi_print(afi, safi));
+ } else {
+ vty_out(vty,
+ "This VRF is exporting %s routes to the following VRFs:\n",
+ afi_safi_print(afi, safi));
+ for (ALL_LIST_ELEMENTS_RO(bgp->vpn_policy[afi].export_vrf, node,
+ vname)) {
+ vty_out(vty, " %s\n", vname);
+ }
+ vty_out(vty, "RD: %s\n",
+ prefix_rd2str(&bgp->vpn_policy[afi].tovpn_rd,
+ buf1, RD_ADDRSTRLEN));
+ dir = BGP_VPN_POLICY_DIR_TOVPN;
+ ecom_str = ecommunity_ecom2str(
+ bgp->vpn_policy[afi].rtlist[dir],
+ ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ vty_out(vty, "Emport RT: %s\n", ecom_str);
+ XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ }
+
+ return CMD_SUCCESS;
+}
+
+/* "show [ip] bgp route-leak" command. */
+DEFUN (show_ip_bgp_route_leak,
+ show_ip_bgp_route_leak_cmd,
+ "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_CMD_STR"]] route-leak",
+ SHOW_STR
+ IP_STR
+ BGP_STR
+ BGP_INSTANCE_HELP_STR
+ BGP_AFI_HELP_STR
+ BGP_SAFI_HELP_STR
+ "Route leaking information\n")
+{
+ char *vrf = NULL;
+ afi_t afi = AFI_MAX;
+ safi_t safi = SAFI_MAX;
+
+ int idx = 0;
+
+ /* show [ip] bgp */
+ if (argv_find(argv, argc, "ip", &idx)) {
+ afi = AFI_IP;
+ safi = SAFI_UNICAST;
+ }
+ /* [vrf VIEWVRFNAME] */
+ if (argv_find(argv, argc, "view", &idx)) {
+ vty_out(vty,
+ "%% This command is not applicable to BGP views\n");
+ return CMD_WARNING;
+ }
+
+ if (argv_find(argv, argc, "vrf", &idx))
+ vrf = argv[++idx]->arg;
+ /* ["BGP_AFI_CMD_STR" ["BGP_SAFI_CMD_STR"]] */
+ if (argv_find_and_parse_afi(argv, argc, &idx, &afi)) {
+ argv_find_and_parse_safi(argv, argc, &idx, &safi);
+ }
+
+ if (!((afi == AFI_IP || afi == AFI_IP6) && safi == SAFI_UNICAST)) {
+ vty_out(vty,
+ "%% This command is applicable only for unicast ipv4|ipv6\n");
+ return CMD_WARNING;
+ }
+
+ return bgp_show_route_leak_vty(vty, vrf, afi, safi);
+}
+
static void bgp_show_all_instances_updgrps_vty(struct vty *vty, afi_t afi,
safi_t safi)
{
@@ -11744,6 +12009,17 @@ void bgp_vpn_policy_config_write_afi(struct vty *vty, struct bgp *bgp,
{
int indent = 2;
+ if (bgp->vpn_policy[afi].rmap_name[BGP_VPN_POLICY_DIR_FROMVPN])
+ vty_out(vty, "%*simport vrf route-map %s\n", indent, "",
+ bgp->vpn_policy[afi]
+ .rmap_name[BGP_VPN_POLICY_DIR_FROMVPN]);
+
+ if (CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_VRF_TO_VRF_IMPORT)
+ || CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_VRF_TO_VRF_EXPORT))
+ return;
+
if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) {
@@ -11805,16 +12081,12 @@ void bgp_vpn_policy_config_write_afi(struct vty *vty, struct bgp *bgp,
XFREE(MTYPE_ECOMMUNITY_STR, b);
}
}
- if (bgp->vpn_policy[afi].rmap_name[BGP_VPN_POLICY_DIR_FROMVPN]) {
- vty_out(vty, "%*sroute-map vpn import %s\n", indent, "",
- bgp->vpn_policy[afi]
- .rmap_name[BGP_VPN_POLICY_DIR_FROMVPN]);
- }
- if (bgp->vpn_policy[afi].rmap_name[BGP_VPN_POLICY_DIR_TOVPN]) {
+
+ if (bgp->vpn_policy[afi].rmap_name[BGP_VPN_POLICY_DIR_TOVPN])
vty_out(vty, "%*sroute-map vpn export %s\n", indent, "",
bgp->vpn_policy[afi]
.rmap_name[BGP_VPN_POLICY_DIR_TOVPN]);
- }
+
if (bgp->vpn_policy[afi].import_redirect_rtlist) {
char *b = ecommunity_ecom2str(
bgp->vpn_policy[afi]
@@ -13019,6 +13291,8 @@ void bgp_vty_init(void)
install_element(VIEW_NODE, &show_ip_bgp_lcommunity_info_cmd);
/* "show [ip] bgp attribute-info" commands. */
install_element(VIEW_NODE, &show_ip_bgp_attr_info_cmd);
+ /* "show [ip] bgp route-leak" command */
+ install_element(VIEW_NODE, &show_ip_bgp_route_leak_cmd);
/* "redistribute" commands. */
install_element(BGP_NODE, &bgp_redistribute_ipv4_hidden_cmd);
@@ -13063,6 +13337,9 @@ void bgp_vty_init(void)
install_element(BGP_IPV4_NODE, &bgp_imexport_vpn_cmd);
install_element(BGP_IPV6_NODE, &bgp_imexport_vpn_cmd);
+ install_element(BGP_IPV4_NODE, &bgp_imexport_vrf_cmd);
+ install_element(BGP_IPV6_NODE, &bgp_imexport_vrf_cmd);
+
/* ttl_security commands */
install_element(BGP_NODE, &neighbor_ttl_security_cmd);
install_element(BGP_NODE, &no_neighbor_ttl_security_cmd);
@@ -13093,6 +13370,8 @@ void bgp_vty_init(void)
install_element(BGP_IPV6_NODE, &af_rt_vpn_imexport_cmd);
install_element(BGP_IPV4_NODE, &af_route_map_vpn_imexport_cmd);
install_element(BGP_IPV6_NODE, &af_route_map_vpn_imexport_cmd);
+ install_element(BGP_IPV4_NODE, &af_import_vrf_route_map_cmd);
+ install_element(BGP_IPV6_NODE, &af_import_vrf_route_map_cmd);
install_element(BGP_IPV4_NODE, &af_routetarget_import_cmd);
install_element(BGP_IPV6_NODE, &af_routetarget_import_cmd);
@@ -13107,6 +13386,8 @@ void bgp_vty_init(void)
install_element(BGP_IPV6_NODE, &af_no_rt_vpn_imexport_cmd);
install_element(BGP_IPV4_NODE, &af_no_route_map_vpn_imexport_cmd);
install_element(BGP_IPV6_NODE, &af_no_route_map_vpn_imexport_cmd);
+ install_element(BGP_IPV4_NODE, &af_no_import_vrf_route_map_cmd);
+ install_element(BGP_IPV6_NODE, &af_no_import_vrf_route_map_cmd);
}
#include "memory.h"
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index b564fccf43..93a509c219 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -56,6 +56,7 @@
#include "bgpd/bgp_evpn.h"
#include "bgpd/bgp_mplsvpn.h"
#include "bgpd/bgp_labelpool.h"
+#include "bgpd/bgp_pbr.h"
/* All information about zebra. */
struct zclient *zclient = NULL;
@@ -583,13 +584,20 @@ static int zebra_read_route(int command, struct zclient *zclient,
char buf[2][PREFIX_STRLEN];
prefix2str(&api.prefix, buf[0], sizeof(buf[0]));
- inet_ntop(api.prefix.family, &nexthop, buf[1], sizeof(buf[1]));
- zlog_debug(
- "Rx route %s VRF %u %s[%d] %s "
- "nexthop %s metric %u tag %" ROUTE_TAG_PRI,
- (add) ? "add" : "delete", vrf_id,
- zebra_route_string(api.type), api.instance, buf[0],
- buf[1], api.metric, api.tag);
+ if (add) {
+ inet_ntop(api.prefix.family, &nexthop, buf[1],
+ sizeof(buf[1]));
+ zlog_debug(
+ "Rx route ADD VRF %u %s[%d] %s nexthop %s (type %d if %u) metric %u tag %" ROUTE_TAG_PRI,
+ vrf_id, zebra_route_string(api.type),
+ api.instance, buf[0], buf[1], nhtype,
+ ifindex, api.metric, api.tag);
+ } else {
+ zlog_debug(
+ "Rx route DEL VRF %u %s[%d] %s",
+ vrf_id, zebra_route_string(api.type),
+ api.instance, buf[0]);
+ }
}
return 0;
@@ -906,28 +914,42 @@ int bgp_nexthop_set(union sockunion *local, union sockunion *remote,
return 0;
}
-static struct in6_addr *bgp_info_to_ipv6_nexthop(struct bgp_info *info)
+static struct in6_addr *bgp_info_to_ipv6_nexthop(struct bgp_info *info,
+ ifindex_t *ifindex)
{
struct in6_addr *nexthop = NULL;
/* Only global address nexthop exists. */
- if (info->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL)
+ if (info->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL) {
nexthop = &info->attr->mp_nexthop_global;
+ if (IN6_IS_ADDR_LINKLOCAL(nexthop))
+ *ifindex = info->attr->nh_ifindex;
+
+ }
/* If both global and link-local address present. */
if (info->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
/* Check if route-map is set to prefer global over link-local */
- if (info->attr->mp_nexthop_prefer_global)
+ if (info->attr->mp_nexthop_prefer_global) {
nexthop = &info->attr->mp_nexthop_global;
- else {
+ if (IN6_IS_ADDR_LINKLOCAL(nexthop))
+ *ifindex = info->attr->nh_ifindex;
+ } else {
/* Workaround for Cisco's nexthop bug. */
if (IN6_IS_ADDR_UNSPECIFIED(
&info->attr->mp_nexthop_global)
- && info->peer->su_remote->sa.sa_family == AF_INET6)
+ && info->peer->su_remote->sa.sa_family
+ == AF_INET6) {
nexthop =
&info->peer->su_remote->sin6.sin6_addr;
- else
+ if (IN6_IS_ADDR_LINKLOCAL(nexthop))
+ *ifindex = info->peer->nexthop.ifp
+ ->ifindex;
+ } else {
nexthop = &info->attr->mp_nexthop_local;
+ if (IN6_IS_ADDR_LINKLOCAL(nexthop))
+ *ifindex = info->attr->nh_lla_ifindex;
+ }
}
}
@@ -958,13 +980,16 @@ static int bgp_table_map_apply(struct route_map *map, struct prefix *p,
}
if (p->family == AF_INET6) {
char buf[2][INET6_ADDRSTRLEN];
+ ifindex_t ifindex;
+ struct in6_addr *nexthop;
+
+ nexthop = bgp_info_to_ipv6_nexthop(info, &ifindex);
zlog_debug(
"Zebra rmap deny: IPv6 route %s/%d nexthop %s",
inet_ntop(AF_INET6, &p->u.prefix6, buf[0],
sizeof(buf[0])),
p->prefixlen,
- inet_ntop(AF_INET6,
- bgp_info_to_ipv6_nexthop(info),
+ inet_ntop(AF_INET6, nexthop,
buf[1], sizeof(buf[1])));
}
}
@@ -973,6 +998,9 @@ static int bgp_table_map_apply(struct route_map *map, struct prefix *p,
static struct thread *bgp_tm_thread_connect;
static bool bgp_tm_status_connected;
+static bool bgp_tm_chunk_obtained;
+#define BGP_FLOWSPEC_TABLE_CHUNK 100000
+static uint32_t bgp_tm_min, bgp_tm_max, bgp_tm_chunk_size;
static int bgp_zebra_tm_connect(struct thread *t)
{
@@ -993,12 +1021,27 @@ static int bgp_zebra_tm_connect(struct thread *t)
if (!bgp_tm_status_connected)
zlog_debug("Connecting to table manager. Success");
bgp_tm_status_connected = true;
+ if (!bgp_tm_chunk_obtained) {
+ if (bgp_zebra_get_table_range(bgp_tm_chunk_size,
+ &bgp_tm_min,
+ &bgp_tm_max) >= 0)
+ bgp_tm_chunk_obtained = true;
+ }
}
thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
&bgp_tm_thread_connect);
return 0;
}
+uint32_t bgp_zebra_tm_get_id(void)
+{
+ static int table_id;
+
+ if (!bgp_tm_chunk_obtained)
+ return ++table_id;
+ return bgp_tm_min++;
+}
+
void bgp_zebra_init_tm_connect(void)
{
int delay = 1;
@@ -1008,6 +1051,9 @@ void bgp_zebra_init_tm_connect(void)
if (bgp_tm_thread_connect != NULL)
return;
bgp_tm_status_connected = false;
+ bgp_tm_chunk_obtained = false;
+ bgp_tm_min = bgp_tm_max = 0;
+ bgp_tm_chunk_size = BGP_FLOWSPEC_TABLE_CHUNK;
thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
&bgp_tm_thread_connect);
}
@@ -1029,6 +1075,91 @@ int bgp_zebra_get_table_range(uint32_t chunk_size,
return 0;
}
+static int update_ipv4nh_for_route_install(int nh_othervrf,
+ struct in_addr *nexthop,
+ struct attr *attr,
+ bool is_evpn,
+ struct zapi_nexthop *api_nh)
+{
+ api_nh->gate.ipv4 = *nexthop;
+
+ /* Need to set fields appropriately for EVPN routes imported into
+ * a VRF (which are programmed as onlink on l3-vni SVI) as well as
+ * connected routes leaked into a VRF.
+ */
+ if (is_evpn)
+ api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
+ else if (nh_othervrf &&
+ api_nh->gate.ipv4.s_addr == INADDR_ANY) {
+ api_nh->type = NEXTHOP_TYPE_IFINDEX;
+ api_nh->ifindex = attr->nh_ifindex;
+ } else
+ api_nh->type = NEXTHOP_TYPE_IPV4;
+
+ return 1;
+}
+
+static int update_ipv6nh_for_route_install(int nh_othervrf,
+ struct in6_addr *nexthop,
+ ifindex_t ifindex,
+ struct bgp_info *ri,
+ struct bgp_info *best_ri,
+ bool is_evpn,
+ struct zapi_nexthop *api_nh)
+{
+ struct attr *attr;
+
+ attr = ri->attr;
+
+ if (is_evpn)
+ api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
+ else if (nh_othervrf) {
+ if (IN6_IS_ADDR_UNSPECIFIED(nexthop)) {
+ api_nh->type = NEXTHOP_TYPE_IFINDEX;
+ api_nh->ifindex = attr->nh_ifindex;
+ } else if (IN6_IS_ADDR_LINKLOCAL(nexthop)) {
+ if (ifindex == 0)
+ return 0;
+ api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
+ api_nh->ifindex = ifindex;
+ } else {
+ api_nh->type = NEXTHOP_TYPE_IPV6;
+ api_nh->ifindex = 0;
+ }
+ } else {
+ if (IN6_IS_ADDR_LINKLOCAL(nexthop)) {
+ if (ri == best_ri &&
+ attr->mp_nexthop_len
+ == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)
+ if (ri->peer->nexthop.ifp)
+ ifindex = ri->peer->nexthop.ifp
+ ->ifindex;
+ if (!ifindex) {
+ if (ri->peer->conf_if)
+ ifindex = ri->peer->ifp->ifindex;
+ else if (ri->peer->ifname)
+ ifindex = ifname2ifindex(
+ ri->peer->ifname,
+ ri->peer->bgp->vrf_id);
+ else if (ri->peer->nexthop.ifp)
+ ifindex = ri->peer->nexthop.ifp
+ ->ifindex;
+ }
+
+ if (ifindex == 0)
+ return 0;
+ api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
+ api_nh->ifindex = ifindex;
+ } else {
+ api_nh->type = NEXTHOP_TYPE_IPV6;
+ api_nh->ifindex = 0;
+ }
+ }
+ api_nh->gate.ipv6 = *nexthop;
+
+ return 1;
+}
+
void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p,
struct bgp_info *info, struct bgp *bgp, afi_t afi,
safi_t safi)
@@ -1049,6 +1180,8 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p,
mpls_label_t label;
int nh_othervrf = 0;
char buf_prefix[PREFIX_STRLEN]; /* filled in if we are debugging */
+ bool is_evpn = false;
+ int nh_updated;
/* Don't try to install if we're not connected to Zebra or Zebra doesn't
* know of this instance.
@@ -1062,6 +1195,10 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p,
if (bgp_debug_zebra(p))
prefix2str(&api.prefix, buf_prefix, sizeof(buf_prefix));
+ if (safi == SAFI_FLOWSPEC)
+ return bgp_pbr_update_entry(bgp, &rn->p,
+ info, afi, safi, true);
+
/*
* vrf leaking support (will have only one nexthop)
*/
@@ -1082,28 +1219,23 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p,
if (info->type == ZEBRA_ROUTE_BGP
&& info->sub_type == BGP_ROUTE_IMPORTED) {
- struct bgp_info *bi;
-
- /*
- * Look at parent chain for peer sort
- */
- for (bi = info; bi->extra && bi->extra->parent;
- bi = bi->extra->parent) {
-
- peer = ((struct bgp_info *)(bi->extra->parent))->peer;
- }
+ /* Obtain peer from parent */
+ if (info->extra && info->extra->parent)
+ peer = ((struct bgp_info *)(info->extra->parent))->peer;
}
tag = info->attr->tag;
- /* When we create an aggregate route we must also install a Null0 route
- * in
- * the RIB */
+ /*
+ * When we create an aggregate route we must also install a
+ * Null0 route in the RIB
+ */
if (info->sub_type == BGP_ROUTE_AGGREGATE)
zapi_route_set_blackhole(&api, BLACKHOLE_NULL);
/* If the route's source is EVPN, flag as such. */
- if (is_route_parent_evpn(info))
+ is_evpn = is_route_parent_evpn(info);
+ if (is_evpn)
SET_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE);
if (peer->sort == BGP_PEER_IBGP || peer->sort == BGP_PEER_CONFED
@@ -1141,8 +1273,6 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p,
api_nh->vrf_id = nh_othervrf ? info->extra->bgp_orig->vrf_id
: bgp->vrf_id;
if (nh_family == AF_INET) {
- struct in_addr *nexthop;
-
if (bgp_debug_zebra(&api.prefix)) {
if (mpinfo->extra) {
zlog_debug(
@@ -1185,22 +1315,14 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p,
}
}
- nexthop = &mpinfo_cp->attr->nexthop;
- api_nh->gate.ipv4 = *nexthop;
-
- /* EVPN type-2 routes are
- programmed as onlink on l3-vni SVI
- */
- if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE))
- api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- else
- api_nh->type = NEXTHOP_TYPE_IPV4;
+ nh_updated = update_ipv4nh_for_route_install(
+ nh_othervrf,
+ &mpinfo_cp->attr->nexthop,
+ mpinfo_cp->attr, is_evpn, api_nh);
} else {
ifindex_t ifindex;
struct in6_addr *nexthop;
- ifindex = 0;
-
if (bgp->table_map[afi][safi].name || nh_othervrf) {
/* Copy info and attributes, so the route-map
apply doesn't modify the BGP route info. */
@@ -1234,39 +1356,17 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p,
tag = mpinfo_cp->attr->tag;
}
}
- nexthop = bgp_info_to_ipv6_nexthop(mpinfo_cp);
-
- if ((mpinfo == info)
- && mpinfo->attr->mp_nexthop_len
- == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)
- if (mpinfo->peer->nexthop.ifp)
- ifindex = mpinfo->peer->nexthop.ifp
- ->ifindex;
-
- if (!ifindex) {
- if (mpinfo->peer->conf_if)
- ifindex = mpinfo->peer->ifp->ifindex;
- else if (mpinfo->peer->ifname)
- ifindex = ifname2ifindex(
- mpinfo->peer->ifname,
- bgp->vrf_id);
- else if (mpinfo->peer->nexthop.ifp)
- ifindex = mpinfo->peer->nexthop.ifp
- ->ifindex;
- }
-
- if (IN6_IS_ADDR_LINKLOCAL(nexthop)) {
- if (ifindex == 0)
- continue;
- } else
- ifindex = 0;
-
- api_nh->gate.ipv6 = *nexthop;
- api_nh->ifindex = ifindex;
- api_nh->type = ifindex ? NEXTHOP_TYPE_IPV6_IFINDEX
- : NEXTHOP_TYPE_IPV6;
+ nexthop = bgp_info_to_ipv6_nexthop(mpinfo_cp,
+ &ifindex);
+ nh_updated = update_ipv6nh_for_route_install(
+ nh_othervrf, nexthop, ifindex,
+ mpinfo, info, is_evpn, api_nh);
}
+ /* Did we get proper nexthop info to update zebra? */
+ if (!nh_updated)
+ continue;
+
if (mpinfo->extra
&& bgp_is_valid_label(&mpinfo->extra->label[0])
&& !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
@@ -1279,6 +1379,7 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p,
valid_nh_count++;
}
+
/* if this is a evpn route we don't have to include the label */
if (has_valid_label && !(CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)))
SET_FLAG(api.message, ZAPI_MESSAGE_LABEL);
@@ -1314,20 +1415,25 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p,
for (i = 0; i < api.nexthop_num; i++) {
api_nh = &api.nexthops[i];
- if (api_nh->type == NEXTHOP_TYPE_IPV4)
- nh_family = AF_INET;
- else
- nh_family = AF_INET6;
- inet_ntop(nh_family, &api_nh->gate, nh_buf,
- sizeof(nh_buf));
+ if (api_nh->type == NEXTHOP_TYPE_IFINDEX)
+ nh_buf[0] = '\0';
+ else {
+ if (api_nh->type == NEXTHOP_TYPE_IPV4)
+ nh_family = AF_INET;
+ else
+ nh_family = AF_INET6;
+ inet_ntop(nh_family, &api_nh->gate, nh_buf,
+ sizeof(nh_buf));
+ }
label_buf[0] = '\0';
if (has_valid_label
&& !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE))
sprintf(label_buf, "label %u",
api_nh->labels[0]);
- zlog_debug(" nhop [%d]: %s %s", i + 1, nh_buf,
- label_buf);
+ zlog_debug(" nhop [%d]: %s if %u VRF %u %s",
+ i + 1, nh_buf, api_nh->ifindex,
+ api_nh->vrf_id, label_buf);
}
}
@@ -1381,30 +1487,18 @@ void bgp_zebra_withdraw(struct prefix *p, struct bgp_info *info,
struct zapi_route api;
struct peer *peer;
- peer = info->peer;
- assert(peer);
-
- if (info->type == ZEBRA_ROUTE_BGP
- && info->sub_type == BGP_ROUTE_IMPORTED) {
-
- struct bgp_info *bi;
-
- /*
- * Look at parent chain for peer sort
- */
- for (bi = info; bi->extra && bi->extra->parent;
- bi = bi->extra->parent) {
-
- peer = ((struct bgp_info *)(bi->extra->parent))->peer;
- }
- }
-
/* Don't try to install if we're not connected to Zebra or Zebra doesn't
* know of this instance.
*/
if (!bgp_install_info_to_zebra(bgp))
return;
+ if (safi == SAFI_FLOWSPEC) {
+ peer = info->peer;
+ return bgp_pbr_update_entry(peer->bgp, p,
+ info, AFI_IP, safi, false);
+ }
+
memset(&api, 0, sizeof(api));
memcpy(&api.rmac, &(info->attr->rmac), sizeof(struct ethaddr));
api.vrf_id = bgp->vrf_id;
@@ -1416,16 +1510,6 @@ void bgp_zebra_withdraw(struct prefix *p, struct bgp_info *info,
if (is_route_parent_evpn(info))
SET_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE);
- if (peer->sort == BGP_PEER_IBGP) {
- SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);
- SET_FLAG(api.flags, ZEBRA_FLAG_IBGP);
- }
-
- if ((peer->sort == BGP_PEER_EBGP && peer->ttl != 1)
- || CHECK_FLAG(peer->flags, PEER_FLAG_DISABLE_CONNECTED_CHECK)
- || bgp_flag_check(bgp, BGP_FLAG_DISABLE_NH_CONNECTED_CHK))
- SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);
-
if (bgp_debug_zebra(p)) {
char buf[PREFIX_STRLEN];
@@ -1857,6 +1941,271 @@ int bgp_zebra_advertise_all_vni(struct bgp *bgp, int advertise)
return zclient_send_message(zclient);
}
+static int rule_notify_owner(int command, struct zclient *zclient,
+ zebra_size_t length, vrf_id_t vrf_id)
+{
+ uint32_t seqno, priority, unique;
+ enum zapi_rule_notify_owner note;
+ struct bgp_pbr_action *bgp_pbra;
+ ifindex_t ifi;
+
+ if (!zapi_rule_notify_decode(zclient->ibuf, &seqno, &priority, &unique,
+ &ifi, &note))
+ return -1;
+
+ bgp_pbra = bgp_pbr_action_rule_lookup(vrf_id, unique);
+ if (!bgp_pbra) {
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Fail to look BGP rule (%u)",
+ __PRETTY_FUNCTION__, unique);
+ return 0;
+ }
+
+ switch (note) {
+ case ZAPI_RULE_FAIL_INSTALL:
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received RULE_FAIL_INSTALL",
+ __PRETTY_FUNCTION__);
+ bgp_pbra->installed = false;
+ bgp_pbra->install_in_progress = false;
+ break;
+ case ZAPI_RULE_INSTALLED:
+ bgp_pbra->installed = true;
+ bgp_pbra->install_in_progress = false;
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received RULE_INSTALLED",
+ __PRETTY_FUNCTION__);
+ break;
+ case ZAPI_RULE_REMOVED:
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received RULE REMOVED",
+ __PRETTY_FUNCTION__);
+ break;
+ }
+
+ return 0;
+}
+
+static int ipset_notify_owner(int command, struct zclient *zclient,
+ zebra_size_t length, vrf_id_t vrf_id)
+{
+ uint32_t unique;
+ enum zapi_ipset_notify_owner note;
+ struct bgp_pbr_match *bgp_pbim;
+
+ if (!zapi_ipset_notify_decode(zclient->ibuf,
+ &unique,
+ &note))
+ return -1;
+
+ bgp_pbim = bgp_pbr_match_ipset_lookup(vrf_id, unique);
+ if (!bgp_pbim) {
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Fail to look BGP match (%u)",
+ __PRETTY_FUNCTION__, unique);
+ return 0;
+ }
+
+ switch (note) {
+ case ZAPI_IPSET_FAIL_INSTALL:
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received IPSET_FAIL_INSTALL",
+ __PRETTY_FUNCTION__);
+ bgp_pbim->installed = false;
+ bgp_pbim->install_in_progress = false;
+ break;
+ case ZAPI_IPSET_INSTALLED:
+ bgp_pbim->installed = true;
+ bgp_pbim->install_in_progress = false;
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received IPSET_INSTALLED",
+ __PRETTY_FUNCTION__);
+ break;
+ case ZAPI_IPSET_REMOVED:
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received IPSET REMOVED",
+ __PRETTY_FUNCTION__);
+ break;
+ }
+
+ return 0;
+}
+
+static int ipset_entry_notify_owner(int command, struct zclient *zclient,
+ zebra_size_t length, vrf_id_t vrf_id)
+{
+ uint32_t unique;
+ char ipset_name[ZEBRA_IPSET_NAME_SIZE];
+ enum zapi_ipset_entry_notify_owner note;
+ struct bgp_pbr_match_entry *bgp_pbime;
+
+ if (!zapi_ipset_entry_notify_decode(
+ zclient->ibuf,
+ &unique,
+ ipset_name,
+ &note))
+ return -1;
+ bgp_pbime = bgp_pbr_match_ipset_entry_lookup(vrf_id,
+ ipset_name,
+ unique);
+ if (!bgp_pbime) {
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Fail to look BGP match entry (%u)",
+ __PRETTY_FUNCTION__, unique);
+ return 0;
+ }
+
+ switch (note) {
+ case ZAPI_IPSET_ENTRY_FAIL_INSTALL:
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received IPSET_ENTRY_FAIL_INSTALL",
+ __PRETTY_FUNCTION__);
+ bgp_pbime->installed = false;
+ bgp_pbime->install_in_progress = false;
+ break;
+ case ZAPI_IPSET_ENTRY_INSTALLED:
+ bgp_pbime->installed = true;
+ bgp_pbime->install_in_progress = false;
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received IPSET_ENTRY_INSTALLED",
+ __PRETTY_FUNCTION__);
+ break;
+ case ZAPI_IPSET_ENTRY_REMOVED:
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received IPSET_ENTRY_REMOVED",
+ __PRETTY_FUNCTION__);
+ break;
+ }
+ return 0;
+}
+
+static int iptable_notify_owner(int command, struct zclient *zclient,
+ zebra_size_t length, vrf_id_t vrf_id)
+{
+ uint32_t unique;
+ enum zapi_iptable_notify_owner note;
+ struct bgp_pbr_match *bgpm;
+
+ if (!zapi_iptable_notify_decode(
+ zclient->ibuf,
+ &unique,
+ &note))
+ return -1;
+ bgpm = bgp_pbr_match_iptable_lookup(vrf_id, unique);
+ if (!bgpm) {
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Fail to look BGP iptable (%u)",
+ __PRETTY_FUNCTION__, unique);
+ return 0;
+ }
+ switch (note) {
+ case ZAPI_IPTABLE_FAIL_INSTALL:
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received IPTABLE_FAIL_INSTALL",
+ __PRETTY_FUNCTION__);
+ bgpm->installed_in_iptable = false;
+ bgpm->install_iptable_in_progress = false;
+ break;
+ case ZAPI_IPTABLE_INSTALLED:
+ bgpm->installed_in_iptable = true;
+ bgpm->install_iptable_in_progress = false;
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received IPTABLE_INSTALLED",
+ __PRETTY_FUNCTION__);
+ bgpm->action->refcnt++;
+ break;
+ case ZAPI_IPTABLE_REMOVED:
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received IPTABLE REMOVED",
+ __PRETTY_FUNCTION__);
+ break;
+ }
+ return 0;
+}
+
+static void bgp_encode_pbr_rule_action(struct stream *s,
+ struct bgp_pbr_action *pbra)
+{
+ struct prefix any;
+
+ stream_putl(s, 0); /* seqno unused */
+ stream_putl(s, 0); /* ruleno unused */
+
+ stream_putl(s, pbra->unique);
+
+ memset(&any, 0, sizeof(any));
+ any.family = AF_INET;
+ stream_putc(s, any.family);
+ stream_putc(s, any.prefixlen);
+ stream_put(s, &any.u.prefix, prefix_blen(&any));
+
+ stream_putw(s, 0); /* src port */
+
+ stream_putc(s, any.family);
+ stream_putc(s, any.prefixlen);
+ stream_put(s, &any.u.prefix, prefix_blen(&any));
+
+ stream_putw(s, 0); /* dst port */
+
+ stream_putl(s, pbra->fwmark); /* fwmark */
+
+ stream_putl(s, pbra->table_id);
+
+ stream_putl(s, 0); /* ifindex unused */
+}
+
+static void bgp_encode_pbr_ipset_match(struct stream *s,
+ struct bgp_pbr_match *pbim)
+{
+ stream_putl(s, pbim->unique);
+ stream_putl(s, pbim->type);
+
+ stream_put(s, pbim->ipset_name,
+ ZEBRA_IPSET_NAME_SIZE);
+
+
+}
+
+static void bgp_encode_pbr_ipset_entry_match(struct stream *s,
+ struct bgp_pbr_match_entry *pbime)
+{
+ stream_putl(s, pbime->unique);
+ /* check that back pointer is not null */
+ stream_put(s, pbime->backpointer->ipset_name,
+ ZEBRA_IPSET_NAME_SIZE);
+
+ stream_putc(s, pbime->src.family);
+ stream_putc(s, pbime->src.prefixlen);
+ stream_put(s, &pbime->src.u.prefix, prefix_blen(&pbime->src));
+
+ stream_putc(s, pbime->dst.family);
+ stream_putc(s, pbime->dst.prefixlen);
+ stream_put(s, &pbime->dst.u.prefix, prefix_blen(&pbime->dst));
+}
+
+static void bgp_encode_pbr_iptable_match(struct stream *s,
+ struct bgp_pbr_action *bpa,
+ struct bgp_pbr_match *pbm)
+{
+ stream_putl(s, pbm->unique2);
+
+ stream_putl(s, pbm->type);
+
+ stream_putl(s, pbm->flags);
+
+ /* TODO: correlate with what is contained
+ * into bgp_pbr_action.
+ * currently only forward supported
+ */
+ if (bpa->nh.type == NEXTHOP_TYPE_BLACKHOLE)
+ stream_putl(s, ZEBRA_IPTABLES_DROP);
+ else
+ stream_putl(s, ZEBRA_IPTABLES_FORWARD);
+ stream_putl(s, bpa->fwmark);
+ stream_put(s, pbm->ipset_name,
+ ZEBRA_IPSET_NAME_SIZE);
+}
+
/* BGP has established connection with Zebra. */
static void bgp_zebra_connected(struct zclient *zclient)
{
@@ -2116,6 +2465,10 @@ void bgp_zebra_init(struct thread_master *master)
zclient->local_ip_prefix_add = bgp_zebra_process_local_ip_prefix;
zclient->local_ip_prefix_del = bgp_zebra_process_local_ip_prefix;
zclient->label_chunk = bgp_zebra_process_label_chunk;
+ zclient->rule_notify_owner = rule_notify_owner;
+ zclient->ipset_notify_owner = ipset_notify_owner;
+ zclient->ipset_entry_notify_owner = ipset_entry_notify_owner;
+ zclient->iptable_notify_owner = iptable_notify_owner;
}
void bgp_zebra_destroy(void)
@@ -2131,3 +2484,176 @@ int bgp_zebra_num_connects(void)
{
return zclient_num_connects;
}
+
+void bgp_send_pbr_rule_action(struct bgp_pbr_action *pbra, bool install)
+{
+ struct stream *s;
+
+ if (pbra->install_in_progress)
+ return;
+ zlog_debug("%s: table %d fwmark %d %d", __PRETTY_FUNCTION__,
+ pbra->table_id, pbra->fwmark, install);
+ s = zclient->obuf;
+ stream_reset(s);
+
+ zclient_create_header(s,
+ install ? ZEBRA_RULE_ADD : ZEBRA_RULE_DELETE,
+ VRF_DEFAULT);
+ stream_putl(s, 1); /* send one pbr action */
+
+ bgp_encode_pbr_rule_action(s, pbra);
+
+ stream_putw_at(s, 0, stream_get_endp(s));
+ if (!zclient_send_message(zclient) && install)
+ pbra->install_in_progress = true;
+}
+
+void bgp_send_pbr_ipset_match(struct bgp_pbr_match *pbrim, bool install)
+{
+ struct stream *s;
+
+ if (pbrim->install_in_progress)
+ return;
+ zlog_debug("%s: name %s type %d %d", __PRETTY_FUNCTION__,
+ pbrim->ipset_name, pbrim->type, install);
+ s = zclient->obuf;
+ stream_reset(s);
+
+ zclient_create_header(s,
+ install ? ZEBRA_IPSET_CREATE :
+ ZEBRA_IPSET_DESTROY,
+ VRF_DEFAULT);
+
+ stream_putl(s, 1); /* send one pbr action */
+
+ bgp_encode_pbr_ipset_match(s, pbrim);
+
+ stream_putw_at(s, 0, stream_get_endp(s));
+ if (!zclient_send_message(zclient) && install)
+ pbrim->install_in_progress = true;
+}
+
+void bgp_send_pbr_ipset_entry_match(struct bgp_pbr_match_entry *pbrime,
+ bool install)
+{
+ struct stream *s;
+
+ if (pbrime->install_in_progress)
+ return;
+ zlog_debug("%s: name %s %d %d", __PRETTY_FUNCTION__,
+ pbrime->backpointer->ipset_name,
+ pbrime->unique, install);
+ s = zclient->obuf;
+ stream_reset(s);
+
+ zclient_create_header(s,
+ install ? ZEBRA_IPSET_ENTRY_ADD :
+ ZEBRA_IPSET_ENTRY_DELETE,
+ VRF_DEFAULT);
+
+ stream_putl(s, 1); /* send one pbr action */
+
+ bgp_encode_pbr_ipset_entry_match(s, pbrime);
+
+ stream_putw_at(s, 0, stream_get_endp(s));
+ if (!zclient_send_message(zclient) && install)
+ pbrime->install_in_progress = true;
+}
+
+void bgp_send_pbr_iptable(struct bgp_pbr_action *pba,
+ struct bgp_pbr_match *pbm,
+ bool install)
+{
+ struct stream *s;
+
+ if (pbm->install_iptable_in_progress)
+ return;
+ zlog_debug("%s: name %s type %d mark %d %d", __PRETTY_FUNCTION__,
+ pbm->ipset_name, pbm->type, pba->fwmark, install);
+ s = zclient->obuf;
+ stream_reset(s);
+
+ zclient_create_header(s,
+ install ? ZEBRA_IPTABLE_ADD :
+ ZEBRA_IPTABLE_DELETE,
+ VRF_DEFAULT);
+
+ bgp_encode_pbr_iptable_match(s, pba, pbm);
+
+ stream_putw_at(s, 0, stream_get_endp(s));
+ if (!zclient_send_message(zclient) && install) {
+ pbm->install_iptable_in_progress = true;
+ pba->refcnt++;
+ }
+}
+
+/* inject in table <table_id> a default route to:
+ * - if nexthop IP is present : to this nexthop
+ * - if vrf is different from local : to the matching VRF
+ */
+void bgp_zebra_announce_default(struct bgp *bgp, struct nexthop *nh,
+ afi_t afi, uint32_t table_id, bool announce)
+{
+ struct zapi_nexthop *api_nh;
+ struct zapi_route api;
+ struct prefix p;
+
+ if (!nh || nh->type != NEXTHOP_TYPE_IPV4
+ || nh->vrf_id == VRF_UNKNOWN)
+ return;
+ memset(&p, 0, sizeof(struct prefix));
+ /* default route */
+ if (afi != AFI_IP)
+ return;
+ p.family = AF_INET;
+ memset(&api, 0, sizeof(api));
+ api.vrf_id = bgp->vrf_id;
+ api.type = ZEBRA_ROUTE_BGP;
+ api.safi = SAFI_UNICAST;
+ api.prefix = p;
+ api.tableid = table_id;
+ api.nexthop_num = 1;
+ SET_FLAG(api.message, ZAPI_MESSAGE_TABLEID);
+ SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
+ api_nh = &api.nexthops[0];
+
+ /* redirect IP */
+ if (nh->gate.ipv4.s_addr) {
+ char buff[PREFIX_STRLEN];
+
+ api_nh->vrf_id = nh->vrf_id;
+ api_nh->gate.ipv4 = nh->gate.ipv4;
+ api_nh->type = NEXTHOP_TYPE_IPV4;
+
+ inet_ntop(AF_INET, &(nh->gate.ipv4), buff, INET_ADDRSTRLEN);
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_info("BGP: sending default route to %s table %d (redirect IP)",
+ buff, table_id);
+ zclient_route_send(announce ? ZEBRA_ROUTE_ADD
+ : ZEBRA_ROUTE_DELETE,
+ zclient, &api);
+ } else if (nh->vrf_id != bgp->vrf_id) {
+ struct vrf *vrf;
+ struct interface *ifp;
+
+ vrf = vrf_lookup_by_id(nh->vrf_id);
+ if (!vrf)
+ return;
+ /* create default route with interface <VRF>
+ * with nexthop-vrf <VRF>
+ */
+ ifp = if_lookup_by_name_all_vrf(vrf->name);
+ if (!ifp)
+ return;
+ api_nh->vrf_id = nh->vrf_id;
+ api_nh->type = NEXTHOP_TYPE_IFINDEX;
+ api_nh->ifindex = ifp->ifindex;
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_info("BGP: sending default route to %s table %d (redirect VRF)",
+ vrf->name, table_id);
+ zclient_route_send(announce ? ZEBRA_ROUTE_ADD
+ : ZEBRA_ROUTE_DELETE,
+ zclient, &api);
+ return;
+ }
+}
diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h
index 7263317b6f..7ac40fecff 100644
--- a/bgpd/bgp_zebra.h
+++ b/bgpd/bgp_zebra.h
@@ -25,6 +25,7 @@
extern void bgp_zebra_init(struct thread_master *master);
extern void bgp_zebra_init_tm_connect(void);
+extern uint32_t bgp_zebra_tm_get_id(void);
extern void bgp_zebra_destroy(void);
extern int bgp_zebra_get_table_range(uint32_t chunk_size,
uint32_t *start, uint32_t *end);
@@ -70,4 +71,20 @@ extern int bgp_zebra_advertise_all_vni(struct bgp *, int);
extern int bgp_zebra_num_connects(void);
+struct bgp_pbr_action;
+struct bgp_pbr_match;
+struct bgp_pbr_match_entry;
+extern void bgp_send_pbr_rule_action(struct bgp_pbr_action *pbra,
+ bool install);
+extern void bgp_send_pbr_ipset_match(struct bgp_pbr_match *pbrim,
+ bool install);
+extern void bgp_send_pbr_ipset_entry_match(struct bgp_pbr_match_entry *pbrime,
+ bool install);
+extern void bgp_send_pbr_iptable(struct bgp_pbr_action *pba,
+ struct bgp_pbr_match *pbm,
+ bool install);
+
+extern void bgp_zebra_announce_default(struct bgp *bgp, struct nexthop *nh,
+ afi_t afi, uint32_t table_id, bool announce);
+
#endif /* _QUAGGA_BGP_ZEBRA_H */
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index df0f1bd19c..32a1ea5a5f 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -44,6 +44,7 @@
#include "table.h"
#include "lib/json.h"
#include "frr_pthread.h"
+#include "bitfield.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_table.h"
@@ -82,6 +83,7 @@
#include "bgpd/bgp_ecommunity.h"
#include "bgpd/bgp_flowspec.h"
#include "bgpd/bgp_labelpool.h"
+#include "bgpd/bgp_pbr.h"
DEFINE_MTYPE_STATIC(BGPD, PEER_TX_SHUTDOWN_MSG, "Peer shutdown message (TX)");
DEFINE_QOBJ_TYPE(bgp_master)
@@ -2956,6 +2958,9 @@ static struct bgp *bgp_create(as_t *as, const char *name,
bgp->vpn_policy[afi].tovpn_label = MPLS_LABEL_NONE;
bgp->vpn_policy[afi].tovpn_zebra_vrf_label_last_sent =
MPLS_LABEL_NONE;
+
+ bgp->vpn_policy[afi].import_vrf = list_new();
+ bgp->vpn_policy[afi].export_vrf = list_new();
}
if (name) {
bgp->name = XSTRDUP(MTYPE_BGP, name);
@@ -2997,7 +3002,12 @@ static struct bgp *bgp_create(as_t *as, const char *name,
QOBJ_REG(bgp, bgp);
update_bgp_group_init(bgp);
+
+ /* assign a unique rd id for auto derivation of vrf's RD */
+ bf_assign_index(bm->rd_idspace, bgp->vrf_rd_id);
+
bgp_evpn_init(bgp);
+ bgp_pbr_init(bgp);
return bgp;
}
@@ -3372,14 +3382,28 @@ void bgp_free(struct bgp *bgp)
rmap = &bgp->table_map[afi][safi];
if (rmap->name)
XFREE(MTYPE_ROUTE_MAP_NAME, rmap->name);
+
+ /*
+ * Yes this is per AFI, but
+ * the list_delete_and_null nulls the pointer
+ * and we'll not leak anything on going down
+ * and the if test will fail on the second safi.
+ */
+ if (bgp->vpn_policy[afi].import_vrf)
+ list_delete_and_null(&bgp->vpn_policy[afi].import_vrf);
+ if (bgp->vpn_policy[afi].export_vrf)
+ list_delete_and_null(&bgp->vpn_policy[afi].export_vrf);
}
bgp_scan_finish(bgp);
bgp_address_destroy(bgp);
bgp_tip_hash_destroy(bgp);
- bgp_evpn_cleanup(bgp);
+ /* release the auto RD id */
+ bf_release_index(bm->rd_idspace, bgp->vrf_rd_id);
+ bgp_evpn_cleanup(bgp);
+ bgp_pbr_cleanup(bgp);
if (bgp->name)
XFREE(MTYPE_BGP, bgp->name);
if (bgp->name_pretty)
@@ -5923,39 +5947,63 @@ int peer_maximum_prefix_set(struct peer *peer, afi_t afi, safi_t safi,
struct peer_group *group;
struct listnode *node, *nnode;
+ /* apply configuration and set flags */
SET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);
- peer->pmax[afi][safi] = max;
- peer->pmax_threshold[afi][safi] = threshold;
- peer->pmax_restart[afi][safi] = restart;
if (warning)
SET_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_MAX_PREFIX_WARNING);
else
UNSET_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_MAX_PREFIX_WARNING);
+ peer->pmax[afi][safi] = max;
+ peer->pmax_threshold[afi][safi] = threshold;
+ peer->pmax_restart[afi][safi] = restart;
+ /* if handling a peer-group, apply to all children */
if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
group = peer->group;
for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX);
- peer->pmax[afi][safi] = max;
- peer->pmax_threshold[afi][safi] = threshold;
- peer->pmax_restart[afi][safi] = restart;
- if (warning)
+ /*
+ * If peer configuration is user-set, it overrides
+ * peer-group config.
+ */
+ if (!CHECK_FLAG(peer->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX)) {
SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
- else
- UNSET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
+ PEER_FLAG_MAX_PREFIX);
+ peer->pmax[afi][safi] = max;
+ peer->pmax_threshold[afi][safi] = threshold;
+ peer->pmax_restart[afi][safi] = restart;
+ }
+ if (!CHECK_FLAG(peer->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX_WARNING)) {
+ if (warning)
+ SET_FLAG(peer->af_flags[afi][safi],
+ PEER_FLAG_MAX_PREFIX_WARNING);
+ else
+ UNSET_FLAG(
+ peer->af_flags[afi][safi],
+ PEER_FLAG_MAX_PREFIX_WARNING);
+ }
if ((peer->status == Established)
&& (peer->afc[afi][safi]))
bgp_maximum_prefix_overflow(peer, afi, safi, 1);
}
} else {
+ /* if not handling a peer-group, set the override flags */
if ((peer->status == Established) && (peer->afc[afi][safi]))
bgp_maximum_prefix_overflow(peer, afi, safi, 1);
+
+ SET_FLAG(peer->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX);
+
+ if (warning)
+ SET_FLAG(peer->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX_WARNING);
+ else
+ UNSET_FLAG(peer->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX_WARNING);
}
return 0;
@@ -5966,49 +6014,49 @@ int peer_maximum_prefix_unset(struct peer *peer, afi_t afi, safi_t safi)
struct peer_group *group;
struct listnode *node, *nnode;
- /* apply peer-group config */
- if (peer_group_active(peer)) {
- if (CHECK_FLAG(peer->group->conf->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX))
- SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX);
- else
- UNSET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX);
-
- if (CHECK_FLAG(peer->group->conf->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING))
- SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
- else
- UNSET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
-
- peer->pmax[afi][safi] = peer->group->conf->pmax[afi][safi];
- peer->pmax_threshold[afi][safi] =
- peer->group->conf->pmax_threshold[afi][safi];
- peer->pmax_restart[afi][safi] =
- peer->group->conf->pmax_restart[afi][safi];
- return 0;
- }
-
UNSET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);
UNSET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING);
peer->pmax[afi][safi] = 0;
peer->pmax_threshold[afi][safi] = 0;
peer->pmax_restart[afi][safi] = 0;
- if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
+ /* if not handling a peer-group, unset override flags */
+ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ UNSET_FLAG(peer->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX);
+ UNSET_FLAG(peer->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX_WARNING);
+ /* if peer is part of a peer-group, apply peer-group config */
+ if (peer_group_active(peer)) {
+ peer->pmax[afi][safi] =
+ peer->group->conf->pmax[afi][safi];
+ peer->pmax_threshold[afi][safi] =
+ peer->group->conf->pmax_threshold[afi][safi];
+ peer->pmax_restart[afi][safi] =
+ peer->group->conf->pmax_restart[afi][safi];
+ }
+
return 0;
+ }
+ /*
+ * If this peer is a peer-group, set all peers in the group unless they
+ * have overrides for our config.
+ */
group = peer->group;
for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- UNSET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);
- UNSET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
- peer->pmax[afi][safi] = 0;
- peer->pmax_threshold[afi][safi] = 0;
- peer->pmax_restart[afi][safi] = 0;
+ if (!CHECK_FLAG(peer->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX_WARNING))
+ UNSET_FLAG(peer->af_flags[afi][safi],
+ PEER_FLAG_MAX_PREFIX_WARNING);
+ if (!CHECK_FLAG(peer->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX)) {
+ UNSET_FLAG(peer->af_flags[afi][safi],
+ PEER_FLAG_MAX_PREFIX);
+ peer->pmax[afi][safi] = 0;
+ peer->pmax_threshold[afi][safi] = 0;
+ peer->pmax_restart[afi][safi] = 0;
+ }
}
return 0;
}
@@ -7194,6 +7242,16 @@ static void bgp_config_write_family(struct vty *vty, struct bgp *bgp, afi_t afi,
vty_out(vty, " import vpn\n");
}
+ if (CHECK_FLAG(bgp->af_flags[afi][safi],
+ BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
+ struct listnode *node;
+ char *name;
+
+ for (ALL_LIST_ELEMENTS_RO(
+ bgp->vpn_policy[afi].import_vrf, node,
+ name))
+ vty_out(vty, " import vrf %s\n", name);
+ }
}
vty_endframe(vty, " exit-address-family\n");
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 680bac0214..d9ce77a55a 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -188,6 +188,18 @@ struct vpn_policy {
#define BGP_VPN_POLICY_TOVPN_LABEL_AUTO (1 << 0)
#define BGP_VPN_POLICY_TOVPN_RD_SET (1 << 1)
#define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET (1 << 2)
+
+ /*
+ * If we are importing another vrf into us keep a list of
+ * vrf names that are being imported into us.
+ */
+ struct list *import_vrf;
+
+ /*
+ * if we are being exported to another vrf keep a list of
+ * vrf names that we are being exported to.
+ */
+ struct list *export_vrf;
};
/*
@@ -342,16 +354,19 @@ struct bgp {
/* BGP Per AF flags */
uint16_t af_flags[AFI_MAX][SAFI_MAX];
-#define BGP_CONFIG_DAMPENING (1 << 0)
-#define BGP_CONFIG_VRF_TO_MPLSVPN_EXPORT (1 << 1)
-#define BGP_CONFIG_MPLSVPN_TO_VRF_IMPORT (1 << 2)
-
+#define BGP_CONFIG_DAMPENING (1 << 0)
/* l2vpn evpn flags - 1 << 0 is used for DAMPENNG */
-#define BGP_L2VPN_EVPN_ADVERTISE_IPV4_UNICAST (1 << 1)
-#define BGP_L2VPN_EVPN_ADVERTISE_IPV6_UNICAST (1 << 2)
-#define BGP_L2VPN_EVPN_DEFAULT_ORIGINATE_IPV4 (1 << 3)
-#define BGP_L2VPN_EVPN_DEFAULT_ORIGINATE_IPV6 (1 << 4)
-
+#define BGP_L2VPN_EVPN_ADVERTISE_IPV4_UNICAST (1 << 1)
+#define BGP_L2VPN_EVPN_ADVERTISE_IPV6_UNICAST (1 << 2)
+#define BGP_L2VPN_EVPN_DEFAULT_ORIGINATE_IPV4 (1 << 3)
+#define BGP_L2VPN_EVPN_DEFAULT_ORIGINATE_IPV6 (1 << 4)
+/* import/export between address families */
+#define BGP_CONFIG_VRF_TO_MPLSVPN_EXPORT (1 << 5)
+#define BGP_CONFIG_MPLSVPN_TO_VRF_IMPORT (1 << 6)
+/* vrf-route leaking flags */
+#define BGP_CONFIG_VRF_TO_VRF_IMPORT (1 << 7)
+#define BGP_CONFIG_VRF_TO_VRF_EXPORT (1 << 8)
+#define BGP_DEFAULT_NAME "default"
/* Route table for next-hop lookup cache. */
struct bgp_table *nexthop_cache_table[AFI_MAX];
@@ -385,6 +400,25 @@ struct bgp {
/* Allocate MPLS labels */
uint8_t allocate_mpls_labels[AFI_MAX][SAFI_MAX];
+ /* Allocate hash entries to store policy routing information
+ * The hash are used to host pbr rules somewhere.
+ * Actually, pbr will only be used by flowspec
+ * those hash elements will have relationship together as
+ * illustrated in below diagram:
+ *
+ * pbr_action a <----- pbr_match i <--- pbr_match_entry 1..n
+ * <----- pbr_match j <--- pbr_match_entry 1..m
+ *
+ * - here in BGP structure, the list of match and actions will
+ * stand for the list of ipset sets, and table_ids in the kernel
+ * - the arrow above between pbr_match and pbr_action indicate
+ * that a backpointer permits match to find the action
+ * - the arrow between match_entry and match is a hash list
+ * contained in match, that lists the whole set of entries
+ */
+ struct hash *pbr_match_hash;
+ struct hash *pbr_action_hash;
+
/* timer to re-evaluate neighbor default-originate route-maps */
struct thread *t_rmap_def_originate_eval;
#define RMAP_DEFAULT_ORIGINATE_EVAL_TIMER 5
@@ -473,6 +507,9 @@ struct bgp {
/* unique ID for auto derivation of RD for this vrf */
uint16_t vrf_rd_id;
+ /* Automatically derived RD for this VRF */
+ struct prefix_rd vrf_prd_auto;
+
/* RD for this VRF */
struct prefix_rd vrf_prd;
@@ -799,7 +836,28 @@ struct peer {
/* NSF mode (graceful restart) */
uint8_t nsf[AFI_MAX][SAFI_MAX];
- /* Per AF configuration flags. */
+ /* Peer Per AF flags */
+ /*
+ * Parallel array to af_flags that indicates whether each flag
+ * originates from a peer-group or if it is config that is specific to
+ * this individual peer. If a flag is set independent of the
+ * peer-group the same bit should be set here. If this peer is a
+ * peer-group, this memory region should be all zeros. The assumption
+ * is that the default state for all flags is unset.
+ *
+ * Notes:
+ * - if a flag for an individual peer is unset, the corresponding
+ * override flag is unset and the peer is considered to be back in
+ * sync with the peer-group.
+ * - This does *not* contain the flag values, rather it contains
+ * whether the flag at the same position in af_flags is
+ * *peer-specific*.
+ */
+ uint32_t af_flags_override[AFI_MAX][SAFI_MAX];
+ /*
+ * Effective flags, computed by applying peer-group flags and then
+ * overriding with individual flags
+ */
uint32_t af_flags[AFI_MAX][SAFI_MAX];
#define PEER_FLAG_SEND_COMMUNITY (1 << 0) /* send-community */
#define PEER_FLAG_SEND_EXT_COMMUNITY (1 << 1) /* send-community ext. */
diff --git a/bgpd/rfapi/rfapi_vty.c b/bgpd/rfapi/rfapi_vty.c
index 5fb44bb492..ccaa472092 100644
--- a/bgpd/rfapi/rfapi_vty.c
+++ b/bgpd/rfapi/rfapi_vty.c
@@ -46,6 +46,7 @@
#include "bgpd/bgp_aspath.h"
#include "bgpd/bgp_community.h"
#include "bgpd/bgp_vnc_types.h"
+#include "bgpd/bgp_label.h"
#include "bgpd/rfapi/rfapi_import.h"
#include "bgpd/rfapi/rfapi_private.h"
@@ -431,8 +432,13 @@ void rfapi_vty_out_vncinfo(struct vty *vty, struct prefix *p,
XFREE(MTYPE_ECOMMUNITY_STR, s);
}
- if (bi->extra != NULL)
- vty_out(vty, " label=%u", decode_label(&bi->extra->label[0]));
+ if (bi->extra != NULL) {
+ if (bi->extra->label[0] == BGP_PREVENT_VRF_2_VRF_LEAK)
+ vty_out(vty, " label=VRF2VRF");
+ else
+ vty_out(vty, " label=%u",
+ decode_label(&bi->extra->label[0]));
+ }
if (!rfapiGetVncLifetime(bi->attr, &lifetime)) {
vty_out(vty, " life=%d", lifetime);
diff --git a/configure.ac b/configure.ac
index 53a80e790f..7662f2a4e5 100755
--- a/configure.ac
+++ b/configure.ac
@@ -7,7 +7,7 @@
##
AC_PREREQ(2.60)
-AC_INIT(frr, 4.1-dev, [https://github.com/frrouting/frr/issues])
+AC_INIT(frr, 5.1-dev, [https://github.com/frrouting/frr/issues])
PACKAGE_URL="https://frrouting.org/"
AC_SUBST(PACKAGE_URL)
PACKAGE_FULLNAME="FRRouting"
@@ -232,7 +232,6 @@ else
fi
fi
AM_CONDITIONAL([DEV_BUILD], [test "x$enable_dev_build" = "xyes"])
-AM_CONDITIONAL([SHARPD], [test "x$enable_dev_build" = "xyes"])
dnl always want these CFLAGS
AC_C_FLAG([-fno-omit-frame-pointer])
@@ -354,6 +353,10 @@ AC_ARG_ENABLE(isisd,
AS_HELP_STRING([--disable-isisd], [do not build isisd]))
AC_ARG_ENABLE(pimd,
AS_HELP_STRING([--disable-pimd], [do not build pimd]))
+AC_ARG_ENABLE(pbrd,
+ AS_HELP_STRING([--disable-pbrd], [do not build pbrd]))
+AC_ARG_ENABLE(sharpd,
+ AS_HELP_STRING([--enable-sharpd], [build sharpd]))
AC_ARG_ENABLE(bgp-announce,
AS_HELP_STRING([--disable-bgp-announce,], [turn off BGP route announcement]))
AC_ARG_ENABLE(bgp-vnc,
@@ -1378,6 +1381,7 @@ AM_CONDITIONAL(OSPF6D, test "${enable_ospf6d}" != "no")
AM_CONDITIONAL(ISISD, test "${enable_isisd}" != "no")
AM_CONDITIONAL(PIMD, test "${enable_pimd}" != "no")
AM_CONDITIONAL(PBRD, test "${enable_pbrd}" != "no")
+AM_CONDITIONAL(SHARPD, test "${enable_sharpd}" = "yes")
if test "${enable_bgp_announce}" = "no";then
AC_DEFINE(DISABLE_BGP_ANNOUNCE,1,Disable BGP installation to zebra)
diff --git a/doc/Makefile.am b/doc/Makefile.am
index dec6b53e0f..053842283e 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -82,6 +82,10 @@ if EIGRPD
man_MANS += $(MANPAGE_BUILDDIR)/eigrpd.8
endif
+if SHARPD
+man_MANS += $(MANPAGE_BUILDDIR)/sharpd.8
+endif
+
# Automake is particular about manpages. It is aware of them and has some
# special facilities for handling them, but it assumes that manpages are always
# given in groff source and so these facilities are limited to simply
@@ -146,6 +150,7 @@ EXTRA_DIST = frr-sphinx.mk \
manpages/ripd.rst \
manpages/pbrd.rst \
manpages/ripngd.rst \
+ manpages/sharpd.rst \
manpages/vtysh.rst \
manpages/watchfrr.rst \
manpages/zebra.rst \
@@ -212,6 +217,7 @@ EXTRA_DIST = frr-sphinx.mk \
user/routemap.rst \
user/routeserver.rst \
user/rpki.rst \
+ user/sharp.rst \
user/snmp.rst \
user/snmptrap.rst \
user/Useful_Sysctl_Settings.md \
diff --git a/doc/manpages/conf.py b/doc/manpages/conf.py
index 41683ed678..2b0f7e3893 100644
--- a/doc/manpages/conf.py
+++ b/doc/manpages/conf.py
@@ -323,6 +323,7 @@ man_pages = [
('nhrpd', 'nhrpd', fwfrr.format("a Next Hop Routing Protocol "), [], 8),
('pimd', 'pimd', fwfrr.format("a PIM "), [], 8),
('pbrd', 'pbrd', fwfrr.format("a PBR "), [], 8),
+ ('sharpd', 'sharpd', fwfrr.format("a SHARP "), [], 8),
('mtracebis', 'mtracebis', "a multicast trace client", [], 8),
('ripd', 'ripd', fwfrr.format("a RIP "), [], 8),
('ripngd', 'ripngd', fwfrr.format("a RIPNG "), [], 8),
diff --git a/doc/manpages/index.rst b/doc/manpages/index.rst
index e05890f8d1..6d3f3aae55 100644
--- a/doc/manpages/index.rst
+++ b/doc/manpages/index.rst
@@ -19,6 +19,7 @@
mtracebis
ripd
ripngd
+ sharpd
watchfrr
zebra
vtysh
diff --git a/doc/manpages/mtracebis.rst b/doc/manpages/mtracebis.rst
index b4d57c1b97..d3ba8036d8 100644
--- a/doc/manpages/mtracebis.rst
+++ b/doc/manpages/mtracebis.rst
@@ -9,17 +9,22 @@ SYNOPSIS
========
|PROGRAM| |synopsis-options-hv|
-|PROGRAM| <multicast source>
+|PROGRAM| <multicast source> [<multicast group>]
DESCRIPTION
===========
-|PROGRAM| is a program to initiate multicast traceroute, or "mtrace", queries.
+|PROGRAM| is a program for initiating multicast traceroute, or "mtrace", queries.
-The initial version of the program requires multicast source IP address and
-initiates a weak traceroute across the network. This tests whether the
-interfaces towards the source are multicast enabled. The first query sent is a
-full query, capable of crossing the network all the way to the source. If this
-fails, hop-by-hop queries are initiated.
+It can initiate two types of mtrace queries: weak and group.
+
+Weak tests whether the interfaces towards the source are multicast enabled and is
+initiated by supplying only the multicast source address.
+
+Group tests whether there is multicast routing protocol state for a particular
+multicast group and is initiated by supplying multicast source and group.
+
+The first query sent is a full query, capable of crossing the network all the way
+to the source. If this fails, hop-by-hop queries are initiated.
Hop-by-hop queries start by requesting only a response from the nearest router.
Following that, next query is extended to the next two routers, and so on...
diff --git a/doc/manpages/sharpd.rst b/doc/manpages/sharpd.rst
new file mode 100644
index 0000000000..016f3f9254
--- /dev/null
+++ b/doc/manpages/sharpd.rst
@@ -0,0 +1,38 @@
+******
+SHARPD
+******
+
+.. include:: defines.rst
+.. |DAEMON| replace:: sharpd
+
+SYNOPSIS
+========
+|DAEMON| |synopsis-options-hv|
+
+|DAEMON| |synopsis-options|
+
+DESCRIPTION
+===========
+|DAEMON| is a routing component that works with the FRRouting engine.
+
+OPTIONS
+=======
+OPTIONS available for the |DAEMON| command:
+
+.. include:: common-options.rst
+
+FILES
+=====
+
+|INSTALL_PREFIX_SBIN|/|DAEMON|
+ The default location of the |DAEMON| binary.
+
+|INSTALL_PREFIX_ETC|/|DAEMON|.conf
+ The default location of the |DAEMON| config file.
+
+$(PWD)/|DAEMON|.log
+ If the |DAEMON| process is configured to output logs to a file, then you will find this file in the directory where you started |DAEMON|.
+
+.. include:: epilogue.rst
+
+
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 3298460a58..861e3d0d6d 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -1549,7 +1549,9 @@ is specified, the BGP protocol process belongs to the default VRF.
BGP routes may be leaked (i.e., copied) between a unicast VRF RIB and the VPN
safi RIB of the default VRF (leaking is also permitted between the unicast RIB
-of the default VRF and VPN). A common application of this feature is to
+of the default VRF and VPN). A shortcut syntax is also available for
+specifying leaking from one vrf to another vrf using the VPN RIB as
+the intermediary. A common application of the VPN-VRF feature is to
connect a customer's private routing domain to a provider's VPN service.
Leaking is configured from the point of view of an individual VRF: ``import``
refers to routes leaked from VPN to a unicast VRF, whereas ``export`` refers to
@@ -1586,6 +1588,9 @@ routing domain which is shared across all its sites. More complex routing
topologies are possible through use of additional route-targets to augment the
leaking of sets of routes in various ways.
+When using the shortcut syntax for vrf-to-vrf leaking, the RD and RT are
+auto-derived.
+
Configuration
-------------
@@ -1667,6 +1672,24 @@ address-family:
Disables import or export of routes between the current unicast VRF and VPN.
+.. index:: import vrf VRFNAME
+.. clicmd:: import vrf VRFNAME
+
+ Shortcut syntax for specifying automatic leaking from vrf VRFNAME to
+ the current VRF using the VPN RIB as intermediary. The RD and RT
+ are auto derived and should not be specified explicitly for either the
+ source or destination VRF's.
+
+ This shortcut syntax mode is not compatible with the explicit
+ `import vpn` and `export vpn` statements for the two VRF's involved.
+ The CLI will disallow attempts to configure incompatible leaking
+ modes.
+
+.. index:: no import vrf VRFNAME
+.. clicmd:: no import vrf VRFNAME
+
+ Disables automatic leaking from vrf VRFNAME to the current VRF using
+ the VPN RIB as intermediary.
.. _displaying-bgp-information:
diff --git a/doc/user/index.rst b/doc/user/index.rst
index c692f1098b..9b9189dc30 100644
--- a/doc/user/index.rst
+++ b/doc/user/index.rst
@@ -25,6 +25,7 @@ FRRouting User Guide
pbr
ripd
ripngd
+ sharp
vnc
glossary
appendix
diff --git a/doc/user/installation.rst b/doc/user/installation.rst
index dc2599f13e..700a727d8f 100644
--- a/doc/user/installation.rst
+++ b/doc/user/installation.rst
@@ -171,6 +171,14 @@ customize the build to include or exclude specific features and dependencies.
With this option, we provide a way to strip out these characters for APK dev
package builds.
+.. option:: --enable-multipath=X
+
+ Compile FRR with up to X way ECMP supported. This number can be from 0-999.
+ For backwards compatibility with older configure options when setting X = 0,
+ we will build FRR with 64 way ECMP. This is needed because there are
+ hardcoded arrays that FRR builds towards, so we need to know how big to
+ make these arrays at build time.
+
You may specify any combination of the above options to the configure
script. By default, the executables are placed in :file:`/usr/local/sbin`
and the configuration files in :file:`/usr/local/etc`. The :file:`/usr/local/`
diff --git a/doc/user/ospf6d.rst b/doc/user/ospf6d.rst
index 56f95e64b0..71bc047720 100644
--- a/doc/user/ospf6d.rst
+++ b/doc/user/ospf6d.rst
@@ -101,7 +101,7 @@ OSPF6 interface
.. index:: ipv6 ospf6 hello-interval HELLOINTERVAL
.. clicmd:: ipv6 ospf6 hello-interval HELLOINTERVAL
- Sets interface's Hello Interval. Default 40
+ Sets interface's Hello Interval. Default 10
.. index:: ipv6 ospf6 dead-interval DEADINTERVAL
.. clicmd:: ipv6 ospf6 dead-interval DEADINTERVAL
diff --git a/doc/user/sharp.rst b/doc/user/sharp.rst
new file mode 100644
index 0000000000..090628044c
--- /dev/null
+++ b/doc/user/sharp.rst
@@ -0,0 +1,67 @@
+.. _sharp:
+
+*****
+SHARP
+*****
+
+:abbr:`SHARP` Super Happy Advanced Routing Process. This daemon is useful
+for the testing of FRR itself as well as useful for creation of Proof of
+Concept labs.
+
+.. _starting-sharp:
+
+Starting SHARP
+==============
+
+Default configuration file for *sharpd* is :file:`sharpd.conf`. The typical
+location of :file:`sharpd.conf` is |INSTALL_PREFIX_ETC|/sharpd.conf.
+
+If the user is using integrated config, then :file:`sharpd.conf` need not be
+present and the :file:`frr.conf` is read instead.
+
+.. program:: sharpd
+
+:abbr:`SHARP` supports all the common FRR daemon start options which are
+documented elsewhere.
+
+.. _using-sharp:
+
+USING SHARP
+===========
+
+All sharp commands are under the enable node and preceded by the
+:abbr:`sharp` keyword. There are currently no permanent sharp
+commands for configuration.
+
+.. index:: sharp install
+.. clicmd:: sharp install routes A.B.C.D nexthop E.F.G.H (1-1000000)
+
+Install up to a million /32 routes starting at A.B.C.D with specified nexthop
+E.F.G.H. The nexthop is a NEXTHOP_TYPE_IPV4 and must be reachable to be
+installed into the kernel. The routes are installed into zebra as
+ZEBRA_ROUTE_SHARP and can be used as part of a normal route redistribution.
+Route installation time is noted in the debug log and upon zebra successful
+installation into the kernel and sharp receiving the notification of all
+route installs the success will be noted in the debug log as well.
+
+.. index:: sharp remove
+.. clicmd:: sharp remove routes A.B.C.D (1-1000000)
+
+Remove up to 1000000 /32 routes starting at A.B.C.D. The routes are
+removed from zebra. Route deletion start is noted in the debug log
+and when all routes have been successfully deleted the debug log will
+be updated with this information as well.
+
+.. index:: sharp label
+.. clicmd:: sharp label <ipv4|ipv6> vrf NAME label (0-1000000)
+
+Install a label into the kernel that causes the specified vrf NAME table to be
+used for pop and forward operations when the specified label is seen.
+
+.. index:: sharp watch
+.. clicmd:: sharp watch nexthop <A.B.C.D|X:X::X:X>
+
+Instruct zebra to monitor and notify sharp when the specified nexthop is
+changed. The notification from zebra is written into the debug log.
+
+
diff --git a/doc/user/snmp.rst b/doc/user/snmp.rst
index 1a24d56cb7..38918ff250 100644
--- a/doc/user/snmp.rst
+++ b/doc/user/snmp.rst
@@ -43,6 +43,9 @@ master SNMP agent (snmpd) and each of the FRR daemons must be configured. In
In each of the FRR daemons, ``agentx`` command will enable AgentX support.
:file:`/etc/snmp/snmpd.conf`:
+
+::
+
#
# example access restrictions setup
#
@@ -113,6 +116,9 @@ using the password "frr_ospfd". For testing it is recommending to take exactly
the below snmpd.conf as wrong access restrictions can be hard to debug.
:file:`/etc/snmp/snmpd.conf`:
+
+::
+
#
# example access restrictions setup
#
@@ -126,6 +132,9 @@ the below snmpd.conf as wrong access restrictions can be hard to debug.
smuxpeer .1.3.6.1.4.1.3317.1.2.5 frr_ospfd
:file:`/etc/frr/ospf`:
+
+::
+
! ... the rest of ospfd.conf has been omitted for clarity ...
!
smux peer .1.3.6.1.4.1.3317.1.2.5 frr_ospfd
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 0927a4dbe9..78e1959d36 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -31,6 +31,21 @@ Besides the common invocation options (:ref:`common-invocation-options`), the
When program terminates, retain routes added by zebra.
+.. option:: -e X, --ecmp X
+
+ Run zebra with a limited ecmp ability compared to what it is compiled to.
+ If you are running zebra on hardware with limited functionality you can
+ force zebra to limit the maximum ecmp allowed to X. This number
+ is bounded by what you compiled FRR with as the maximum number.
+
+.. option:: -n, --vrfwnetns
+
+ When *Zebra* starts with this option, the VRF backend is based on Linux
+ network namespaces. That implies that all network namespaces discovered by
+ ZEBRA will create an associated VRF. The other daemons will operate on the
+ VRF defined by *Zebra*, as usual. More information in the :ref:`VRF Commands`
+ chapter.
+
.. program:: configure
.. _interface-commands:
@@ -47,6 +62,10 @@ Standard Commands
.. clicmd:: interface IFNAME
+.. index:: interface IFNAME vrf VRF
+
+.. clicmd:: interface IFNAME vrf VRF
+
.. index:: shutdown
.. clicmd:: shutdown
@@ -351,6 +370,139 @@ default) should the specified gateways not be reachable. E.g.:
.. _multicast-rib-commands:
+VRF (Virtual Routing and Forwarding)
+====================================
+
+Currently, the user has the possibility to configure VRFs. VRF is a way to
+separate networking contexts on the same machine. Those networking contexts
+are associated with separate interfaces, thus making it possible to associate
+one interface with a specific VRF.
+VRF can be used, for example, when instantiating per enterprise networking
+services, without having to instantiate the physical host machine or the routing
+management daemons for each enterprise. As a result, interfaces are separate for
+each set of VRF, and routing daemons can have their own context for each VRF.
+
+This conceptual view introduces the *Default VRF* case. If the user does not
+configure any specific VRF, then by default, the user will however configure the
+*Default VRF*.
+On the *Zebra* context, this can be done when being in configuration mode, when
+configuring a static route clicmd:`ip route NETWORK GATEWAY`.
+
+.. code-block:: frr
+
+ # case without VRF
+ configure terminal
+ ip route 10.0.0.0 255.255.255.0 10.0.0.2
+ exit
+
+Configuring VRF networking contexts can be done in various ways on FRR. The VRF
+interfaces can be configured by entering in interface configuration mode :
+:clicmd:`interface IFNAME vrf VRF`. Also, if the user wants to configure a static
+route for a specific VRF, then a specific VRF configuration mode is available. After
+entering into that mode by following command: :clicmd:`vrf VRF`. the user can enter
+the same route command as before, but this time, the route command will apply to vrf
+VRF.
+
+.. code-block:: frr
+
+ # case with VRF
+ configure terminal
+ vrf r1-cust1
+ ip route 10.0.0.0 255.255.255.0 10.0.0.2
+ exit-vrf
+
+A VRF backend mode is chosen when running *Zebra*.
+
+If no option is chosen, then the *Linux VRF* implementation as referenced in
+`https://www.kernel.org/doc/Documentation/networking/vrf.txt` will be mapped over
+the *Zebra* VRF. The routing table associated to that VRF is a Linux table
+identifier located in the same *Linux network namespace* where *Zebra* started.
+
+If the :option:`-n` option is chosen, then the *Linux network namespace* will be
+mapped over the *Zebra* VRF. That implies that *Zebra* is able to configure several
+*Linux network namespaces*. The routing table associated to that VRF is the whole
+routing tables located in that namespace. For instance, this mode matches OpenStack
+Network Namespaces. It matches also OpenFastPath. The default behavior remains Linux
+VRF which is supported by the Linux kernel community, see
+`https://www.kernel.org/doc/Documentation/networking/vrf.txt`.
+
+Because of that difference, there are some subtle differences when running some
+commands in relationship to VRF. Here is an extract of some of those commands:
+
+.. index:: vrf VRF
+.. clicmd:: vrf VRF
+
+ This command is available on configuration mode. By default, above command
+ permits accessing the vrf configuration mode. This mode is available for both
+ VRFs. It is to be noted that *Zebra* does not create *Linux VRF*. Provisioning
+ this command is used to keep the configuration intact.
+
+.. index:: netns NAMESPACE
+.. clicmd:: netns NAMESPACE
+
+ This command is based on VRF configuration mode. This command is available when
+ *Zebra* is run in :option:`-n` mode. This command reflects which *Linux network
+ namespace* is to be mapped with *Zebra* VRF. It is to be noted that *Zebra*
+ creates and detects added/suppressed VRFs from the Linux environment (in fact,
+ those managed with iproute2). Provisioning this command is used to keep the
+ configuration intact.
+
+.. index:: ip route NETWORK NETMASK GATEWAY
+.. clicmd:: ip route NETWORK NETMASK GATEWAY
+
+ This command is based on VRF configuration mode or in configuration mode. If
+ applied on configuration mode, this applies to default VRF. Otherwise, this
+ command applies to the VRF of the vrf configuration mode. By default, above
+ command permits configuring a network in the table identifier of the mentioned
+ VRF. If :option:`-n` option is given, the network will be configured in the
+ default routing table of the *Linux network namespace*.
+
+.. index:: ip route NETWORK NETMASK GATEWAY NEXTHOPVRF
+.. clicmd:: ip route NETWORK NETMASK GATEWAY NEXTHOPVRF
+
+ This command is based on VRF configuration mode or in configuration mode. If
+ on configuration mode, this applies to default VRF. Otherwise, this command
+ applies to the VRF of the vrf configuration mode. This command is used to
+ configure a vrf route leak across 2 VRFs. This command is only available when
+ *Zebra* is launched without :option:`-n` option.
+
+.. index:: ip route NETWORK NETMASK GATEWAY table TABLENO
+.. clicmd:: ip route NETWORK NETMASK GATEWAY table TABLENO
+
+ This command is based on VRF configuration mode. There, this command is only
+ available with :option:`-n` command. This command permits configuring a network
+ route in the given ``TABLENO`` of the *Linux network namespace*.
+
+.. index:: ip route NETWORK NETMASK GATEWAY table TABLENO
+.. clicmd:: ip route NETWORK NETMASK GATEWAY table TABLENO
+
+ This command is based on configuration mode. There, for default VRF, this command
+ is available for all modes. The ``TABLENO`` configured is one of the tables from
+ Default *Linux network namespace*.
+
+.. index:: show ip route vrf VRF
+.. clicmd:: show ip route vrf VRF
+
+ The show command permits dumping the routing table associated to the VRF. If
+ *Zebra* is launched with default settings, this will be the ``TABLENO`` of the
+ VRF configured on the kernel, thanks to information provided in
+ `https://www.kernel.org/doc/Documentation/networking/vrf.txt`.
+ If *Zebra* is launched with :option:`-n` option, this will be the default routing
+ table of the *Linux network namespace* ``VRF``.
+
+.. index:: show ip route vrf VRF table TABLENO
+.. clicmd:: show ip route vrf VRF table TABLENO
+
+ The show command is only available with :option:`-n` option.
+ This command will dump the routing table ``TABLENO`` of the *Linux network
+ namespace* ``VRF``.
+
+.. code-block:: frr
+
+ ip route 10.0.0.0 255.255.255.0 10.0.0.2 vrf r1-cust1 table 43
+ show ip route vrf r1-cust1 table 43
+
+
Multicast RIB Commands
======================
diff --git a/eigrpd/eigrp_hello.c b/eigrpd/eigrp_hello.c
index 6d74670514..d9e89357ca 100644
--- a/eigrpd/eigrp_hello.c
+++ b/eigrpd/eigrp_hello.c
@@ -630,7 +630,7 @@ static struct eigrp_packet *eigrp_hello_encode(struct eigrp_interface *ei,
uint16_t length = EIGRP_HEADER_LEN;
// allocate a new packet to be sent
- ep = eigrp_packet_new(ei->ifp->mtu, NULL);
+ ep = eigrp_packet_new(EIGRP_PACKET_MTU(ei->ifp->mtu), NULL);
if (ep) {
// encode common header feilds
diff --git a/eigrpd/eigrp_macros.h b/eigrpd/eigrp_macros.h
index eea7a26425..b30e19a867 100644
--- a/eigrpd/eigrp_macros.h
+++ b/eigrpd/eigrp_macros.h
@@ -35,6 +35,8 @@
//--------------------------------------------------------------------------
+#define EIGRP_PACKET_MTU(mtu) ((mtu) - (sizeof(struct ip)))
+
/* Topology Macros */
diff --git a/eigrpd/eigrp_packet.c b/eigrpd/eigrp_packet.c
index 990d1dc08e..59864532cf 100644
--- a/eigrpd/eigrp_packet.c
+++ b/eigrpd/eigrp_packet.c
@@ -51,6 +51,7 @@
#include "eigrpd/eigrp_zebra.h"
#include "eigrpd/eigrp_vty.h"
#include "eigrpd/eigrp_dump.h"
+#include "eigrpd/eigrp_macros.h"
#include "eigrpd/eigrp_network.h"
#include "eigrpd/eigrp_topology.h"
#include "eigrpd/eigrp_fsm.h"
@@ -1088,7 +1089,7 @@ struct eigrp_packet *eigrp_packet_duplicate(struct eigrp_packet *old,
{
struct eigrp_packet *new;
- new = eigrp_packet_new(nbr->ei->ifp->mtu, nbr);
+ new = eigrp_packet_new(EIGRP_PACKET_MTU(nbr->ei->ifp->mtu), nbr);
new->length = old->length;
new->retrans_counter = old->retrans_counter;
new->dst = old->dst;
diff --git a/eigrpd/eigrp_query.c b/eigrpd/eigrp_query.c
index 00234bb35c..dd4231fa00 100644
--- a/eigrpd/eigrp_query.c
+++ b/eigrpd/eigrp_query.c
@@ -167,6 +167,7 @@ void eigrp_send_query(struct eigrp_interface *ei)
struct eigrp_prefix_entry *pe;
bool has_tlv = false;
bool new_packet = true;
+ uint16_t eigrp_mtu = EIGRP_PACKET_MTU(ei->ifp->mtu);
for (ALL_LIST_ELEMENTS(ei->eigrp->topology_changes_internalIPV4, node,
nnode, pe)) {
@@ -174,7 +175,7 @@ void eigrp_send_query(struct eigrp_interface *ei)
continue;
if (new_packet) {
- ep = eigrp_packet_new(ei->ifp->mtu, NULL);
+ ep = eigrp_packet_new(eigrp_mtu, NULL);
/* Prepare EIGRP INIT UPDATE header */
eigrp_packet_header_init(EIGRP_OPC_QUERY, ei->eigrp,
@@ -197,7 +198,7 @@ void eigrp_send_query(struct eigrp_interface *ei)
listnode_add(pe->rij, nbr);
}
- if (length + EIGRP_TLV_MAX_IPV4_BYTE > (uint16_t)ei->ifp->mtu) {
+ if (length + EIGRP_TLV_MAX_IPV4_BYTE > eigrp_mtu) {
if ((ei->params.auth_type == EIGRP_AUTH_TYPE_MD5)
&& ei->params.auth_keychain != NULL) {
eigrp_make_md5_digest(ei, ep->s,
diff --git a/eigrpd/eigrp_reply.c b/eigrpd/eigrp_reply.c
index a702c1fbd1..b7490cd492 100644
--- a/eigrpd/eigrp_reply.c
+++ b/eigrpd/eigrp_reply.c
@@ -85,7 +85,7 @@ void eigrp_send_reply(struct eigrp_neighbor *nbr, struct eigrp_prefix_entry *pe)
* End of filtering
*/
- ep = eigrp_packet_new(ei->ifp->mtu, nbr);
+ ep = eigrp_packet_new(EIGRP_PACKET_MTU(ei->ifp->mtu), nbr);
/* Prepare EIGRP INIT UPDATE header */
eigrp_packet_header_init(EIGRP_OPC_REPLY, eigrp, ep->s, 0,
diff --git a/eigrpd/eigrp_siaquery.c b/eigrpd/eigrp_siaquery.c
index d398d75724..ff38325465 100644
--- a/eigrpd/eigrp_siaquery.c
+++ b/eigrpd/eigrp_siaquery.c
@@ -119,7 +119,7 @@ void eigrp_send_siaquery(struct eigrp_neighbor *nbr,
struct eigrp_packet *ep;
uint16_t length = EIGRP_HEADER_LEN;
- ep = eigrp_packet_new(nbr->ei->ifp->mtu, nbr);
+ ep = eigrp_packet_new(EIGRP_PACKET_MTU(nbr->ei->ifp->mtu), nbr);
/* Prepare EIGRP INIT UPDATE header */
eigrp_packet_header_init(EIGRP_OPC_SIAQUERY, nbr->ei->eigrp, ep->s, 0,
diff --git a/eigrpd/eigrp_siareply.c b/eigrpd/eigrp_siareply.c
index 3b7a82b665..d3dd123f90 100644
--- a/eigrpd/eigrp_siareply.c
+++ b/eigrpd/eigrp_siareply.c
@@ -118,7 +118,7 @@ void eigrp_send_siareply(struct eigrp_neighbor *nbr,
struct eigrp_packet *ep;
uint16_t length = EIGRP_HEADER_LEN;
- ep = eigrp_packet_new(nbr->ei->ifp->mtu, nbr);
+ ep = eigrp_packet_new(EIGRP_PACKET_MTU(nbr->ei->ifp->mtu), nbr);
/* Prepare EIGRP INIT UPDATE header */
eigrp_packet_header_init(EIGRP_OPC_SIAREPLY, nbr->ei->eigrp, ep->s, 0,
diff --git a/eigrpd/eigrp_topology.c b/eigrpd/eigrp_topology.c
index 2d1bc46e6b..becb29a95f 100644
--- a/eigrpd/eigrp_topology.c
+++ b/eigrpd/eigrp_topology.c
@@ -443,17 +443,24 @@ void eigrp_topology_update_node_flags(struct eigrp_prefix_entry *dest)
struct eigrp *eigrp = eigrp_lookup();
for (ALL_LIST_ELEMENTS_RO(dest->entries, node, entry)) {
- if (((uint64_t)entry->distance
- <= (uint64_t)dest->distance * (uint64_t)eigrp->variance)
- && entry->distance != EIGRP_MAX_METRIC) // is successor
- {
- entry->flags |= EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG;
- entry->flags &= ~EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG;
- } else if (entry->reported_distance
- < dest->fdistance) // is feasible successor
- {
- entry->flags |= EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG;
- entry->flags &= ~EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG;
+ if (entry->reported_distance < dest->fdistance) {
+ // is feasible successor, can be successor
+ if (((uint64_t)entry->distance
+ <= (uint64_t)dest->distance
+ * (uint64_t)eigrp->variance)
+ && entry->distance != EIGRP_MAX_METRIC) {
+ // is successor
+ entry->flags |=
+ EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG;
+ entry->flags &=
+ ~EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG;
+ } else {
+ // is feasible successor only
+ entry->flags |=
+ EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG;
+ entry->flags &=
+ ~EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG;
+ }
} else {
entry->flags &= ~EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG;
entry->flags &= ~EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG;
diff --git a/eigrpd/eigrp_update.c b/eigrpd/eigrp_update.c
index bd80ea366f..a3080136b5 100644
--- a/eigrpd/eigrp_update.c
+++ b/eigrpd/eigrp_update.c
@@ -420,7 +420,7 @@ void eigrp_update_send_init(struct eigrp_neighbor *nbr)
struct eigrp_packet *ep;
uint16_t length = EIGRP_HEADER_LEN;
- ep = eigrp_packet_new(nbr->ei->ifp->mtu, nbr);
+ ep = eigrp_packet_new(EIGRP_PACKET_MTU(nbr->ei->ifp->mtu), nbr);
/* Prepare EIGRP INIT UPDATE header */
if (IS_DEBUG_EIGRP_PACKET(0, RECV))
@@ -533,10 +533,10 @@ void eigrp_update_send_EOT(struct eigrp_neighbor *nbr)
struct eigrp *eigrp = ei->eigrp;
struct prefix *dest_addr;
uint32_t seq_no = eigrp->sequence_number;
- uint16_t mtu = ei->ifp->mtu;
+ uint16_t eigrp_mtu = EIGRP_PACKET_MTU(ei->ifp->mtu);
struct route_node *rn;
- ep = eigrp_packet_new(mtu, nbr);
+ ep = eigrp_packet_new(eigrp_mtu, nbr);
/* Prepare EIGRP EOT UPDATE header */
eigrp_packet_header_init(EIGRP_OPC_UPDATE, eigrp, ep->s, EIGRP_EOT_FLAG,
@@ -557,13 +557,13 @@ void eigrp_update_send_EOT(struct eigrp_neighbor *nbr)
if (eigrp_nbr_split_horizon_check(te, ei))
continue;
- if ((length + EIGRP_TLV_MAX_IPV4_BYTE) > mtu) {
+ if ((length + EIGRP_TLV_MAX_IPV4_BYTE) > eigrp_mtu) {
eigrp_update_place_on_nbr_queue(nbr, ep, seq_no,
length);
seq_no++;
length = EIGRP_HEADER_LEN;
- ep = eigrp_packet_new(mtu, nbr);
+ ep = eigrp_packet_new(eigrp_mtu, nbr);
eigrp_packet_header_init(
EIGRP_OPC_UPDATE, nbr->ei->eigrp, ep->s,
EIGRP_EOT_FLAG, seq_no,
@@ -604,13 +604,14 @@ void eigrp_update_send(struct eigrp_interface *ei)
struct eigrp *eigrp = ei->eigrp;
struct prefix *dest_addr;
uint32_t seq_no = eigrp->sequence_number;
+ uint16_t eigrp_mtu = EIGRP_PACKET_MTU(ei->ifp->mtu);
if (ei->nbrs->count == 0)
return;
uint16_t length = EIGRP_HEADER_LEN;
- ep = eigrp_packet_new(ei->ifp->mtu, NULL);
+ ep = eigrp_packet_new(eigrp_mtu, NULL);
/* Prepare EIGRP INIT UPDATE header */
eigrp_packet_header_init(EIGRP_OPC_UPDATE, eigrp, ep->s, 0, seq_no, 0);
@@ -633,8 +634,7 @@ void eigrp_update_send(struct eigrp_interface *ei)
if (eigrp_nbr_split_horizon_check(ne, ei))
continue;
- if ((length + EIGRP_TLV_MAX_IPV4_BYTE)
- > (uint16_t)ei->ifp->mtu) {
+ if ((length + EIGRP_TLV_MAX_IPV4_BYTE) > eigrp_mtu) {
if ((ei->params.auth_type == EIGRP_AUTH_TYPE_MD5)
&& (ei->params.auth_keychain != NULL)) {
eigrp_make_md5_digest(ei, ep->s,
@@ -651,7 +651,7 @@ void eigrp_update_send(struct eigrp_interface *ei)
eigrp_update_send_to_all_nbrs(ei, ep);
length = EIGRP_HEADER_LEN;
- ep = eigrp_packet_new(ei->ifp->mtu, NULL);
+ ep = eigrp_packet_new(eigrp_mtu, NULL);
eigrp_packet_header_init(EIGRP_OPC_UPDATE, eigrp, ep->s,
0, seq_no, 0);
if ((ei->params.auth_type == EIGRP_AUTH_TYPE_MD5)
@@ -790,7 +790,7 @@ static void eigrp_update_send_GR_part(struct eigrp_neighbor *nbr)
}
}
- ep = eigrp_packet_new(ei->ifp->mtu, nbr);
+ ep = eigrp_packet_new(EIGRP_PACKET_MTU(ei->ifp->mtu), nbr);
/* Prepare EIGRP Graceful restart UPDATE header */
eigrp_packet_header_init(EIGRP_OPC_UPDATE, eigrp, ep->s, flags,
diff --git a/isisd/dict.c b/isisd/dict.c
index 2ea86d1b68..20a4c0ff73 100644
--- a/isisd/dict.c
+++ b/isisd/dict.c
@@ -1323,9 +1323,8 @@ static void construct(dict_t *d)
free(val);
if (dn)
dnode_destroy(dn);
- }
-
- dict_load_next(&dl, dn, key);
+ } else
+ dict_load_next(&dl, dn, key);
break;
default:
putchar('?');
diff --git a/isisd/isis_redist.c b/isisd/isis_redist.c
index 9c61512df4..e903dc8c7f 100644
--- a/isisd/isis_redist.c
+++ b/isisd/isis_redist.c
@@ -184,8 +184,7 @@ static void isis_redist_update_ext_reach(struct isis_area *area, int level,
route_map_result_t map_ret;
memcpy(&area_info, info, sizeof(area_info));
- if (redist->metric != 0xffffffff)
- area_info.metric = redist->metric;
+ area_info.metric = redist->metric;
if (redist->map_name) {
map_ret =
@@ -540,7 +539,7 @@ DEFUN (isis_redistribute,
int afi;
int type;
int level;
- unsigned long metric;
+ unsigned long metric = 0;
const char *routemap = NULL;
family = str2family(argv[idx_afi]->text);
@@ -567,9 +566,6 @@ DEFUN (isis_redistribute,
return CMD_WARNING_CONFIG_FAILED;
}
- metric = 0xffffffff;
- routemap = NULL;
-
if (argc > idx_metric_rmap + 1) {
if (argv[idx_metric_rmap + 1]->arg[0] == '\0')
return CMD_WARNING_CONFIG_FAILED;
@@ -651,7 +647,7 @@ DEFUN (isis_default_originate,
int family;
int originate_type = DEFAULT_ORIGINATE;
int level;
- unsigned long metric = 0xffffffff;
+ unsigned long metric = 0;
const char *routemap = NULL;
family = str2family(argv[idx_afi]->text);
@@ -748,7 +744,7 @@ int isis_redist_config_write(struct vty *vty, struct isis_area *area,
continue;
vty_out(vty, " redistribute %s %s level-%d", family_str,
zebra_route_string(type), level);
- if (redist->metric != 0xffffffff)
+ if (redist->metric)
vty_out(vty, " metric %u", redist->metric);
if (redist->map_name)
vty_out(vty, " route-map %s", redist->map_name);
@@ -766,7 +762,7 @@ int isis_redist_config_write(struct vty *vty, struct isis_area *area,
family_str, level);
if (redist->redist == DEFAULT_ORIGINATE_ALWAYS)
vty_out(vty, " always");
- if (redist->metric != 0xffffffff)
+ if (redist->metric)
vty_out(vty, " metric %u", redist->metric);
if (redist->map_name)
vty_out(vty, " route-map %s", redist->map_name);
diff --git a/lib/command.c b/lib/command.c
index 2bff3b17a2..2744061b5a 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -49,6 +49,29 @@ DEFINE_MTYPE(LIB, HOST, "Host config")
DEFINE_MTYPE(LIB, STRVEC, "String vector")
DEFINE_MTYPE(LIB, COMPLETION, "Completion item")
+#define item(x) \
+ { \
+ x, #x \
+ }
+
+/* clang-format off */
+const struct message tokennames[] = {
+ item(WORD_TKN),
+ item(VARIABLE_TKN),
+ item(RANGE_TKN),
+ item(IPV4_TKN),
+ item(IPV4_PREFIX_TKN),
+ item(IPV6_TKN),
+ item(IPV6_PREFIX_TKN),
+ item(MAC_TKN),
+ item(MAC_PREFIX_TKN),
+ item(FORK_TKN),
+ item(JOIN_TKN),
+ item(START_TKN),
+ item(END_TKN),
+ {0},
+};
+
const char *node_names[] = {
"auth", // AUTH_NODE,
"view", // VIEW_NODE,
@@ -121,6 +144,7 @@ const char *node_names[] = {
"bgp ipv6 flowspec", /* BGP_FLOWSPECV6_NODE
*/
};
+/* clang-format on */
/* Command vector which includes some level of command lists. Normally
each daemon maintains each own cmdvec. */
@@ -1566,6 +1590,21 @@ DEFUN (show_commandtree,
return cmd_list_cmds(vty, argc == 3);
}
+DEFUN_HIDDEN(show_cli_graph,
+ show_cli_graph_cmd,
+ "show cli graph",
+ SHOW_STR
+ "CLI reflection\n"
+ "Dump current command space as DOT graph\n")
+{
+ struct cmd_node *cn = vector_slot(cmdvec, vty->node);
+ char *dot = cmd_graph_dump_dot(cn->cmdgraph);
+
+ vty_out(vty, "%s\n", dot);
+ XFREE(MTYPE_TMP, dot);
+ return CMD_SUCCESS;
+}
+
static int vty_write_config(struct vty *vty)
{
size_t i;
@@ -1834,7 +1873,7 @@ DEFUN (config_hostname,
{
struct cmd_token *word = argv[1];
- if (!isalpha((int)word->arg[0])) {
+ if (!isalnum((int)word->arg[0])) {
vty_out(vty, "Please specify string starting with alphabet\n");
return CMD_WARNING_CONFIG_FAILED;
}
@@ -2573,6 +2612,7 @@ void install_default(enum node_type node)
install_element(node, &config_end_cmd);
install_element(node, &config_help_cmd);
install_element(node, &config_list_cmd);
+ install_element(node, &show_cli_graph_cmd);
install_element(node, &find_cmd);
install_element(node, &config_write_cmd);
diff --git a/lib/command.h b/lib/command.h
index bb77812350..f18de3417c 100644
--- a/lib/command.h
+++ b/lib/command.h
@@ -146,6 +146,7 @@ enum node_type {
};
extern vector cmdvec;
+extern const struct message tokennames[];
extern const char *node_names[];
/* Node which has some commands and prompt string and configuration
diff --git a/lib/command_graph.c b/lib/command_graph.c
index 3e8261cb3e..0e8669c4b5 100644
--- a/lib/command_graph.c
+++ b/lib/command_graph.c
@@ -457,3 +457,95 @@ void cmd_graph_names(struct graph *graph)
cmd_node_names(start, NULL, NULL);
}
+
+#ifndef BUILDING_CLIPPY
+
+#include "command.h"
+#include "log.h"
+
+void cmd_graph_node_print_cb(struct graph_node *gn, struct buffer *buf)
+{
+ static bool wasend;
+
+ char nbuf[512];
+ struct cmd_token *tok = gn->data;
+ const char *color;
+
+ if (wasend == true) {
+ wasend = false;
+ return;
+ }
+
+ if (tok->type == END_TKN) {
+ wasend = true;
+ return;
+ }
+
+ snprintf(nbuf, sizeof(nbuf), " n%p [ shape=box, label=<", gn);
+ buffer_putstr(buf, nbuf);
+ snprintf(nbuf, sizeof(nbuf), "<b>%s</b>",
+ lookup_msg(tokennames, tok->type, NULL));
+ buffer_putstr(buf, nbuf);
+ if (tok->attr == CMD_ATTR_DEPRECATED)
+ buffer_putstr(buf, " (d)");
+ else if (tok->attr == CMD_ATTR_HIDDEN)
+ buffer_putstr(buf, " (h)");
+ if (tok->text) {
+ if (tok->type == WORD_TKN)
+ snprintf(
+ nbuf, sizeof(nbuf),
+ "<br/>\"<font color=\"#0055ff\" point-size=\"11\"><b>%s</b></font>\"",
+ tok->text);
+ else
+ snprintf(nbuf, sizeof(nbuf), "<br/>%s", tok->text);
+ buffer_putstr(buf, nbuf);
+ }
+
+ switch (tok->type) {
+ case START_TKN:
+ color = "#ccffcc";
+ break;
+ case FORK_TKN:
+ color = "#aaddff";
+ break;
+ case JOIN_TKN:
+ color = "#ddaaff";
+ break;
+ case WORD_TKN:
+ color = "#ffffff";
+ break;
+ default:
+ color = "#ffffff";
+ break;
+ }
+ snprintf(nbuf, sizeof(nbuf),
+ ">, style = filled, fillcolor = \"%s\" ];\n", color);
+ buffer_putstr(buf, nbuf);
+
+ for (unsigned int i = 0; i < vector_active(gn->to); i++) {
+ struct graph_node *adj = vector_slot(gn->to, i);
+
+ if (((struct cmd_token *)adj->data)->type == END_TKN) {
+ snprintf(nbuf, sizeof(nbuf), " n%p -> end%p;\n", gn,
+ adj);
+ buffer_putstr(buf, nbuf);
+ snprintf(
+ nbuf, sizeof(nbuf),
+ " end%p [ shape=box, label=<end>, style = filled, fillcolor = \"#ffddaa\" ];\n",
+ adj);
+ } else
+ snprintf(nbuf, sizeof(nbuf), " n%p -> n%p;\n", gn,
+ adj);
+
+ buffer_putstr(buf, nbuf);
+ }
+}
+
+char *cmd_graph_dump_dot(struct graph *cmdgraph)
+{
+ struct graph_node *start = vector_slot(cmdgraph->nodes, 0);
+
+ return graph_dump_dot(cmdgraph, start, cmd_graph_node_print_cb);
+}
+
+#endif /* BUILDING_CLIPPY */
diff --git a/lib/command_graph.h b/lib/command_graph.h
index ec68e284ed..82d562694c 100644
--- a/lib/command_graph.h
+++ b/lib/command_graph.h
@@ -116,5 +116,21 @@ extern void cmd_graph_parse(struct graph *graph, struct cmd_element *cmd);
extern void cmd_graph_names(struct graph *graph);
extern void cmd_graph_merge(struct graph *old, struct graph *new,
int direction);
+/*
+ * Print callback for DOT dumping.
+ *
+ * See graph.h for more details.
+ */
+extern void cmd_graph_node_print_cb(struct graph_node *gn, struct buffer *buf);
+/*
+ * Dump command graph to DOT.
+ *
+ * cmdgraph
+ * A command graph to dump
+ *
+ * Returns:
+ * String allocated with MTYPE_TMP representing this graph
+ */
+char *cmd_graph_dump_dot(struct graph *cmdgraph);
#endif /* _FRR_COMMAND_GRAPH_H */
diff --git a/lib/frr_zmq.c b/lib/frr_zmq.c
index d4df5130e7..8f190a3a09 100644
--- a/lib/frr_zmq.c
+++ b/lib/frr_zmq.c
@@ -338,6 +338,7 @@ void frrzmq_check_events(struct frrzmq_cb **cbp, struct cb_core *core,
if (!cb || !cb->zmqsock)
return;
+ len = sizeof(events);
if (zmq_getsockopt(cb->zmqsock, ZMQ_EVENTS, &events, &len))
return;
if (events & event && core->thread && !core->cancelled) {
diff --git a/lib/grammar_sandbox.c b/lib/grammar_sandbox.c
index 41dd57b7f1..51e7a3987e 100644
--- a/lib/grammar_sandbox.c
+++ b/lib/grammar_sandbox.c
@@ -37,9 +37,6 @@ DEFINE_MTYPE_STATIC(LIB, CMD_TOKENS, "Command desc")
void grammar_sandbox_init(void);
void pretty_print_graph(struct vty *vty, struct graph_node *, int, int,
struct graph_node **, size_t);
-static void pretty_print_dot(FILE *ofd, unsigned opts, struct graph_node *start,
- struct graph_node **stack, size_t stackpos,
- struct graph_node **visited, size_t *visitpos);
void init_cmdgraph(struct vty *, struct graph **);
/** shim interface commands **/
@@ -274,23 +271,19 @@ DEFUN (grammar_test_dot,
".dot filename\n")
{
check_nodegraph();
-
- struct graph_node *stack[CMD_ARGC_MAX];
- struct graph_node *visited[CMD_ARGC_MAX * CMD_ARGC_MAX];
- size_t vpos = 0;
-
FILE *ofd = fopen(argv[2]->arg, "w");
+
if (!ofd) {
vty_out(vty, "%s: %s\r\n", argv[2]->arg, strerror(errno));
return CMD_SUCCESS;
}
- fprintf(ofd,
- "digraph {\n graph [ rankdir = LR ];\n node [ fontname = \"Fira Mono\", fontsize = 9 ];\n\n");
- pretty_print_dot(ofd, 0, vector_slot(nodegraph->nodes, 0), stack, 0,
- visited, &vpos);
- fprintf(ofd, "}\n");
+ char *dot = cmd_graph_dump_dot(nodegraph);
+
+ fprintf(ofd, "%s", dot);
fclose(ofd);
+ XFREE(MTYPE_TMP, dot);
+
return CMD_SUCCESS;
}
@@ -489,24 +482,6 @@ void grammar_sandbox_init(void)
install_element(ENABLE_NODE, &grammar_access_cmd);
}
-#define item(x) { x, #x }
-struct message tokennames[] = {item(WORD_TKN), // words
- item(VARIABLE_TKN), // almost anything
- item(RANGE_TKN), // integer range
- item(IPV4_TKN), // IPV4 addresses
- item(IPV4_PREFIX_TKN), // IPV4 network prefixes
- item(IPV6_TKN), // IPV6 prefixes
- item(IPV6_PREFIX_TKN), // IPV6 network prefixes
- item(MAC_TKN), // MAC address
- item(MAC_PREFIX_TKN), // MAC address w/ mask
-
- /* plumbing types */
- item(FORK_TKN),
- item(JOIN_TKN),
- item(START_TKN), // first token in line
- item(END_TKN), // last token in line
- {0}};
-
/**
* Pretty-prints a graph, assuming it is a tree.
*
@@ -571,89 +546,6 @@ void pretty_print_graph(struct vty *vty, struct graph_node *start, int level,
vty_out(vty, "\n");
}
-static void pretty_print_dot(FILE *ofd, unsigned opts, struct graph_node *start,
- struct graph_node **stack, size_t stackpos,
- struct graph_node **visited, size_t *visitpos)
-{
- // print this node
- char tokennum[32];
- struct cmd_token *tok = start->data;
- const char *color;
-
- for (size_t i = 0; i < (*visitpos); i++)
- if (visited[i] == start)
- return;
- visited[(*visitpos)++] = start;
- if ((*visitpos) == CMD_ARGC_MAX * CMD_ARGC_MAX)
- return;
-
- snprintf(tokennum, sizeof(tokennum), "%d?", tok->type);
- fprintf(ofd, " n%p [ shape=box, label=<", start);
-
- fprintf(ofd, "<b>%s</b>", lookup_msg(tokennames, tok->type, NULL));
- if (tok->attr == CMD_ATTR_DEPRECATED)
- fprintf(ofd, " (d)");
- else if (tok->attr == CMD_ATTR_HIDDEN)
- fprintf(ofd, " (h)");
- if (tok->text) {
- if (tok->type == WORD_TKN)
- fprintf(ofd,
- "<br/>\"<font color=\"#0055ff\" point-size=\"11\"><b>%s</b></font>\"",
- tok->text);
- else
- fprintf(ofd, "<br/>%s", tok->text);
- }
- /* if (desc)
- fprintf(ofd, " ?'%s'", tok->desc); */
- switch (tok->type) {
- case START_TKN:
- color = "#ccffcc";
- break;
- case FORK_TKN:
- color = "#aaddff";
- break;
- case JOIN_TKN:
- color = "#ddaaff";
- break;
- case WORD_TKN:
- color = "#ffffff";
- break;
- default:
- color = "#ffffff";
- break;
- }
- fprintf(ofd, ">, style = filled, fillcolor = \"%s\" ];\n", color);
-
- if (stackpos == CMD_ARGC_MAX)
- return;
- stack[stackpos++] = start;
-
- for (unsigned int i = 0; i < vector_active(start->to); i++) {
- struct graph_node *adj = vector_slot(start->to, i);
- // if this node is a vararg, just print *
- if (adj == start) {
- fprintf(ofd, " n%p -> n%p;\n", start, start);
- } else if (((struct cmd_token *)adj->data)->type == END_TKN) {
- // struct cmd_token *et = adj->data;
- fprintf(ofd, " n%p -> end%p;\n", start, adj);
- fprintf(ofd,
- " end%p [ shape=box, label=<end>, style = filled, fillcolor = \"#ffddaa\" ];\n",
- adj);
- } else {
- fprintf(ofd, " n%p -> n%p;\n", start, adj);
- size_t k;
- for (k = 0; k < stackpos; k++)
- if (stack[k] == adj)
- break;
- if (k == stackpos) {
- pretty_print_dot(ofd, opts, adj, stack,
- stackpos, visited, visitpos);
- }
- }
- }
-}
-
-
/** stuff that should go in command.c + command.h */
void init_cmdgraph(struct vty *vty, struct graph **graph)
{
diff --git a/lib/graph.c b/lib/graph.c
index a9cc43f7c1..a9e35b46ff 100644
--- a/lib/graph.c
+++ b/lib/graph.c
@@ -23,6 +23,7 @@
#include <zebra.h>
#include "graph.h"
#include "memory.h"
+#include "buffer.h"
DEFINE_MTYPE_STATIC(LIB, GRAPH, "Graph")
DEFINE_MTYPE_STATIC(LIB, GRAPH_NODE, "Graph Node")
@@ -157,3 +158,73 @@ bool graph_has_edge(struct graph_node *from, struct graph_node *to)
return false;
}
+
+static void _graph_dfs(struct graph *graph, struct graph_node *start,
+ vector visited,
+ void (*dfs_cb)(struct graph_node *, void *), void *arg)
+{
+ /* check that we have not visited this node */
+ for (unsigned int i = 0; i < vector_active(visited); i++) {
+ if (start == vector_slot(visited, i))
+ return;
+ }
+
+ /* put this node in visited stack */
+ vector_ensure(visited, vector_active(visited));
+ vector_set_index(visited, vector_active(visited), start);
+
+ /* callback */
+ dfs_cb(start, arg);
+
+ /* recurse into children */
+ for (unsigned int i = vector_active(start->to); i--; /**/) {
+ struct graph_node *c = vector_slot(start->to, i);
+
+ _graph_dfs(graph, c, visited, dfs_cb, arg);
+ }
+}
+
+void graph_dfs(struct graph *graph, struct graph_node *start,
+ void (*dfs_cb)(struct graph_node *, void *), void *arg)
+{
+ vector visited = vector_init(VECTOR_MIN_SIZE);
+
+ _graph_dfs(graph, start, visited, dfs_cb, arg);
+ vector_free(visited);
+}
+
+#ifndef BUILDING_CLIPPY
+
+void graph_dump_dot_default_print_cb(struct graph_node *gn, struct buffer *buf)
+{
+ char nbuf[64];
+
+ for (unsigned int i = 0; i < vector_active(gn->to); i++) {
+ struct graph_node *adj = vector_slot(gn->to, i);
+
+ snprintf(nbuf, sizeof(nbuf), " n%p -> n%p;\n", gn, adj);
+ buffer_putstr(buf, nbuf);
+ }
+}
+
+char *graph_dump_dot(struct graph *graph, struct graph_node *start,
+ void (*pcb)(struct graph_node *, struct buffer *))
+{
+ struct buffer *buf = buffer_new(0);
+ char *ret;
+
+ pcb = (pcb) ? pcb : graph_dump_dot_default_print_cb;
+ buffer_putstr(buf, "digraph {\n");
+
+ graph_dfs(graph, start, (void (*)(struct graph_node *, void *))pcb,
+ buf);
+
+ buffer_putstr(buf, "}\n");
+
+ ret = buffer_getstr(buf);
+ buffer_free(buf);
+
+ return ret;
+}
+
+#endif /* BUILDING_CLIPPY */
diff --git a/lib/graph.h b/lib/graph.h
index d6dfef5a63..87262a07b8 100644
--- a/lib/graph.h
+++ b/lib/graph.h
@@ -26,6 +26,7 @@
#include <stdbool.h>
#include "vector.h"
+#include "buffer.h"
struct graph {
vector nodes;
@@ -111,4 +112,56 @@ struct graph_node *graph_find_node(struct graph *graph, void *data);
*/
bool graph_has_edge(struct graph_node *from, struct graph_node *to);
+/*
+ * Depth-first search.
+ *
+ * Performs a depth-first traversal of the given graph, visiting each node
+ * exactly once and calling the user-provided callback for each visit.
+ *
+ * @param graph the graph to operate on
+ * @param start the node to take as the root
+ * @param dfs_cb callback called for each node visited in the traversal
+ * @param arg argument to provide to dfs_cb
+ */
+void graph_dfs(struct graph *graph, struct graph_node *start,
+ void (*dfs_cb)(struct graph_node *, void *), void *arg);
+
+#ifndef BUILDING_CLIPPY
+/*
+ * Clippy relies on a small subset of sources in lib/, but it cannot link
+ * libfrr since clippy itself is required to build libfrr. Instead it directly
+ * includes the sources it needs. One of these is the command graph
+ * implementation, which wraps this graph implementation. Since we need to use
+ * the buffer.[ch] sources here, which indirectly rely on most of libfrr, we
+ * have to ignore them when compiling clippy to avoid build dependency issues.
+ *
+ * TODO: Fix clippy build.
+ */
+
+/*
+ * Default node printer for use with graph_dump_dot.
+ *
+ * @param gn the node to print
+ * @param buf the buffer to print into
+ */
+void graph_dump_dot_default_print_cb(struct graph_node *gn, struct buffer *buf);
+
+/*
+ * Prints a graph in the DOT language.
+ *
+ * The generated output is produced from a depth-first traversal of the graph.
+ *
+ * @param graph the graph to print
+ * @param start the node to take as the root
+ * @param pcb callback called for each node in the traversal that should
+ * print the node in the DOT language. Passing NULL for this argument
+ * will use the default printer. See graph_dump_dot_default_print_cb for
+ * an example.
+ * @return representation of graph in DOT language, allocated with MTYPE_TMP.
+ * Caller is responsible for freeing this string.
+ */
+char *graph_dump_dot(struct graph *graph, struct graph_node *start,
+ void (*pcb)(struct graph_node *, struct buffer *buf));
+
+#endif /* BUILDING_CLIPPY */
#endif /* _ZEBRA_COMMAND_GRAPH_H */
diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c
index 5ac38d6685..937b84bddd 100644
--- a/lib/nexthop_group.c
+++ b/lib/nexthop_group.c
@@ -387,6 +387,13 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd,
struct nexthop *nh;
bool legal;
+ /*
+	 * This cannot happen, because the CLI parser refuses to let
+	 * you get here without an addr; however, the static analysis
+	 * tooling does not understand this intricacy.
+ */
+ assert(addr);
+
legal = nexthop_group_parse_nexthop(&nhop, addr, intf, name);
if (nhop.type == NEXTHOP_TYPE_IPV6
diff --git a/lib/plist.c b/lib/plist.c
index 01b55f9f1d..e1dac46a90 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -73,7 +73,7 @@ struct prefix_master {
struct prefix_list_list str;
/* Whether sequential number is used. */
- int seqnum;
+ bool seqnum;
/* The latest update. */
struct prefix_list *recent;
@@ -348,7 +348,7 @@ static void prefix_list_delete(struct prefix_list *plist)
static struct prefix_list_entry *
prefix_list_entry_make(struct prefix *prefix, enum prefix_list_type type,
- int seq, int le, int ge, int any)
+ int64_t seq, int le, int ge, int any)
{
struct prefix_list_entry *pentry;
@@ -381,10 +381,10 @@ void prefix_list_delete_hook(void (*func)(struct prefix_list *plist))
}
/* Calculate new sequential number. */
-static int prefix_new_seq_get(struct prefix_list *plist)
+static int64_t prefix_new_seq_get(struct prefix_list *plist)
{
- int maxseq;
- int newseq;
+ int64_t maxseq;
+ int64_t newseq;
struct prefix_list_entry *pentry;
maxseq = newseq = 0;
@@ -401,7 +401,7 @@ static int prefix_new_seq_get(struct prefix_list *plist)
/* Return prefix list entry which has same seq number. */
static struct prefix_list_entry *prefix_seq_check(struct prefix_list *plist,
- int seq)
+ int64_t seq)
{
struct prefix_list_entry *pentry;
@@ -413,7 +413,8 @@ static struct prefix_list_entry *prefix_seq_check(struct prefix_list *plist,
static struct prefix_list_entry *
prefix_list_entry_lookup(struct prefix_list *plist, struct prefix *prefix,
- enum prefix_list_type type, int seq, int le, int ge)
+ enum prefix_list_type type, int64_t seq,
+ int le, int ge)
{
struct prefix_list_entry *pentry;
@@ -771,7 +772,7 @@ static void __attribute__((unused)) prefix_list_print(struct prefix_list *plist)
p = &pentry->prefix;
- printf(" seq %u %s %s/%d", pentry->seq,
+ printf(" seq %" PRId64 " %s %s/%d", pentry->seq,
prefix_list_type_str(pentry),
inet_ntop(p->family, &p->u.prefix, buf, BUFSIZ),
p->prefixlen);
@@ -793,7 +794,7 @@ prefix_entry_dup_check(struct prefix_list *plist, struct prefix_list_entry *new)
size_t validbits = new->prefix.prefixlen;
struct pltrie_table *table;
struct prefix_list_entry *pentry;
- int seq = 0;
+ int64_t seq = 0;
if (new->seq == -1)
seq = prefix_new_seq_get(plist);
@@ -845,13 +846,13 @@ static int vty_prefix_list_install(struct vty *vty, afi_t afi, const char *name,
struct prefix_list_entry *dup;
struct prefix p, p_tmp;
int any = 0;
- int seqnum = -1;
+ int64_t seqnum = -1;
int lenum = 0;
int genum = 0;
/* Sequential number. */
if (seq)
- seqnum = atoi(seq);
+ seqnum = (int64_t)atol(seq);
/* ge and le number */
if (ge)
@@ -972,7 +973,7 @@ static int vty_prefix_list_uninstall(struct vty *vty, afi_t afi,
struct prefix_list *plist;
struct prefix_list_entry *pentry;
struct prefix p;
- int seqnum = -1;
+ int64_t seqnum = -1;
int lenum = 0;
int genum = 0;
@@ -998,7 +999,7 @@ static int vty_prefix_list_uninstall(struct vty *vty, afi_t afi,
/* Check sequence number. */
if (seq)
- seqnum = atoi(seq);
+ seqnum = (int64_t)atol(seq);
/* ge and le number */
if (ge)
@@ -1113,7 +1114,7 @@ static void vty_show_prefix_entry(struct vty *vty, afi_t afi,
vty_out(vty, " Description: %s\n", plist->desc);
vty_out(vty,
- " count: %d, range entries: %d, sequences: %u - %u\n",
+ " count: %d, range entries: %d, sequences: %" PRId64 " - %" PRId64 "\n",
plist->count, plist->rangecount,
plist->head ? plist->head->seq : 0,
plist->tail ? plist->tail->seq : 0);
@@ -1128,7 +1129,7 @@ static void vty_show_prefix_entry(struct vty *vty, afi_t afi,
vty_out(vty, " ");
if (master->seqnum)
- vty_out(vty, "seq %u ", pentry->seq);
+ vty_out(vty, "seq %" PRId64 " ", pentry->seq);
vty_out(vty, "%s ", prefix_list_type_str(pentry));
@@ -1164,14 +1165,14 @@ static int vty_show_prefix_list(struct vty *vty, afi_t afi, const char *name,
{
struct prefix_list *plist;
struct prefix_master *master;
- int seqnum = 0;
+ int64_t seqnum = 0;
master = prefix_master_get(afi, 0);
if (master == NULL)
return CMD_WARNING;
if (seq)
- seqnum = atoi(seq);
+ seqnum = (int64_t)atol(seq);
if (name) {
plist = prefix_list_lookup(afi, name);
@@ -1236,7 +1237,7 @@ static int vty_show_prefix_list_prefix(struct vty *vty, afi_t afi,
}
if (match) {
- vty_out(vty, " seq %u %s ", pentry->seq,
+ vty_out(vty, " seq %" PRId64 " %s ", pentry->seq,
prefix_list_type_str(pentry));
if (pentry->any)
@@ -1387,7 +1388,7 @@ DEFPY (ip_prefix_list_sequence_number,
PREFIX_LIST_STR
"Include/exclude sequence numbers in NVGEN\n")
{
- prefix_master_ipv4.seqnum = no ? 0 : 1;
+ prefix_master_ipv4.seqnum = no ? false : true;
return CMD_SUCCESS;
}
@@ -1581,7 +1582,7 @@ DEFPY (ipv6_prefix_list_sequence_number,
PREFIX_LIST_STR
"Include/exclude sequence numbers in NVGEN\n")
{
- prefix_master_ipv6.seqnum = no ? 0 : 1;
+ prefix_master_ipv6.seqnum = no ? false : true;
return CMD_SUCCESS;
}
@@ -1744,7 +1745,7 @@ static int config_write_prefix_afi(afi_t afi, struct vty *vty)
afi == AFI_IP ? "" : "v6", plist->name);
if (master->seqnum)
- vty_out(vty, "seq %u ", pentry->seq);
+ vty_out(vty, "seq %" PRId64 " ", pentry->seq);
vty_out(vty, "%s ", prefix_list_type_str(pentry));
@@ -1783,7 +1784,7 @@ static int config_write_prefix_afi(afi_t afi, struct vty *vty)
afi == AFI_IP ? "" : "v6", plist->name);
if (master->seqnum)
- vty_out(vty, "seq %u ", pentry->seq);
+ vty_out(vty, "seq %" PRId64 " ", pentry->seq);
vty_out(vty, "%s", prefix_list_type_str(pentry));
@@ -1959,7 +1960,8 @@ int prefix_bgp_show_prefix_list(struct vty *vty, afi_t afi, char *name,
struct prefix *p = &pentry->prefix;
char buf[BUFSIZ];
- vty_out(vty, " seq %u %s %s/%d", pentry->seq,
+ vty_out(vty, " seq %" PRId64 " %s %s/%d",
+ pentry->seq,
prefix_list_type_str(pentry),
inet_ntop(p->family, &p->u.prefix, buf, BUFSIZ),
p->prefixlen);
diff --git a/lib/plist_int.h b/lib/plist_int.h
index aa81a3bce2..6bc2d034d6 100644
--- a/lib/plist_int.h
+++ b/lib/plist_int.h
@@ -48,7 +48,7 @@ struct prefix_list {
/* Each prefix-list's entry. */
struct prefix_list_entry {
- int seq;
+ int64_t seq;
int le;
int ge;
diff --git a/lib/prefix.c b/lib/prefix.c
index b38dd94589..05af190e9d 100644
--- a/lib/prefix.c
+++ b/lib/prefix.c
@@ -1206,54 +1206,104 @@ int str2prefix(const char *str, struct prefix *p)
return 0;
}
-static const char *prefixevpn2str(const struct prefix *p, char *str, int size)
+static const char *prefixevpn_ead2str(const struct prefix_evpn *p, char *str,
+ int size)
+{
+ snprintf(str, size, "Unsupported EVPN prefix");
+ return str;
+}
+
+static const char *prefixevpn_macip2str(const struct prefix_evpn *p, char *str,
+ int size)
{
uint8_t family;
char buf[PREFIX2STR_BUFFER];
char buf2[ETHER_ADDR_STRLEN];
- if (p->u.prefix_evpn.route_type == 2) {
- if (IS_EVPN_PREFIX_IPADDR_NONE((struct prefix_evpn *)p))
- snprintf(str, size, "[%d]:[%s]/%d",
- p->u.prefix_evpn.route_type,
- prefix_mac2str(&p->u.prefix_evpn.mac, buf2,
- sizeof(buf2)),
- p->prefixlen);
- else {
- family = IS_EVPN_PREFIX_IPADDR_V4(
- (struct prefix_evpn *)p)
- ? AF_INET
- : AF_INET6;
- snprintf(str, size, "[%d]:[%s]:[%s]/%d",
- p->u.prefix_evpn.route_type,
- prefix_mac2str(&p->u.prefix_evpn.mac, buf2,
- sizeof(buf2)),
- inet_ntop(family, &p->u.prefix_evpn.ip.ip.addr,
- buf, PREFIX2STR_BUFFER),
- p->prefixlen);
- }
- } else if (p->u.prefix_evpn.route_type == 3) {
- family = IS_EVPN_PREFIX_IPADDR_V4((struct prefix_evpn *)p)
- ? AF_INET
- : AF_INET6;
- snprintf(str, size, "[%d]:[%s]/%d", p->u.prefix_evpn.route_type,
- inet_ntop(family, &p->u.prefix_evpn.ip.ip.addr, buf,
- PREFIX2STR_BUFFER),
+ if (is_evpn_prefix_ipaddr_none(p))
+ snprintf(str, size, "[%d]:[%s]/%d",
+ p->prefix.route_type,
+ prefix_mac2str(&p->prefix.macip_addr.mac,
+ buf2, sizeof(buf2)),
p->prefixlen);
- } else if (p->u.prefix_evpn.route_type == 5) {
- family = IS_EVPN_PREFIX_IPADDR_V4((struct prefix_evpn *)p)
+ else {
+ family = is_evpn_prefix_ipaddr_v4(p)
? AF_INET
: AF_INET6;
- snprintf(str, size, "[%d]:[%u][%s/%d]/%d",
- p->u.prefix_evpn.route_type, p->u.prefix_evpn.eth_tag,
- inet_ntop(family, &p->u.prefix_evpn.ip.ip.addr, buf,
- PREFIX2STR_BUFFER),
- p->u.prefix_evpn.ip_prefix_length, p->prefixlen);
- } else {
- sprintf(str, "Unsupported EVPN route type %d",
- p->u.prefix_evpn.route_type);
+ snprintf(str, size, "[%d]:[%s]:[%s]/%d",
+ p->prefix.route_type,
+ prefix_mac2str(&p->prefix.macip_addr.mac,
+ buf2, sizeof(buf2)),
+ inet_ntop(family,
+ &p->prefix.macip_addr.ip.ip.addr,
+ buf, PREFIX2STR_BUFFER),
+ p->prefixlen);
}
+ return str;
+}
+
+static const char *prefixevpn_imet2str(const struct prefix_evpn *p, char *str,
+ int size)
+{
+ uint8_t family;
+ char buf[PREFIX2STR_BUFFER];
+
+ family = is_evpn_prefix_ipaddr_v4(p)
+ ? AF_INET
+ : AF_INET6;
+ snprintf(str, size, "[%d]:[%s]/%d", p->prefix.route_type,
+ inet_ntop(family,
+ &p->prefix.imet_addr.ip.ip.addr, buf,
+ PREFIX2STR_BUFFER),
+ p->prefixlen);
+ return str;
+}
+static const char *prefixevpn_es2str(const struct prefix_evpn *p, char *str,
+ int size)
+{
+ snprintf(str, size, "Unsupported EVPN prefix");
+ return str;
+}
+
+static const char *prefixevpn_prefix2str(const struct prefix_evpn *p, char *str,
+ int size)
+{
+ uint8_t family;
+ char buf[PREFIX2STR_BUFFER];
+
+ family = is_evpn_prefix_ipaddr_v4(p)
+ ? AF_INET
+ : AF_INET6;
+ snprintf(str, size, "[%d]:[%u][%s/%d]/%d",
+ p->prefix.route_type,
+ p->prefix.prefix_addr.eth_tag,
+ inet_ntop(family,
+ &p->prefix.prefix_addr.ip.ip.addr, buf,
+ PREFIX2STR_BUFFER),
+ p->prefix.prefix_addr.ip_prefix_length,
+ p->prefixlen);
+ return str;
+}
+
+static const char *prefixevpn2str(const struct prefix_evpn *p, char *str,
+ int size)
+{
+ switch (p->prefix.route_type) {
+ case 1:
+ return prefixevpn_ead2str(p, str, size);
+ case 2:
+ return prefixevpn_macip2str(p, str, size);
+ case 3:
+ return prefixevpn_imet2str(p, str, size);
+ case 4:
+ return prefixevpn_es2str(p, str, size);
+ case 5:
+ return prefixevpn_prefix2str(p, str, size);
+ default:
+ snprintf(str, size, "Unsupported EVPN prefix");
+ break;
+ }
return str;
}
@@ -1277,7 +1327,7 @@ const char *prefix2str(union prefixconstptr pu, char *str, int size)
break;
case AF_EVPN:
- prefixevpn2str(p, str, size);
+ prefixevpn2str((const struct prefix_evpn *)p, str, size);
break;
case AF_FLOWSPEC:
diff --git a/lib/prefix.h b/lib/prefix.h
index f01c85b811..ab3c05ae74 100644
--- a/lib/prefix.h
+++ b/lib/prefix.h
@@ -56,26 +56,56 @@ struct ethaddr {
#define PREFIX_LEN_ROUTE_TYPE_5_IPV4 (18*8)
#define PREFIX_LEN_ROUTE_TYPE_5_IPV6 (30*8)
-/* EVPN address (RFC 7432) */
-struct evpn_addr {
- uint8_t route_type;
+typedef struct esi_t_ {
+ uint8_t val[10];
+} esi_t;
+
+struct evpn_ead_addr {
+ esi_t esi;
+ uint32_t eth_tag;
+};
+
+struct evpn_macip_addr {
+ uint32_t eth_tag;
uint8_t ip_prefix_length;
struct ethaddr mac;
+ struct ipaddr ip;
+};
+
+struct evpn_imet_addr {
uint32_t eth_tag;
+ uint8_t ip_prefix_length;
struct ipaddr ip;
-#if 0
- union
- {
- uint8_t addr;
- struct in_addr v4_addr;
- struct in6_addr v6_addr;
- } ip;
-#endif
};
-#define IS_EVPN_PREFIX_IPADDR_NONE(evp) IS_IPADDR_NONE(&(evp)->prefix.ip)
-#define IS_EVPN_PREFIX_IPADDR_V4(evp) IS_IPADDR_V4(&(evp)->prefix.ip)
-#define IS_EVPN_PREFIX_IPADDR_V6(evp) IS_IPADDR_V6(&(evp)->prefix.ip)
+struct evpn_es_addr {
+ esi_t esi;
+ uint8_t ip_prefix_length;
+ struct ipaddr ip;
+};
+
+struct evpn_prefix_addr {
+ uint32_t eth_tag;
+ uint8_t ip_prefix_length;
+ struct ipaddr ip;
+};
+
+/* EVPN address (RFC 7432) */
+struct evpn_addr {
+ uint8_t route_type;
+ union {
+ struct evpn_ead_addr _ead_addr;
+ struct evpn_macip_addr _macip_addr;
+ struct evpn_imet_addr _imet_addr;
+ struct evpn_es_addr _es_addr;
+ struct evpn_prefix_addr _prefix_addr;
+ } u;
+#define ead_addr u._ead_addr
+#define macip_addr u._macip_addr
+#define imet_addr u._imet_addr
+#define es_addr u._es_addr
+#define prefix_addr u._prefix_addr
+};
/*
* A struct prefix contains an address family, a prefix length, and an
@@ -177,6 +207,39 @@ struct prefix_evpn {
struct evpn_addr prefix __attribute__((aligned(8)));
};
+static inline int is_evpn_prefix_ipaddr_none(const struct prefix_evpn *evp)
+{
+ if (evp->prefix.route_type == 2)
+ return IS_IPADDR_NONE(&(evp)->prefix.macip_addr.ip);
+ if (evp->prefix.route_type == 3)
+ return IS_IPADDR_NONE(&(evp)->prefix.imet_addr.ip);
+ if (evp->prefix.route_type == 5)
+ return IS_IPADDR_NONE(&(evp)->prefix.prefix_addr.ip);
+ return 0;
+}
+
+static inline int is_evpn_prefix_ipaddr_v4(const struct prefix_evpn *evp)
+{
+ if (evp->prefix.route_type == 2)
+ return IS_IPADDR_V4(&(evp)->prefix.macip_addr.ip);
+ if (evp->prefix.route_type == 3)
+ return IS_IPADDR_V4(&(evp)->prefix.imet_addr.ip);
+ if (evp->prefix.route_type == 5)
+ return IS_IPADDR_V4(&(evp)->prefix.prefix_addr.ip);
+ return 0;
+}
+
+static inline int is_evpn_prefix_ipaddr_v6(const struct prefix_evpn *evp)
+{
+ if (evp->prefix.route_type == 2)
+ return IS_IPADDR_V6(&(evp)->prefix.macip_addr.ip);
+ if (evp->prefix.route_type == 3)
+ return IS_IPADDR_V6(&(evp)->prefix.imet_addr.ip);
+ if (evp->prefix.route_type == 5)
+ return IS_IPADDR_V6(&(evp)->prefix.prefix_addr.ip);
+ return 0;
+}
+
/* Prefix for a generic pointer */
struct prefix_ptr {
uint8_t family;
diff --git a/lib/routemap.c b/lib/routemap.c
index ea61043a8d..892b19dac5 100644
--- a/lib/routemap.c
+++ b/lib/routemap.c
@@ -722,7 +722,7 @@ static void route_map_delete(struct route_map *map)
/* Clear all dependencies */
route_map_clear_all_references(name);
- map->deleted = 1;
+ map->deleted = true;
/* Execute deletion hook. */
if (route_map_master.delete_hook) {
(*route_map_master.delete_hook)(name);
@@ -762,19 +762,19 @@ int route_map_mark_updated(const char *name, int del_later)
map = route_map_lookup_by_name(name);
- /* If we did not find the routemap with deleted=0 try again
- * with deleted=1
+ /* If we did not find the routemap with deleted=false try again
+ * with deleted=true
*/
if (!map) {
memset(&tmp_map, 0, sizeof(struct route_map));
tmp_map.name = XSTRDUP(MTYPE_ROUTE_MAP_NAME, name);
- tmp_map.deleted = 1;
+ tmp_map.deleted = true;
map = hash_lookup(route_map_master_hash, &tmp_map);
XFREE(MTYPE_ROUTE_MAP_NAME, tmp_map.name);
}
if (map) {
- map->to_be_processed = 1;
+ map->to_be_processed = true;
ret = 0;
}
@@ -786,7 +786,7 @@ int route_map_clear_updated(struct route_map *map)
int ret = -1;
if (map) {
- map->to_be_processed = 0;
+ map->to_be_processed = false;
if (map->deleted)
route_map_free_map(map);
}
@@ -2743,7 +2743,7 @@ void route_map_finish(void)
/* cleanup route_map */
while (route_map_master.head) {
struct route_map *map = route_map_master.head;
- map->to_be_processed = 0;
+ map->to_be_processed = false;
route_map_delete(map);
}
diff --git a/lib/routemap.h b/lib/routemap.h
index 0046b77c46..990c7fa72f 100644
--- a/lib/routemap.h
+++ b/lib/routemap.h
@@ -158,8 +158,8 @@ struct route_map {
struct route_map *prev;
/* Maintain update info */
- int to_be_processed; /* True if modification isn't acted on yet */
- int deleted; /* If 1, then this node will be deleted */
+ bool to_be_processed; /* True if modification isn't acted on yet */
+	bool deleted; /* If true, then this node will be deleted */
QOBJ_FIELDS
};
diff --git a/lib/stream.c b/lib/stream.c
index 927a3d3d55..c4edd3d5bf 100644
--- a/lib/stream.c
+++ b/lib/stream.c
@@ -1113,6 +1113,7 @@ void stream_fifo_push(struct stream_fifo *fifo, struct stream *s)
fifo->head = s;
fifo->tail = s;
+ fifo->tail->next = NULL;
fifo->count++;
}
@@ -1131,6 +1132,9 @@ struct stream *stream_fifo_pop(struct stream_fifo *fifo)
fifo->tail = NULL;
fifo->count--;
+
+ /* ensure stream is scrubbed of references to this fifo */
+ s->next = NULL;
}
return s;
diff --git a/lib/subdir.am b/lib/subdir.am
index 3b469d4524..c5719786d6 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -171,6 +171,7 @@ pkginclude_HEADERS += \
lib/pbr.h \
# end
+
nodist_pkginclude_HEADERS += \
lib/route_types.h \
lib/version.h \
@@ -232,7 +233,7 @@ lib_grammar_sandbox_SOURCES = \
lib_grammar_sandbox_LDADD = \
lib/libfrr.la
-lib_clippy_CPPFLAGS = $(AM_CPPFLAGS) -D_GNU_SOURCE
+lib_clippy_CPPFLAGS = $(AM_CPPFLAGS) -D_GNU_SOURCE -DBUILDING_CLIPPY
lib_clippy_CFLAGS = $(PYTHON_CFLAGS)
lib_clippy_LDADD = $(PYTHON_LIBS)
lib_clippy_SOURCES = \
diff --git a/lib/table.h b/lib/table.h
index 9637fec149..a9d788b35a 100644
--- a/lib/table.h
+++ b/lib/table.h
@@ -24,6 +24,7 @@
#include "memory.h"
#include "hash.h"
+#include "prefix.h"
DECLARE_MTYPE(ROUTE_TABLE)
DECLARE_MTYPE(ROUTE_NODE)
diff --git a/lib/thread.c b/lib/thread.c
index f3129e39e8..f9ff16b7b3 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -32,6 +32,7 @@
#include "sigevent.h"
#include "network.h"
#include "jhash.h"
+#include "frratomic.h"
DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread")
DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master")
@@ -104,25 +105,41 @@ static void vty_out_cpu_thread_history(struct vty *vty,
static void cpu_record_hash_print(struct hash_backet *bucket, void *args[])
{
struct cpu_thread_history *totals = args[0];
+ struct cpu_thread_history copy;
struct vty *vty = args[1];
- thread_type *filter = args[2];
+ uint8_t *filter = args[2];
struct cpu_thread_history *a = bucket->data;
- if (!(a->types & *filter))
+ copy.total_active =
+ atomic_load_explicit(&a->total_active, memory_order_seq_cst);
+ copy.total_calls =
+ atomic_load_explicit(&a->total_calls, memory_order_seq_cst);
+ copy.cpu.total =
+ atomic_load_explicit(&a->cpu.total, memory_order_seq_cst);
+ copy.cpu.max = atomic_load_explicit(&a->cpu.max, memory_order_seq_cst);
+ copy.real.total =
+ atomic_load_explicit(&a->real.total, memory_order_seq_cst);
+ copy.real.max =
+ atomic_load_explicit(&a->real.max, memory_order_seq_cst);
+ copy.types = atomic_load_explicit(&a->types, memory_order_seq_cst);
+ copy.funcname = a->funcname;
+
+ if (!(copy.types & *filter))
return;
- vty_out_cpu_thread_history(vty, a);
- totals->total_active += a->total_active;
- totals->total_calls += a->total_calls;
- totals->real.total += a->real.total;
- if (totals->real.max < a->real.max)
- totals->real.max = a->real.max;
- totals->cpu.total += a->cpu.total;
- if (totals->cpu.max < a->cpu.max)
- totals->cpu.max = a->cpu.max;
+
+ vty_out_cpu_thread_history(vty, &copy);
+ totals->total_active += copy.total_active;
+ totals->total_calls += copy.total_calls;
+ totals->real.total += copy.real.total;
+ if (totals->real.max < copy.real.max)
+ totals->real.max = copy.real.max;
+ totals->cpu.total += copy.cpu.total;
+ if (totals->cpu.max < copy.cpu.max)
+ totals->cpu.max = copy.cpu.max;
}
-static void cpu_record_print(struct vty *vty, thread_type filter)
+static void cpu_record_print(struct vty *vty, uint8_t filter)
{
struct cpu_thread_history tmp;
void *args[3] = {&tmp, vty, &filter};
@@ -140,7 +157,7 @@ static void cpu_record_print(struct vty *vty, thread_type filter)
char underline[strlen(name) + 1];
memset(underline, '-', sizeof(underline));
- underline[sizeof(underline)] = '\0';
+ underline[sizeof(underline) - 1] = '\0';
vty_out(vty, "\n");
vty_out(vty, "Showing statistics for pthread %s\n",
@@ -183,7 +200,7 @@ static void cpu_record_print(struct vty *vty, thread_type filter)
static void cpu_record_hash_clear(struct hash_backet *bucket, void *args[])
{
- thread_type *filter = args[0];
+ uint8_t *filter = args[0];
struct hash *cpu_record = args[1];
struct cpu_thread_history *a = bucket->data;
@@ -194,9 +211,9 @@ static void cpu_record_hash_clear(struct hash_backet *bucket, void *args[])
hash_release(cpu_record, bucket->data);
}
-static void cpu_record_clear(thread_type filter)
+static void cpu_record_clear(uint8_t filter)
{
- thread_type *tmp = &filter;
+ uint8_t *tmp = &filter;
struct thread_master *m;
struct listnode *ln;
@@ -218,7 +235,7 @@ static void cpu_record_clear(thread_type filter)
pthread_mutex_unlock(&masters_mtx);
}
-static thread_type parse_filter(const char *filterstr)
+static uint8_t parse_filter(const char *filterstr)
{
int i = 0;
int filter = 0;
@@ -261,7 +278,7 @@ DEFUN (show_thread_cpu,
"Thread CPU usage\n"
"Display filter (rwtexb)\n")
{
- thread_type filter = (thread_type)-1U;
+ uint8_t filter = (uint8_t)-1U;
int idx = 0;
if (argv_find(argv, argc, "FILTER", &idx)) {
@@ -287,7 +304,7 @@ DEFUN (clear_thread_cpu,
"Thread CPU usage\n"
"Display filter (rwtexb)\n")
{
- thread_type filter = (thread_type)-1U;
+ uint8_t filter = (uint8_t)-1U;
int idx = 0;
if (argv_find(argv, argc, "FILTER", &idx)) {
@@ -1492,12 +1509,22 @@ void thread_getrusage(RUSAGE_T *r)
getrusage(RUSAGE_SELF, &(r->cpu));
}
-/* We check thread consumed time. If the system has getrusage, we'll
- use that to get in-depth stats on the performance of the thread in addition
- to wall clock time stats from gettimeofday. */
+/*
+ * Call a thread.
+ *
+ * This function will atomically update the thread's usage history. At present
+ * this is the only spot where usage history is written. Nevertheless the code
+ * has been written such that the introduction of writers in the future should
+ * not need to update it provided the writers atomically perform only the
+ * operations done here, i.e. updating the total and maximum times. In
+ * particular, the maximum real and cpu times must be monotonically increasing
+ * or this code is not correct.
+ */
void thread_call(struct thread *thread)
{
- unsigned long realtime, cputime;
+ _Atomic unsigned long realtime, cputime;
+ unsigned long exp;
+ unsigned long helper;
RUSAGE_T before, after;
GETRUSAGE(&before);
@@ -1509,16 +1536,35 @@ void thread_call(struct thread *thread)
GETRUSAGE(&after);
- realtime = thread_consumed_time(&after, &before, &cputime);
- thread->hist->real.total += realtime;
- if (thread->hist->real.max < realtime)
- thread->hist->real.max = realtime;
- thread->hist->cpu.total += cputime;
- if (thread->hist->cpu.max < cputime)
- thread->hist->cpu.max = cputime;
-
- ++(thread->hist->total_calls);
- thread->hist->types |= (1 << thread->add_type);
+ realtime = thread_consumed_time(&after, &before, &helper);
+ cputime = helper;
+
+ /* update realtime */
+ atomic_fetch_add_explicit(&thread->hist->real.total, realtime,
+ memory_order_seq_cst);
+ exp = atomic_load_explicit(&thread->hist->real.max,
+ memory_order_seq_cst);
+ while (exp < realtime
+ && !atomic_compare_exchange_weak_explicit(
+ &thread->hist->real.max, &exp, realtime,
+ memory_order_seq_cst, memory_order_seq_cst))
+ ;
+
+ /* update cputime */
+ atomic_fetch_add_explicit(&thread->hist->cpu.total, cputime,
+ memory_order_seq_cst);
+ exp = atomic_load_explicit(&thread->hist->cpu.max,
+ memory_order_seq_cst);
+ while (exp < cputime
+ && !atomic_compare_exchange_weak_explicit(
+ &thread->hist->cpu.max, &exp, cputime,
+ memory_order_seq_cst, memory_order_seq_cst))
+ ;
+
+ atomic_fetch_add_explicit(&thread->hist->total_calls, 1,
+ memory_order_seq_cst);
+ atomic_fetch_or_explicit(&thread->hist->types, 1 << thread->add_type,
+ memory_order_seq_cst);
#ifdef CONSUMED_TIME_CHECK
if (realtime > CONSUMED_TIME_CHECK) {
diff --git a/lib/thread.h b/lib/thread.h
index f7c110914d..01ff4daf42 100644
--- a/lib/thread.h
+++ b/lib/thread.h
@@ -25,6 +25,7 @@
#include <pthread.h>
#include <poll.h>
#include "monotime.h"
+#include "frratomic.h"
struct rusage_t {
struct rusage cpu;
@@ -91,12 +92,10 @@ struct thread_master {
pthread_t owner;
};
-typedef unsigned char thread_type;
-
/* Thread itself. */
struct thread {
- thread_type type; /* thread type */
- thread_type add_type; /* thread type */
+ uint8_t type; /* thread type */
+ uint8_t add_type; /* thread type */
struct thread *next; /* next pointer of the thread */
struct thread *prev; /* previous pointer of the thread */
struct thread **ref; /* external reference (if given) */
@@ -120,13 +119,13 @@ struct thread {
struct cpu_thread_history {
int (*func)(struct thread *);
- unsigned int total_calls;
- unsigned int total_active;
+ _Atomic unsigned int total_calls;
+ _Atomic unsigned int total_active;
struct time_stats {
- unsigned long total, max;
+ _Atomic unsigned long total, max;
} real;
struct time_stats cpu;
- thread_type types;
+ _Atomic uint8_t types;
const char *funcname;
};
diff --git a/lib/zclient.c b/lib/zclient.c
index dc27cbef70..cb39099fc2 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -1374,6 +1374,26 @@ stream_failure:
return false;
}
+bool zapi_iptable_notify_decode(struct stream *s,
+ uint32_t *unique,
+ enum zapi_iptable_notify_owner *note)
+{
+ uint32_t uni;
+
+ STREAM_GET(note, s, sizeof(*note));
+
+ STREAM_GETL(s, uni);
+
+ if (zclient_debug)
+ zlog_debug("%s: %u", __PRETTY_FUNCTION__, uni);
+ *unique = uni;
+
+ return true;
+
+stream_failure:
+ return false;
+}
+
struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh)
{
struct nexthop *n = nexthop_new();
@@ -2765,6 +2785,22 @@ static int zclient_read(struct thread *thread)
(*zclient->label_chunk)(command, zclient, length,
vrf_id);
break;
+ case ZEBRA_IPSET_NOTIFY_OWNER:
+ if (zclient->ipset_notify_owner)
+ (*zclient->ipset_notify_owner)(command, zclient, length,
+ vrf_id);
+ break;
+ case ZEBRA_IPSET_ENTRY_NOTIFY_OWNER:
+ if (zclient->ipset_entry_notify_owner)
+ (*zclient->ipset_entry_notify_owner)(command,
+ zclient, length,
+ vrf_id);
+ break;
+ case ZEBRA_IPTABLE_NOTIFY_OWNER:
+ if (zclient->iptable_notify_owner)
+ (*zclient->iptable_notify_owner)(command,
+ zclient, length,
+ vrf_id);
default:
break;
}
diff --git a/lib/zclient.h b/lib/zclient.h
index 71f5b38384..8d26b7fe59 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -258,6 +258,10 @@ struct zclient {
struct zclient *zclient,
uint16_t length,
vrf_id_t vrf_id);
+ int (*iptable_notify_owner)(int command,
+ struct zclient *zclient,
+ uint16_t length,
+ vrf_id_t vrf_id);
};
/* Zebra API message flag. */
@@ -680,6 +684,9 @@ bool zapi_ipset_entry_notify_decode(struct stream *s,
uint32_t *unique,
char *ipset_name,
enum zapi_ipset_entry_notify_owner *note);
+bool zapi_iptable_notify_decode(struct stream *s,
+ uint32_t *unique,
+ enum zapi_iptable_notify_owner *note);
extern struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh);
extern bool zapi_nexthop_update_decode(struct stream *s,
diff --git a/ospf6d/ospf6_abr.c b/ospf6d/ospf6_abr.c
index b895b5ad8b..bc1ce621ae 100644
--- a/ospf6d/ospf6_abr.c
+++ b/ospf6d/ospf6_abr.c
@@ -684,7 +684,7 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
{
struct prefix prefix, abr_prefix;
struct ospf6_route_table *table = NULL;
- struct ospf6_route *range, *route, *old = NULL;
+ struct ospf6_route *range, *route, *old = NULL, *old_route;
struct ospf6_route *abr_entry;
uint8_t type = 0;
char options[3] = {0, 0, 0};
@@ -695,14 +695,15 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
int is_debug = 0;
struct ospf6_inter_prefix_lsa *prefix_lsa = NULL;
struct ospf6_inter_router_lsa *router_lsa = NULL;
- struct ospf6_path *path;
+ bool old_entry_updated = false;
memset(&prefix, 0, sizeof(prefix));
if (lsa->header->type == htons(OSPF6_LSTYPE_INTER_PREFIX)) {
if (IS_OSPF6_DEBUG_EXAMIN(INTER_PREFIX)) {
is_debug++;
- zlog_debug("Examin %s in area %s", lsa->name, oa->name);
+ zlog_debug("%s: Examin %s in area %s",
+ __PRETTY_FUNCTION__, lsa->name, oa->name);
}
prefix_lsa =
@@ -720,7 +721,8 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
} else if (lsa->header->type == htons(OSPF6_LSTYPE_INTER_ROUTER)) {
if (IS_OSPF6_DEBUG_EXAMIN(INTER_ROUTER)) {
is_debug++;
- zlog_debug("Examin %s in area %s", lsa->name, oa->name);
+ zlog_debug("%s: Examin %s in area %s",
+ __PRETTY_FUNCTION__, lsa->name, oa->name);
}
router_lsa =
@@ -768,7 +770,8 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
}
if (OSPF6_LSA_IS_MAXAGE(lsa)) {
if (is_debug)
- zlog_debug("LSA is MaxAge, ignore");
+ zlog_debug("%s: LSA %s is MaxAge, ignore",
+ __PRETTY_FUNCTION__, lsa->name);
if (old)
ospf6_route_remove(old, table);
return;
@@ -845,9 +848,24 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
|| CHECK_FLAG(abr_entry->flag, OSPF6_ROUTE_REMOVE)
|| !CHECK_FLAG(abr_entry->path.router_bits, OSPF6_ROUTER_BIT_B)) {
if (is_debug)
- zlog_debug("ABR router entry does not exist, ignore");
- if (old)
- ospf6_route_remove(old, table);
+ zlog_debug("%s: ABR router entry does not exist, ignore",
+ __PRETTY_FUNCTION__);
+ if (old) {
+ if (old->type == OSPF6_DEST_TYPE_ROUTER &&
+ oa->intra_brouter_calc) {
+ if (is_debug)
+ zlog_debug(
+ "%s: intra_brouter_calc is on, skip brouter remove: %s (%p)",
+ __PRETTY_FUNCTION__, buf,
+ (void *)old);
+ } else {
+ if (is_debug)
+ zlog_debug("%s: remove old entry: %s %p ",
+ __PRETTY_FUNCTION__, buf,
+ (void *)old);
+ ospf6_route_remove(old, table);
+ }
+ }
return;
}
@@ -902,11 +920,11 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
route->path.type = OSPF6_PATH_TYPE_INTER;
route->path.cost = abr_entry->path.cost + cost;
- ospf6_route_copy_nexthops(route, abr_entry);
-
- path = ospf6_path_dup(&route->path);
- ospf6_copy_nexthops(path->nh_list, abr_entry->nh_list);
- listnode_add_sort(route->paths, path);
+ /* Inter abr_entry is same as brouter.
+ * Avoid duplicate nexthops to brouter and its
+ * learnt route. i.e. use merge nexthops.
+ */
+ ospf6_route_merge_nexthops(route, abr_entry);
/* (7) If the routes are identical, copy the next hops over to existing
route. ospf6's route table implementation will otherwise string both
@@ -915,11 +933,28 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
*/
old = ospf6_route_lookup(&prefix, table);
- if (old && (ospf6_route_cmp(route, old) == 0)) {
- ospf6_route_merge_nexthops(old, route);
+ for (old_route = old; old_route; old_route = old_route->next) {
+ if (!ospf6_route_is_same(old_route, route) ||
+ (old_route->type != route->type) ||
+ (old_route->path.type != route->path.type))
+ continue;
+
+ if ((ospf6_route_cmp(route, old_route) != 0)) {
+ if (is_debug) {
+ prefix2str(&prefix, buf, sizeof(buf));
+ zlog_debug("%s: old %p %s cost %u new route cost %u are not same",
+ __PRETTY_FUNCTION__,
+ (void *)old_route, buf,
+ old_route->path.cost,
+ route->path.cost);
+ }
+ continue;
+ }
+ old_entry_updated = true;
+ ospf6_route_merge_nexthops(old, route);
if (is_debug)
- zlog_debug("%s: Update route: %s old cost %u new cost %u nh count %u",
+ zlog_debug("%s: Update route: %s old cost %u new cost %u nh %u",
__PRETTY_FUNCTION__,
buf, old->path.cost, route->path.cost,
listcount(route->nh_list));
@@ -930,9 +965,12 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
/* Delete new route */
ospf6_route_delete(route);
- } else {
+ break;
+ }
+
+ if (old_entry_updated == false) {
if (is_debug)
- zlog_debug("%s: Install route: %s cost %u nh count %u",
+ zlog_debug("%s: Install route: %s cost %u nh %u",
__PRETTY_FUNCTION__, buf, route->path.cost,
listcount(route->nh_list));
/* ospf6_ia_add_nw_route (table, &prefix, route); */
diff --git a/ospf6d/ospf6_area.h b/ospf6d/ospf6_area.h
index eaf3e5c6de..ba497a168e 100644
--- a/ospf6d/ospf6_area.h
+++ b/ospf6d/ospf6_area.h
@@ -50,6 +50,9 @@ struct ospf6_area {
/* Area type */
int no_summary;
+ /* Brouter traversal protection */
+ int intra_brouter_calc;
+
/* OSPF interface list */
struct list *if_list;
diff --git a/ospf6d/ospf6_asbr.c b/ospf6d/ospf6_asbr.c
index 1b46b9c68d..0fe0cada39 100644
--- a/ospf6d/ospf6_asbr.c
+++ b/ospf6d/ospf6_asbr.c
@@ -685,8 +685,9 @@ void ospf6_asbr_lsa_remove(struct ospf6_lsa *lsa,
if (IS_OSPF6_DEBUG_EXAMIN(AS_EXTERNAL)) {
prefix2str(&prefix, buf, sizeof(buf));
zlog_debug(
- "%s: route %s path found with nh %u to remove.",
+ "%s: route %s path found with cost %u nh %u to remove.",
__PRETTY_FUNCTION__, buf,
+ route->path.cost,
listcount(o_path->nh_list));
}
@@ -738,28 +739,37 @@ void ospf6_asbr_lsa_remove(struct ospf6_lsa *lsa,
listcount(route->nh_list));
}
- /* Update RIB/FIB with effective nh_list */
- if (ospf6->route_table->hook_add)
- (*ospf6->route_table->hook_add)(route);
+ if (listcount(route->paths)) {
+ /* Update RIB/FIB with effective
+ * nh_list
+ */
+ if (ospf6->route_table->hook_add)
+ (*ospf6->route_table->hook_add)
+ (route);
- /* route's primary path is similar to LSA,
- * replace route's primary path with
- * route's paths list head.
- */
- if (route->path.origin.id == lsa->header->id
- && route->path.origin.adv_router
- == lsa->header->adv_router) {
- struct ospf6_path *h_path;
+ /* route's primary path is similar
+ * to LSA, replace route's primary
+ * path with route's paths list head.
+ */
+ if ((route->path.origin.id ==
+ lsa->header->id) &&
+ (route->path.origin.adv_router
+ == lsa->header->adv_router)) {
+ struct ospf6_path *h_path;
- h_path = (struct ospf6_path *)
+ h_path = (struct ospf6_path *)
listgetdata(
listhead(route->paths));
- route->path.origin.type =
- h_path->origin.type;
- route->path.origin.id =
- h_path->origin.id;
- route->path.origin.adv_router =
+ route->path.origin.type =
+ h_path->origin.type;
+ route->path.origin.id =
+ h_path->origin.id;
+ route->path.origin.adv_router =
h_path->origin.adv_router;
+ }
+ } else {
+ ospf6_route_remove(route,
+ ospf6->route_table);
}
}
continue;
diff --git a/ospf6d/ospf6_intra.c b/ospf6d/ospf6_intra.c
index de4ee2e1ac..26e6deadae 100644
--- a/ospf6d/ospf6_intra.c
+++ b/ospf6d/ospf6_intra.c
@@ -1314,17 +1314,60 @@ int ospf6_intra_prefix_lsa_originate_transit(struct thread *thread)
return 0;
}
+static void ospf6_intra_prefix_update_route_origin(struct ospf6_route *oa_route)
+{
+ struct ospf6_path *h_path;
+ struct ospf6_route *g_route, *nroute;
+
+ /* Update Global ospf6 route path */
+ g_route = ospf6_route_lookup(&oa_route->prefix,
+ ospf6->route_table);
+
+ for (ospf6_route_lock(g_route); g_route &&
+ ospf6_route_is_prefix(&oa_route->prefix, g_route);
+ g_route = nroute) {
+ nroute = ospf6_route_next(g_route);
+ if (g_route->type != oa_route->type)
+ continue;
+ if (g_route->path.area_id != oa_route->path.area_id)
+ continue;
+ if (g_route->path.type != OSPF6_PATH_TYPE_INTRA)
+ continue;
+ if (g_route->path.cost != oa_route->path.cost)
+ continue;
+
+ if (ospf6_route_is_same_origin(g_route, oa_route)) {
+ h_path = (struct ospf6_path *)listgetdata(
+ listhead(g_route->paths));
+ g_route->path.origin.type = h_path->origin.type;
+ g_route->path.origin.id = h_path->origin.id;
+ g_route->path.origin.adv_router =
+ h_path->origin.adv_router;
+ break;
+ }
+ }
+
+ h_path = (struct ospf6_path *)listgetdata(
+ listhead(oa_route->paths));
+ oa_route->path.origin.type = h_path->origin.type;
+ oa_route->path.origin.id = h_path->origin.id;
+ oa_route->path.origin.adv_router = h_path->origin.adv_router;
+}
+
void ospf6_intra_prefix_route_ecmp_path(struct ospf6_area *oa,
struct ospf6_route *old,
struct ospf6_route *route)
{
- struct ospf6_route *old_route;
+ struct ospf6_route *old_route, *ls_entry;
struct ospf6_path *ecmp_path, *o_path = NULL;
struct listnode *anode, *anext;
struct listnode *nnode, *rnode, *rnext;
struct ospf6_nexthop *nh, *rnh;
char buf[PREFIX2STR_BUFFER];
bool route_found = false;
+ struct interface *ifp;
+ struct ospf6_lsa *lsa;
+ struct ospf6_intra_prefix_lsa *intra_prefix_lsa;
/* check for old entry match with new route origin,
* delete old entry.
@@ -1361,7 +1404,7 @@ void ospf6_intra_prefix_route_ecmp_path(struct ospf6_area *oa,
o_path->cost, route->path.cost);
}
- /* Remove selected current rout path's nh from
+ /* Remove selected current path's nh from
* effective nh list.
*/
for (ALL_LIST_ELEMENTS_RO(o_path->nh_list, nnode, nh)) {
@@ -1385,22 +1428,6 @@ void ospf6_intra_prefix_route_ecmp_path(struct ospf6_area *oa,
* Update FIB with effective NHs.
*/
if (listcount(old_route->paths)) {
- if (old_route->path.origin.id ==
- route->path.origin.id &&
- old_route->path.origin.adv_router ==
- route->path.origin.adv_router) {
- struct ospf6_path *h_path;
-
- h_path = (struct ospf6_path *)
- listgetdata(listhead(old_route->paths));
- old_route->path.origin.type =
- h_path->origin.type;
- old_route->path.origin.id =
- h_path->origin.id;
- old_route->path.origin.adv_router =
- h_path->origin.adv_router;
- }
-
if (route_updated) {
for (ALL_LIST_ELEMENTS(old_route->paths,
anode, anext, o_path)) {
@@ -1415,6 +1442,14 @@ void ospf6_intra_prefix_route_ecmp_path(struct ospf6_area *oa,
if (oa->route_table->hook_add)
(*oa->route_table->hook_add)
(old_route);
+
+ if (old_route->path.origin.id ==
+ route->path.origin.id &&
+ old_route->path.origin.adv_router ==
+ route->path.origin.adv_router) {
+ ospf6_intra_prefix_update_route_origin(
+ old_route);
+ }
break;
}
} else {
@@ -1426,8 +1461,12 @@ void ospf6_intra_prefix_route_ecmp_path(struct ospf6_area *oa,
old_route->path.cost,
route->path.cost);
}
- ospf6_route_remove(old_route,
+ if (oa->route_table->hook_remove)
+ ospf6_route_remove(old_route,
oa->route_table);
+ else
+ SET_FLAG(old_route->flag,
+ OSPF6_ROUTE_REMOVE);
break;
}
}
@@ -1467,72 +1506,101 @@ void ospf6_intra_prefix_route_ecmp_path(struct ospf6_area *oa,
/* Add a nh_list to new ecmp path */
ospf6_copy_nexthops(ecmp_path->nh_list,
route->nh_list);
- /* Merge nexthop to existing route's nh_list */
- ospf6_route_merge_nexthops(old_route, route);
/* Add the new path to route's path list */
listnode_add_sort(old_route->paths, ecmp_path);
- UNSET_FLAG(old_route->flag, OSPF6_ROUTE_REMOVE);
- SET_FLAG(old_route->flag, OSPF6_ROUTE_CHANGE);
- /* Update RIB/FIB */
- if (oa->route_table->hook_add)
- (*oa->route_table->hook_add)
- (old_route);
if (IS_OSPF6_DEBUG_EXAMIN(INTRA_PREFIX)) {
prefix2str(&route->prefix, buf,
sizeof(buf));
- zlog_debug("%s: route %s %p another path added with nh %u, effective paths %u nh %u",
+ zlog_debug(
+ "%s: route %s %p another path added with nh %u, effective paths %u nh %u",
__PRETTY_FUNCTION__, buf,
(void *)old_route,
listcount(ecmp_path->nh_list),
old_route->paths ?
- listcount(old_route->paths)
- : 0,
+ listcount(old_route->paths) : 0,
listcount(old_route->nh_list));
- }
- } else {
- for (ALL_LIST_ELEMENTS_RO(o_path->nh_list,
- nnode, nh)) {
- for (ALL_LIST_ELEMENTS(
- old_route->nh_list,
- rnode, rnext, rnh)) {
- if (!ospf6_nexthop_is_same(rnh,
- nh))
- continue;
- listnode_delete(
- old_route->nh_list,
- rnh);
- ospf6_nexthop_delete(rnh);
- }
}
+ } else {
list_delete_all_node(o_path->nh_list);
ospf6_copy_nexthops(o_path->nh_list,
route->nh_list);
- /* Merge nexthop to existing route's nh_list */
- ospf6_route_merge_nexthops(old_route,
- route);
+ }
+
+ list_delete_all_node(old_route->nh_list);
- if (IS_OSPF6_DEBUG_EXAMIN(INTRA_PREFIX)) {
- prefix2str(&route->prefix,
- buf, sizeof(buf));
- zlog_debug("%s: existing route %s %p with effective paths %u nh count %u",
- __PRETTY_FUNCTION__, buf,
- (void *)old_route,
- listcount(old_route->paths),
- old_route->nh_list ?
- listcount(old_route->nh_list)
- : 0);
+ for (ALL_LIST_ELEMENTS_RO(old_route->paths, anode,
+ o_path)) {
+ ls_entry = ospf6_route_lookup(
+ &o_path->ls_prefix,
+ oa->spf_table);
+ if (ls_entry == NULL) {
+ if (IS_OSPF6_DEBUG_EXAMIN(INTRA_PREFIX))
+ zlog_debug("%s: ls_prfix %s ls_entry not found.",
+ __PRETTY_FUNCTION__,
+ buf);
+ continue;
}
+ lsa = ospf6_lsdb_lookup(o_path->origin.type,
+ o_path->origin.id,
+ o_path->origin.adv_router,
+ oa->lsdb);
+ if (lsa == NULL) {
+ if (IS_OSPF6_DEBUG_EXAMIN(
+ INTRA_PREFIX)) {
+ struct prefix adv_prefix;
- UNSET_FLAG(old_route->flag, OSPF6_ROUTE_REMOVE);
- SET_FLAG(old_route->flag, OSPF6_ROUTE_CHANGE);
- /* Update ospf6 route table and RIB/FIB */
- if (oa->route_table->hook_add)
- (*oa->route_table->hook_add)
- (old_route);
+ ospf6_linkstate_prefix(
+ o_path->origin.adv_router,
+ o_path->origin.id, &adv_prefix);
+ prefix2str(&adv_prefix, buf,
+ sizeof(buf));
+ zlog_debug("%s: adv_router %s lsa not found",
+ __PRETTY_FUNCTION__,
+ buf);
+ }
+ continue;
+ }
+ intra_prefix_lsa =
+ (struct ospf6_intra_prefix_lsa *)
+ OSPF6_LSA_HEADER_END(lsa->header);
+
+ if (intra_prefix_lsa->ref_adv_router
+ == oa->ospf6->router_id) {
+ ifp = if_lookup_prefix(
+ &old_route->prefix,
+ VRF_DEFAULT);
+ if (ifp)
+ ospf6_route_add_nexthop(
+ old_route,
+ ifp->ifindex,
+ NULL);
+ } else {
+ ospf6_route_merge_nexthops(old_route,
+ ls_entry);
+ }
+ }
+
+ if (IS_OSPF6_DEBUG_EXAMIN(INTRA_PREFIX)) {
+ prefix2str(&route->prefix, buf, sizeof(buf));
+ zlog_debug("%s: route %s %p with final effective paths %u nh%u",
+ __PRETTY_FUNCTION__, buf,
+ (void *)old_route,
+ old_route->paths ?
+ listcount(old_route->paths) : 0,
+ listcount(old_route->nh_list));
}
+
+ /* used in intra_route_calculation() to add to
+ * global ospf6 route table.
+ */
+ UNSET_FLAG(old_route->flag, OSPF6_ROUTE_REMOVE);
+ SET_FLAG(old_route->flag, OSPF6_ROUTE_ADD);
+ /* Update ospf6 route table and RIB/FIB */
+ if (oa->route_table->hook_add)
+ (*oa->route_table->hook_add)(old_route);
/* Delete the new route its info added to existing
* route.
*/
@@ -1642,7 +1710,8 @@ void ospf6_intra_prefix_lsa_add(struct ospf6_lsa *lsa)
route->path.metric_type = 1;
route->path.cost =
ls_entry->path.cost + ntohs(op->prefix_metric);
-
+ memcpy(&route->path.ls_prefix, &ls_prefix,
+ sizeof(struct prefix));
if (direct_connect) {
ifp = if_lookup_prefix(&route->prefix, VRF_DEFAULT);
if (ifp)
@@ -1660,20 +1729,21 @@ void ospf6_intra_prefix_lsa_add(struct ospf6_lsa *lsa)
if (old && (ospf6_route_cmp(route, old) == 0)) {
if (IS_OSPF6_DEBUG_EXAMIN(INTRA_PREFIX)) {
prefix2str(&route->prefix, buf, sizeof(buf));
- zlog_debug(" Update route: %s old cost %u new cost %u nh count %u paths %u",
- buf,
+ zlog_debug("%s Update route: %s old cost %u new cost %u paths %u nh %u",
+ __PRETTY_FUNCTION__, buf,
old->path.cost, route->path.cost,
- listcount(route->nh_list),
- listcount(route->paths));
+ listcount(route->paths),
+ listcount(route->nh_list));
}
ospf6_intra_prefix_route_ecmp_path(oa, old, route);
} else {
if (IS_OSPF6_DEBUG_EXAMIN(INTRA_PREFIX)) {
prefix2str(&route->prefix, buf, sizeof(buf));
- zlog_debug(" route %s add with cost %u nh %u paths %u",
- buf, route->path.cost,
- listcount(route->nh_list),
- listcount(route->paths));
+ zlog_debug("%s route %s add with cost %u paths %u nh %u",
+ __PRETTY_FUNCTION__, buf,
+ route->path.cost,
+ listcount(route->paths),
+ listcount(route->nh_list));
}
ospf6_route_add(route, oa->route_table);
}
@@ -1684,12 +1754,102 @@ void ospf6_intra_prefix_lsa_add(struct ospf6_lsa *lsa)
zlog_debug("Trailing garbage ignored");
}
+static void ospf6_intra_prefix_lsa_remove_update_route(struct ospf6_lsa *lsa,
+ struct ospf6_area *oa,
+ struct ospf6_route *route)
+{
+ struct listnode *anode, *anext;
+ struct listnode *nnode, *rnode, *rnext;
+ struct ospf6_nexthop *nh, *rnh;
+ struct ospf6_path *o_path;
+ bool nh_updated = false;
+ char buf[PREFIX2STR_BUFFER];
+
+ /* Iterate all paths of route to find matching
+ * with LSA remove info.
+ * If route->path is same, replace
+ * from paths list.
+ */
+ for (ALL_LIST_ELEMENTS(route->paths, anode, anext, o_path)) {
+ if ((o_path->origin.type != lsa->header->type) ||
+ (o_path->origin.adv_router != lsa->header->adv_router) ||
+ (o_path->origin.id != lsa->header->id))
+ continue;
+
+ if (IS_OSPF6_DEBUG_EXAMIN(INTRA_PREFIX)) {
+ prefix2str(&route->prefix, buf, sizeof(buf));
+ zlog_debug(
+ "%s: route %s path found with cost %u nh %u to remove.",
+ __PRETTY_FUNCTION__, buf, o_path->cost,
+ listcount(o_path->nh_list));
+ }
+
+ /* Remove found path's nh_list from
+ * the route's nh_list.
+ */
+ for (ALL_LIST_ELEMENTS_RO(o_path->nh_list, nnode, nh)) {
+ for (ALL_LIST_ELEMENTS(route->nh_list, rnode,
+ rnext, rnh)) {
+ if (!ospf6_nexthop_is_same(rnh, nh))
+ continue;
+ listnode_delete(route->nh_list, rnh);
+ ospf6_nexthop_delete(rnh);
+ }
+ }
+ /* Delete the path from route's
+ * path list
+ */
+ listnode_delete(route->paths, o_path);
+ ospf6_path_free(o_path);
+ nh_updated = true;
+ break;
+ }
+
+ if (nh_updated) {
+ /* Iterate all paths and merge nexthop,
+ * unless any of the nexthop similar to
+ * ones deleted as part of path deletion.
+ */
+ for (ALL_LIST_ELEMENTS(route->paths, anode, anext, o_path))
+ ospf6_merge_nexthops(route->nh_list, o_path->nh_list);
+
+
+ if (IS_OSPF6_DEBUG_EXAMIN(INTRA_PREFIX)) {
+ prefix2str(&route->prefix, buf, sizeof(buf));
+ zlog_debug("%s: route %s update paths %u nh %u",
+ __PRETTY_FUNCTION__, buf,
+ route->paths ? listcount(route->paths) : 0,
+ route->nh_list ? listcount(route->nh_list)
+ : 0);
+ }
+
+ /* Update Global Route table and
+ * RIB/FIB with effective
+ * nh_list
+ */
+ if (oa->route_table->hook_add)
+ (*oa->route_table->hook_add)(route);
+
+ /* route's primary path is similar
+ * to LSA, replace route's primary
+ * path with route's paths list
+ * head.
+ */
+ if ((route->path.origin.id == lsa->header->id) &&
+ (route->path.origin.adv_router ==
+ lsa->header->adv_router)) {
+ ospf6_intra_prefix_update_route_origin(route);
+ }
+ }
+
+}
+
void ospf6_intra_prefix_lsa_remove(struct ospf6_lsa *lsa)
{
struct ospf6_area *oa;
struct ospf6_intra_prefix_lsa *intra_prefix_lsa;
struct prefix prefix;
- struct ospf6_route *route, *nroute, *route_to_del;
+ struct ospf6_route *route, *nroute;
int prefix_num;
struct ospf6_prefix *op;
char *start, *current, *end;
@@ -1717,22 +1877,6 @@ void ospf6_intra_prefix_lsa_remove(struct ospf6_lsa *lsa)
break;
prefix_num--;
- route_to_del = ospf6_route_create();
-
- memset(&route_to_del->prefix, 0, sizeof(struct prefix));
- route_to_del->prefix.family = AF_INET6;
- route_to_del->prefix.prefixlen = op->prefix_length;
- ospf6_prefix_in6_addr(&route_to_del->prefix.u.prefix6, op);
-
- route_to_del->type = OSPF6_DEST_TYPE_NETWORK;
- route_to_del->path.origin.type = lsa->header->type;
- route_to_del->path.origin.id = lsa->header->id;
- route_to_del->path.origin.adv_router = lsa->header->adv_router;
- route_to_del->path.prefix_options = op->prefix_options;
- route_to_del->path.area_id = oa->area_id;
- route_to_del->path.type = OSPF6_PATH_TYPE_INTRA;
- route_to_del->path.metric_type = 1;
-
memset(&prefix, 0, sizeof(struct prefix));
prefix.family = AF_INET6;
prefix.prefixlen = op->prefix_length;
@@ -1757,134 +1901,8 @@ void ospf6_intra_prefix_lsa_remove(struct ospf6_lsa *lsa)
* after removal of one of the path.
*/
if (listcount(route->paths) > 1) {
- struct listnode *anode, *anext;
- struct listnode *nnode, *rnode, *rnext;
- struct ospf6_nexthop *nh, *rnh;
- struct ospf6_path *o_path;
- bool nh_updated = false;
-
- /* Iterate all paths of route to find maching
- * with LSA remove info.
- * If route->path is same, replace
- * from paths list.
- */
- for (ALL_LIST_ELEMENTS(route->paths, anode,
- anext, o_path)) {
- if ((o_path->origin.type !=
- lsa->header->type) ||
- (o_path->origin.adv_router !=
- lsa->header->adv_router) ||
- (o_path->origin.id !=
- lsa->header->id))
- continue;
-
- if (IS_OSPF6_DEBUG_EXAMIN
- (INTRA_PREFIX)) {
- prefix2str(&prefix, buf,
- sizeof(buf));
- zlog_debug(
- "%s: route %s path found with cost %u nh %u to remove.",
- __PRETTY_FUNCTION__,
- buf, o_path->cost,
- listcount(
- o_path->nh_list));
- }
- /* Remove old route from global
- * ospf6 route table.
- * nh_update section will add
- * back with effective nh.
- */
- if (oa->route_table->hook_remove)
- (*oa->route_table->hook_remove)
- (route);
- /* Remove found path's nh_list from
- * the route's nh_list.
- */
- for (ALL_LIST_ELEMENTS_RO(
- o_path->nh_list,
- nnode, nh)) {
- for (ALL_LIST_ELEMENTS(
- route->nh_list,
- rnode, rnext, rnh)) {
- if (
- !ospf6_nexthop_is_same(
- rnh, nh))
- continue;
- listnode_delete(
- route->nh_list,
- rnh);
- ospf6_nexthop_delete(
- rnh);
- }
- }
- /* Delete the path from route's
- * path list
- */
- listnode_delete(route->paths, o_path);
- ospf6_path_free(o_path);
- nh_updated = true;
- break;
- }
-
- if (nh_updated) {
-
- /* Iterate all paths and merge nexthop,
- * unlesss any of the nexthop similar to
- * ones deleted as part of path
- * deletion.
- */
- for (ALL_LIST_ELEMENTS(route->paths,
- anode, anext, o_path)) {
- ospf6_merge_nexthops(
- route->nh_list,
- o_path->nh_list);
- }
-
- if (IS_OSPF6_DEBUG_EXAMIN(
- INTRA_PREFIX)) {
- prefix2str(&route->prefix, buf,
- sizeof(buf));
- assert(route->nh_list);
- zlog_debug("%s: route %s update paths %u nh %u"
- , __PRETTY_FUNCTION__,
- buf,
- listcount(route->paths),
- listcount(
- route->nh_list));
- }
-
- /* route's primary path is similar
- * to LSA, replace route's primary
- * path with route's paths list
- * head.
- */
- if ((route->path.origin.id ==
- lsa->header->id) &&
- (route->path.origin.adv_router
- == lsa->header->adv_router)) {
- struct ospf6_path *h_path;
-
- h_path = (struct ospf6_path *)
- listgetdata(listhead(
- route->paths));
- route->path.origin.type =
- h_path->origin.type;
- route->path.origin.id =
- h_path->origin.id;
- route->path.origin.adv_router =
- h_path->origin.adv_router;
- }
-
- /* Update Global Route table and
- * RIB/FIB with effective
- * nh_list
- */
- if (oa->route_table->hook_add)
- (*oa->route_table->hook_add)
- (route);
- }
- continue;
-
+ ospf6_intra_prefix_lsa_remove_update_route(
+ lsa, oa, route);
} else {
if (route->path.origin.type != lsa->header->type
@@ -1896,9 +1914,11 @@ void ospf6_intra_prefix_lsa_remove(struct ospf6_lsa *lsa)
if (IS_OSPF6_DEBUG_EXAMIN(INTRA_PREFIX)) {
prefix2str(&route->prefix, buf,
sizeof(buf));
- zlog_debug("route remove %s with path %u cost %u nh %u",
- buf, route->path.type,
+ zlog_debug("%s: route remove %s with path type %u cost %u paths %u nh %u",
+ __PRETTY_FUNCTION__, buf,
+ route->path.type,
route->path.cost,
+ listcount(route->paths),
listcount(route->nh_list));
}
ospf6_route_remove(route, oa->route_table);
@@ -1906,8 +1926,6 @@ void ospf6_intra_prefix_lsa_remove(struct ospf6_lsa *lsa)
}
if (route)
ospf6_route_unlock(route);
-
- ospf6_route_delete(route_to_del);
}
if (current != end && IS_OSPF6_DEBUG_EXAMIN(INTRA_PREFIX))
@@ -2029,8 +2047,10 @@ void ospf6_intra_brouter_calculation(struct ospf6_area *oa)
uint32_t brouter_id;
char brouter_name[16];
- if (IS_OSPF6_DEBUG_BROUTER_SPECIFIC_AREA_ID(oa->area_id))
- zlog_info("border-router calculation for area %s", oa->name);
+ if (IS_OSPF6_DEBUG_BROUTER_SPECIFIC_AREA_ID(oa->area_id) ||
+ IS_OSPF6_DEBUG_ROUTE(MEMORY))
+ zlog_info("%s: border-router calculation for area %s",
+ __PRETTY_FUNCTION__, oa->name);
hook_add = oa->ospf6->brouter_table->hook_add;
hook_remove = oa->ospf6->brouter_table->hook_remove;
@@ -2096,6 +2116,7 @@ void ospf6_intra_brouter_calculation(struct ospf6_area *oa)
for (brouter = ospf6_route_head(oa->ospf6->brouter_table); brouter;
brouter = nbrouter) {
+
/*
* brouter may have been "deleted" in the last loop iteration.
* If this is the case there is still 1 final refcount lock
@@ -2104,6 +2125,8 @@ void ospf6_intra_brouter_calculation(struct ospf6_area *oa)
* skip processing the deleted route.
*/
if (brouter->lock == 1) {
+ if (IS_OSPF6_DEBUG_ROUTE(MEMORY))
+ ospf6_brouter_debug_print(brouter);
nbrouter = ospf6_route_next(brouter);
continue;
} else {
@@ -2155,8 +2178,14 @@ void ospf6_intra_brouter_calculation(struct ospf6_area *oa)
brouter_id)
|| IS_OSPF6_DEBUG_BROUTER_SPECIFIC_AREA_ID(
oa->area_id))
- zlog_info("brouter %s disappears via area %s",
- brouter_name, oa->name);
+ zlog_info("%s: brouter %s disappears via area %s",
+ __PRETTY_FUNCTION__, brouter_name,
+ oa->name);
+ /* This is used to protect nbrouter from removed from
+ * the table. For an example, ospf6_abr_examin_summary,
+ * removes brouters which are marked for remove.
+ */
+ oa->intra_brouter_calc = 1;
ospf6_route_remove(brouter, oa->ospf6->brouter_table);
brouter = NULL;
} else if (CHECK_FLAG(brouter->flag, OSPF6_ROUTE_ADD)
@@ -2166,8 +2195,9 @@ void ospf6_intra_brouter_calculation(struct ospf6_area *oa)
brouter_id)
|| IS_OSPF6_DEBUG_BROUTER_SPECIFIC_AREA_ID(
oa->area_id))
- zlog_info("brouter %s appears via area %s",
- brouter_name, oa->name);
+ zlog_info("%s: brouter %s appears via area %s",
+ __PRETTY_FUNCTION__, brouter_name,
+ oa->name);
/* newly added */
if (hook_add)
@@ -2187,11 +2217,14 @@ void ospf6_intra_brouter_calculation(struct ospf6_area *oa)
UNSET_FLAG(brouter->flag, OSPF6_ROUTE_ADD);
UNSET_FLAG(brouter->flag, OSPF6_ROUTE_CHANGE);
}
+ /* Reset for nbrouter */
+ oa->intra_brouter_calc = 0;
}
- if (IS_OSPF6_DEBUG_BROUTER_SPECIFIC_AREA_ID(oa->area_id))
- zlog_info("border-router calculation for area %s: done",
- oa->name);
+ if (IS_OSPF6_DEBUG_BROUTER_SPECIFIC_AREA_ID(oa->area_id) ||
+ IS_OSPF6_DEBUG_ROUTE(MEMORY))
+ zlog_info("%s: border-router calculation for area %s: done",
+ __PRETTY_FUNCTION__, oa->name);
}
struct ospf6_lsa_handler router_handler = {.lh_type = OSPF6_LSTYPE_ROUTER,
diff --git a/ospf6d/ospf6_route.c b/ospf6d/ospf6_route.c
index 8be00d9b41..15d8eb6cf2 100644
--- a/ospf6d/ospf6_route.c
+++ b/ospf6d/ospf6_route.c
@@ -611,9 +611,10 @@ struct ospf6_route *ospf6_route_add(struct ospf6_route *route,
prefix2str(&route->prefix, buf, sizeof(buf));
if (IS_OSPF6_DEBUG_ROUTE(MEMORY))
- zlog_debug("%s %p: route add %p: %s",
+ zlog_debug("%s %p: route add %p: %s paths %u nh %u",
ospf6_route_table_name(table), (void *)table,
- (void *)route, buf);
+ (void *)route, buf, listcount(route->paths),
+ listcount(route->nh_list));
else if (IS_OSPF6_DEBUG_ROUTE(TABLE))
zlog_debug("%s: route add: %s", ospf6_route_table_name(table),
buf);
@@ -664,11 +665,13 @@ struct ospf6_route *ospf6_route_add(struct ospf6_route *route,
if (IS_OSPF6_DEBUG_ROUTE(MEMORY))
zlog_debug(
- "%s %p: route add %p cost %u nh %u: update of %p old cost %u nh %u",
+ "%s %p: route add %p cost %u paths %u nh %u: update of %p cost %u paths %u nh %u",
ospf6_route_table_name(table), (void *)table,
(void *)route, route->path.cost,
+ listcount(route->paths),
listcount(route->nh_list), (void *)old,
- old->path.cost, listcount(old->nh_list));
+ old->path.cost, listcount(old->paths),
+ listcount(old->nh_list));
else if (IS_OSPF6_DEBUG_ROUTE(TABLE))
zlog_debug("%s: route add: update",
ospf6_route_table_name(table));
@@ -922,10 +925,11 @@ struct ospf6_route *ospf6_route_next(struct ospf6_route *route)
struct ospf6_route *next = route->next;
if (IS_OSPF6_DEBUG_ROUTE(MEMORY))
- zlog_info("%s %p: route next: %p<-[%p]->%p",
+ zlog_info("%s %p: route next: %p<-[%p]->%p , route ref count %u",
ospf6_route_table_name(route->table),
(void *)route->table, (void *)route->prev,
- (void *)route, (void *)route->next);
+ (void *)route, (void *)route->next,
+ route->lock);
ospf6_route_unlock(route);
if (next)
diff --git a/ospf6d/ospf6_route.h b/ospf6d/ospf6_route.h
index a69e9a920f..02002533e6 100644
--- a/ospf6d/ospf6_route.h
+++ b/ospf6d/ospf6_route.h
@@ -91,6 +91,9 @@ struct ospf6_path {
/* Cost */
uint8_t metric_type;
uint32_t cost;
+
+ struct prefix ls_prefix;
+
union {
uint32_t cost_e2;
uint32_t cost_config;
diff --git a/ospf6d/ospf6_top.c b/ospf6d/ospf6_top.c
index e4a4891526..7bf099fbbf 100644
--- a/ospf6d/ospf6_top.c
+++ b/ospf6d/ospf6_top.c
@@ -97,7 +97,8 @@ static void ospf6_top_route_hook_remove(struct ospf6_route *route)
static void ospf6_top_brouter_hook_add(struct ospf6_route *route)
{
- if (IS_OSPF6_DEBUG_EXAMIN(AS_EXTERNAL)) {
+ if (IS_OSPF6_DEBUG_EXAMIN(AS_EXTERNAL) ||
+ IS_OSPF6_DEBUG_BROUTER) {
uint32_t brouter_id;
char brouter_name[16];
@@ -116,15 +117,17 @@ static void ospf6_top_brouter_hook_add(struct ospf6_route *route)
static void ospf6_top_brouter_hook_remove(struct ospf6_route *route)
{
- if (IS_OSPF6_DEBUG_EXAMIN(AS_EXTERNAL)) {
+ if (IS_OSPF6_DEBUG_EXAMIN(AS_EXTERNAL) ||
+ IS_OSPF6_DEBUG_BROUTER) {
uint32_t brouter_id;
char brouter_name[16];
brouter_id = ADV_ROUTER_IN_PREFIX(&route->prefix);
inet_ntop(AF_INET, &brouter_id, brouter_name,
sizeof(brouter_name));
- zlog_debug("%s: brouter %s del with nh count %u",
- __PRETTY_FUNCTION__, brouter_name,
+ zlog_debug("%s: brouter %p %s del with adv router %x nh %u",
+ __PRETTY_FUNCTION__, (void *)route, brouter_name,
+ route->path.origin.adv_router,
listcount(route->nh_list));
}
route->flag |= OSPF6_ROUTE_REMOVE;
diff --git a/ospf6d/ospf6d.c b/ospf6d/ospf6d.c
index 8d6d5b4a26..db61fe087b 100644
--- a/ospf6d/ospf6d.c
+++ b/ospf6d/ospf6d.c
@@ -97,7 +97,7 @@ DEFUN_NOSH (show_debugging_ospf6,
DEBUG_STR
OSPF6_STR)
{
- vty_out(vty, "OSPF6 debugging status:");
+ vty_out(vty, "OSPF6 debugging status:\n");
config_write_ospf6_debug(vty);
diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c
index 6487596706..141ece9c7a 100644
--- a/ospfd/ospf_zebra.c
+++ b/ospfd/ospf_zebra.c
@@ -448,13 +448,13 @@ void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p,
count++;
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
- char buf[2][INET_ADDRSTRLEN];
+ char buf[2][PREFIX2STR_BUFFER];
+
zlog_debug(
- "Zebra: Route add %s/%d nexthop %s, ifindex=%d",
- inet_ntop(AF_INET, &p->prefix, buf[0],
- sizeof(buf[0])),
- p->prefixlen, inet_ntop(AF_INET, &path->nexthop,
- buf[1], sizeof(buf[1])),
+ "Zebra: Route add %s nexthop %s, ifindex=%d",
+ prefix2str(p, buf[0], sizeof(buf[0])),
+ inet_ntop(AF_INET, &path->nexthop,
+ buf[1], sizeof(buf[1])),
path->ifindex);
}
}
@@ -476,10 +476,9 @@ void ospf_zebra_delete(struct ospf *ospf, struct prefix_ipv4 *p,
memcpy(&api.prefix, p, sizeof(*p));
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
- char buf[INET_ADDRSTRLEN];
- zlog_debug("Zebra: Route delete %s/%d",
- inet_ntop(AF_INET, &p->prefix, buf, sizeof(buf[0])),
- p->prefixlen);
+ char buf[PREFIX2STR_BUFFER];
+ zlog_debug("Zebra: Route delete %s",
+ prefix2str(p, buf, sizeof(buf)));
}
zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);
@@ -499,9 +498,11 @@ void ospf_zebra_add_discard(struct ospf *ospf, struct prefix_ipv4 *p)
zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
- if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
- zlog_debug("Zebra: Route add discard %s/%d",
- inet_ntoa(p->prefix), p->prefixlen);
+ if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
+ char buf[PREFIX2STR_BUFFER];
+ zlog_debug("Zebra: Route add discard %s",
+ prefix2str(p, buf, sizeof(buf)));
+ }
}
void ospf_zebra_delete_discard(struct ospf *ospf, struct prefix_ipv4 *p)
@@ -518,9 +519,11 @@ void ospf_zebra_delete_discard(struct ospf *ospf, struct prefix_ipv4 *p)
zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);
- if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
- zlog_debug("Zebra: Route delete discard %s/%d",
- inet_ntoa(p->prefix), p->prefixlen);
+ if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
+ char buf[PREFIX2STR_BUFFER];
+ zlog_debug("Zebra: Route delete discard %s",
+ prefix2str(p, buf, sizeof(buf)));
+ }
}
struct ospf_external *ospf_external_lookup(struct ospf *ospf, uint8_t type,
@@ -868,12 +871,13 @@ int ospf_redistribute_check(struct ospf *ospf, struct external_info *ei,
if (DISTRIBUTE_LIST(ospf, type))
if (access_list_apply(DISTRIBUTE_LIST(ospf, type), p)
== FILTER_DENY) {
- if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
+ if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
+ char buf[PREFIX2STR_BUFFER];
zlog_debug(
- "Redistribute[%s]: %s/%d filtered by ditribute-list.",
+ "Redistribute[%s]: %s filtered by distribute-list.",
ospf_redist_string(type),
- inet_ntoa(p->prefix),
- p->prefixlen);
+ prefix2str(p, buf, sizeof(buf)));
+ }
return 0;
}
@@ -890,11 +894,13 @@ int ospf_redistribute_check(struct ospf *ospf, struct external_info *ei,
if (ret == RMAP_DENYMATCH) {
ei->route_map_set = save_values;
- if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
+ if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) {
+ char buf[PREFIX2STR_BUFFER];
zlog_debug(
- "Redistribute[%s]: %s/%d filtered by route-map.",
+ "Redistribute[%s]: %s filtered by route-map.",
ospf_redist_string(type),
- inet_ntoa(p->prefix), p->prefixlen);
+ prefix2str(p, buf, sizeof(buf)));
+ }
return 0;
}
diff --git a/pbrd/pbr_nht.c b/pbrd/pbr_nht.c
index 1ccf3ebffa..5be96e86d0 100644
--- a/pbrd/pbr_nht.c
+++ b/pbrd/pbr_nht.c
@@ -470,6 +470,18 @@ void pbr_nht_change_group(const char *name)
pbr_nht_install_nexthop_group(pnhgc, nhgc->nhg);
}
+/*
+ * Since we are writing into the name field which is PBR_MAP_NAMELEN
+ * size, we are expecting this to field to be at max 100 bytes.
+ * Newer compilers understand that the %s portion may be up to
+ * 100 bytes( because of the size of the string. The %u portion
+ * is expected to be 10 bytes. So in `theory` there are situations
+ * where we might truncate. The reality this is never going to
+ * happen( who is going to create a nexthop group name that is
+ * over say 30 characters? ). As such we are expecting the
+ * calling function to subtract 10 from the size_t l before
+ * we pass it in to get around this new gcc fun.
+ */
char *pbr_nht_nexthop_make_name(char *name, size_t l,
uint32_t seqno, char *buffer)
{
@@ -485,7 +497,7 @@ void pbr_nht_add_individual_nexthop(struct pbr_map_sequence *pbrms)
struct pbr_nexthop_cache lookup;
memset(&find, 0, sizeof(find));
- pbr_nht_nexthop_make_name(pbrms->parent->name, PBR_MAP_NAMELEN,
+ pbr_nht_nexthop_make_name(pbrms->parent->name, PBR_MAP_NAMELEN - 10,
pbrms->seqno, find.name);
if (!pbrms->internal_nhg_name)
pbrms->internal_nhg_name = XSTRDUP(MTYPE_TMP, find.name);
diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c
index 475ad86b58..ba5c49ad5c 100644
--- a/pbrd/pbr_vty.c
+++ b/pbrd/pbr_vty.c
@@ -227,6 +227,11 @@ DEFPY(pbr_map_nexthop, pbr_map_nexthop_cmd,
memset(&nhop, 0, sizeof(nhop));
nhop.vrf_id = vrf->vrf_id;
+ /*
+ * Make SA happy. CLIPPY is not going to give us a NULL
+ * addr.
+ */
+ assert(addr);
if (addr->sa.sa_family == AF_INET) {
nhop.gate.ipv4.s_addr = addr->sin.sin_addr.s_addr;
if (intf) {
@@ -264,7 +269,7 @@ DEFPY(pbr_map_nexthop, pbr_map_nexthop_cmd,
if (pbrms->nhg)
nh = nexthop_exists(pbrms->nhg, &nhop);
else {
- char buf[100];
+ char buf[PBR_MAP_NAMELEN];
if (no) {
vty_out(vty, "No nexthops to delete");
@@ -275,7 +280,7 @@ DEFPY(pbr_map_nexthop, pbr_map_nexthop_cmd,
pbrms->internal_nhg_name =
XSTRDUP(MTYPE_TMP,
pbr_nht_nexthop_make_name(pbrms->parent->name,
- PBR_MAP_NAMELEN,
+ PBR_MAP_NAMELEN - 10,
pbrms->seqno,
buf));
nh = NULL;
diff --git a/pbrd/pbr_zebra.c b/pbrd/pbr_zebra.c
index 4e5b5f3dde..cdacfad4b4 100644
--- a/pbrd/pbr_zebra.c
+++ b/pbrd/pbr_zebra.c
@@ -60,7 +60,8 @@ struct pbr_interface *pbr_if_new(struct interface *ifp)
return 0;
}
- return (pbr_ifp);
+ ifp->info = pbr_ifp;
+ return pbr_ifp;
}
/* Inteface addition message from zebra. */
@@ -74,12 +75,8 @@ static int interface_add(int command, struct zclient *zclient,
if (!ifp)
return 0;
- if (!ifp->info) {
- struct pbr_interface *pbr_ifp;
-
- pbr_ifp = pbr_if_new(ifp);
- ifp->info = pbr_ifp;
- }
+ if (!ifp->info)
+ pbr_if_new(ifp);
return 0;
}
@@ -482,6 +479,7 @@ static void pbr_encode_pbr_map_sequence(struct stream *s,
stream_putw(s, 0); /* src port */
pbr_encode_pbr_map_sequence_prefix(s, pbrms->dst, family);
stream_putw(s, 0); /* dst port */
+ stream_putl(s, 0); /* fwmark */
if (pbrms->nhgrp_name)
stream_putl(s, pbr_nht_get_table(pbrms->nhgrp_name));
else if (pbrms->nhg)
@@ -494,7 +492,7 @@ void pbr_send_pbr_map(struct pbr_map_sequence *pbrms,
{
struct pbr_map *pbrm = pbrms->parent;
struct stream *s;
- uint64_t is_installed = 1 << pmi->install_bit;
+ uint64_t is_installed = (uint64_t)1 << pmi->install_bit;
is_installed &= pbrms->installed;
diff --git a/pimd/mtracebis.c b/pimd/mtracebis.c
index ce83b420b4..a073fa70be 100644
--- a/pimd/mtracebis.c
+++ b/pimd/mtracebis.c
@@ -22,6 +22,7 @@
#include "pim_igmp_mtrace.h"
#include "checksum.h"
+#include "prefix.h"
#include "mtracebis_routeget.h"
#include <sys/select.h>
@@ -50,7 +51,8 @@
static const char *progname;
static void usage(void)
{
- fprintf(stderr, "Usage : %s <multicast source>\n", progname);
+ fprintf(stderr, "Usage : %s <multicast source> [<multicast group>]\n",
+ progname);
}
static void version(void)
{
@@ -170,9 +172,21 @@ static void print_fwd_code(uint32_t fwd_code)
static void print_rsp(struct igmp_mtrace_rsp *rsp)
{
print_host(rsp->outgoing);
- if (rsp->fwd_code == 0) {
+ if (rsp->fwd_code == 0 || rsp->fwd_code == MTRACE_FWD_CODE_REACHED_RP) {
print_rtg_proto(rsp->rtg_proto);
printf(" ");
+ if (rsp->fwd_code == MTRACE_FWD_CODE_REACHED_RP)
+ printf("(RP) ");
+ if (rsp->rtg_proto == MTRACE_RTG_PROTO_PIM) {
+ switch (rsp->src_mask) {
+ case MTRACE_SRC_MASK_GROUP:
+ printf("(*,G) ");
+ break;
+ case MTRACE_SRC_MASK_SOURCE:
+ printf("(S,G) ");
+ break;
+ }
+ }
print_fwd_ttl(rsp->fwd_ttl);
} else {
print_fwd_code(rsp->fwd_code);
@@ -351,6 +365,7 @@ static bool check_end(struct igmp_mtrace *mtrace, int hops)
int main(int argc, char *const argv[])
{
struct in_addr mc_source;
+ struct in_addr mc_group;
struct in_addr iface_addr;
struct in_addr gw_addr;
struct in_addr mtrace_addr;
@@ -370,6 +385,7 @@ int main(int argc, char *const argv[])
int i, j;
char ifname[IF_NAMESIZE];
char mbuf[MTRACE_BUF_LEN];
+ bool not_group;
mtrace_addr.s_addr = inet_addr("224.0.1.32");
@@ -385,7 +401,7 @@ int main(int argc, char *const argv[])
else
progname = argv[0];
- if (argc != 2) {
+ if (argc != 2 && argc != 3) {
usage();
exit(EXIT_FAILURE);
}
@@ -416,11 +432,28 @@ int main(int argc, char *const argv[])
}
if (inet_pton(AF_INET, argv[1], &mc_source) != 1) {
usage();
- fprintf(stderr, "%s: %s not a valid IPv4 address\n", argv[0],
+ fprintf(stderr, "%s: %s is not a valid IPv4 address\n", argv[0],
argv[1]);
exit(EXIT_FAILURE);
}
+ mc_group.s_addr = 0;
+ not_group = false;
+
+ if (argc == 3) {
+ if (inet_pton(AF_INET, argv[2], &mc_group) != 1)
+ not_group = true;
+ if (!not_group && !IPV4_CLASS_DE(ntohl(mc_group.s_addr)))
+ not_group = true;
+ }
+
+ if (not_group) {
+ usage();
+ fprintf(stderr, "%s: %s is not a valid IPv4 group address\n",
+ argv[0], argv[2]);
+ exit(EXIT_FAILURE);
+ }
+
ifindex = routeget(mc_source, &iface_addr, &gw_addr);
if (ifindex < 0) {
fprintf(stderr, "%s: failed to get route to source %s\n",
@@ -441,7 +474,7 @@ int main(int argc, char *const argv[])
mtrace.type = PIM_IGMP_MTRACE_QUERY_REQUEST;
mtrace.hops = hops;
mtrace.checksum = 0;
- mtrace.grp_addr.s_addr = 0;
+ mtrace.grp_addr = mc_group;
mtrace.src_addr = mc_source;
mtrace.dst_addr = iface_addr;
mtrace.rsp_addr = unicast ? iface_addr : mtrace_addr;
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index ff7238ae97..5996a3ac96 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -1547,27 +1547,20 @@ int pim_if_connected_to_source(struct interface *ifp, struct in_addr src)
return 0;
}
-int pim_if_is_loopback(struct pim_instance *pim, struct interface *ifp)
+bool pim_if_is_loopback(struct interface *ifp)
{
- if (if_is_loopback(ifp))
- return 1;
-
- if (strcmp(ifp->name, pim->vrf->name) == 0)
- return 1;
+ if (if_is_loopback(ifp) || if_is_vrf(ifp))
+ return true;
- return 0;
+ return false;
}
-int pim_if_is_vrf_device(struct interface *ifp)
+bool pim_if_is_vrf_device(struct interface *ifp)
{
- struct vrf *vrf;
+ if (if_is_vrf(ifp))
+ return true;
- RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if (strncmp(ifp->name, vrf->name, strlen(ifp->name)) == 0)
- return 1;
- }
-
- return 0;
+ return false;
}
int pim_if_ifchannel_count(struct pim_interface *pim_ifp)
diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h
index 5ecd07d227..cf025cbd4a 100644
--- a/pimd/pim_iface.h
+++ b/pimd/pim_iface.h
@@ -207,9 +207,9 @@ void pim_if_create_pimreg(struct pim_instance *pim);
int pim_if_connected_to_source(struct interface *ifp, struct in_addr src);
int pim_update_source_set(struct interface *ifp, struct in_addr source);
-int pim_if_is_loopback(struct pim_instance *pim, struct interface *ifp);
+bool pim_if_is_loopback(struct interface *ifp);
-int pim_if_is_vrf_device(struct interface *ifp);
+bool pim_if_is_vrf_device(struct interface *ifp);
int pim_if_ifchannel_count(struct pim_interface *pim_ifp);
#endif /* PIM_IFACE_H */
diff --git a/pimd/pim_igmp_mtrace.c b/pimd/pim_igmp_mtrace.c
index 8274d08a26..d3ae185709 100644
--- a/pimd/pim_igmp_mtrace.c
+++ b/pimd/pim_igmp_mtrace.c
@@ -17,6 +17,8 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/* based on draft-ietf-idmr-traceroute-ipm-07 */
+
#include <zebra.h>
#include "pimd.h"
@@ -56,15 +58,131 @@ static struct in_addr mtrace_primary_address(struct interface *ifp)
return any;
}
+static bool mtrace_fwd_info_weak(struct pim_instance *pim,
+ struct igmp_mtrace *mtracep,
+ struct igmp_mtrace_rsp *rspp,
+ struct interface **ifpp)
+{
+ struct pim_nexthop nexthop;
+ struct interface *ifp_in;
+ struct in_addr nh_addr;
+ int ret;
+ char nexthop_str[INET_ADDRSTRLEN];
+
+ nh_addr.s_addr = 0;
+
+ memset(&nexthop, 0, sizeof(nexthop));
+
+ ret = pim_nexthop_lookup(pim, &nexthop, mtracep->src_addr, 1);
+
+ if (ret != 0) {
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("mtrace not found neighbor");
+ return false;
+ }
+
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("mtrace pim_nexthop_lookup OK");
+
+ if (PIM_DEBUG_MTRACE)
+ zlog_warn("mtrace next_hop=%s",
+ inet_ntop(nexthop.mrib_nexthop_addr.family,
+ &nexthop.mrib_nexthop_addr.u.prefix,
+ nexthop_str, sizeof(nexthop_str)));
+
+ if (nexthop.mrib_nexthop_addr.family == AF_INET)
+ nh_addr = nexthop.mrib_nexthop_addr.u.prefix4;
+
+ ifp_in = nexthop.interface;
+
+ /* return interface for forwarding mtrace packets */
+ *ifpp = ifp_in;
+
+ /* 6.2.2. 4. Fill in the Incoming Interface Address... */
+ rspp->incoming = mtrace_primary_address(ifp_in);
+ rspp->prev_hop = nh_addr;
+ rspp->in_count = htonl(MTRACE_UNKNOWN_COUNT);
+ rspp->total = htonl(MTRACE_UNKNOWN_COUNT);
+ rspp->rtg_proto = MTRACE_RTG_PROTO_PIM;
+ return true;
+}
+
+static bool mtrace_fwd_info(struct pim_instance *pim,
+ struct igmp_mtrace *mtracep,
+ struct igmp_mtrace_rsp *rspp,
+ struct interface **ifpp)
+{
+ struct prefix_sg sg;
+ struct pim_upstream *up;
+ struct interface *ifp_in;
+ struct in_addr nh_addr;
+ uint32_t total;
+ char up_str[INET_ADDRSTRLEN];
+
+ memset(&sg, 0, sizeof(struct prefix_sg));
+ sg.src = mtracep->src_addr;
+ sg.grp = mtracep->grp_addr;
+
+ up = pim_upstream_find(pim, &sg);
+
+ if (!up) {
+ sg.src.s_addr = 0;
+ up = pim_upstream_find(pim, &sg);
+ }
+
+ if (!up)
+ return false;
+
+ ifp_in = up->rpf.source_nexthop.interface;
+ nh_addr = up->rpf.source_nexthop.mrib_nexthop_addr.u.prefix4;
+ total = htonl(MTRACE_UNKNOWN_COUNT);
+
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("fwd_info: upstream next hop=%s",
+ inet_ntop(AF_INET, &(nh_addr), up_str,
+ sizeof(up_str)));
+
+ if (up->channel_oil)
+ total = up->channel_oil->cc.pktcnt;
+
+ /* return interface for forwarding mtrace packets */
+ *ifpp = ifp_in;
+
+ /* 6.2.2. 4. Fill in the Incoming Interface Address... */
+ rspp->incoming = mtrace_primary_address(ifp_in);
+ rspp->prev_hop = nh_addr;
+ rspp->in_count = htonl(MTRACE_UNKNOWN_COUNT);
+ rspp->total = total;
+ rspp->rtg_proto = MTRACE_RTG_PROTO_PIM;
+
+ /* 6.2.2. 4. Fill in ... S, and Src Mask */
+ if (sg.src.s_addr) {
+ rspp->s = 1;
+ rspp->src_mask = MTRACE_SRC_MASK_SOURCE;
+ } else {
+ rspp->s = 0;
+ rspp->src_mask = MTRACE_SRC_MASK_GROUP;
+ }
+
+ return true;
+}
+
+static void mtrace_rsp_set_fwd_code(struct igmp_mtrace_rsp *mtrace_rspp,
+ enum mtrace_fwd_code fwd_code)
+{
+ if (mtrace_rspp->fwd_code == MTRACE_FWD_CODE_NO_ERROR)
+ mtrace_rspp->fwd_code = fwd_code;
+}
+
static void mtrace_rsp_init(struct igmp_mtrace_rsp *mtrace_rspp)
{
mtrace_rspp->arrival = 0;
mtrace_rspp->incoming.s_addr = 0;
mtrace_rspp->outgoing.s_addr = 0;
mtrace_rspp->prev_hop.s_addr = 0;
- mtrace_rspp->in_count = MTRACE_UNKNOWN_COUNT;
- mtrace_rspp->out_count = MTRACE_UNKNOWN_COUNT;
- mtrace_rspp->total = MTRACE_UNKNOWN_COUNT;
+ mtrace_rspp->in_count = htonl(MTRACE_UNKNOWN_COUNT);
+ mtrace_rspp->out_count = htonl(MTRACE_UNKNOWN_COUNT);
+ mtrace_rspp->total = htonl(MTRACE_UNKNOWN_COUNT);
mtrace_rspp->rtg_proto = 0;
mtrace_rspp->fwd_ttl = 0;
mtrace_rspp->mbz = 0;
@@ -394,7 +512,6 @@ static int mtrace_forward_packet(struct pim_instance *pim, struct ip *ip_hdr)
return mtrace_un_forward_packet(pim, ip_hdr, NULL);
}
-/* 6.5 Sending Traceroute Responses */
static int mtrace_send_mc_response(struct pim_instance *pim,
struct igmp_mtrace *mtracep,
size_t mtrace_len)
@@ -439,6 +556,7 @@ static int mtrace_send_mc_response(struct pim_instance *pim,
return ret;
}
+/* 6.5 Sending Traceroute Responses */
static int mtrace_send_response(struct pim_instance *pim,
struct igmp_mtrace *mtracep, size_t mtrace_len)
{
@@ -496,7 +614,6 @@ int igmp_mtrace_recv_qry_req(struct igmp_sock *igmp, struct ip *ip_hdr,
{
static uint32_t qry_id, qry_src;
char mtrace_buf[MTRACE_HDR_SIZE + MTRACE_MAX_HOPS * MTRACE_RSP_SIZE];
- struct pim_nexthop nexthop;
struct interface *ifp;
struct interface *out_ifp;
struct pim_interface *pim_ifp;
@@ -505,12 +622,13 @@ int igmp_mtrace_recv_qry_req(struct igmp_sock *igmp, struct ip *ip_hdr,
struct igmp_mtrace_rsp *rspp;
struct in_addr nh_addr;
enum mtrace_fwd_code fwd_code = MTRACE_FWD_CODE_NO_ERROR;
- int ret;
size_t r_len;
int last_rsp_ind = 0;
size_t mtrace_len;
uint16_t recv_checksum;
uint16_t checksum;
+ bool reached_source;
+ bool fwd_info;
ifp = igmp->interface;
pim_ifp = ifp->info;
@@ -575,6 +693,8 @@ int igmp_mtrace_recv_qry_req(struct igmp_sock *igmp, struct ip *ip_hdr,
}
/* Unicast query on wrong interface */
fwd_code = MTRACE_FWD_CODE_WRONG_IF;
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("Multicast query on wrong interface");
}
if (qry_id == mtracep->qry_id && qry_src == from.s_addr) {
if (PIM_DEBUG_MTRACE)
@@ -619,16 +739,19 @@ int igmp_mtrace_recv_qry_req(struct igmp_sock *igmp, struct ip *ip_hdr,
/* 6.2.2. Normal Processing */
- /* 6.2.2. 1. */
+ /* 6.2.2. 1. If there is room in the current buffer? */
if (last_rsp_ind == MTRACE_MAX_HOPS) {
+ /* ...there was no room... */
mtracep->rsp[MTRACE_MAX_HOPS - 1].fwd_code =
MTRACE_FWD_CODE_NO_SPACE;
return mtrace_send_response(pim_ifp->pim, mtracep,
igmp_msg_len);
}
- /* calculate new mtrace mtrace lenght with extra response */
+ /* ...insert new response block... */
+
+ /* calculate new mtrace lenght with extra response */
mtrace_len = igmp_msg_len + sizeof(struct igmp_mtrace_rsp);
/* copy received query/request */
@@ -643,84 +766,86 @@ int igmp_mtrace_recv_qry_req(struct igmp_sock *igmp, struct ip *ip_hdr,
/* initialize extra response field */
mtrace_rsp_init(rspp);
+ /* carry over any error noted when receiving the query */
+ rspp->fwd_code = fwd_code;
+
+ /* ...and fill in Query Arrival Time... */
rspp->arrival = htonl(query_arrival_time());
rspp->outgoing = pim_ifp->primary_address;
rspp->out_count = htonl(MTRACE_UNKNOWN_COUNT);
+ rspp->fwd_ttl = 1;
- /* 6.2.2. 2. Attempt to determine forwarding information */
-
- nh_addr.s_addr = 0;
-
- memset(&nexthop, 0, sizeof(nexthop));
- ret = pim_nexthop_lookup(pim, &nexthop, mtracep->src_addr, 1);
-
- if (ret == 0) {
- char nexthop_str[INET_ADDRSTRLEN];
+ /* 6.2.2. 2. Attempt to determine the forwarding information... */
- if (PIM_DEBUG_MTRACE)
- zlog_debug("mtrace pim_nexthop_lookup OK");
+ if (mtracep->grp_addr.s_addr)
+ fwd_info = mtrace_fwd_info(pim, mtracep, rspp, &out_ifp);
+ else
+ fwd_info = mtrace_fwd_info_weak(pim, mtracep, rspp, &out_ifp);
+ /* 6.2.2 3. If no forwarding information... */
+ if (!fwd_info) {
if (PIM_DEBUG_MTRACE)
- zlog_warn("mtrace next_hop=%s",
- inet_ntop(nexthop.mrib_nexthop_addr.family,
- &nexthop.mrib_nexthop_addr.u.prefix,
- nexthop_str, sizeof(nexthop_str)));
-
- if (nexthop.mrib_nexthop_addr.family == AF_INET)
- nh_addr = nexthop.mrib_nexthop_addr.u.prefix4;
- }
- /* 6.4 Forwarding Traceroute Requests: ... Otherwise, ... */
- else {
- if (PIM_DEBUG_MTRACE)
- zlog_debug("mtrace not found neighbor");
- if (!fwd_code)
- rspp->fwd_code = MTRACE_FWD_CODE_NO_ROUTE;
- else
- rspp->fwd_code = fwd_code;
- /* 6.5 Sending Traceroute Responses */
+ zlog_debug("mtrace not found multicast state");
+ mtrace_rsp_set_fwd_code(rspp, MTRACE_FWD_CODE_NO_ROUTE);
+ /* 6.2.2. 3. forward the packet to requester */
return mtrace_send_response(pim, mtracep, mtrace_len);
}
- out_ifp = nexthop.interface;
+ nh_addr = rspp->prev_hop;
- rspp->incoming = mtrace_primary_address(out_ifp);
- rspp->prev_hop = nh_addr;
- rspp->in_count = htonl(MTRACE_UNKNOWN_COUNT);
- rspp->total = htonl(MTRACE_UNKNOWN_COUNT);
- rspp->rtg_proto = MTRACE_RTG_PROTO_PIM;
- rspp->fwd_ttl = 1;
- rspp->s = 1;
- rspp->src_mask = 32;
+ reached_source = false;
if (nh_addr.s_addr == 0) {
- /* no pim? */
+ /* no pim? i.e. 7.5.3. No Previous Hop */
if (!out_ifp->info) {
- rspp->fwd_code = MTRACE_FWD_CODE_NO_MULTICAST;
+ if (PIM_DEBUG_MTRACE)
+ zlog_debug("mtrace not found incoming if w/ pim");
+ mtrace_rsp_set_fwd_code(rspp,
+ MTRACE_FWD_CODE_NO_MULTICAST);
return mtrace_send_response(pim, mtracep, mtrace_len);
}
- /* reached source? */
+ /* reached source? i.e. 7.5.1 Arriving at source */
if (pim_if_connected_to_source(out_ifp, mtracep->src_addr)) {
+ reached_source = true;
rspp->prev_hop = mtracep->src_addr;
- return mtrace_send_response(pim, mtracep, mtrace_len);
}
/*
* 6.4 Forwarding Traceroute Requests:
- * Previous-hop router not known
+ * Previous-hop router not known,
+ * packet is sent to an appropriate multicast address
*/
inet_aton(MCAST_ALL_ROUTERS, &nh_addr);
}
+ /* 6.2.2 8. If this router is the Rendez-vous Point */
+ if (pim_rp_i_am_rp(pim, mtracep->grp_addr)) {
+ mtrace_rsp_set_fwd_code(rspp, MTRACE_FWD_CODE_REACHED_RP);
+ /* 7.7.1. PIM-SM ...RP has not performed source-specific join */
+ if (rspp->src_mask == MTRACE_SRC_MASK_GROUP)
+ return mtrace_send_response(pim, mtracep, mtrace_len);
+ }
+
+ /*
+ * 6.4 Forwarding Traceroute Requests: the number of response
+ * blocks exceeds number of responses, so forward to the requester.
+ */
if (mtracep->hops <= (last_rsp_ind + 1))
return mtrace_send_response(pim, mtracep, mtrace_len);
+ /* 7.5.1. Arriving at source: terminate trace */
+ if (reached_source)
+ return mtrace_send_response(pim, mtracep, mtrace_len);
+
mtracep->checksum = 0;
mtracep->checksum = in_cksum(mtrace_buf, mtrace_len);
+ /* 6.4 Forwarding Traceroute Requests: response blocks less than req. */
return mtrace_send_packet(out_ifp, mtracep, mtrace_len, nh_addr,
mtracep->grp_addr);
}
+/* 6.3. Traceroute responses */
int igmp_mtrace_recv_response(struct igmp_sock *igmp, struct ip *ip_hdr,
struct in_addr from, const char *from_str,
char *igmp_msg, int igmp_msg_len)
diff --git a/pimd/pim_igmp_mtrace.h b/pimd/pim_igmp_mtrace.h
index d47da3557a..4ab562ed97 100644
--- a/pimd/pim_igmp_mtrace.h
+++ b/pimd/pim_igmp_mtrace.h
@@ -26,6 +26,8 @@
#define MTRACE_MAX_HOPS (255)
#define MTRACE_UNKNOWN_COUNT (0xffffffff)
+#define MTRACE_SRC_MASK_GROUP (0x3f) /* forwarding on group state (*,G) */
+#define MTRACE_SRC_MASK_SOURCE (0x20) /* i.e. 32 forwarding on (S,G) */
enum mtrace_fwd_code {
MTRACE_FWD_CODE_NO_ERROR = 0x00,
diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c
index 53a3382987..829c917b0f 100644
--- a/pimd/pim_msdp.c
+++ b/pimd/pim_msdp.c
@@ -228,6 +228,8 @@ static void pim_msdp_sa_upstream_update(struct pim_msdp_sa *sa,
/* release all mem associated with a sa */
static void pim_msdp_sa_free(struct pim_msdp_sa *sa)
{
+ pim_msdp_sa_state_timer_setup(sa, false);
+
XFREE(MTYPE_PIM_MSDP_SA, sa);
}
@@ -1170,6 +1172,13 @@ enum pim_msdp_err pim_msdp_peer_add(struct pim_instance *pim,
/* release all mem associated with a peer */
static void pim_msdp_peer_free(struct pim_msdp_peer *mp)
{
+ /*
+ * Let's make sure we are not running when we delete
+ * the underlying data structure
+ */
+ pim_msdp_peer_cr_timer_setup(mp, false);
+ pim_msdp_peer_ka_timer_setup(mp, false);
+
if (mp->ibuf) {
stream_free(mp->ibuf);
}
@@ -1611,6 +1620,8 @@ void pim_msdp_init(struct pim_instance *pim, struct thread_master *master)
/* counterpart to MSDP init; XXX: unused currently */
void pim_msdp_exit(struct pim_instance *pim)
{
+ pim_msdp_sa_adv_timer_setup(pim, false);
+
/* XXX: stop listener and delete all peer sessions */
if (pim->msdp.peer_hash) {
diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c
index ffe5d52a15..de09b070f4 100644
--- a/pimd/pim_pim.c
+++ b/pimd/pim_pim.c
@@ -653,7 +653,7 @@ static int pim_hello_send(struct interface *ifp, uint16_t holdtime)
{
struct pim_interface *pim_ifp = ifp->info;
- if (pim_if_is_loopback(pim_ifp->pim, ifp))
+ if (pim_if_is_loopback(ifp))
return 0;
if (hello_send(ifp, holdtime)) {
@@ -755,7 +755,7 @@ void pim_hello_restart_triggered(struct interface *ifp)
/*
* No need to ever start loopback or vrf device hello's
*/
- if (pim_if_is_loopback(pim_ifp->pim, ifp))
+ if (pim_if_is_loopback(ifp))
return;
/*
diff --git a/redhat/frr.service b/redhat/frr.service
index cc6ec429a3..3ae0aabfe2 100644
--- a/redhat/frr.service
+++ b/redhat/frr.service
@@ -9,7 +9,7 @@ Type=forking
NotifyAccess=all
StartLimitInterval=3m
StartLimitBurst=3
-TimeoutSec=1m
+TimeoutSec=2m
WatchdogSec=60s
RestartSec=5
Restart=on-abnormal
diff --git a/snapcraft/defaults/babeld.conf.default b/snapcraft/defaults/babeld.conf.default
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/snapcraft/defaults/babeld.conf.default
diff --git a/snapcraft/defaults/eigrpd.conf.default b/snapcraft/defaults/eigrpd.conf.default
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/snapcraft/defaults/eigrpd.conf.default
diff --git a/snapcraft/defaults/pbrd.conf.default b/snapcraft/defaults/pbrd.conf.default
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/snapcraft/defaults/pbrd.conf.default
diff --git a/snapcraft/scripts/Makefile b/snapcraft/scripts/Makefile
index 966623d11d..110f4b2878 100644
--- a/snapcraft/scripts/Makefile
+++ b/snapcraft/scripts/Makefile
@@ -12,6 +12,9 @@ install:
install -D -m 0755 pimd-service $(DESTDIR)/bin/
install -D -m 0755 ldpd-service $(DESTDIR)/bin/
install -D -m 0755 nhrpd-service $(DESTDIR)/bin/
+ install -D -m 0755 babeld-service $(DESTDIR)/bin/
+ install -D -m 0755 eigrpd-service $(DESTDIR)/bin/
+ install -D -m 0755 pbrd-service $(DESTDIR)/bin/
install -D -m 0755 set-options $(DESTDIR)/bin/
install -D -m 0755 show_version $(DESTDIR)/bin/
diff --git a/snapcraft/scripts/babeld-service b/snapcraft/scripts/babeld-service
new file mode 100644
index 0000000000..9e022f8569
--- /dev/null
+++ b/snapcraft/scripts/babeld-service
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e -x
+
+if ! [ -e $SNAP_DATA/babeld.conf ]; then
+ cp $SNAP/etc/frr/babeld.conf.default $SNAP_DATA/babeld.conf
+fi
+exec $SNAP/sbin/babeld \
+ -f $SNAP_DATA/babeld.conf \
+ --pid_file $SNAP_DATA/babeld.pid \
+ --socket $SNAP_DATA/zsock \
+ --vty_socket $SNAP_DATA
+
diff --git a/snapcraft/scripts/eigrpd-service b/snapcraft/scripts/eigrpd-service
new file mode 100644
index 0000000000..fe945e5f7d
--- /dev/null
+++ b/snapcraft/scripts/eigrpd-service
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e -x
+
+if ! [ -e $SNAP_DATA/eigrpd.conf ]; then
+ cp $SNAP/etc/frr/eigrpd.conf.default $SNAP_DATA/eigrpd.conf
+fi
+exec $SNAP/sbin/eigrpd \
+ -f $SNAP_DATA/eigrpd.conf \
+ --pid_file $SNAP_DATA/eigrpd.pid \
+ --socket $SNAP_DATA/zsock \
+ --vty_socket $SNAP_DATA
+
diff --git a/snapcraft/scripts/pbrd-service b/snapcraft/scripts/pbrd-service
new file mode 100644
index 0000000000..a9265a1ae6
--- /dev/null
+++ b/snapcraft/scripts/pbrd-service
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e -x
+
+if ! [ -e $SNAP_DATA/pbrd.conf ]; then
+ cp $SNAP/etc/frr/pbrd.conf.default $SNAP_DATA/pbrd.conf
+fi
+exec $SNAP/sbin/pbrd \
+ -f $SNAP_DATA/pbrd.conf \
+ --pid_file $SNAP_DATA/pbrd.pid \
+ --socket $SNAP_DATA/zsock \
+ --vty_socket $SNAP_DATA
+
diff --git a/snapcraft/snapcraft.yaml.in b/snapcraft/snapcraft.yaml.in
index 17fabb16f8..b4e1812c86 100644
--- a/snapcraft/snapcraft.yaml.in
+++ b/snapcraft/snapcraft.yaml.in
@@ -4,7 +4,8 @@ summary: FRRouting BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM/LDP routing daemon
description: BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM routing daemon
FRRouting (FRR) is free software which manages TCP/IP based routing
protocols. It supports BGP4, BGP4+, OSPFv2, OSPFv3, IS-IS, RIPv1, RIPv2,
- RIPng, PIM and LDP as well as the IPv6 versions of these.
+ RIPng, PIM, LDP, Babel, EIGRP and PBR (Policy-based routing) as well as
+ the IPv6 versions of these.
FRRouting (frr) is a fork of Quagga.
confinement: strict
grade: devel
@@ -91,6 +92,27 @@ apps:
- network
- network-bind
- network-control
+ babeld:
+ command: bin/babeld-service
+ daemon: simple
+ plugs:
+ - network
+ - network-bind
+ - network-control
+ eigrpd:
+ command: bin/eigrpd-service
+ daemon: simple
+ plugs:
+ - network
+ - network-bind
+ - network-control
+ pbrd:
+ command: bin/pbrd-service
+ daemon: simple
+ plugs:
+ - network
+ - network-bind
+ - network-control
set:
command: bin/set-options
zebra-debug:
@@ -153,7 +175,25 @@ apps:
- network
- network-bind
- network-control
-
+ babeld-debug:
+ command: sbin/babeld -f $SNAP_DATA/babeld.conf --pid_file $SNAP_DATA/babeld.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA
+ plugs:
+ - network
+ - network-bind
+ - network-control
+ eigrpd-debug:
+ command: sbin/eigrpd -f $SNAP_DATA/eigrpd.conf --pid_file $SNAP_DATA/eigrpd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA
+ plugs:
+ - network
+ - network-bind
+ - network-control
+ pbrd-debug:
+ command: sbin/pbrd -f $SNAP_DATA/pbrd.conf --pid_file $SNAP_DATA/pbrd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA
+ plugs:
+ - network
+ - network-bind
+ - network-control
+
parts:
frr:
build-packages:
@@ -190,6 +230,9 @@ parts:
- libtinfo5
- libreadline6
- libjson-c2
+ - libc-ares2
+ - libatm1
+ - libprotobuf-c1
plugin: autotools
source: ../frr-@PACKAGE_VERSION@.tar.gz
configflags:
@@ -228,6 +271,9 @@ parts:
ripngd.conf.default: etc/frr/ripngd.conf.default
ldpd.conf.default: etc/frr/ldpd.conf.default
nhrpd.conf.default: etc/frr/nhrpd.conf.default
+ babeld.conf.default: etc/frr/babeld.conf.default
+ eigrpd.conf.default: etc/frr/eigrpd.conf.default
+ pbrd.conf.default: etc/frr/pbrd.conf.default
vtysh.conf.default: etc/frr/vtysh.conf.default
frr-scripts:
plugin: make
diff --git a/tests/.gitignore b/tests/.gitignore
index c2fe9c8890..1708a4b7b0 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -50,5 +50,7 @@ __pycache__
/lib/test_timer_performance
/lib/test_ttable
/lib/test_zmq
+/lib/test_zlog
+/lib/test_graph
/ospf6d/test_lsdb
/ospf6d/test_lsdb_clippy.c
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 0c9a5684da..703c1d05fc 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -74,6 +74,7 @@ check_PROGRAMS = \
lib/test_timer_performance \
lib/test_ttable \
lib/test_zlog \
+ lib/test_graph \
lib/cli/test_cli \
lib/cli/test_commands \
$(TESTS_BGPD) \
@@ -129,6 +130,7 @@ lib_test_timer_performance_SOURCES = lib/test_timer_performance.c \
helpers/c/prng.c
lib_test_ttable_SOURCES = lib/test_ttable.c
lib_test_zlog_SOURCES = lib/test_zlog.c
+lib_test_graph_SOURCES = lib/test_graph.c
lib_test_zmq_SOURCES = lib/test_zmq.c
lib_test_zmq_CFLAGS = $(AM_CFLAGS) $(ZEROMQ_CFLAGS)
lib_cli_test_cli_SOURCES = lib/cli/test_cli.c lib/cli/common_cli.c
@@ -170,6 +172,7 @@ lib_test_timer_correctness_LDADD = $(ALL_TESTS_LDADD)
lib_test_timer_performance_LDADD = $(ALL_TESTS_LDADD)
lib_test_ttable_LDADD = $(ALL_TESTS_LDADD)
lib_test_zlog_LDADD = $(ALL_TESTS_LDADD)
+lib_test_graph_LDADD = $(ALL_TESTS_LDADD)
lib_test_zmq_LDADD = ../lib/libfrrzmq.la $(ALL_TESTS_LDADD) $(ZEROMQ_LIBS)
lib_cli_test_cli_LDADD = $(ALL_TESTS_LDADD)
lib_cli_test_commands_LDADD = $(ALL_TESTS_LDADD)
@@ -211,6 +214,7 @@ EXTRA_DIST = \
lib/test_ttable.py \
lib/test_ttable.refout \
lib/test_zlog.py \
+ lib/test_graph.py \
ospf6d/test_lsdb.py \
ospf6d/test_lsdb.in \
ospf6d/test_lsdb.refout \
diff --git a/tests/bgpd/test_mp_attr.c b/tests/bgpd/test_mp_attr.c
index 34c35cfcc5..8db1cb2ca1 100644
--- a/tests/bgpd/test_mp_attr.c
+++ b/tests/bgpd/test_mp_attr.c
@@ -945,6 +945,24 @@ static struct test_segment mp_unreach_segments[] = {
},
{NULL, NULL, {0}, 0, 0}};
+static struct test_segment mp_prefix_sid[] = {
+ {
+ "PREFIX-SID",
+ "PREFIX-SID Test 1",
+ {
+ 0x01, 0x00, 0x07,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02,
+ 0x03, 0x00, 0x08, 0x00,
+ 0x00, 0x0a, 0x1b, 0xfe,
+ 0x00, 0x00, 0x0a
+ },
+ .len = 21,
+ .parses = SHOULD_PARSE,
+ },
+ {NULL, NULL, { 0 }, 0, 0},
+};
+
/* nlri_parse indicates 0 on successful parse, and -1 otherwise.
* attr_parse indicates BGP_ATTR_PARSE_PROCEED/0 on success,
* and BGP_ATTR_PARSE_ERROR/-1 or lower negative ret on err.
@@ -1000,10 +1018,20 @@ static void parse_test(struct peer *peer, struct test_segment *t, int type)
printf("%s: %s\n", t->name, t->desc);
- if (type == BGP_ATTR_MP_REACH_NLRI)
+ switch (type) {
+ case BGP_ATTR_MP_REACH_NLRI:
parse_ret = bgp_mp_reach_parse(&attr_args, &nlri);
- else
+ break;
+ case BGP_ATTR_MP_UNREACH_NLRI:
parse_ret = bgp_mp_unreach_parse(&attr_args, &nlri);
+ break;
+ case BGP_ATTR_PREFIX_SID:
+ parse_ret = bgp_attr_prefix_sid(t->len, &attr_args, &nlri);
+ break;
+ default:
+ printf("unknown type");
+ return;
+ }
if (!parse_ret) {
iana_afi_t pkt_afi;
iana_safi_t pkt_safi;
@@ -1022,7 +1050,7 @@ static void parse_test(struct peer *peer, struct test_segment *t, int type)
if (!parse_ret) {
if (type == BGP_ATTR_MP_REACH_NLRI)
nlri_ret = bgp_nlri_parse(peer, &attr, &nlri, 0);
- else
+ else if (type == BGP_ATTR_MP_UNREACH_NLRI)
nlri_ret = bgp_nlri_parse(peer, &attr, &nlri, 1);
}
handle_result(peer, t, parse_ret, nlri_ret);
@@ -1033,6 +1061,7 @@ static as_t asn = 100;
int main(void)
{
+ struct interface ifp;
struct peer *peer;
int i, j;
@@ -1065,6 +1094,9 @@ int main(void)
peer->status = Established;
peer->curr = stream_new(BGP_MAX_PACKET_SIZE);
+ ifp.ifindex = 0;
+ peer->nexthop.ifp = &ifp;
+
for (i = AFI_IP; i < AFI_MAX; i++)
for (j = SAFI_UNICAST; j < SAFI_MAX; j++) {
peer->afc[i][j] = 1;
@@ -1081,6 +1113,10 @@ int main(void)
parse_test(peer, &mp_unreach_segments[i++],
BGP_ATTR_MP_UNREACH_NLRI);
+ i = 0;
+ while (mp_prefix_sid[i].name)
+ parse_test(peer, &mp_prefix_sid[i++],
+ BGP_ATTR_PREFIX_SID);
printf("failures: %d\n", failed);
return failed;
}
diff --git a/tests/lib/test_graph.c b/tests/lib/test_graph.c
new file mode 100644
index 0000000000..f21f8b793c
--- /dev/null
+++ b/tests/lib/test_graph.c
@@ -0,0 +1,77 @@
+/*
+ * Test graph data structure.
+ * Copyright (C) 2018 Cumulus Networks, Inc.
+ * Quentin Young
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <zebra.h>
+#include <graph.h>
+#include <memory.h>
+#include <buffer.h>
+
+#define NUMNODES 32
+
+static void graph_custom_print_cb(struct graph_node *gn, struct buffer *buf)
+{
+ char nbuf[64];
+ char *gname = gn->data;
+
+ for (unsigned int i = 0; i < vector_active(gn->to); i++) {
+ struct graph_node *adj = vector_slot(gn->to, i);
+ char *name = adj->data;
+
+ snprintf(nbuf, sizeof(nbuf), " n%s -> n%s;\n", gname, name);
+ buffer_putstr(buf, nbuf);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct graph *g = graph_new();
+ struct graph_node *gn[NUMNODES];
+ char names[NUMNODES][16];
+
+ /* create vertices */
+ for (unsigned int i = 0; i < NUMNODES; i++) {
+ snprintf(names[i], sizeof(names[i]), "%d", i);
+ gn[i] = graph_new_node(g, names[i], NULL);
+ }
+
+ /* create edges */
+ for (unsigned int i = 1; i < NUMNODES - 1; i++) {
+ graph_add_edge(gn[0], gn[i]);
+ graph_add_edge(gn[i], gn[i + 1]);
+ }
+ graph_add_edge(gn[0], gn[NUMNODES - 1]);
+ graph_add_edge(gn[NUMNODES - 1], gn[1]);
+
+ /* print DOT */
+ char *dumped = graph_dump_dot(g, gn[0], graph_custom_print_cb);
+
+ fprintf(stdout, "%s", dumped);
+ XFREE(MTYPE_TMP, dumped);
+
+ /* remove some edges */
+ for (unsigned int i = NUMNODES - 1; i > NUMNODES / 2; --i)
+ for (unsigned int j = 0; j < NUMNODES; j++)
+ graph_remove_edge(gn[i], gn[j]);
+
+ /* remove some nodes */
+ for (unsigned int i = 0; i < NUMNODES / 2; i++)
+ graph_delete_node(g, gn[i]);
+
+ graph_delete_graph(g);
+}
diff --git a/tests/lib/test_graph.py b/tests/lib/test_graph.py
new file mode 100644
index 0000000000..697e56c149
--- /dev/null
+++ b/tests/lib/test_graph.py
@@ -0,0 +1,4 @@
+import frrtest
+
+class TestGraph(frrtest.TestRefOut):
+ program = './test_graph'
diff --git a/tests/lib/test_graph.refout b/tests/lib/test_graph.refout
new file mode 100644
index 0000000000..955f55293a
--- /dev/null
+++ b/tests/lib/test_graph.refout
@@ -0,0 +1,64 @@
+digraph {
+ n0 -> n1;
+ n0 -> n2;
+ n0 -> n3;
+ n0 -> n4;
+ n0 -> n5;
+ n0 -> n6;
+ n0 -> n7;
+ n0 -> n8;
+ n0 -> n9;
+ n0 -> n10;
+ n0 -> n11;
+ n0 -> n12;
+ n0 -> n13;
+ n0 -> n14;
+ n0 -> n15;
+ n0 -> n16;
+ n0 -> n17;
+ n0 -> n18;
+ n0 -> n19;
+ n0 -> n20;
+ n0 -> n21;
+ n0 -> n22;
+ n0 -> n23;
+ n0 -> n24;
+ n0 -> n25;
+ n0 -> n26;
+ n0 -> n27;
+ n0 -> n28;
+ n0 -> n29;
+ n0 -> n30;
+ n0 -> n31;
+ n31 -> n1;
+ n1 -> n2;
+ n2 -> n3;
+ n3 -> n4;
+ n4 -> n5;
+ n5 -> n6;
+ n6 -> n7;
+ n7 -> n8;
+ n8 -> n9;
+ n9 -> n10;
+ n10 -> n11;
+ n11 -> n12;
+ n12 -> n13;
+ n13 -> n14;
+ n14 -> n15;
+ n15 -> n16;
+ n16 -> n17;
+ n17 -> n18;
+ n18 -> n19;
+ n19 -> n20;
+ n20 -> n21;
+ n21 -> n22;
+ n22 -> n23;
+ n23 -> n24;
+ n24 -> n25;
+ n25 -> n26;
+ n26 -> n27;
+ n27 -> n28;
+ n28 -> n29;
+ n29 -> n30;
+ n30 -> n31;
+}
diff --git a/tools/etc/iproute2/rt_protos.d/frr.conf b/tools/etc/iproute2/rt_protos.d/frr.conf
index b8d4c1c03b..cac75bdfba 100644
--- a/tools/etc/iproute2/rt_protos.d/frr.conf
+++ b/tools/etc/iproute2/rt_protos.d/frr.conf
@@ -8,4 +8,5 @@
191 nhrp
192 eigrp
193 ldp
-194 sharp \ No newline at end of file
+194 sharp
+195 pbr
diff --git a/tools/frr b/tools/frr
index 27136bb762..fec94af689 100755
--- a/tools/frr
+++ b/tools/frr
@@ -552,16 +552,19 @@ case "$1" in
# Additionally if a new protocol is added
# we need to add it here as well as
# in rt_netlink.h( follow the directions! )
+ ip route flush proto 4
+ ip route flush proto 11
+ ip route flush proto 42
ip route flush proto 186
+ ip route flush proto 187
ip route flush proto 188
- ip route flush proto 4
ip route flush proto 189
ip route flush proto 190
- ip route flush proto 11
- ip route flush proto 187
- ip route flush proto 192
- ip route flush proto 42
ip route flush proto 191
+ ip route flush proto 192
+ ip route flush proto 193
+ ip route flush proto 194
+ ip route flush proto 195
else
[ -n "$dmn" ] && eval "${dmn/-/_}=0"
start_watchfrr
diff --git a/tools/frr.service b/tools/frr.service
index 8800bf6b0f..5f44274ec3 100644
--- a/tools/frr.service
+++ b/tools/frr.service
@@ -10,7 +10,7 @@ Type=forking
NotifyAccess=all
StartLimitInterval=3m
StartLimitBurst=3
-TimeoutSec=1m
+TimeoutSec=2m
WatchdogSec=60s
RestartSec=5
Restart=on-abnormal
diff --git a/vtysh/Makefile.am b/vtysh/Makefile.am
index 52641de72c..d82f9fd1b8 100644
--- a/vtysh/Makefile.am
+++ b/vtysh/Makefile.am
@@ -54,6 +54,7 @@ vtysh_scan += $(top_srcdir)/bgpd/bgp_nexthop.c
vtysh_scan += $(top_srcdir)/bgpd/bgp_route.c
vtysh_scan += $(top_srcdir)/bgpd/bgp_routemap.c
vtysh_scan += $(top_srcdir)/bgpd/bgp_vty.c
+vtysh_scan += $(top_srcdir)/bgpd/bgp_flowspec_vty.c
endif
if RPKI
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index ec212233f6..867dc9cd15 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -43,6 +43,7 @@
#include "ns.h"
#include "vrf.h"
#include "libfrr.h"
+#include "command_graph.h"
DEFINE_MTYPE_STATIC(MVTYSH, VTYSH_CMD, "Vtysh cmd copy")
@@ -2161,7 +2162,7 @@ DEFUN (vtysh_show_memory,
SHOW_STR
"Memory statistics\n")
{
- return show_per_daemon("show memory\n", "Memory statistics for %s:\n");
+ return show_per_daemon("do show memory\n", "Memory statistics for %s:\n");
}
DEFUN (vtysh_show_modules,
@@ -2170,7 +2171,7 @@ DEFUN (vtysh_show_modules,
SHOW_STR
"Loaded modules\n")
{
- return show_per_daemon("show modules\n",
+ return show_per_daemon("do show modules\n",
"Module information for %s:\n");
}
@@ -2610,20 +2611,25 @@ DEFUN (vtysh_write_memory,
/* If integrated frr.conf explicitely set. */
if (want_config_integrated()) {
ret = CMD_WARNING_CONFIG_FAILED;
+
+ /* first attempt to use watchfrr if it's available */
+ bool used_watchfrr = false;
+
for (i = 0; i < array_size(vtysh_client); i++)
if (vtysh_client[i].flag == VTYSH_WATCHFRR)
break;
- if (i < array_size(vtysh_client) && vtysh_client[i].fd != -1)
+ if (i < array_size(vtysh_client) && vtysh_client[i].fd != -1) {
+ used_watchfrr = true;
ret = vtysh_client_execute(&vtysh_client[i],
"do write integrated",
outputfile);
+ }
/*
- * If watchfrr returns CMD_WARNING_CONFIG_FAILED this means
- * that it could not write the config, but additionally
- * indicates that we should not try either
+ * If we didn't use watchfrr, fallback to writing the config
+ * ourselves
*/
- if (ret != CMD_SUCCESS && ret != CMD_WARNING_CONFIG_FAILED) {
+ if (!used_watchfrr) {
printf("\nWarning: attempting direct configuration write without "
"watchfrr.\nFile permissions and ownership may be "
"incorrect, or write may fail.\n\n");
@@ -2792,14 +2798,15 @@ ALIAS(vtysh_traceroute, vtysh_traceroute_ip_cmd, "traceroute ip WORD",
DEFUN (vtysh_mtrace,
vtysh_mtrace_cmd,
- "mtrace WORD",
+ "mtrace WORD [WORD]",
"Multicast trace route to multicast source\n"
- "Multicast trace route to multicast source address\n")
+ "Multicast trace route to multicast source address\n"
+ "Multicast trace route for multicast group address\n")
{
- int idx = 1;
-
- argv_find(argv, argc, "WORD", &idx);
- execute_command("mtracebis", 1, argv[idx]->arg, NULL);
+ if (argc == 2)
+ execute_command("mtracebis", 1, argv[1]->arg, NULL);
+ else
+ execute_command("mtracebis", 2, argv[1]->arg, argv[2]->arg);
return CMD_SUCCESS;
}
@@ -2957,10 +2964,26 @@ DEFUN(find,
return CMD_SUCCESS;
}
+DEFUN_HIDDEN(show_cli_graph_vtysh,
+ show_cli_graph_vtysh_cmd,
+ "show cli graph",
+ SHOW_STR
+ "CLI reflection\n"
+ "Dump current command space as DOT graph\n")
+{
+ struct cmd_node *cn = vector_slot(cmdvec, vty->node);
+ char *dot = cmd_graph_dump_dot(cn->cmdgraph);
+
+ vty_out(vty, "%s\n", dot);
+ XFREE(MTYPE_TMP, dot);
+ return CMD_SUCCESS;
+}
+
static void vtysh_install_default(enum node_type node)
{
install_element(node, &config_list_cmd);
install_element(node, &find_cmd);
+ install_element(node, &show_cli_graph_vtysh_cmd);
install_element(node, &vtysh_output_file_cmd);
install_element(node, &no_vtysh_output_file_cmd);
}
diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c
index 1a94807317..e60e05bcdf 100644
--- a/zebra/kernel_socket.c
+++ b/zebra/kernel_socket.c
@@ -1386,7 +1386,7 @@ static void routing_socket(struct zebra_ns *zns)
zlog_err("routing_socket: Can't raise privileges");
routing_sock =
- ns_socket(AF_ROUTE, SOCK_RAW, 0, (ns_id_t)zns->ns->ns_id);
+ ns_socket(AF_ROUTE, SOCK_RAW, 0, zns->ns_id);
if (routing_sock < 0) {
if (zserv_privs.change(ZPRIVS_LOWER))
diff --git a/zebra/label_manager.c b/zebra/label_manager.c
index 38869e80ec..190ac1e57f 100644
--- a/zebra/label_manager.c
+++ b/zebra/label_manager.c
@@ -236,6 +236,40 @@ static void lm_zclient_init(char *lm_zserv_path)
}
/**
+ * Release label chunks from a client.
+ *
+ * Called on client disconnection or reconnection. It only releases chunks
+ * with empty keep value.
+ *
+ * @param proto Daemon protocol of client, to identify the owner
+ * @param instance Instance, to identify the owner
+ * @return Number of chunks released
+ */
+int release_daemon_label_chunks(struct zserv *client)
+{
+ uint8_t proto = client->proto;
+ uint16_t instance = client->instance;
+ struct listnode *node;
+ struct label_manager_chunk *lmc;
+ int count = 0;
+ int ret;
+
+ for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) {
+ if (lmc->proto == proto && lmc->instance == instance
+ && lmc->keep == 0) {
+ ret = release_label_chunk(lmc->proto, lmc->instance,
+ lmc->start, lmc->end);
+ if (ret == 0)
+ count++;
+ }
+ }
+
+ zlog_debug("%s: Released %d label chunks", __func__, count);
+
+ return count;
+}
+
+/**
* Init label manager (or proxy to an external one)
*/
void label_manager_init(char *lm_zserv_path)
@@ -255,6 +289,8 @@ void label_manager_init(char *lm_zserv_path)
ibuf = stream_new(ZEBRA_MAX_PACKET_SIZ);
obuf = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+ hook_register(zapi_client_close, release_daemon_label_chunks);
}
/**
@@ -353,37 +389,6 @@ int release_label_chunk(uint8_t proto, unsigned short instance, uint32_t start,
return ret;
}
-/**
- * Release label chunks from a client.
- *
- * Called on client disconnection or reconnection. It only releases chunks
- * with empty keep value.
- *
- * @param proto Daemon protocol of client, to identify the owner
- * @param instance Instance, to identify the owner
- * @return Number of chunks released
- */
-int release_daemon_label_chunks(uint8_t proto, unsigned short instance)
-{
- struct listnode *node;
- struct label_manager_chunk *lmc;
- int count = 0;
- int ret;
-
- for (ALL_LIST_ELEMENTS_RO(lbl_mgr.lc_list, node, lmc)) {
- if (lmc->proto == proto && lmc->instance == instance
- && lmc->keep == 0) {
- ret = release_label_chunk(lmc->proto, lmc->instance,
- lmc->start, lmc->end);
- if (ret == 0)
- count++;
- }
- }
-
- zlog_debug("%s: Released %d label chunks", __func__, count);
-
- return count;
-}
void label_manager_close()
{
diff --git a/zebra/label_manager.h b/zebra/label_manager.h
index 4395e6897e..b998372224 100644
--- a/zebra/label_manager.h
+++ b/zebra/label_manager.h
@@ -29,6 +29,8 @@
#include "lib/linklist.h"
#include "lib/thread.h"
+#include "zebra/zserv.h"
+
#define NO_PROTO 0
/*
@@ -69,7 +71,7 @@ struct label_manager_chunk *assign_label_chunk(uint8_t proto,
uint8_t keep, uint32_t size);
int release_label_chunk(uint8_t proto, unsigned short instance, uint32_t start,
uint32_t end);
-int release_daemon_label_chunks(uint8_t proto, unsigned short instance);
+int release_daemon_label_chunks(struct zserv *client);
void label_manager_close(void);
#endif /* _LABEL_MANAGER_H */
diff --git a/zebra/main.c b/zebra/main.c
index c8d7f83fb7..9a495c8940 100644
--- a/zebra/main.c
+++ b/zebra/main.c
@@ -36,6 +36,7 @@
#include "vrf.h"
#include "logicalrouter.h"
#include "libfrr.h"
+#include "routemap.h"
#include "zebra/rib.h"
#include "zebra/zserv.h"
@@ -49,6 +50,7 @@
#include "zebra/zebra_mpls.h"
#include "zebra/label_manager.h"
#include "zebra/zebra_netns_notify.h"
+#include "zebra/zebra_rnh.h"
#define ZEBRA_PTM_SUPPORT
@@ -371,6 +373,9 @@ int main(int argc, char **argv)
/* Init label manager */
label_manager_init(lblmgr_path);
+ /* RNH init */
+ zebra_rnh_init();
+
frr_run(zebrad.master);
/* Not reached... */
diff --git a/zebra/redistribute.c b/zebra/redistribute.c
index a51cd6ecc9..810ee33839 100644
--- a/zebra/redistribute.c
+++ b/zebra/redistribute.c
@@ -40,6 +40,7 @@
#include "zebra/redistribute.h"
#include "zebra/debug.h"
#include "zebra/router-id.h"
+#include "zebra/zapi_msg.h"
#include "zebra/zebra_memory.h"
#include "zebra/zebra_vxlan.h"
@@ -113,18 +114,20 @@ static void zebra_redistribute(struct zserv *client, int type,
for (rn = route_top(table); rn; rn = srcdest_route_next(rn))
RNODE_FOREACH_RE (rn, newre) {
struct prefix *dst_p, *src_p;
+ char buf[PREFIX_STRLEN];
+
srcdest_rnode_prefixes(rn, &dst_p, &src_p);
if (IS_ZEBRA_DEBUG_EVENT)
zlog_debug(
- "%s: client %s vrf %d checking: selected=%d, type=%d, distance=%d, "
- "zebra_check_addr=%d",
+ "%s: client %s %s(%d) checking: selected=%d, type=%d, distance=%d, metric=%d zebra_check_addr=%d",
__func__,
zebra_route_string(client->proto),
+ prefix2str(dst_p, buf, sizeof(buf)),
vrf_id, CHECK_FLAG(newre->flags,
ZEBRA_FLAG_SELECTED),
newre->type, newre->distance,
- zebra_check_addr(dst_p));
+ newre->metric, zebra_check_addr(dst_p));
if (!CHECK_FLAG(newre->flags, ZEBRA_FLAG_SELECTED))
continue;
@@ -151,13 +154,13 @@ void redistribute_update(struct prefix *p, struct prefix *src_p,
struct zserv *client;
int send_redistribute;
int afi;
- char buf[INET6_ADDRSTRLEN];
+ char buf[PREFIX_STRLEN];
if (IS_ZEBRA_DEBUG_RIB) {
- inet_ntop(p->family, &p->u.prefix, buf, INET6_ADDRSTRLEN);
zlog_debug(
- "%u:%s/%d: Redist update re %p (type %d), old %p (type %d)",
- re->vrf_id, buf, p->prefixlen, re, re->type, prev_re,
+ "%u:%s: Redist update re %p (type %d), old %p (type %d)",
+ re->vrf_id, prefix2str(p, buf, sizeof(buf)),
+ re, re->type, prev_re,
prev_re ? prev_re->type : -1);
}
@@ -187,6 +190,15 @@ void redistribute_update(struct prefix *p, struct prefix *src_p,
send_redistribute = 1;
if (send_redistribute) {
+ if (IS_ZEBRA_DEBUG_EVENT) {
+ zlog_debug(
+ "%s: client %s %s(%d), type=%d, distance=%d, metric=%d",
+ __func__,
+ zebra_route_string(client->proto),
+ prefix2str(p, buf, sizeof(buf)),
+ re->vrf_id, re->type,
+ re->distance, re->metric);
+ }
zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD,
client, p, src_p, re);
} else if (prev_re
diff --git a/zebra/redistribute.h b/zebra/redistribute.h
index c78480d9a3..9b4820acd4 100644
--- a/zebra/redistribute.h
+++ b/zebra/redistribute.h
@@ -23,10 +23,12 @@
#define _ZEBRA_REDISTRIBUTE_H
#include "table.h"
-#include "zserv.h"
#include "vty.h"
#include "vrf.h"
+#include "zebra/zserv.h"
+#include "zebra/rib.h"
+
/* ZAPI command handlers */
extern void zebra_redistribute_add(ZAPI_HANDLER_ARGS);
extern void zebra_redistribute_delete(ZAPI_HANDLER_ARGS);
diff --git a/zebra/router-id.c b/zebra/router-id.c
index f6c88a2162..252b558a8b 100644
--- a/zebra/router-id.c
+++ b/zebra/router-id.c
@@ -39,6 +39,7 @@
#include "vrf.h"
#include "zebra/zserv.h"
+#include "zebra/zapi_msg.h"
#include "zebra/zebra_vrf.h"
#include "zebra/router-id.h"
#include "zebra/redistribute.h"
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index e68e3aaafd..a35dc9a177 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -50,7 +50,7 @@
#include "mpls.h"
#include "vxlan.h"
-#include "zebra/zserv.h"
+#include "zebra/zapi_msg.h"
#include "zebra/zebra_ns.h"
#include "zebra/zebra_vrf.h"
#include "zebra/rt.h"
@@ -98,7 +98,8 @@ static inline int is_selfroute(int proto)
|| (proto == RTPROT_ISIS) || (proto == RTPROT_RIPNG)
|| (proto == RTPROT_NHRP) || (proto == RTPROT_EIGRP)
|| (proto == RTPROT_LDP) || (proto == RTPROT_BABEL)
- || (proto == RTPROT_RIP) || (proto == RTPROT_SHARP)) {
+ || (proto == RTPROT_RIP) || (proto == RTPROT_SHARP)
+ || (proto == RTPROT_PBR)) {
return 1;
}
@@ -142,7 +143,18 @@ static inline int zebra2proto(int proto)
case ZEBRA_ROUTE_SHARP:
proto = RTPROT_SHARP;
break;
+ case ZEBRA_ROUTE_PBR:
+ proto = RTPROT_PBR;
+ break;
default:
+ /*
+ * When a user adds a new protocol this will show up
+ * to let them know to do something about it. This
+ * is intentionally a warn because we should see
+ * this as part of development of a new protocol
+ */
+ zlog_warn("%s: Please add this protocol(%d) to proper rt_netlink.c handling",
+ __PRETTY_FUNCTION__, proto);
proto = RTPROT_ZEBRA;
break;
}
@@ -184,7 +196,22 @@ static inline int proto2zebra(int proto, int family)
case RTPROT_STATIC:
proto = ZEBRA_ROUTE_STATIC;
break;
+ case RTPROT_SHARP:
+ proto = ZEBRA_ROUTE_SHARP;
+ break;
+ case RTPROT_PBR:
+ proto = ZEBRA_ROUTE_PBR;
+ break;
default:
+ /*
+ * When a user adds a new protocol this will show up
+ * to let them know to do something about it. This
+ * is intentionally a warn because we should see
+ * this as part of development of a new protocol
+ */
+ zlog_warn("%s: Please add this protocol(%d) to proper rt_netlink.c handling",
+ __PRETTY_FUNCTION__,
+ proto);
proto = ZEBRA_ROUTE_KERNEL;
break;
}
diff --git a/zebra/rt_netlink.h b/zebra/rt_netlink.h
index 51350fd6fb..78888f48ca 100644
--- a/zebra/rt_netlink.h
+++ b/zebra/rt_netlink.h
@@ -52,6 +52,7 @@
#define RTPROT_EIGRP 192
#define RTPROT_LDP 193
#define RTPROT_SHARP 194
+#define RTPROT_PBR 195
void rt_netlink_init(void);
diff --git a/zebra/rtadv.c b/zebra/rtadv.c
index c695b65660..dc918b1a9b 100644
--- a/zebra/rtadv.c
+++ b/zebra/rtadv.c
@@ -40,7 +40,7 @@
#include "zebra/rtadv.h"
#include "zebra/debug.h"
#include "zebra/rib.h"
-#include "zebra/zserv.h"
+#include "zebra/zapi_msg.h"
#include "zebra/zebra_ns.h"
#include "zebra/zebra_vrf.h"
diff --git a/zebra/subdir.am b/zebra/subdir.am
index 9dbff7d40c..45e285a9e7 100644
--- a/zebra/subdir.am
+++ b/zebra/subdir.am
@@ -71,6 +71,7 @@ zebra_zebra_SOURCES = \
zebra/zebra_netns_id.c \
zebra/zebra_netns_notify.c \
zebra/table_manager.c \
+ zebra/zapi_msg.c \
# end
zebra/zebra_vty_clippy.c: $(CLIPPY_DEPS)
@@ -115,6 +116,7 @@ noinst_HEADERS += \
zebra/zebra_netns_id.h \
zebra/zebra_netns_notify.h \
zebra/table_manager.h \
+ zebra/zapi_msg.h \
# end
zebra_zebra_irdp_la_SOURCES = \
diff --git a/zebra/table_manager.c b/zebra/table_manager.c
index db07f402f3..cb8c384436 100644
--- a/zebra/table_manager.c
+++ b/zebra/table_manager.c
@@ -16,12 +16,12 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "zebra.h"
+
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
-#include "zebra.h"
-#include "zserv.h"
#include "lib/log.h"
#include "lib/memory.h"
#include "lib/table.h"
@@ -31,9 +31,10 @@
#include "lib/libfrr.h"
#include "lib/vrf.h"
-#include "zebra_vrf.h"
-#include "label_manager.h" /* for NO_PROTO */
-#include "table_manager.h"
+#include "zebra/zserv.h"
+#include "zebra/zebra_vrf.h"
+#include "zebra/label_manager.h" /* for NO_PROTO */
+#include "zebra/table_manager.h"
/* routing table identifiers
*
@@ -77,6 +78,7 @@ void table_manager_enable(ns_id_t ns_id)
return;
tbl_mgr.lc_list = list_new();
tbl_mgr.lc_list->del = delete_table_chunk;
+ hook_register(zapi_client_close, release_daemon_table_chunks);
}
/**
@@ -202,12 +204,13 @@ int release_table_chunk(uint8_t proto, uint16_t instance, uint32_t start,
* Called on client disconnection or reconnection. It only releases chunks
* with empty keep value.
*
- * @param proto Daemon protocol of client, to identify the owner
- * @param instance Instance, to identify the owner
+ * @param client the client to release chunks from
* @return Number of chunks released
*/
-int release_daemon_table_chunks(uint8_t proto, uint16_t instance)
+int release_daemon_table_chunks(struct zserv *client)
{
+ uint8_t proto = client->proto;
+ uint16_t instance = client->instance;
struct listnode *node;
struct table_manager_chunk *tmc;
int count = 0;
diff --git a/zebra/table_manager.h b/zebra/table_manager.h
index 527d5c29e8..5196162c4c 100644
--- a/zebra/table_manager.h
+++ b/zebra/table_manager.h
@@ -23,6 +23,9 @@
#include "lib/linklist.h"
#include "lib/thread.h"
+#include "lib/ns.h"
+
+#include "zebra/zserv.h"
/*
* Table chunk struct
@@ -57,7 +60,7 @@ struct table_manager_chunk *assign_table_chunk(uint8_t proto, uint16_t instance,
uint32_t size);
int release_table_chunk(uint8_t proto, uint16_t instance, uint32_t start,
uint32_t end);
-int release_daemon_table_chunks(uint8_t proto, uint16_t instance);
+int release_daemon_table_chunks(struct zserv *client);
void table_manager_disable(ns_id_t ns_id);
#endif /* _TABLE_MANAGER_H */
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
new file mode 100644
index 0000000000..2ff660b3f9
--- /dev/null
+++ b/zebra/zapi_msg.c
@@ -0,0 +1,2961 @@
+/*
+ * Zebra API message creation & consumption.
+ * Portions:
+ * Copyright (C) 1997-1999 Kunihiro Ishiguro
+ * Copyright (C) 2015-2018 Cumulus Networks, Inc.
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+#include <libgen.h>
+
+#include "lib/prefix.h"
+#include "lib/command.h"
+#include "lib/if.h"
+#include "lib/thread.h"
+#include "lib/stream.h"
+#include "lib/memory.h"
+#include "lib/table.h"
+#include "lib/network.h"
+#include "lib/sockunion.h"
+#include "lib/log.h"
+#include "lib/zclient.h"
+#include "lib/privs.h"
+#include "lib/network.h"
+#include "lib/buffer.h"
+#include "lib/nexthop.h"
+#include "lib/vrf.h"
+#include "lib/libfrr.h"
+#include "lib/sockopt.h"
+
+#include "zebra/rib.h"
+#include "zebra/zebra_memory.h"
+#include "zebra/zebra_ns.h"
+#include "zebra/zebra_vrf.h"
+#include "zebra/router-id.h"
+#include "zebra/redistribute.h"
+#include "zebra/debug.h"
+#include "zebra/zebra_rnh.h"
+#include "zebra/rt_netlink.h"
+#include "zebra/interface.h"
+#include "zebra/zebra_ptm.h"
+#include "zebra/rtadv.h"
+#include "zebra/zebra_mpls.h"
+#include "zebra/zebra_mroute.h"
+#include "zebra/label_manager.h"
+#include "zebra/zebra_vxlan.h"
+#include "zebra/rt.h"
+#include "zebra/zebra_pbr.h"
+#include "zebra/table_manager.h"
+#include "zebra/zapi_msg.h"
+
+/* Encoding helpers -------------------------------------------------------- */
+
+static void zserv_encode_interface(struct stream *s, struct interface *ifp)
+{
+ /* Interface information. */
+ stream_put(s, ifp->name, INTERFACE_NAMSIZ);
+ stream_putl(s, ifp->ifindex);
+ stream_putc(s, ifp->status);
+ stream_putq(s, ifp->flags);
+ stream_putc(s, ifp->ptm_enable);
+ stream_putc(s, ifp->ptm_status);
+ stream_putl(s, ifp->metric);
+ stream_putl(s, ifp->speed);
+ stream_putl(s, ifp->mtu);
+ stream_putl(s, ifp->mtu6);
+ stream_putl(s, ifp->bandwidth);
+ stream_putl(s, ifp->ll_type);
+ stream_putl(s, ifp->hw_addr_len);
+ if (ifp->hw_addr_len)
+ stream_put(s, ifp->hw_addr, ifp->hw_addr_len);
+
+ /* Then, Traffic Engineering parameters if any */
+ if (HAS_LINK_PARAMS(ifp) && IS_LINK_PARAMS_SET(ifp->link_params)) {
+ stream_putc(s, 1);
+ zebra_interface_link_params_write(s, ifp);
+ } else
+ stream_putc(s, 0);
+
+ /* Write packet size. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+}
+
+static void zserv_encode_vrf(struct stream *s, struct zebra_vrf *zvrf)
+{
+ struct vrf_data data;
+ const char *netns_name = zvrf_ns_name(zvrf);
+
+ data.l.table_id = zvrf->table_id;
+
+ if (netns_name)
+ strlcpy(data.l.netns_name, basename((char *)netns_name),
+ NS_NAMSIZ);
+ else
+ memset(data.l.netns_name, 0, NS_NAMSIZ);
+ /* Pass the tableid and the netns NAME */
+ stream_put(s, &data, sizeof(struct vrf_data));
+ /* Interface information. */
+ stream_put(s, zvrf_name(zvrf), VRF_NAMSIZ);
+ /* Write packet size. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+}
+
+static int zserv_encode_nexthop(struct stream *s, struct nexthop *nexthop)
+{
+ stream_putc(s, nexthop->type);
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ stream_put_in_addr(s, &nexthop->gate.ipv4);
+ stream_putl(s, nexthop->ifindex);
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ stream_put(s, &nexthop->gate.ipv6, 16);
+ break;
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ stream_put(s, &nexthop->gate.ipv6, 16);
+ stream_putl(s, nexthop->ifindex);
+ break;
+ case NEXTHOP_TYPE_IFINDEX:
+ stream_putl(s, nexthop->ifindex);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ return 1;
+}
+
+/* Send handlers ----------------------------------------------------------- */
+
+/* Interface is added. Send ZEBRA_INTERFACE_ADD to client. */
+/*
+ * This function is called in the following situations:
+ * - in response to a 3-byte ZEBRA_INTERFACE_ADD request
+ * from the client.
+ * - at startup, when zebra figures out the available interfaces
+ * - when an interface is added (where support for
+ * RTM_IFANNOUNCE or AF_NETLINK sockets is available), or when
+ * an interface is marked IFF_UP (i.e., an RTM_IFINFO message is
+ * received)
+ */
+int zsend_interface_add(struct zserv *client, struct interface *ifp)
+{
+ struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+ zclient_create_header(s, ZEBRA_INTERFACE_ADD, ifp->vrf_id);
+ zserv_encode_interface(s, ifp);
+
+ client->ifadd_cnt++;
+ return zebra_server_send_message(client, s);
+}
+
+/* Interface deletion from zebra daemon. */
+int zsend_interface_delete(struct zserv *client, struct interface *ifp)
+{
+ struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+ zclient_create_header(s, ZEBRA_INTERFACE_DELETE, ifp->vrf_id);
+ zserv_encode_interface(s, ifp);
+
+ client->ifdel_cnt++;
+ return zebra_server_send_message(client, s);
+}
+
+int zsend_vrf_add(struct zserv *client, struct zebra_vrf *zvrf)
+{
+ struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+ zclient_create_header(s, ZEBRA_VRF_ADD, zvrf_id(zvrf));
+ zserv_encode_vrf(s, zvrf);
+
+ client->vrfadd_cnt++;
+ return zebra_server_send_message(client, s);
+}
+
+/* VRF deletion from zebra daemon. */
+int zsend_vrf_delete(struct zserv *client, struct zebra_vrf *zvrf)
+
+{
+ struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+ zclient_create_header(s, ZEBRA_VRF_DELETE, zvrf_id(zvrf));
+ zserv_encode_vrf(s, zvrf);
+
+ client->vrfdel_cnt++;
+ return zebra_server_send_message(client, s);
+}
+
+int zsend_interface_link_params(struct zserv *client, struct interface *ifp)
+{
+ struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+ /* Check this client need interface information. */
+ if (!client->ifinfo) {
+ stream_free(s);
+ return 0;
+ }
+
+ if (!ifp->link_params) {
+ stream_free(s);
+ return 0;
+ }
+
+ zclient_create_header(s, ZEBRA_INTERFACE_LINK_PARAMS, ifp->vrf_id);
+
+ /* Add Interface Index */
+ stream_putl(s, ifp->ifindex);
+
+ /* Then TE Link Parameters */
+ if (zebra_interface_link_params_write(s, ifp) == 0) {
+ stream_free(s);
+ return 0;
+ }
+
+ /* Write packet size. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ return zebra_server_send_message(client, s);
+}
+
+/* Interface address is added/deleted. Send ZEBRA_INTERFACE_ADDRESS_ADD or
+ * ZEBRA_INTERFACE_ADDRESS_DELETE to the client.
+ *
+ * A ZEBRA_INTERFACE_ADDRESS_ADD is sent in the following situations:
+ * - in response to a 3-byte ZEBRA_INTERFACE_ADD request
+ * from the client, after the ZEBRA_INTERFACE_ADD has been
+ * sent from zebra to the client
+ * - redistribute new address info to all clients in the following situations
+ * - at startup, when zebra figures out the available interfaces
+ * - when an interface is added (where support for
+ * RTM_IFANNOUNCE or AF_NETLINK sockets is available), or when
+ * an interface is marked IFF_UP (i.e., an RTM_IFINFO message is
+ * received)
+ * - for the vty commands "ip address A.B.C.D/M [<secondary>|<label LINE>]"
+ * and "no bandwidth <1-10000000>", "ipv6 address X:X::X:X/M"
+ * - when an RTM_NEWADDR message is received from the kernel,
+ *
+ * The call tree that triggers ZEBRA_INTERFACE_ADDRESS_DELETE:
+ *
+ * zsend_interface_address(DELETE)
+ * ^
+ * |
+ * zebra_interface_address_delete_update
+ * ^ ^ ^
+ * | | if_delete_update
+ * | |
+ * ip_address_uninstall connected_delete_ipv4
+ * [ipv6_addresss_uninstall] [connected_delete_ipv6]
+ * ^ ^
+ * | |
+ * | RTM_NEWADDR on routing/netlink socket
+ * |
+ * vty commands:
+ * "no ip address A.B.C.D/M [label LINE]"
+ * "no ip address A.B.C.D/M secondary"
+ * ["no ipv6 address X:X::X:X/M"]
+ *
+ */
+int zsend_interface_address(int cmd, struct zserv *client,
+ struct interface *ifp, struct connected *ifc)
+{
+ int blen;
+ struct prefix *p;
+ struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+ zclient_create_header(s, cmd, ifp->vrf_id);
+ stream_putl(s, ifp->ifindex);
+
+ /* Interface address flag. */
+ stream_putc(s, ifc->flags);
+
+ /* Prefix information. */
+ p = ifc->address;
+ stream_putc(s, p->family);
+ blen = prefix_blen(p);
+ stream_put(s, &p->u.prefix, blen);
+
+ /*
+ * XXX gnu version does not send prefixlen for
+ * ZEBRA_INTERFACE_ADDRESS_DELETE
+ * but zebra_interface_address_delete_read() in the gnu version
+ * expects to find it
+ */
+ stream_putc(s, p->prefixlen);
+
+ /* Destination. */
+ p = ifc->destination;
+ if (p)
+ stream_put(s, &p->u.prefix, blen);
+ else
+ stream_put(s, NULL, blen);
+
+ /* Write packet size. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ client->connected_rt_add_cnt++;
+ return zebra_server_send_message(client, s);
+}
+
+/* Encode one neighbor-address ADD/DELETE notification and ship it to a
+ * single client. */
+static int zsend_interface_nbr_address(int cmd, struct zserv *client,
+				       struct interface *ifp,
+				       struct nbr_connected *ifc)
+{
+	struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+	struct prefix *p = ifc->address;
+
+	/* Header: command + owning VRF, then the interface index. */
+	zclient_create_header(s, cmd, ifp->vrf_id);
+	stream_putl(s, ifp->ifindex);
+
+	/* Neighbor prefix: family, raw address bytes, prefix length. */
+	stream_putc(s, p->family);
+	stream_put(s, &p->u.prefix, prefix_blen(p));
+
+	/*
+	 * XXX gnu version does not send prefixlen for
+	 * ZEBRA_INTERFACE_ADDRESS_DELETE
+	 * but zebra_interface_address_delete_read() in the gnu version
+	 * expects to find it
+	 */
+	stream_putc(s, p->prefixlen);
+
+	/* Backfill the length word at the head of the stream. */
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	return zebra_server_send_message(client, s);
+}
+
+/* Interface address addition: broadcast the new neighbor address on
+ * 'ifp' to every connected client. */
+static void zebra_interface_nbr_address_add_update(struct interface *ifp,
+						   struct nbr_connected *ifc)
+{
+	struct zserv *client;
+	struct listnode *node, *nnode;
+
+	if (IS_ZEBRA_DEBUG_EVENT) {
+		char buf[INET6_ADDRSTRLEN];
+		struct prefix *p = ifc->address;
+
+		zlog_debug(
+			"MESSAGE: ZEBRA_INTERFACE_NBR_ADDRESS_ADD %s/%d on %s",
+			inet_ntop(p->family, &p->u.prefix, buf,
+				  INET6_ADDRSTRLEN),
+			p->prefixlen, ifc->ifp->name);
+	}
+
+	for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client))
+		zsend_interface_nbr_address(ZEBRA_INTERFACE_NBR_ADDRESS_ADD,
+					    client, ifp, ifc);
+}
+
+/* Interface address deletion: tell every connected client the neighbor
+ * address on 'ifp' went away. */
+static void zebra_interface_nbr_address_delete_update(struct interface *ifp,
+						      struct nbr_connected *ifc)
+{
+	struct zserv *client;
+	struct listnode *node, *nnode;
+
+	if (IS_ZEBRA_DEBUG_EVENT) {
+		char buf[INET6_ADDRSTRLEN];
+		struct prefix *p = ifc->address;
+
+		zlog_debug(
+			"MESSAGE: ZEBRA_INTERFACE_NBR_ADDRESS_DELETE %s/%d on %s",
+			inet_ntop(p->family, &p->u.prefix, buf,
+				  INET6_ADDRSTRLEN),
+			p->prefixlen, ifc->ifp->name);
+	}
+
+	for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client))
+		zsend_interface_nbr_address(ZEBRA_INTERFACE_NBR_ADDRESS_DELETE,
+					    client, ifp, ifc);
+}
+
+/* Send addresses on interface to client.  Replays both connected and
+ * neighbor addresses; returns -1 as soon as any send fails. */
+int zsend_interface_addresses(struct zserv *client, struct interface *ifp)
+{
+	struct listnode *anode, *annode;
+	struct connected *conn;
+	struct nbr_connected *nbr;
+
+	/* Connected addresses: only those actually installed. */
+	for (ALL_LIST_ELEMENTS(ifp->connected, anode, annode, conn)) {
+		if (!CHECK_FLAG(conn->conf, ZEBRA_IFC_REAL))
+			continue;
+
+		if (zsend_interface_address(ZEBRA_INTERFACE_ADDRESS_ADD, client,
+					    ifp, conn)
+		    < 0)
+			return -1;
+	}
+
+	/* Neighbor addresses. */
+	for (ALL_LIST_ELEMENTS(ifp->nbr_connected, anode, annode, nbr)) {
+		if (zsend_interface_nbr_address(ZEBRA_INTERFACE_NBR_ADDRESS_ADD,
+						client, ifp, nbr)
+		    < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+/* Notify client about interface moving from one VRF to another.
+ * Whether client is interested in old and new VRF is checked by caller.
+ */
+int zsend_interface_vrf_update(struct zserv *client, struct interface *ifp,
+			       vrf_id_t vrf_id)
+{
+	struct stream *msg = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	/* Header carries the old VRF; body carries ifindex + new VRF id. */
+	zclient_create_header(msg, ZEBRA_INTERFACE_VRF_UPDATE, ifp->vrf_id);
+	stream_putl(msg, ifp->ifindex);
+	stream_putl(msg, vrf_id);
+
+	/* Backfill the length word. */
+	stream_putw_at(msg, 0, stream_get_endp(msg));
+
+	client->if_vrfchg_cnt++;
+	return zebra_server_send_message(client, msg);
+}
+
+/* Add new nbr connected IPv6 address */
+void nbr_connected_add_ipv6(struct interface *ifp, struct in6_addr *address)
+{
+	struct nbr_connected *ifc;
+	struct prefix p;
+
+	/* Build a /128 host prefix for the neighbor address. */
+	p.family = AF_INET6;
+	IPV6_ADDR_COPY(&p.u.prefix, address);
+	p.prefixlen = IPV6_MAX_PREFIXLEN;
+
+	/* NOTE(review): only the list head is reused, and its address is
+	 * overwritten -- effectively one tracked neighbor address per
+	 * interface.  Presumably intentional; confirm. */
+	ifc = listnode_head(ifp->nbr_connected);
+	if (!ifc) {
+		/* new addition */
+		ifc = nbr_connected_new();
+		ifc->address = prefix_new();
+		ifc->ifp = ifp;
+		listnode_add(ifp->nbr_connected, ifc);
+	}
+
+	prefix_copy(ifc->address, &p);
+
+	/* Notify clients, then refresh the derived v4 neighbor entry. */
+	zebra_interface_nbr_address_add_update(ifp, ifc);
+
+	if_nbr_ipv6ll_to_ipv4ll_neigh_update(ifp, address, 1);
+}
+
+/* Remove a tracked neighbor IPv6 address from 'ifp', notifying clients
+ * and tearing down the derived v4 neighbor entry. */
+void nbr_connected_delete_ipv6(struct interface *ifp, struct in6_addr *address)
+{
+	struct nbr_connected *ifc;
+	struct prefix p;
+
+	/* Reconstruct the /128 host prefix used as the lookup key. */
+	p.family = AF_INET6;
+	IPV6_ADDR_COPY(&p.u.prefix, address);
+	p.prefixlen = IPV6_MAX_PREFIXLEN;
+
+	/* Nothing to do if this neighbor address is not tracked. */
+	ifc = nbr_connected_check(ifp, &p);
+	if (!ifc)
+		return;
+
+	/* Unlink first, then notify, then free. */
+	listnode_delete(ifp->nbr_connected, ifc);
+
+	zebra_interface_nbr_address_delete_update(ifp, ifc);
+
+	if_nbr_ipv6ll_to_ipv4ll_neigh_update(ifp, address, 0);
+
+	nbr_connected_free(ifc);
+}
+
+/*
+ * The cmd passed to zsend_interface_update may be ZEBRA_INTERFACE_UP or
+ * ZEBRA_INTERFACE_DOWN.
+ *
+ * The ZEBRA_INTERFACE_UP message is sent from the zebra server to
+ * the clients in one of 2 situations:
+ *   - an if_up is detected e.g., as a result of an RTM_IFINFO message
+ *   - a vty command modifying the bandwidth of an interface is received.
+ * The ZEBRA_INTERFACE_DOWN message is sent when an if_down is detected.
+ */
+int zsend_interface_update(int cmd, struct zserv *client, struct interface *ifp)
+{
+	struct stream *msg = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(msg, cmd, ifp->vrf_id);
+	zserv_encode_interface(msg, ifp);
+
+	/* Per-client up/down statistics. */
+	(cmd == ZEBRA_INTERFACE_UP) ? client->ifup_cnt++
+				    : client->ifdown_cnt++;
+
+	return zebra_server_send_message(client, msg);
+}
+
+/* Encode route 're' for prefix 'p' (and optional IPv6 source prefix
+ * 'src_p') as a ZAPI redistribute message and send it to 'client'.
+ * 'cmd' selects the ADD/DEL variant.  Only nexthops flagged ACTIVE are
+ * encoded. */
+int zsend_redistribute_route(int cmd, struct zserv *client, struct prefix *p,
+			     struct prefix *src_p, struct route_entry *re)
+{
+	struct zapi_route api;
+	struct zapi_nexthop *api_nh;
+	struct nexthop *nexthop;
+	int count = 0;
+
+	memset(&api, 0, sizeof(api));
+	api.vrf_id = re->vrf_id;
+	api.type = re->type;
+	api.instance = re->instance;
+	api.flags = re->flags;
+
+	/* Prefix. */
+	api.prefix = *p;
+	if (src_p) {
+		SET_FLAG(api.message, ZAPI_MESSAGE_SRCPFX);
+		memcpy(&api.src_prefix, src_p, sizeof(api.src_prefix));
+	}
+
+	/* Nexthops.
+	 * NOTE(review): nexthop_num comes from nexthop_active_num while the
+	 * loop below independently counts ACTIVE nexthops into 'count';
+	 * presumably these always agree -- verify. */
+	if (re->nexthop_active_num) {
+		SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
+		api.nexthop_num = re->nexthop_active_num;
+	}
+	for (nexthop = re->ng.nexthop; nexthop; nexthop = nexthop->next) {
+		if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+			continue;
+
+		api_nh = &api.nexthops[count];
+		api_nh->vrf_id = nexthop->vrf_id;
+		api_nh->type = nexthop->type;
+		switch (nexthop->type) {
+		case NEXTHOP_TYPE_BLACKHOLE:
+			api_nh->bh_type = nexthop->bh_type;
+			break;
+		case NEXTHOP_TYPE_IPV4:
+			api_nh->gate.ipv4 = nexthop->gate.ipv4;
+			break;
+		case NEXTHOP_TYPE_IPV4_IFINDEX:
+			api_nh->gate.ipv4 = nexthop->gate.ipv4;
+			api_nh->ifindex = nexthop->ifindex;
+			break;
+		case NEXTHOP_TYPE_IFINDEX:
+			api_nh->ifindex = nexthop->ifindex;
+			break;
+		case NEXTHOP_TYPE_IPV6:
+			api_nh->gate.ipv6 = nexthop->gate.ipv6;
+			break;
+		case NEXTHOP_TYPE_IPV6_IFINDEX:
+			api_nh->gate.ipv6 = nexthop->gate.ipv6;
+			api_nh->ifindex = nexthop->ifindex;
+			/* no break needed: last case */
+		}
+		count++;
+	}
+
+	/* Attributes: distance, metric and MTU are always sent; tag only
+	 * when non-zero. */
+	SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE);
+	api.distance = re->distance;
+	SET_FLAG(api.message, ZAPI_MESSAGE_METRIC);
+	api.metric = re->metric;
+	if (re->tag) {
+		SET_FLAG(api.message, ZAPI_MESSAGE_TAG);
+		api.tag = re->tag;
+	}
+	SET_FLAG(api.message, ZAPI_MESSAGE_MTU);
+	api.mtu = re->mtu;
+
+	struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	/* Encode route and send. */
+	if (zapi_route_encode(cmd, s, &api) < 0) {
+		stream_free(s);
+		return -1;
+	}
+
+	if (IS_ZEBRA_DEBUG_SEND) {
+		char buf_prefix[PREFIX_STRLEN];
+
+		prefix2str(&api.prefix, buf_prefix, sizeof(buf_prefix));
+
+		zlog_debug("%s: %s to client %s: type %s, vrf_id %d, p %s",
+			   __func__, zserv_command_string(cmd),
+			   zebra_route_string(client->proto),
+			   zebra_route_string(api.type), api.vrf_id,
+			   buf_prefix);
+	}
+	return zebra_server_send_message(client, s);
+}
+
+/*
+ * Modified version of zsend_ipv4_nexthop_lookup(): Query unicast rib if
+ * nexthop is not found on mrib. Returns both route metric and protocol
+ * distance.
+ */
+static int zsend_ipv4_nexthop_lookup_mrib(struct zserv *client,
+					  struct in_addr addr,
+					  struct route_entry *re,
+					  struct zebra_vrf *zvrf)
+{
+	struct stream *s;
+	unsigned long nump;
+	uint8_t num;
+	struct nexthop *nexthop;
+
+	/* Get output stream.  A freshly allocated stream is already empty,
+	 * so no stream_reset() is needed here. */
+	s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	/* Fill in result: queried address, then route attributes. */
+	zclient_create_header(s, ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB, zvrf_id(zvrf));
+	stream_put_in_addr(s, &addr);
+
+	if (re) {
+		stream_putc(s, re->distance);
+		stream_putl(s, re->metric);
+		num = 0;
+		/* remember position for nexthop_num */
+		nump = stream_get_endp(s);
+		/* reserve room for nexthop_num */
+		stream_putc(s, 0);
+		/*
+		 * Only non-recursive routes are eligible to resolve the
+		 * nexthop we are looking up. Therefore, we will just iterate
+		 * over the top chain of nexthops.
+		 */
+		for (nexthop = re->ng.nexthop; nexthop; nexthop = nexthop->next)
+			if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+				num += zserv_encode_nexthop(s, nexthop);
+
+		/* store nexthop_num */
+		stream_putc_at(s, nump, num);
+	} else {
+		stream_putc(s, 0); /* distance */
+		stream_putl(s, 0); /* metric */
+		stream_putc(s, 0); /* nexthop_num */
+	}
+
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	return zebra_server_send_message(client, s);
+}
+
+/* Notify the daemon that owns route 're' (matched by type/instance)
+ * about the install/removal result 'note' for prefix 'p'.  Returns 0
+ * when no interested client exists. */
+int zsend_route_notify_owner(struct route_entry *re, struct prefix *p,
+			     enum zapi_route_notify_owner note)
+{
+	struct zserv *client;
+	struct stream *s;
+	uint8_t blen;
+
+	client = zebra_find_client(re->type, re->instance);
+	if (!client || !client->notify_owner) {
+		if (IS_ZEBRA_DEBUG_PACKET) {
+			char buff[PREFIX_STRLEN];
+
+			zlog_debug(
+				"Not Notifying Owner: %u about prefix %s(%u) %d vrf: %u",
+				re->type, prefix2str(p, buff, sizeof(buff)),
+				re->table, note, re->vrf_id);
+		}
+		return 0;
+	}
+
+	if (IS_ZEBRA_DEBUG_PACKET) {
+		char buff[PREFIX_STRLEN];
+
+		zlog_debug("Notifying Owner: %u about prefix %s(%u) %d vrf: %u",
+			   re->type, prefix2str(p, buff, sizeof(buff)),
+			   re->table, note, re->vrf_id);
+	}
+
+	/* A new stream is already empty; the former stream_reset() call
+	 * was redundant and has been dropped. */
+	s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(s, ZEBRA_ROUTE_NOTIFY_OWNER, re->vrf_id);
+
+	/* NOTE(review): the enum is copied raw (host byte order, host
+	 * enum width); both peers must share the ABI -- confirm. */
+	stream_put(s, &note, sizeof(note));
+
+	stream_putc(s, p->family);
+
+	blen = prefix_blen(p);
+	stream_putc(s, p->prefixlen);
+	stream_put(s, &p->u.prefix, blen);
+
+	stream_putl(s, re->table);
+
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	return zebra_server_send_message(client, s);
+}
+
+/* Notify the daemon that installed PBR rule 'rule' about its result.
+ * The owner is matched by the socket the rule arrived on. */
+void zsend_rule_notify_owner(struct zebra_pbr_rule *rule,
+			     enum zapi_rule_notify_owner note)
+{
+	struct listnode *node;
+	struct zserv *client;
+	struct stream *s;
+
+	if (IS_ZEBRA_DEBUG_PACKET)
+		zlog_debug("%s: Notifying %u", __PRETTY_FUNCTION__,
+			   rule->rule.unique);
+
+	/* Find the owning client by socket; relies on the iteration macro
+	 * leaving 'client' NULL when the list is exhausted. */
+	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client)) {
+		if (rule->sock == client->sock)
+			break;
+	}
+
+	if (!client)
+		return;
+
+	s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(s, ZEBRA_RULE_NOTIFY_OWNER, VRF_DEFAULT);
+	/* Raw enum copy: peers must agree on enum size/endianness. */
+	stream_put(s, &note, sizeof(note));
+	stream_putl(s, rule->rule.seq);
+	stream_putl(s, rule->rule.priority);
+	stream_putl(s, rule->rule.unique);
+	/* ifindex 0 means the rule was not bound to an interface. */
+	if (rule->ifp)
+		stream_putl(s, rule->ifp->ifindex);
+	else
+		stream_putl(s, 0);
+
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	zebra_server_send_message(client, s);
+}
+
+/* Notify the daemon that created ipset 'ipset' (matched by socket)
+ * about its install/removal result. */
+void zsend_ipset_notify_owner(struct zebra_pbr_ipset *ipset,
+			      enum zapi_ipset_notify_owner note)
+{
+	struct listnode *node;
+	struct zserv *client;
+	struct stream *s;
+
+	if (IS_ZEBRA_DEBUG_PACKET)
+		zlog_debug("%s: Notifying %u", __PRETTY_FUNCTION__,
+			   ipset->unique);
+
+	/* Owner lookup by socket; 'client' is NULL when no match. */
+	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client)) {
+		if (ipset->sock == client->sock)
+			break;
+	}
+
+	if (!client)
+		return;
+
+	s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(s, ZEBRA_IPSET_NOTIFY_OWNER, VRF_DEFAULT);
+	stream_put(s, &note, sizeof(note));
+	stream_putl(s, ipset->unique);
+	/* Fixed-width name field. */
+	stream_put(s, ipset->ipset_name, ZEBRA_IPSET_NAME_SIZE);
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	zebra_server_send_message(client, s);
+}
+
+/* Notify the owner of an ipset entry about its install/removal result.
+ * The ipset name is taken via the entry's backpointer. */
+void zsend_ipset_entry_notify_owner(struct zebra_pbr_ipset_entry *ipset,
+				    enum zapi_ipset_entry_notify_owner note)
+{
+	struct listnode *node;
+	struct zserv *client;
+	struct stream *s;
+
+	if (IS_ZEBRA_DEBUG_PACKET)
+		zlog_debug("%s: Notifying %u", __PRETTY_FUNCTION__,
+			   ipset->unique);
+
+	/* Owner lookup by socket; 'client' is NULL when no match. */
+	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client)) {
+		if (ipset->sock == client->sock)
+			break;
+	}
+
+	if (!client)
+		return;
+
+	s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(s, ZEBRA_IPSET_ENTRY_NOTIFY_OWNER, VRF_DEFAULT);
+	stream_put(s, &note, sizeof(note));
+	stream_putl(s, ipset->unique);
+	/* NOTE(review): backpointer is dereferenced unchecked -- presumably
+	 * always set for a live entry; confirm. */
+	stream_put(s, ipset->backpointer->ipset_name, ZEBRA_IPSET_NAME_SIZE);
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	zebra_server_send_message(client, s);
+}
+
+/* Notify the owner of iptable entry 'iptable' (matched by socket)
+ * about its install/removal result. */
+void zsend_iptable_notify_owner(struct zebra_pbr_iptable *iptable,
+				enum zapi_iptable_notify_owner note)
+{
+	struct listnode *node;
+	struct zserv *client;
+	struct stream *s;
+
+	if (IS_ZEBRA_DEBUG_PACKET)
+		zlog_debug("%s: Notifying %u", __PRETTY_FUNCTION__,
+			   iptable->unique);
+
+	/* Owner lookup by socket; 'client' is NULL when no match. */
+	for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client)) {
+		if (iptable->sock == client->sock)
+			break;
+	}
+
+	if (!client)
+		return;
+
+	s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(s, ZEBRA_IPTABLE_NOTIFY_OWNER, VRF_DEFAULT);
+	stream_put(s, &note, sizeof(note));
+	stream_putl(s, iptable->unique);
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	zebra_server_send_message(client, s);
+}
+
+/* Router-id is updated. Send ZEBRA_ROUTER_ID_ADD to client. */
+int zsend_router_id_update(struct zserv *client, struct prefix *p,
+			   vrf_id_t vrf_id)
+{
+	struct stream *msg;
+
+	/* Skip clients not registered for router-id info in this VRF. */
+	if (!vrf_bitmap_check(client->ridinfo, vrf_id))
+		return 0;
+
+	msg = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	/* Message type. */
+	zclient_create_header(msg, ZEBRA_ROUTER_ID_UPDATE, vrf_id);
+
+	/* Router-id as a prefix: family, raw address bytes, length. */
+	stream_putc(msg, p->family);
+	stream_put(msg, &p->u.prefix, prefix_blen(p));
+	stream_putc(msg, p->prefixlen);
+
+	/* Backfill the length word. */
+	stream_putw_at(msg, 0, stream_get_endp(msg));
+
+	return zebra_server_send_message(client, msg);
+}
+
+/*
+ * Function used by Zebra to send a PW status update to LDP daemon
+ */
+int zsend_pw_update(struct zserv *client, struct zebra_pw *pw)
+{
+	struct stream *msg = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(msg, ZEBRA_PW_STATUS_UPDATE, pw->vrf_id);
+
+	/* Fixed-width interface name, then ifindex and status. */
+	stream_write(msg, pw->ifname, IF_NAMESIZE);
+	stream_putl(msg, pw->ifindex);
+	stream_putl(msg, pw->status);
+
+	/* Put length at the first point of the stream. */
+	stream_putw_at(msg, 0, stream_get_endp(msg));
+
+	return zebra_server_send_message(client, msg);
+}
+
+/* Send response to a get label chunk request to client */
+static int zsend_assign_label_chunk_response(struct zserv *client,
+					     vrf_id_t vrf_id,
+					     struct label_manager_chunk *lmc)
+{
+	int ret;
+	struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(s, ZEBRA_GET_LABEL_CHUNK, vrf_id);
+
+	/* An empty body (no chunk) indicates the request failed. */
+	if (lmc) {
+		/* keep */
+		stream_putc(s, lmc->keep);
+		/* start and end labels */
+		stream_putl(s, lmc->start);
+		stream_putl(s, lmc->end);
+	}
+
+	/* Write packet size. */
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	/* Unlike most senders here, this writes synchronously to the
+	 * client socket and frees the stream itself instead of going
+	 * through zebra_server_send_message(). */
+	ret = writen(client->sock, s->data, stream_get_endp(s));
+	stream_free(s);
+	return ret;
+}
+
+/* Send response to a label manager connect request to client */
+static int zsend_label_manager_connect_response(struct zserv *client,
+						vrf_id_t vrf_id,
+						unsigned short result)
+{
+	int ret;
+	struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(s, ZEBRA_LABEL_MANAGER_CONNECT, vrf_id);
+
+	/* result -- only the low byte is encoded. */
+	stream_putc(s, result);
+
+	/* Write packet size. */
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	/* Synchronous write on the client socket (no send queue); the
+	 * stream is freed locally. */
+	ret = writen(client->sock, s->data, stream_get_endp(s));
+	stream_free(s);
+
+	return ret;
+}
+
+/* Send response to a get table chunk request to client */
+static int zsend_assign_table_chunk_response(struct zserv *client,
+					     vrf_id_t vrf_id,
+					     struct table_manager_chunk *tmc)
+{
+	struct stream *msg = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(msg, ZEBRA_GET_TABLE_CHUNK, vrf_id);
+
+	/* An empty body (no chunk) signals failure to the requester. */
+	if (tmc != NULL) {
+		/* start and end table ids */
+		stream_putl(msg, tmc->start);
+		stream_putl(msg, tmc->end);
+	}
+
+	/* Backfill the length word. */
+	stream_putw_at(msg, 0, stream_get_endp(msg));
+
+	return zebra_server_send_message(client, msg);
+}
+
+/* Send the result of a table manager connect request back to the
+ * requesting client. */
+static int zsend_table_manager_connect_response(struct zserv *client,
+						vrf_id_t vrf_id,
+						uint16_t result)
+{
+	struct stream *msg = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	zclient_create_header(msg, ZEBRA_TABLE_MANAGER_CONNECT, vrf_id);
+
+	/* Single result byte.  NOTE(review): 'result' is 16 bits wide but
+	 * only the low byte is encoded -- confirm callers stay <= 255. */
+	stream_putc(msg, result);
+
+	stream_putw_at(msg, 0, stream_get_endp(msg));
+
+	return zebra_server_send_message(client, msg);
+}
+
+/* Inbound message handling ------------------------------------------------ */
+
+/* Map nexthop/import (un)registration ZAPI commands to the rnh type
+ * they operate on; indexed by ZAPI command number. */
+int cmd2type[] = {
+	[ZEBRA_NEXTHOP_REGISTER] = RNH_NEXTHOP_TYPE,
+	[ZEBRA_NEXTHOP_UNREGISTER] = RNH_NEXTHOP_TYPE,
+	[ZEBRA_IMPORT_ROUTE_REGISTER] = RNH_IMPORT_CHECK_TYPE,
+	[ZEBRA_IMPORT_ROUTE_UNREGISTER] = RNH_IMPORT_CHECK_TYPE,
+};
+
+/* Nexthop register: decode one or more (flags, prefix) tuples from the
+ * client and register each for nexthop/import tracking.  Typos in the
+ * warning strings fixed; zlog supplies the trailing newline, so the
+ * embedded "\n" in the debug/error strings has been dropped. */
+static void zread_rnh_register(ZAPI_HANDLER_ARGS)
+{
+	struct rnh *rnh;
+	struct stream *s;
+	struct prefix p;
+	unsigned short l = 0;
+	uint8_t flags = 0;
+	uint16_t type = cmd2type[hdr->command];
+
+	if (IS_ZEBRA_DEBUG_NHT)
+		zlog_debug(
+			"rnh_register msg from client %s: hdr->length=%d, type=%s vrf=%u",
+			zebra_route_string(client->proto), hdr->length,
+			(type == RNH_NEXTHOP_TYPE) ? "nexthop" : "route",
+			zvrf->vrf->vrf_id);
+
+	s = msg;
+
+	client->nh_reg_time = monotime(NULL);
+
+	while (l < hdr->length) {
+		STREAM_GETC(s, flags);
+		STREAM_GETW(s, p.family);
+		STREAM_GETC(s, p.prefixlen);
+		l += 4;
+		if (p.family == AF_INET) {
+			if (p.prefixlen > IPV4_MAX_BITLEN) {
+				zlog_warn(
+					"%s: Specified prefix hdr->length %d is too large for a v4 address",
+					__PRETTY_FUNCTION__, p.prefixlen);
+				return;
+			}
+			STREAM_GET(&p.u.prefix4.s_addr, s, IPV4_MAX_BYTELEN);
+			l += IPV4_MAX_BYTELEN;
+		} else if (p.family == AF_INET6) {
+			if (p.prefixlen > IPV6_MAX_BITLEN) {
+				zlog_warn(
+					"%s: Specified prefix hdr->length %d is too large for a v6 address",
+					__PRETTY_FUNCTION__, p.prefixlen);
+				return;
+			}
+			STREAM_GET(&p.u.prefix6, s, IPV6_MAX_BYTELEN);
+			l += IPV6_MAX_BYTELEN;
+		} else {
+			zlog_err(
+				"rnh_register: Received unknown family type %d",
+				p.family);
+			return;
+		}
+		rnh = zebra_add_rnh(&p, zvrf_id(zvrf), type);
+		/* 'flags' toggles connected-only resolution (nexthop) or
+		 * exact-match (import check) on the shared rnh entry. */
+		if (type == RNH_NEXTHOP_TYPE) {
+			if (flags
+			    && !CHECK_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED))
+				SET_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED);
+			else if (!flags
+				 && CHECK_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED))
+				UNSET_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED);
+		} else if (type == RNH_IMPORT_CHECK_TYPE) {
+			if (flags
+			    && !CHECK_FLAG(rnh->flags, ZEBRA_NHT_EXACT_MATCH))
+				SET_FLAG(rnh->flags, ZEBRA_NHT_EXACT_MATCH);
+			else if (!flags
+				 && CHECK_FLAG(rnh->flags,
+					       ZEBRA_NHT_EXACT_MATCH))
+				UNSET_FLAG(rnh->flags, ZEBRA_NHT_EXACT_MATCH);
+		}
+
+		zebra_add_rnh_client(rnh, client, type, zvrf_id(zvrf));
+		/* Anything not AF_INET/INET6 has been filtered out above */
+		zebra_evaluate_rnh(zvrf_id(zvrf), p.family, 1, type, &p);
+	}
+
+stream_failure:
+	return;
+}
+
+/* Nexthop unregister: decode (flags, prefix) tuples and drop this
+ * client's registrations.  Fixes: the unknown-family error previously
+ * identified itself as "rnh_register"; "to large" typos; stray "\n"
+ * in zlog strings (zlog adds the newline). */
+static void zread_rnh_unregister(ZAPI_HANDLER_ARGS)
+{
+	struct rnh *rnh;
+	struct stream *s;
+	struct prefix p;
+	unsigned short l = 0;
+	uint16_t type = cmd2type[hdr->command];
+
+	if (IS_ZEBRA_DEBUG_NHT)
+		zlog_debug(
+			"rnh_unregister msg from client %s: hdr->length=%d vrf: %u",
+			zebra_route_string(client->proto), hdr->length,
+			zvrf->vrf->vrf_id);
+
+	s = msg;
+
+	while (l < hdr->length) {
+		uint8_t flags;
+
+		/* Flags must be zero on unregister. */
+		STREAM_GETC(s, flags);
+		if (flags != 0)
+			goto stream_failure;
+
+		STREAM_GETW(s, p.family);
+		STREAM_GETC(s, p.prefixlen);
+		l += 4;
+		if (p.family == AF_INET) {
+			if (p.prefixlen > IPV4_MAX_BITLEN) {
+				zlog_warn(
+					"%s: Specified prefix hdr->length %d is too large for a v4 address",
+					__PRETTY_FUNCTION__, p.prefixlen);
+				return;
+			}
+			STREAM_GET(&p.u.prefix4.s_addr, s, IPV4_MAX_BYTELEN);
+			l += IPV4_MAX_BYTELEN;
+		} else if (p.family == AF_INET6) {
+			if (p.prefixlen > IPV6_MAX_BITLEN) {
+				zlog_warn(
+					"%s: Specified prefix hdr->length %d is too large for a v6 address",
+					__PRETTY_FUNCTION__, p.prefixlen);
+				return;
+			}
+			STREAM_GET(&p.u.prefix6, s, IPV6_MAX_BYTELEN);
+			l += IPV6_MAX_BYTELEN;
+		} else {
+			zlog_err(
+				"rnh_unregister: Received unknown family type %d",
+				p.family);
+			return;
+		}
+		rnh = zebra_lookup_rnh(&p, zvrf_id(zvrf), type);
+		if (rnh) {
+			client->nh_dereg_time = monotime(NULL);
+			zebra_remove_rnh_client(rnh, client, type);
+		}
+	}
+stream_failure:
+	return;
+}
+
+#define ZEBRA_MIN_FEC_LENGTH 5
+
+/* FEC register: decode (flags, prefix[, label-index]) tuples and
+ * register each FEC in the default VRF.  "is to long" typo fixed and
+ * stray "\n" removed from zlog strings (zlog adds the newline). */
+static void zread_fec_register(ZAPI_HANDLER_ARGS)
+{
+	struct stream *s;
+	unsigned short l = 0;
+	struct prefix p;
+	uint16_t flags;
+	uint32_t label_index = MPLS_INVALID_LABEL_INDEX;
+
+	s = msg;
+	/* FEC registration always operates on the default VRF. */
+	zvrf = vrf_info_lookup(VRF_DEFAULT);
+	if (!zvrf)
+		return;
+
+	/*
+	 * The minimum amount of data that can be sent for one fec
+	 * registration
+	 */
+	if (hdr->length < ZEBRA_MIN_FEC_LENGTH) {
+		zlog_err(
+			"fec_register: Received a fec register of hdr->length %d, it is of insufficient size to properly decode",
+			hdr->length);
+		return;
+	}
+
+	while (l < hdr->length) {
+		STREAM_GETW(s, flags);
+		memset(&p, 0, sizeof(p));
+		STREAM_GETW(s, p.family);
+		if (p.family != AF_INET && p.family != AF_INET6) {
+			zlog_err(
+				"fec_register: Received unknown family type %d",
+				p.family);
+			return;
+		}
+		STREAM_GETC(s, p.prefixlen);
+		if ((p.family == AF_INET && p.prefixlen > IPV4_MAX_BITLEN)
+		    || (p.family == AF_INET6
+			&& p.prefixlen > IPV6_MAX_BITLEN)) {
+			zlog_warn(
+				"%s: Specified prefix hdr->length: %d is too long for %d",
+				__PRETTY_FUNCTION__, p.prefixlen, p.family);
+			return;
+		}
+		l += 5;
+		STREAM_GET(&p.u.prefix, s, PSIZE(p.prefixlen));
+		l += PSIZE(p.prefixlen);
+		/* Optional label index follows the prefix. */
+		if (flags & ZEBRA_FEC_REGISTER_LABEL_INDEX) {
+			STREAM_GETL(s, label_index);
+			l += 4;
+		} else
+			label_index = MPLS_INVALID_LABEL_INDEX;
+		zebra_mpls_fec_register(zvrf, &p, label_index, client);
+	}
+
+stream_failure:
+	return;
+}
+
+/* FEC unregister: decode (flags, prefix) tuples and drop this client's
+ * FEC registrations in the default VRF. */
+static void zread_fec_unregister(ZAPI_HANDLER_ARGS)
+{
+	struct stream *s;
+	unsigned short l = 0;
+	struct prefix p;
+	uint16_t flags;
+
+	s = msg;
+	/* FEC (un)registration always operates on the default VRF. */
+	zvrf = vrf_info_lookup(VRF_DEFAULT);
+	if (!zvrf)
+		return;
+
+	/*
+	 * The minimum amount of data that can be sent for one
+	 * fec unregistration
+	 */
+	if (hdr->length < ZEBRA_MIN_FEC_LENGTH) {
+		zlog_err(
+			"fec_unregister: Received a fec unregister of hdr->length %d, it is of insufficient size to properly decode",
+			hdr->length);
+		return;
+	}
+
+	while (l < hdr->length) {
+		/* Flags must be zero on unregister. */
+		STREAM_GETW(s, flags);
+		if (flags != 0)
+			goto stream_failure;
+
+		memset(&p, 0, sizeof(p));
+		STREAM_GETW(s, p.family);
+		if (p.family != AF_INET && p.family != AF_INET6) {
+			zlog_err(
+				"fec_unregister: Received unknown family type %d\n",
+				p.family);
+			return;
+		}
+		STREAM_GETC(s, p.prefixlen);
+		/* NOTE(review): the second %d below prints the address
+		 * family, not a length limit -- the message reads oddly;
+		 * consider rewording. */
+		if ((p.family == AF_INET && p.prefixlen > IPV4_MAX_BITLEN)
+		    || (p.family == AF_INET6
+			&& p.prefixlen > IPV6_MAX_BITLEN)) {
+			zlog_warn(
+				"%s: Received prefix hdr->length %d which is greater than %d can support",
+				__PRETTY_FUNCTION__, p.prefixlen, p.family);
+			return;
+		}
+		l += 5;
+		STREAM_GET(&p.u.prefix, s, PSIZE(p.prefixlen));
+		l += PSIZE(p.prefixlen);
+		zebra_mpls_fec_unregister(zvrf, &p, client);
+	}
+
+stream_failure:
+	return;
+}
+
+
+/*
+ * Register zebra server interface information.
+ * Send current all interface and address information.
+ */
+static void zread_interface_add(ZAPI_HANDLER_ARGS)
+{
+	struct interface *ifp;
+	struct vrf *vrf;
+
+	/* Remember this client wants interface info for the VRF. */
+	vrf_bitmap_set(client->ifinfo, zvrf_id(zvrf));
+
+	/* Replay state of every active interface in every VRF. */
+	RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
+		FOR_ALL_INTERFACES (vrf, ifp) {
+			/* Skip pseudo interface. */
+			if (!CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE))
+				continue;
+
+			zsend_interface_add(client, ifp);
+			zsend_interface_addresses(client, ifp);
+		}
+	}
+}
+
+/* Unregister zebra server interface information. */
+static void zread_interface_delete(ZAPI_HANDLER_ARGS)
+{
+	/* Stop sending interface updates for this VRF to the client. */
+	vrf_bitmap_unset(client->ifinfo, zvrf_id(zvrf));
+}
+
+/* Warn (once per call site) when a prefix carries more nexthops than
+ * the configured ECMP limit allows us to use. */
+void zserv_nexthop_num_warn(const char *caller, const struct prefix *p,
+			    const unsigned int nexthop_num)
+{
+	char pbuf[PREFIX2STR_BUFFER];
+
+	/* Nothing to report when within the multipath limit. */
+	if (nexthop_num <= multipath_num)
+		return;
+
+	prefix2str(p, pbuf, sizeof(pbuf));
+	zlog_warn(
+		"%s: Prefix %s has %d nexthops, but we can only use the first %d",
+		caller, pbuf, nexthop_num, multipath_num);
+}
+
+/* Decode a ZEBRA_ROUTE_ADD message, build a route_entry with its
+ * nexthops (including EVPN remote-VTEP and MPLS label handling) and
+ * install it into the RIB via rib_add_multipath(). */
+static void zread_route_add(ZAPI_HANDLER_ARGS)
+{
+	struct stream *s;
+	struct zapi_route api;
+	struct zapi_nexthop *api_nh;
+	afi_t afi;
+	struct prefix_ipv6 *src_p = NULL;
+	struct route_entry *re;
+	struct nexthop *nexthop = NULL;
+	int i, ret;
+	vrf_id_t vrf_id = 0;
+	struct ipaddr vtep_ip;
+
+	s = msg;
+	if (zapi_route_decode(s, &api) < 0) {
+		if (IS_ZEBRA_DEBUG_RECV)
+			zlog_debug("%s: Unable to decode zapi_route sent",
+				   __PRETTY_FUNCTION__);
+		return;
+	}
+
+	if (IS_ZEBRA_DEBUG_RECV) {
+		char buf_prefix[PREFIX_STRLEN];
+
+		prefix2str(&api.prefix, buf_prefix, sizeof(buf_prefix));
+		zlog_debug("%s: p=%s, ZAPI_MESSAGE_LABEL: %sset, flags=0x%x",
+			   __func__, buf_prefix,
+			   (CHECK_FLAG(api.message, ZAPI_MESSAGE_LABEL) ? ""
+								       : "un"),
+			   api.flags);
+	}
+
+	/* Allocate new route. */
+	vrf_id = zvrf_id(zvrf);
+	re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
+	re->type = api.type;
+	re->instance = api.instance;
+	re->flags = api.flags;
+	re->uptime = time(NULL);
+	re->vrf_id = vrf_id;
+	/* An explicit kernel table id is only honored in the default VRF. */
+	if (api.tableid && vrf_id == VRF_DEFAULT)
+		re->table = api.tableid;
+	else
+		re->table = zvrf->table_id;
+
+	/*
+	 * TBD should _all_ of the nexthop add operations use
+	 * api_nh->vrf_id instead of re->vrf_id ? I only changed
+	 * for cases NEXTHOP_TYPE_IPV4 and NEXTHOP_TYPE_IPV6.
+	 */
+	if (CHECK_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP))
+		for (i = 0; i < api.nexthop_num; i++) {
+			api_nh = &api.nexthops[i];
+			ifindex_t ifindex = 0;
+
+			if (IS_ZEBRA_DEBUG_RECV)
+				zlog_debug("nh type %d", api_nh->type);
+
+			switch (api_nh->type) {
+			case NEXTHOP_TYPE_IFINDEX:
+				nexthop = route_entry_nexthop_ifindex_add(
+					re, api_nh->ifindex, api_nh->vrf_id);
+				break;
+			case NEXTHOP_TYPE_IPV4:
+				if (IS_ZEBRA_DEBUG_RECV) {
+					char nhbuf[INET6_ADDRSTRLEN] = {0};
+
+					inet_ntop(AF_INET, &api_nh->gate.ipv4,
+						  nhbuf, INET6_ADDRSTRLEN);
+					zlog_debug("%s: nh=%s, vrf_id=%d",
+						   __func__, nhbuf,
+						   api_nh->vrf_id);
+				}
+				nexthop = route_entry_nexthop_ipv4_add(
+					re, &api_nh->gate.ipv4, NULL,
+					api_nh->vrf_id);
+				break;
+			case NEXTHOP_TYPE_IPV4_IFINDEX:
+
+				/* For EVPN routes the nexthop interface is
+				 * the L3VNI SVI, not what the client sent. */
+				memset(&vtep_ip, 0, sizeof(struct ipaddr));
+				if (CHECK_FLAG(api.flags,
+					       ZEBRA_FLAG_EVPN_ROUTE)) {
+					ifindex = get_l3vni_svi_ifindex(vrf_id);
+				} else {
+					ifindex = api_nh->ifindex;
+				}
+
+				if (IS_ZEBRA_DEBUG_RECV) {
+					char nhbuf[INET6_ADDRSTRLEN] = {0};
+
+					inet_ntop(AF_INET, &api_nh->gate.ipv4,
+						  nhbuf, INET6_ADDRSTRLEN);
+					zlog_debug(
+						"%s: nh=%s, vrf_id=%d (re->vrf_id=%d), ifindex=%d",
+						__func__, nhbuf, api_nh->vrf_id,
+						re->vrf_id, ifindex);
+				}
+				nexthop = route_entry_nexthop_ipv4_ifindex_add(
+					re, &api_nh->gate.ipv4, NULL, ifindex,
+					api_nh->vrf_id);
+
+				/* if this an EVPN route entry,
+				 * program the nh as neigh
+				 */
+				if (CHECK_FLAG(api.flags,
+					       ZEBRA_FLAG_EVPN_ROUTE)) {
+					SET_FLAG(nexthop->flags,
+						 NEXTHOP_FLAG_EVPN_RVTEP);
+					vtep_ip.ipa_type = IPADDR_V4;
+					memcpy(&(vtep_ip.ipaddr_v4),
+					       &(api_nh->gate.ipv4),
+					       sizeof(struct in_addr));
+					zebra_vxlan_evpn_vrf_route_add(
+						vrf_id, &api.rmac, &vtep_ip,
+						&api.prefix);
+				}
+				break;
+			case NEXTHOP_TYPE_IPV6:
+				nexthop = route_entry_nexthop_ipv6_add(
+					re, &api_nh->gate.ipv6, api_nh->vrf_id);
+				break;
+			case NEXTHOP_TYPE_IPV6_IFINDEX:
+				/* Same EVPN SVI-ifindex override as the v4
+				 * ifindex case above. */
+				memset(&vtep_ip, 0, sizeof(struct ipaddr));
+				if (CHECK_FLAG(api.flags,
+					       ZEBRA_FLAG_EVPN_ROUTE)) {
+					ifindex = get_l3vni_svi_ifindex(vrf_id);
+				} else {
+					ifindex = api_nh->ifindex;
+				}
+
+				nexthop = route_entry_nexthop_ipv6_ifindex_add(
+					re, &api_nh->gate.ipv6, ifindex,
+					api_nh->vrf_id);
+
+				/* if this an EVPN route entry,
+				 * program the nh as neigh
+				 */
+				if (CHECK_FLAG(api.flags,
+					       ZEBRA_FLAG_EVPN_ROUTE)) {
+					SET_FLAG(nexthop->flags,
+						 NEXTHOP_FLAG_EVPN_RVTEP);
+					vtep_ip.ipa_type = IPADDR_V6;
+					memcpy(&vtep_ip.ipaddr_v6,
+					       &(api_nh->gate.ipv6),
+					       sizeof(struct in6_addr));
+					zebra_vxlan_evpn_vrf_route_add(
+						vrf_id, &api.rmac, &vtep_ip,
+						&api.prefix);
+				}
+				break;
+			case NEXTHOP_TYPE_BLACKHOLE:
+				nexthop = route_entry_nexthop_blackhole_add(
+					re, api_nh->bh_type);
+				break;
+			}
+
+			/* Abort the whole add if any nexthop failed. */
+			if (!nexthop) {
+				zlog_warn(
+					"%s: Nexthops Specified: %d but we failed to properly create one",
+					__PRETTY_FUNCTION__, api.nexthop_num);
+				nexthops_free(re->ng.nexthop);
+				XFREE(MTYPE_RE, re);
+				return;
+			}
+			/* MPLS labels for BGP-LU or Segment Routing */
+			if (CHECK_FLAG(api.message, ZAPI_MESSAGE_LABEL)
+			    && api_nh->type != NEXTHOP_TYPE_IFINDEX
+			    && api_nh->type != NEXTHOP_TYPE_BLACKHOLE) {
+				enum lsp_types_t label_type;
+
+				label_type =
+					lsp_type_from_re_type(client->proto);
+
+				if (IS_ZEBRA_DEBUG_RECV) {
+					zlog_debug(
+						"%s: adding %d labels of type %d (1st=%u)",
+						__func__, api_nh->label_num,
+						label_type, api_nh->labels[0]);
+				}
+
+				nexthop_add_labels(nexthop, label_type,
+						   api_nh->label_num,
+						   &api_nh->labels[0]);
+			}
+		}
+
+	/* Optional attributes. */
+	if (CHECK_FLAG(api.message, ZAPI_MESSAGE_DISTANCE))
+		re->distance = api.distance;
+	if (CHECK_FLAG(api.message, ZAPI_MESSAGE_METRIC))
+		re->metric = api.metric;
+	if (CHECK_FLAG(api.message, ZAPI_MESSAGE_TAG))
+		re->tag = api.tag;
+	if (CHECK_FLAG(api.message, ZAPI_MESSAGE_MTU))
+		re->mtu = api.mtu;
+
+	/* A source prefix is only meaningful for IPv6. */
+	afi = family2afi(api.prefix.family);
+	if (afi != AFI_IP6 && CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX)) {
+		zlog_warn("%s: Received SRC Prefix but afi is not v6",
+			  __PRETTY_FUNCTION__);
+		nexthops_free(re->ng.nexthop);
+		XFREE(MTYPE_RE, re);
+		return;
+	}
+	if (CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX))
+		src_p = &api.src_prefix;
+
+	ret = rib_add_multipath(afi, api.safi, &api.prefix, src_p, re);
+
+	/* Stats: positive means a fresh add, negative an update. */
+	switch (api.prefix.family) {
+	case AF_INET:
+		if (ret > 0)
+			client->v4_route_add_cnt++;
+		else if (ret < 0)
+			client->v4_route_upd8_cnt++;
+		break;
+	case AF_INET6:
+		if (ret > 0)
+			client->v6_route_add_cnt++;
+		else if (ret < 0)
+			client->v6_route_upd8_cnt++;
+		break;
+	}
+}
+
+/* Decode a ZEBRA_ROUTE_DELETE message and remove the matching route
+ * from the RIB. */
+static void zread_route_del(ZAPI_HANDLER_ARGS)
+{
+	struct stream *s;
+	struct zapi_route api;
+	afi_t afi;
+	struct prefix_ipv6 *src_p = NULL;
+	uint32_t table_id;
+
+	s = msg;
+	if (zapi_route_decode(s, &api) < 0)
+		return;
+
+	/* A source prefix is only meaningful for IPv6. */
+	afi = family2afi(api.prefix.family);
+	if (afi != AFI_IP6 && CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX)) {
+		zlog_warn("%s: Received a src prefix while afi is not v6",
+			  __PRETTY_FUNCTION__);
+		return;
+	}
+	if (CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX))
+		src_p = &api.src_prefix;
+
+	/* NOTE(review): the table check uses api.vrf_id while the delete
+	 * below passes zvrf_id(zvrf) -- presumably these always match for
+	 * a well-behaved client; confirm. */
+	if (api.vrf_id == VRF_DEFAULT && api.tableid != 0)
+		table_id = api.tableid;
+	else
+		table_id = zvrf->table_id;
+
+	rib_delete(afi, api.safi, zvrf_id(zvrf), api.type, api.instance,
+		   api.flags, &api.prefix, src_p, NULL, table_id, api.metric,
+		   false, &api.rmac);
+
+	/* Stats */
+	switch (api.prefix.family) {
+	case AF_INET:
+		client->v4_route_del_cnt++;
+		break;
+	case AF_INET6:
+		client->v6_route_del_cnt++;
+		break;
+	}
+}
+
+/* This function support multiple nexthop. */
+/*
+ * Parse the ZEBRA_IPV4_ROUTE_ADD sent from client. Update re and
+ * add kernel route.
+ */
+static void zread_ipv4_add(ZAPI_HANDLER_ARGS)
+{
+ int i;
+ struct route_entry *re;
+ struct prefix p;
+ uint8_t message;
+ struct in_addr nhop_addr;
+ uint8_t nexthop_num;
+ uint8_t nexthop_type;
+ struct stream *s;
+ ifindex_t ifindex;
+ safi_t safi;
+ int ret;
+ enum lsp_types_t label_type = ZEBRA_LSP_NONE;
+ mpls_label_t label;
+ struct nexthop *nexthop;
+ enum blackhole_type bh_type = BLACKHOLE_NULL;
+
+ /* Get input stream. */
+ s = msg;
+
+ /* Allocate new re. */
+ re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
+
+ /* Type, flags, message. */
+ STREAM_GETC(s, re->type);
+ if (re->type > ZEBRA_ROUTE_MAX) {
+ zlog_warn("%s: Specified route type %d is not a legal value\n",
+ __PRETTY_FUNCTION__, re->type);
+ XFREE(MTYPE_RE, re);
+ return;
+ }
+ STREAM_GETW(s, re->instance);
+ STREAM_GETL(s, re->flags);
+ STREAM_GETC(s, message);
+ STREAM_GETW(s, safi);
+ re->uptime = time(NULL);
+
+ /* IPv4 prefix. */
+ memset(&p, 0, sizeof(struct prefix_ipv4));
+ p.family = AF_INET;
+ STREAM_GETC(s, p.prefixlen);
+ if (p.prefixlen > IPV4_MAX_BITLEN) {
+ zlog_warn(
+ "%s: Specified prefix length %d is greater than what v4 can be",
+ __PRETTY_FUNCTION__, p.prefixlen);
+ XFREE(MTYPE_RE, re);
+ return;
+ }
+ STREAM_GET(&p.u.prefix4, s, PSIZE(p.prefixlen));
+
+ /* VRF ID */
+ re->vrf_id = zvrf_id(zvrf);
+
+ /* Nexthop parse. */
+ if (CHECK_FLAG(message, ZAPI_MESSAGE_NEXTHOP)) {
+ STREAM_GETC(s, nexthop_num);
+ zserv_nexthop_num_warn(__func__, (const struct prefix *)&p,
+ nexthop_num);
+
+ if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL))
+ label_type = lsp_type_from_re_type(client->proto);
+
+ for (i = 0; i < nexthop_num; i++) {
+ STREAM_GETC(s, nexthop_type);
+
+ switch (nexthop_type) {
+ case NEXTHOP_TYPE_IFINDEX:
+ STREAM_GETL(s, ifindex);
+ route_entry_nexthop_ifindex_add(re, ifindex,
+ re->vrf_id);
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ STREAM_GET(&nhop_addr.s_addr, s,
+ IPV4_MAX_BYTELEN);
+ nexthop = route_entry_nexthop_ipv4_add(
+ re, &nhop_addr, NULL, re->vrf_id);
+ /*
+ * For labeled-unicast, each nexthop is followed
+ * by the label.
+ */
+ if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL)) {
+ STREAM_GETL(s, label);
+ nexthop_add_labels(nexthop, label_type,
+ 1, &label);
+ }
+ break;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ STREAM_GET(&nhop_addr.s_addr, s,
+ IPV4_MAX_BYTELEN);
+ STREAM_GETL(s, ifindex);
+ route_entry_nexthop_ipv4_ifindex_add(
+ re, &nhop_addr, NULL, ifindex,
+ re->vrf_id);
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ zlog_warn(
+ "%s: Please use ZEBRA_ROUTE_ADD if you want to pass v6 nexthops",
+ __PRETTY_FUNCTION__);
+ nexthops_free(re->ng.nexthop);
+ XFREE(MTYPE_RE, re);
+ return;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ route_entry_nexthop_blackhole_add(re, bh_type);
+ break;
+ default:
+ zlog_warn(
+ "%s: Specified nexthop type: %d does not exist",
+ __PRETTY_FUNCTION__, nexthop_type);
+ nexthops_free(re->ng.nexthop);
+ XFREE(MTYPE_RE, re);
+ return;
+ }
+ }
+ }
+
+ /* Distance. */
+ if (CHECK_FLAG(message, ZAPI_MESSAGE_DISTANCE))
+ STREAM_GETC(s, re->distance);
+
+ /* Metric. */
+ if (CHECK_FLAG(message, ZAPI_MESSAGE_METRIC))
+ STREAM_GETL(s, re->metric);
+
+ /* Tag */
+ if (CHECK_FLAG(message, ZAPI_MESSAGE_TAG))
+ STREAM_GETL(s, re->tag);
+ else
+ re->tag = 0;
+
+ if (CHECK_FLAG(message, ZAPI_MESSAGE_MTU))
+ STREAM_GETL(s, re->mtu);
+ else
+ re->mtu = 0;
+
+ /* Table */
+ re->table = zvrf->table_id;
+
+ ret = rib_add_multipath(AFI_IP, safi, &p, NULL, re);
+
+ /* Stats */
+ if (ret > 0)
+ client->v4_route_add_cnt++;
+ else if (ret < 0)
+ client->v4_route_upd8_cnt++;
+
+ return;
+
+stream_failure:
+ nexthops_free(re->ng.nexthop);
+ XFREE(MTYPE_RE, re);
+}
+
/*
 * Zebra server IPv4 prefix delete function (legacy ZEBRA_IPV4_ROUTE_DELETE).
 * Only the header fields and the prefix are consumed; any nexthop payload
 * is irrelevant for a delete and is left unread.
 */
static void zread_ipv4_delete(ZAPI_HANDLER_ARGS)
{
	struct stream *s;
	struct zapi_ipv4 api;
	struct prefix p;
	uint32_t table_id;

	s = msg;

	/* Type, flags, message. */
	STREAM_GETC(s, api.type);
	STREAM_GETW(s, api.instance);
	STREAM_GETL(s, api.flags);
	STREAM_GETC(s, api.message);
	STREAM_GETW(s, api.safi);

	/* IPv4 prefix. */
	memset(&p, 0, sizeof(struct prefix));
	p.family = AF_INET;
	STREAM_GETC(s, p.prefixlen);
	/* Bounds-check before PSIZE() so we never overread p.u.prefix4. */
	if (p.prefixlen > IPV4_MAX_BITLEN) {
		zlog_warn("%s: Passed in prefixlen %d is impossible",
			  __PRETTY_FUNCTION__, p.prefixlen);
		return;
	}
	STREAM_GET(&p.u.prefix4, s, PSIZE(p.prefixlen));

	/* Deletes always target the VRF's configured table. */
	table_id = zvrf->table_id;

	rib_delete(AFI_IP, api.safi, zvrf_id(zvrf), api.type, api.instance,
		   api.flags, &p, NULL, NULL, table_id, 0, false, NULL);
	client->v4_route_del_cnt++;

stream_failure:
	return;
}
+
/*
 * MRIB Nexthop lookup for IPv4: resolve the queried address against the
 * multicast RIB and send the result (possibly a miss, re == NULL) back
 * to the requesting client.
 */
static void zread_ipv4_nexthop_lookup_mrib(ZAPI_HANDLER_ARGS)
{
	struct in_addr addr;
	struct route_entry *re;

	STREAM_GET(&addr.s_addr, msg, IPV4_MAX_BYTELEN);
	re = rib_match_ipv4_multicast(zvrf_id(zvrf), addr, NULL);
	zsend_ipv4_nexthop_lookup_mrib(client, addr, re, zvrf);

stream_failure:
	return;
}
+
/*
 * Parse the legacy ZEBRA_IPV4_ROUTE_IPV6_NEXTHOP_ADD message: an IPv4
 * prefix whose nexthops are IPv6 addresses and/or interface indices.
 * Nexthop addresses and ifindices arrive as two independent lists and
 * are paired up positionally after parsing (see the coalescing loop).
 *
 * NOTE(review): the route-type check uses '>' but ZEBRA_ROUTE_MAX is the
 * enum sentinel, so '>=' looks like the intended comparison -- confirm.
 * NOTE(review): rib_add_multipath() is called with AFI_IP6 although the
 * prefix is AF_INET; this mirrors the historical encoding of this
 * message -- verify against the RIB's afi handling before changing.
 */
static void zread_ipv4_route_ipv6_nexthop_add(ZAPI_HANDLER_ARGS)
{
	unsigned int i;
	struct stream *s;
	struct in6_addr nhop_addr;
	struct route_entry *re;
	uint8_t message;
	uint8_t nexthop_num;
	uint8_t nexthop_type;
	struct prefix p;
	safi_t safi;
	/* Static scratch arrays; safe because zebra parses one message at
	 * a time on a single thread. */
	static struct in6_addr nexthops[MULTIPATH_NUM];
	static unsigned int ifindices[MULTIPATH_NUM];
	int ret;
	static mpls_label_t labels[MULTIPATH_NUM];
	enum lsp_types_t label_type = ZEBRA_LSP_NONE;
	mpls_label_t label;
	struct nexthop *nexthop;
	enum blackhole_type bh_type = BLACKHOLE_NULL;

	/* Get input stream. */
	s = msg;

	memset(&nhop_addr, 0, sizeof(struct in6_addr));

	/* Allocate new re. */
	re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));

	/* Type, flags, message. */
	STREAM_GETC(s, re->type);
	if (re->type > ZEBRA_ROUTE_MAX) {
		zlog_warn("%s: Specified route type: %d is not a legal value\n",
			  __PRETTY_FUNCTION__, re->type);
		XFREE(MTYPE_RE, re);
		return;
	}
	STREAM_GETW(s, re->instance);
	STREAM_GETL(s, re->flags);
	STREAM_GETC(s, message);
	STREAM_GETW(s, safi);
	re->uptime = time(NULL);

	/* IPv4 prefix. */
	memset(&p, 0, sizeof(struct prefix_ipv4));
	p.family = AF_INET;
	STREAM_GETC(s, p.prefixlen);
	if (p.prefixlen > IPV4_MAX_BITLEN) {
		zlog_warn(
			"%s: Prefix Length %d is greater than what a v4 address can use",
			__PRETTY_FUNCTION__, p.prefixlen);
		XFREE(MTYPE_RE, re);
		return;
	}
	STREAM_GET(&p.u.prefix4, s, PSIZE(p.prefixlen));

	/* VRF ID */
	re->vrf_id = zvrf_id(zvrf);

	/*
	 * We need to give nh-addr, nh-ifindex with the same next-hop object
	 * to the re to ensure that IPv6 multipathing works; need to coalesce
	 * these. Clients should send the same number of paired set of
	 * next-hop-addr/next-hop-ifindices.
	 */
	if (CHECK_FLAG(message, ZAPI_MESSAGE_NEXTHOP)) {
		unsigned int nh_count = 0;
		unsigned int if_count = 0;
		unsigned int max_nh_if = 0;

		STREAM_GETC(s, nexthop_num);
		zserv_nexthop_num_warn(__func__, (const struct prefix *)&p,
				       nexthop_num);

		if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL))
			label_type = lsp_type_from_re_type(client->proto);

		for (i = 0; i < nexthop_num; i++) {
			STREAM_GETC(s, nexthop_type);

			switch (nexthop_type) {
			case NEXTHOP_TYPE_IPV6:
				STREAM_GET(&nhop_addr, s, 16);
				/* Excess nexthops beyond MULTIPATH_NUM are
				 * parsed but dropped. */
				if (nh_count < MULTIPATH_NUM) {
					/*
					 * For labeled-unicast, each nexthop is
					 * followed by the label.
					 */
					if (CHECK_FLAG(message,
						       ZAPI_MESSAGE_LABEL)) {
						STREAM_GETL(s, label);
						labels[nh_count] = label;
					}
					nexthops[nh_count] = nhop_addr;
					nh_count++;
				}
				break;
			case NEXTHOP_TYPE_IFINDEX:
				if (if_count < multipath_num)
					STREAM_GETL(s, ifindices[if_count++]);
				break;
			case NEXTHOP_TYPE_BLACKHOLE:
				route_entry_nexthop_blackhole_add(re, bh_type);
				break;
			default:
				zlog_warn(
					"%s: Please use ZEBRA_ROUTE_ADD if you want to pass non v6 nexthops",
					__PRETTY_FUNCTION__);
				nexthops_free(re->ng.nexthop);
				XFREE(MTYPE_RE, re);
				return;
			}
		}

		/* Pair up the i-th address with the i-th ifindex; leftovers
		 * on either side become address-only or ifindex-only
		 * nexthops. */
		max_nh_if = (nh_count > if_count) ? nh_count : if_count;
		for (i = 0; i < max_nh_if; i++) {
			if ((i < nh_count)
			    && !IN6_IS_ADDR_UNSPECIFIED(&nexthops[i])) {
				if ((i < if_count) && ifindices[i])
					nexthop =
						route_entry_nexthop_ipv6_ifindex_add(
							re, &nexthops[i],
							ifindices[i],
							re->vrf_id);
				else
					nexthop = route_entry_nexthop_ipv6_add(
						re, &nexthops[i], re->vrf_id);

				if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL))
					nexthop_add_labels(nexthop, label_type,
							   1, &labels[i]);
			} else {
				if ((i < if_count) && ifindices[i])
					route_entry_nexthop_ifindex_add(
						re, ifindices[i], re->vrf_id);
			}
		}
	}

	/* Distance. */
	if (CHECK_FLAG(message, ZAPI_MESSAGE_DISTANCE))
		STREAM_GETC(s, re->distance);

	/* Metric. */
	if (CHECK_FLAG(message, ZAPI_MESSAGE_METRIC))
		STREAM_GETL(s, re->metric);

	/* Tag */
	if (CHECK_FLAG(message, ZAPI_MESSAGE_TAG))
		STREAM_GETL(s, re->tag);
	else
		re->tag = 0;

	if (CHECK_FLAG(message, ZAPI_MESSAGE_MTU))
		STREAM_GETL(s, re->mtu);
	else
		re->mtu = 0;

	/* Table */
	re->table = zvrf->table_id;

	ret = rib_add_multipath(AFI_IP6, safi, &p, NULL, re);
	/* Stats: the prefix is v4, so the v4 counters are the right ones. */
	if (ret > 0)
		client->v4_route_add_cnt++;
	else if (ret < 0)
		client->v4_route_upd8_cnt++;

	return;

stream_failure:
	/* Truncated message: free the partially-built entry. */
	nexthops_free(re->ng.nexthop);
	XFREE(MTYPE_RE, re);
}
+
/*
 * Parse the legacy ZEBRA_IPV6_ROUTE_ADD message and install the route.
 * Like zread_ipv4_route_ipv6_nexthop_add, IPv6 nexthop addresses and
 * bare ifindices arrive as independent lists that are paired up
 * positionally after parsing; NEXTHOP_TYPE_IPV6_IFINDEX nexthops are
 * installed directly.  Optionally carries a source prefix (SRCPFX) for
 * IPv6 source-specific routing.
 *
 * NOTE(review): the route-type check uses '>' but ZEBRA_ROUTE_MAX is the
 * enum sentinel, so '>=' looks like the intended comparison -- confirm.
 */
static void zread_ipv6_add(ZAPI_HANDLER_ARGS)
{
	unsigned int i;
	struct stream *s;
	struct in6_addr nhop_addr;
	ifindex_t ifindex;
	struct route_entry *re;
	uint8_t message;
	uint8_t nexthop_num;
	uint8_t nexthop_type;
	struct prefix p;
	struct prefix_ipv6 src_p, *src_pp;
	safi_t safi;
	/* Static scratch arrays; safe because messages are parsed one at a
	 * time on a single thread. */
	static struct in6_addr nexthops[MULTIPATH_NUM];
	static unsigned int ifindices[MULTIPATH_NUM];
	int ret;
	static mpls_label_t labels[MULTIPATH_NUM];
	enum lsp_types_t label_type = ZEBRA_LSP_NONE;
	mpls_label_t label;
	struct nexthop *nexthop;
	enum blackhole_type bh_type = BLACKHOLE_NULL;

	/* Get input stream. */
	s = msg;

	memset(&nhop_addr, 0, sizeof(struct in6_addr));

	/* Allocate new re. */
	re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));

	/* Type, flags, message. */
	STREAM_GETC(s, re->type);
	if (re->type > ZEBRA_ROUTE_MAX) {
		zlog_warn("%s: Specified route type: %d is not a legal value\n",
			  __PRETTY_FUNCTION__, re->type);
		XFREE(MTYPE_RE, re);
		return;
	}
	STREAM_GETW(s, re->instance);
	STREAM_GETL(s, re->flags);
	STREAM_GETC(s, message);
	STREAM_GETW(s, safi);
	re->uptime = time(NULL);

	/* IPv6 prefix. */
	memset(&p, 0, sizeof(p));
	p.family = AF_INET6;
	STREAM_GETC(s, p.prefixlen);
	/* Bounds-check before PSIZE() so we never overread p.u.prefix6. */
	if (p.prefixlen > IPV6_MAX_BITLEN) {
		zlog_warn(
			"%s: Specified prefix length %d is to large for v6 prefix",
			__PRETTY_FUNCTION__, p.prefixlen);
		XFREE(MTYPE_RE, re);
		return;
	}
	STREAM_GET(&p.u.prefix6, s, PSIZE(p.prefixlen));

	/* Optional source prefix for source-specific routing. */
	if (CHECK_FLAG(message, ZAPI_MESSAGE_SRCPFX)) {
		memset(&src_p, 0, sizeof(src_p));
		src_p.family = AF_INET6;
		STREAM_GETC(s, src_p.prefixlen);
		if (src_p.prefixlen > IPV6_MAX_BITLEN) {
			zlog_warn(
				"%s: Specified src prefix length %d is to large for v6 prefix",
				__PRETTY_FUNCTION__, src_p.prefixlen);
			XFREE(MTYPE_RE, re);
			return;
		}
		STREAM_GET(&src_p.prefix, s, PSIZE(src_p.prefixlen));
		src_pp = &src_p;
	} else
		src_pp = NULL;

	/* VRF ID */
	re->vrf_id = zvrf_id(zvrf);

	/*
	 * We need to give nh-addr, nh-ifindex with the same next-hop object
	 * to the re to ensure that IPv6 multipathing works; need to coalesce
	 * these. Clients should send the same number of paired set of
	 * next-hop-addr/next-hop-ifindices.
	 */
	if (CHECK_FLAG(message, ZAPI_MESSAGE_NEXTHOP)) {
		unsigned int nh_count = 0;
		unsigned int if_count = 0;
		unsigned int max_nh_if = 0;

		STREAM_GETC(s, nexthop_num);
		zserv_nexthop_num_warn(__func__, (const struct prefix *)&p,
				       nexthop_num);

		if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL))
			label_type = lsp_type_from_re_type(client->proto);

		for (i = 0; i < nexthop_num; i++) {
			STREAM_GETC(s, nexthop_type);

			switch (nexthop_type) {
			case NEXTHOP_TYPE_IPV6:
				STREAM_GET(&nhop_addr, s, 16);
				/* Excess nexthops beyond MULTIPATH_NUM are
				 * parsed but dropped. */
				if (nh_count < MULTIPATH_NUM) {
					/*
					 * For labeled-unicast, each nexthop is
					 * followed by label.
					 */
					if (CHECK_FLAG(message,
						       ZAPI_MESSAGE_LABEL)) {
						STREAM_GETL(s, label);
						labels[nh_count] = label;
					}
					nexthops[nh_count++] = nhop_addr;
				}
				break;
			case NEXTHOP_TYPE_IPV6_IFINDEX:
				/* Fully-specified nexthop: install directly,
				 * no coalescing needed. */
				STREAM_GET(&nhop_addr, s, 16);
				STREAM_GETL(s, ifindex);
				route_entry_nexthop_ipv6_ifindex_add(
					re, &nhop_addr, ifindex, re->vrf_id);
				break;
			case NEXTHOP_TYPE_IFINDEX:
				if (if_count < multipath_num)
					STREAM_GETL(s, ifindices[if_count++]);
				break;
			case NEXTHOP_TYPE_BLACKHOLE:
				route_entry_nexthop_blackhole_add(re, bh_type);
				break;
			default:
				zlog_warn(
					"%s: Please use ZEBRA_ROUTE_ADD if you want to pass non v6 nexthops",
					__PRETTY_FUNCTION__);
				nexthops_free(re->ng.nexthop);
				XFREE(MTYPE_RE, re);
				return;
			}
		}

		/* Pair up the i-th address with the i-th ifindex; leftovers
		 * become address-only or ifindex-only nexthops. */
		max_nh_if = (nh_count > if_count) ? nh_count : if_count;
		for (i = 0; i < max_nh_if; i++) {
			if ((i < nh_count)
			    && !IN6_IS_ADDR_UNSPECIFIED(&nexthops[i])) {
				if ((i < if_count) && ifindices[i])
					nexthop =
						route_entry_nexthop_ipv6_ifindex_add(
							re, &nexthops[i],
							ifindices[i],
							re->vrf_id);
				else
					nexthop = route_entry_nexthop_ipv6_add(
						re, &nexthops[i], re->vrf_id);
				if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL))
					nexthop_add_labels(nexthop, label_type,
							   1, &labels[i]);
			} else {
				if ((i < if_count) && ifindices[i])
					route_entry_nexthop_ifindex_add(
						re, ifindices[i], re->vrf_id);
			}
		}
	}

	/* Distance. */
	if (CHECK_FLAG(message, ZAPI_MESSAGE_DISTANCE))
		STREAM_GETC(s, re->distance);

	/* Metric. */
	if (CHECK_FLAG(message, ZAPI_MESSAGE_METRIC))
		STREAM_GETL(s, re->metric);

	/* Tag */
	if (CHECK_FLAG(message, ZAPI_MESSAGE_TAG))
		STREAM_GETL(s, re->tag);
	else
		re->tag = 0;

	if (CHECK_FLAG(message, ZAPI_MESSAGE_MTU))
		STREAM_GETL(s, re->mtu);
	else
		re->mtu = 0;

	re->table = zvrf->table_id;

	ret = rib_add_multipath(AFI_IP6, safi, &p, src_pp, re);
	/* Stats: >0 new route, <0 update of an existing one. */
	if (ret > 0)
		client->v6_route_add_cnt++;
	else if (ret < 0)
		client->v6_route_upd8_cnt++;

	return;

stream_failure:
	/* Truncated message: free the partially-built entry. */
	nexthops_free(re->ng.nexthop);
	XFREE(MTYPE_RE, re);
}
+
+/* Zebra server IPv6 prefix delete function. */
+static void zread_ipv6_delete(ZAPI_HANDLER_ARGS)
+{
+ struct stream *s;
+ struct zapi_ipv6 api;
+ struct prefix p;
+ struct prefix_ipv6 src_p, *src_pp;
+
+ s = msg;
+
+ /* Type, flags, message. */
+ STREAM_GETC(s, api.type);
+ STREAM_GETW(s, api.instance);
+ STREAM_GETL(s, api.flags);
+ STREAM_GETC(s, api.message);
+ STREAM_GETW(s, api.safi);
+
+ /* IPv4 prefix. */
+ memset(&p, 0, sizeof(struct prefix));
+ p.family = AF_INET6;
+ STREAM_GETC(s, p.prefixlen);
+ STREAM_GET(&p.u.prefix6, s, PSIZE(p.prefixlen));
+
+ if (CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX)) {
+ memset(&src_p, 0, sizeof(struct prefix_ipv6));
+ src_p.family = AF_INET6;
+ STREAM_GETC(s, src_p.prefixlen);
+ STREAM_GET(&src_p.prefix, s, PSIZE(src_p.prefixlen));
+ src_pp = &src_p;
+ } else
+ src_pp = NULL;
+
+ rib_delete(AFI_IP6, api.safi, zvrf_id(zvrf), api.type, api.instance,
+ api.flags, &p, src_pp, NULL, client->rtm_table, 0, false,
+ NULL);
+
+ client->v6_route_del_cnt++;
+
+stream_failure:
+ return;
+}
+
+/* Register zebra server router-id information. Send current router-id */
+static void zread_router_id_add(ZAPI_HANDLER_ARGS)
+{
+ struct prefix p;
+
+ /* Router-id information is needed. */
+ vrf_bitmap_set(client->ridinfo, zvrf_id(zvrf));
+
+ router_id_get(&p, zvrf_id(zvrf));
+
+ zsend_router_id_update(client, &p, zvrf_id(zvrf));
+}
+
/*
 * Unregister zebra server router-id information: the client no longer
 * wants router-id updates for this VRF.
 */
static void zread_router_id_delete(ZAPI_HANDLER_ARGS)
{
	vrf_bitmap_unset(client->ridinfo, zvrf_id(zvrf));
}
+
/*
 * Send a ZEBRA_CAPABILITIES message advertising zebra's MPLS support and
 * the configured multipath limit to a client.
 */
static void zsend_capabilities(struct zserv *client, struct zebra_vrf *zvrf)
{
	struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);

	zclient_create_header(s, ZEBRA_CAPABILITIES, zvrf->vrf->vrf_id);
	stream_putc(s, mpls_enabled);
	stream_putl(s, multipath_num);

	/* Backfill the length field now that the body is complete. */
	stream_putw_at(s, 0, stream_get_endp(s));
	zebra_server_send_message(client, s);
}
+
/*
 * Handle ZEBRA_HELLO: tie up route-type and client->sock, record whether
 * the client wants route-owner notifications, and reply with zebra's
 * capabilities.  Only dynamic routing protocols (above STATIC, below
 * the sentinel) get their proto/instance recorded.
 */
static void zread_hello(ZAPI_HANDLER_ARGS)
{
	/* type of protocol (lib/zebra.h) */
	uint8_t proto;
	unsigned short instance;
	uint8_t notify;

	STREAM_GETC(msg, proto);
	STREAM_GETW(msg, instance);
	STREAM_GETC(msg, notify);
	if (notify)
		client->notify_owner = true;

	/* accept only dynamic routing protocols */
	if ((proto < ZEBRA_ROUTE_MAX) && (proto > ZEBRA_ROUTE_STATIC)) {
		zlog_notice(
			"client %d says hello and bids fair to announce only %s routes vrf=%u",
			client->sock, zebra_route_string(proto),
			zvrf->vrf->vrf_id);
		if (instance)
			zlog_notice("client protocol instance %d", instance);

		client->proto = proto;
		client->instance = instance;
	}

	/* Capabilities are sent even to non-dynamic-protocol clients. */
	zsend_capabilities(client, zvrf);
stream_failure:
	return;
}
+
+/* Unregister all information in a VRF. */
+static void zread_vrf_unregister(ZAPI_HANDLER_ARGS)
+{
+ int i;
+ afi_t afi;
+
+ for (afi = AFI_IP; afi < AFI_MAX; afi++)
+ for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
+ vrf_bitmap_unset(client->redist[afi][i], zvrf_id(zvrf));
+ vrf_bitmap_unset(client->redist_default, zvrf_id(zvrf));
+ vrf_bitmap_unset(client->ifinfo, zvrf_id(zvrf));
+ vrf_bitmap_unset(client->ridinfo, zvrf_id(zvrf));
+}
+
/*
 * Handle ZEBRA_MPLS_LABELS_ADD / ZEBRA_MPLS_LABELS_DELETE: parse an
 * FEC (prefix + gateway + ifindex) with in/out labels and install or
 * remove the corresponding LSP and FTN entries.
 *
 * NOTE(review): the whole message is parsed before the mpls_enabled
 * check, so disabled-MPLS messages are silently consumed -- presumably
 * intentional to keep the stream in sync; confirm.
 */
static void zread_mpls_labels(ZAPI_HANDLER_ARGS)
{
	struct stream *s;
	enum lsp_types_t type;
	struct prefix prefix;
	enum nexthop_types_t gtype;
	union g_addr gate;
	ifindex_t ifindex;
	mpls_label_t in_label, out_label;
	uint8_t distance;

	/* Get input stream. */
	s = msg;

	/* Get data. */
	STREAM_GETC(s, type);
	STREAM_GETL(s, prefix.family);
	switch (prefix.family) {
	case AF_INET:
		STREAM_GET(&prefix.u.prefix4.s_addr, s, IPV4_MAX_BYTELEN);
		STREAM_GETC(s, prefix.prefixlen);
		if (prefix.prefixlen > IPV4_MAX_BITLEN) {
			zlog_warn(
				"%s: Specified prefix length %d is greater than a v4 address can support",
				__PRETTY_FUNCTION__, prefix.prefixlen);
			return;
		}
		STREAM_GET(&gate.ipv4.s_addr, s, IPV4_MAX_BYTELEN);
		break;
	case AF_INET6:
		STREAM_GET(&prefix.u.prefix6, s, 16);
		STREAM_GETC(s, prefix.prefixlen);
		if (prefix.prefixlen > IPV6_MAX_BITLEN) {
			zlog_warn(
				"%s: Specified prefix length %d is greater than a v6 address can support",
				__PRETTY_FUNCTION__, prefix.prefixlen);
			return;
		}
		STREAM_GET(&gate.ipv6, s, 16);
		break;
	default:
		zlog_warn("%s: Specified AF %d is not supported for this call",
			  __PRETTY_FUNCTION__, prefix.family);
		return;
	}
	STREAM_GETL(s, ifindex);
	STREAM_GETC(s, distance);
	STREAM_GETL(s, in_label);
	STREAM_GETL(s, out_label);

	/* Derive the gateway nexthop type from family + ifindex presence. */
	switch (prefix.family) {
	case AF_INET:
		if (ifindex)
			gtype = NEXTHOP_TYPE_IPV4_IFINDEX;
		else
			gtype = NEXTHOP_TYPE_IPV4;
		break;
	case AF_INET6:
		if (ifindex)
			gtype = NEXTHOP_TYPE_IPV6_IFINDEX;
		else
			gtype = NEXTHOP_TYPE_IPV6;
		break;
	default:
		return;
	}

	if (!mpls_enabled)
		return;

	if (hdr->command == ZEBRA_MPLS_LABELS_ADD) {
		mpls_lsp_install(zvrf, type, in_label, out_label, gtype, &gate,
				 ifindex);
		mpls_ftn_update(1, zvrf, type, &prefix, gtype, &gate, ifindex,
				distance, out_label);
	} else if (hdr->command == ZEBRA_MPLS_LABELS_DELETE) {
		mpls_lsp_uninstall(zvrf, type, in_label, gtype, &gate, ifindex);
		mpls_ftn_update(0, zvrf, type, &prefix, gtype, &gate, ifindex,
				distance, out_label);
	}
stream_failure:
	return;
}
+
/*
 * Handle a table-manager connect request: validate that the client is a
 * dynamic routing protocol, record its proto/instance, release any table
 * chunks left over from a previous (crashed) incarnation, and send the
 * connect response (0 = ok, 1 = rejected).
 */
static void zread_table_manager_connect(struct zserv *client,
					struct stream *msg, vrf_id_t vrf_id)
{
	struct stream *s;
	uint8_t proto;
	uint16_t instance;

	s = msg;

	/* Get data. */
	STREAM_GETC(s, proto);
	STREAM_GETW(s, instance);

	/* accept only dynamic routing protocols */
	if ((proto >= ZEBRA_ROUTE_MAX) || (proto <= ZEBRA_ROUTE_STATIC)) {
		zlog_err("client %d has wrong protocol %s", client->sock,
			 zebra_route_string(proto));
		zsend_table_manager_connect_response(client, vrf_id, 1);
		return;
	}
	zlog_notice("client %d with vrf %u instance %u connected as %s",
		    client->sock, vrf_id, instance, zebra_route_string(proto));
	client->proto = proto;
	client->instance = instance;

	/*
	 * Release previous labels of same protocol and instance.
	 * This is done in case it restarted from an unexpected shutdown.
	 */
	release_daemon_table_chunks(client);

	zsend_table_manager_connect_response(client, vrf_id, 0);

stream_failure:
	return;
}
+
/*
 * Handle a label-manager connect request: validate that the client is a
 * dynamic routing protocol, record its proto/instance, release any label
 * chunks left over from a previous (crashed) incarnation, and send the
 * connect response (0 = ok, 1 = rejected).
 */
static void zread_label_manager_connect(struct zserv *client,
					struct stream *msg, vrf_id_t vrf_id)
{
	struct stream *s;
	/* type of protocol (lib/zebra.h) */
	uint8_t proto;
	unsigned short instance;

	/* Get input stream. */
	s = msg;

	/* Get data. */
	STREAM_GETC(s, proto);
	STREAM_GETW(s, instance);

	/* accept only dynamic routing protocols */
	if ((proto >= ZEBRA_ROUTE_MAX) || (proto <= ZEBRA_ROUTE_STATIC)) {
		zlog_err("client %d has wrong protocol %s", client->sock,
			 zebra_route_string(proto));
		zsend_label_manager_connect_response(client, vrf_id, 1);
		return;
	}
	zlog_notice("client %d with vrf %u instance %u connected as %s",
		    client->sock, vrf_id, instance, zebra_route_string(proto));
	client->proto = proto;
	client->instance = instance;

	/*
	 * Release previous labels of same protocol and instance.
	 * This is done in case it restarted from an unexpected shutdown.
	 */
	release_daemon_label_chunks(client);

	zlog_debug(
		" Label Manager client connected: sock %d, proto %s, vrf %u instance %u",
		client->sock, zebra_route_string(proto), vrf_id, instance);
	/* send response back */
	zsend_label_manager_connect_response(client, vrf_id, 0);

stream_failure:
	return;
}
+
/*
 * Handle a get-label-chunk request: try to assign a chunk of 'size'
 * labels to the client and send back the result (lmc may be NULL on
 * failure; the response function handles that case).
 */
static void zread_get_label_chunk(struct zserv *client, struct stream *msg,
				  vrf_id_t vrf_id)
{
	struct stream *s;
	uint8_t keep;   /* whether the chunk survives client disconnect */
	uint32_t size;  /* number of labels requested */
	struct label_manager_chunk *lmc;

	/* Get input stream. */
	s = msg;

	/* Get data. */
	STREAM_GETC(s, keep);
	STREAM_GETL(s, size);

	lmc = assign_label_chunk(client->proto, client->instance, keep, size);
	if (!lmc)
		zlog_err("%s: Unable to assign Label Chunk of size %u",
			 __func__, size);
	else
		zlog_debug("Assigned Label Chunk %u - %u to %u", lmc->start,
			   lmc->end, keep);
	/* send response back */
	zsend_assign_label_chunk_response(client, vrf_id, lmc);

stream_failure:
	return;
}
+
/*
 * Handle a release-label-chunk request: return the [start, end] label
 * range previously assigned to this client back to the label manager.
 */
static void zread_release_label_chunk(struct zserv *client, struct stream *msg)
{
	struct stream *s;
	uint32_t start, end;

	/* Get input stream. */
	s = msg;

	/* Get data. */
	STREAM_GETL(s, start);
	STREAM_GETL(s, end);

	release_label_chunk(client->proto, client->instance, start, end);

stream_failure:
	return;
}
+static void zread_label_manager_request(ZAPI_HANDLER_ARGS)
+{
+ /* to avoid sending other messages like ZERBA_INTERFACE_UP */
+ if (hdr->command == ZEBRA_LABEL_MANAGER_CONNECT)
+ client->is_synchronous = 1;
+
+ /* external label manager */
+ if (lm_is_external)
+ zread_relay_label_manager_request(hdr->command, client,
+ zvrf_id(zvrf));
+ /* this is a label manager */
+ else {
+ if (hdr->command == ZEBRA_LABEL_MANAGER_CONNECT)
+ zread_label_manager_connect(client, msg, zvrf_id(zvrf));
+ else {
+ /* Sanity: don't allow 'unidentified' requests */
+ if (!client->proto) {
+ zlog_err(
+ "Got label request from an unidentified client");
+ return;
+ }
+ if (hdr->command == ZEBRA_GET_LABEL_CHUNK)
+ zread_get_label_chunk(client, msg,
+ zvrf_id(zvrf));
+ else if (hdr->command == ZEBRA_RELEASE_LABEL_CHUNK)
+ zread_release_label_chunk(client, msg);
+ }
+ }
+}
+
/*
 * Handle a get-table-chunk request: try to assign a chunk of 'size'
 * routing-table ids to the client and send back the result (tmc may be
 * NULL on failure; the response function handles that case).
 */
static void zread_get_table_chunk(struct zserv *client, struct stream *msg,
				  vrf_id_t vrf_id)
{
	struct stream *s;
	uint32_t size;  /* number of table ids requested */
	struct table_manager_chunk *tmc;

	/* Get input stream. */
	s = msg;

	/* Get data. */
	STREAM_GETL(s, size);

	tmc = assign_table_chunk(client->proto, client->instance, size);
	if (!tmc)
		zlog_err("%s: Unable to assign Table Chunk of size %u",
			 __func__, size);
	else
		zlog_debug("Assigned Table Chunk %u - %u", tmc->start,
			   tmc->end);
	/* send response back */
	zsend_assign_table_chunk_response(client, vrf_id, tmc);

stream_failure:
	return;
}
+
/*
 * Handle a release-table-chunk request: return the [start, end] table-id
 * range previously assigned to this client back to the table manager.
 */
static void zread_release_table_chunk(struct zserv *client, struct stream *msg)
{
	struct stream *s;
	uint32_t start, end;

	/* Get input stream. */
	s = msg;

	/* Get data. */
	STREAM_GETL(s, start);
	STREAM_GETL(s, end);

	release_table_chunk(client->proto, client->instance, start, end);

stream_failure:
	return;
}
+
+static void zread_table_manager_request(ZAPI_HANDLER_ARGS)
+{
+ /* to avoid sending other messages like ZERBA_INTERFACE_UP */
+ if (hdr->command == ZEBRA_TABLE_MANAGER_CONNECT)
+ zread_table_manager_connect(client, msg, zvrf_id(zvrf));
+ else {
+ /* Sanity: don't allow 'unidentified' requests */
+ if (!client->proto) {
+ zlog_err(
+ "Got table request from an unidentified client");
+ return;
+ }
+ if (hdr->command == ZEBRA_GET_TABLE_CHUNK)
+ zread_get_table_chunk(client, msg, zvrf_id(zvrf));
+ else if (hdr->command == ZEBRA_RELEASE_TABLE_CHUNK)
+ zread_release_table_chunk(client, msg);
+ }
+}
+
/*
 * Handle the pseudowire ZAPI messages (ADD / DELETE / SET / UNSET).
 * The full wire format is parsed for every command; ADD/DELETE only use
 * the interface name, while SET/UNSET also push the updated parameters
 * into the existing pseudowire.
 */
static void zread_pseudowire(ZAPI_HANDLER_ARGS)
{
	struct stream *s;
	char ifname[IF_NAMESIZE];
	ifindex_t ifindex;
	int type;
	int af;
	union g_addr nexthop;
	uint32_t local_label;
	uint32_t remote_label;
	uint8_t flags;
	union pw_protocol_fields data;
	uint8_t protocol;
	struct zebra_pw *pw;

	/* Get input stream. */
	s = msg;

	/* Get data. */
	STREAM_GET(ifname, s, IF_NAMESIZE);
	STREAM_GETL(s, ifindex);
	STREAM_GETL(s, type);
	STREAM_GETL(s, af);
	switch (af) {
	case AF_INET:
		STREAM_GET(&nexthop.ipv4.s_addr, s, IPV4_MAX_BYTELEN);
		break;
	case AF_INET6:
		STREAM_GET(&nexthop.ipv6, s, 16);
		break;
	default:
		/* Unknown address family: drop the message. */
		return;
	}
	STREAM_GETL(s, local_label);
	STREAM_GETL(s, remote_label);
	STREAM_GETC(s, flags);
	STREAM_GET(&data, s, sizeof(data));
	/* The owning protocol is taken from the session, not the wire. */
	protocol = client->proto;

	pw = zebra_pw_find(zvrf, ifname);
	switch (hdr->command) {
	case ZEBRA_PW_ADD:
		if (pw) {
			zlog_warn("%s: pseudowire %s already exists [%s]",
				  __func__, ifname,
				  zserv_command_string(hdr->command));
			return;
		}

		zebra_pw_add(zvrf, ifname, protocol, client);
		break;
	case ZEBRA_PW_DELETE:
		if (!pw) {
			zlog_warn("%s: pseudowire %s not found [%s]", __func__,
				  ifname, zserv_command_string(hdr->command));
			return;
		}

		zebra_pw_del(zvrf, pw);
		break;
	case ZEBRA_PW_SET:
	case ZEBRA_PW_UNSET:
		if (!pw) {
			zlog_warn("%s: pseudowire %s not found [%s]", __func__,
				  ifname, zserv_command_string(hdr->command));
			return;
		}

		switch (hdr->command) {
		case ZEBRA_PW_SET:
			pw->enabled = 1;
			break;
		case ZEBRA_PW_UNSET:
			pw->enabled = 0;
			break;
		}

		/* Apply the freshly-parsed parameters to the pseudowire. */
		zebra_pw_change(pw, ifindex, type, af, &nexthop, local_label,
				remote_label, flags, &data);
		break;
	}

stream_failure:
	return;
}
+
/*
 * Handle ZEBRA_INTERFACE_SET_MASTER: enslave one interface to another
 * in the kernel.  Both interfaces are identified by (vrf_id, ifindex)
 * pairs read from the message; if either lookup fails the request is
 * silently dropped.
 */
static void zread_interface_set_master(ZAPI_HANDLER_ARGS)
{
	struct interface *master;
	struct interface *slave;
	struct stream *s = msg;
	int ifindex;
	vrf_id_t vrf_id;

	/* First pair identifies the master interface. */
	STREAM_GETL(s, vrf_id);
	STREAM_GETL(s, ifindex);
	master = if_lookup_by_index(ifindex, vrf_id);

	/* Second pair identifies the slave interface. */
	STREAM_GETL(s, vrf_id);
	STREAM_GETL(s, ifindex);
	slave = if_lookup_by_index(ifindex, vrf_id);

	if (!master || !slave)
		return;

	kernel_interface_set_master(master, slave);

stream_failure:
	return;
}
+
+
+static void zread_vrf_label(ZAPI_HANDLER_ARGS)
+{
+ struct interface *ifp;
+ mpls_label_t nlabel;
+ afi_t afi;
+ struct stream *s;
+ struct zebra_vrf *def_zvrf;
+ enum lsp_types_t ltype;
+
+ s = msg;
+ STREAM_GETL(s, nlabel);
+ STREAM_GETC(s, afi);
+ if (nlabel == zvrf->label[afi]) {
+ /*
+ * Nothing to do here move along
+ */
+ return;
+ }
+
+ STREAM_GETC(s, ltype);
+
+ if (zvrf->vrf->vrf_id != VRF_DEFAULT)
+ ifp = if_lookup_by_name(zvrf->vrf->name, zvrf->vrf->vrf_id);
+ else
+ ifp = if_lookup_by_name("lo", VRF_DEFAULT);
+
+ if (!ifp) {
+ zlog_debug("Unable to find specified Interface for %s",
+ zvrf->vrf->name);
+ return;
+ }
+
+ def_zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
+
+ if (zvrf->label[afi] != MPLS_LABEL_NONE) {
+ afi_t scrubber;
+ bool really_remove;
+
+ really_remove = true;
+ for (scrubber = AFI_IP; scrubber < AFI_MAX; scrubber++) {
+ if (scrubber == afi)
+ continue;
+
+ if (zvrf->label[scrubber] == MPLS_LABEL_NONE)
+ continue;
+
+ if (zvrf->label[afi] == zvrf->label[scrubber]) {
+ really_remove = false;
+ break;
+ }
+ }
+
+ if (really_remove)
+ mpls_lsp_uninstall(def_zvrf, ltype, zvrf->label[afi],
+ NEXTHOP_TYPE_IFINDEX, NULL,
+ ifp->ifindex);
+ }
+
+ if (nlabel != MPLS_LABEL_NONE)
+ mpls_lsp_install(def_zvrf, ltype, nlabel,
+ MPLS_LABEL_IMPLICIT_NULL, NEXTHOP_TYPE_IFINDEX,
+ NULL, ifp->ifindex);
+
+ zvrf->label[afi] = nlabel;
+stream_failure:
+ return;
+}
+
/*
 * Handle ZEBRA_RULE_ADD / ZEBRA_RULE_DELETE: parse a batch of 'total'
 * policy-based-routing rules and install or remove each one.  The
 * filter bitmask is derived from which parsed fields are non-default.
 *
 * NOTE(review): a failed ifindex lookup returns immediately and drops
 * the remaining rules in the batch -- verify that is acceptable to
 * clients sending multi-rule messages.
 */
static inline void zread_rule(ZAPI_HANDLER_ARGS)
{
	struct zebra_pbr_rule zpr;
	struct stream *s;
	uint32_t total, i;
	ifindex_t ifindex;

	s = msg;
	STREAM_GETL(s, total);

	for (i = 0; i < total; i++) {
		memset(&zpr, 0, sizeof(zpr));

		zpr.sock = client->sock;
		zpr.rule.vrf_id = hdr->vrf_id;
		STREAM_GETL(s, zpr.rule.seq);
		STREAM_GETL(s, zpr.rule.priority);
		STREAM_GETL(s, zpr.rule.unique);
		STREAM_GETC(s, zpr.rule.filter.src_ip.family);
		STREAM_GETC(s, zpr.rule.filter.src_ip.prefixlen);
		STREAM_GET(&zpr.rule.filter.src_ip.u.prefix, s,
			   prefix_blen(&zpr.rule.filter.src_ip));
		STREAM_GETW(s, zpr.rule.filter.src_port);
		STREAM_GETC(s, zpr.rule.filter.dst_ip.family);
		STREAM_GETC(s, zpr.rule.filter.dst_ip.prefixlen);
		STREAM_GET(&zpr.rule.filter.dst_ip.u.prefix, s,
			   prefix_blen(&zpr.rule.filter.dst_ip));
		STREAM_GETW(s, zpr.rule.filter.dst_port);
		STREAM_GETL(s, zpr.rule.filter.fwmark);
		STREAM_GETL(s, zpr.rule.action.table);
		STREAM_GETL(s, ifindex);

		if (ifindex) {
			zpr.ifp = if_lookup_by_index(ifindex, VRF_UNKNOWN);
			if (!zpr.ifp) {
				zlog_debug("Failed to lookup ifindex: %u",
					   ifindex);
				return;
			}
		}

		/* Mark which filter fields were actually supplied. */
		if (!is_default_prefix(&zpr.rule.filter.src_ip))
			zpr.rule.filter.filter_bm |= PBR_FILTER_SRC_IP;

		if (!is_default_prefix(&zpr.rule.filter.dst_ip))
			zpr.rule.filter.filter_bm |= PBR_FILTER_DST_IP;

		if (zpr.rule.filter.src_port)
			zpr.rule.filter.filter_bm |= PBR_FILTER_SRC_PORT;

		if (zpr.rule.filter.dst_port)
			zpr.rule.filter.filter_bm |= PBR_FILTER_DST_PORT;

		if (zpr.rule.filter.fwmark)
			zpr.rule.filter.filter_bm |= PBR_FILTER_FWMARK;

		if (hdr->command == ZEBRA_RULE_ADD)
			zebra_pbr_add_rule(zvrf->zns, &zpr);
		else
			zebra_pbr_del_rule(zvrf->zns, &zpr);
	}

stream_failure:
	return;
}
+
/*
 * Handle ZEBRA_IPSET_CREATE / ZEBRA_IPSET_DESTROY: parse a batch of
 * 'total' ipset definitions and create or destroy each one.
 */
static inline void zread_ipset(ZAPI_HANDLER_ARGS)
{
	struct zebra_pbr_ipset zpi;
	struct stream *s;
	uint32_t total, i;

	s = msg;
	STREAM_GETL(s, total);

	for (i = 0; i < total; i++) {
		memset(&zpi, 0, sizeof(zpi));

		zpi.sock = client->sock;
		STREAM_GETL(s, zpi.unique);
		STREAM_GETL(s, zpi.type);
		STREAM_GET(&zpi.ipset_name, s, ZEBRA_IPSET_NAME_SIZE);

		if (hdr->command == ZEBRA_IPSET_CREATE)
			zebra_pbr_create_ipset(zvrf->zns, &zpi);
		else
			zebra_pbr_destroy_ipset(zvrf->zns, &zpi);
	}

stream_failure:
	return;
}
+
/*
 * Handle ZEBRA_IPSET_ENTRY_ADD / ZEBRA_IPSET_ENTRY_DELETE: parse a
 * batch of 'total' ipset entries (named parent ipset plus src/dst
 * prefixes), resolve each entry's parent ipset by name, and add or
 * remove the entry.
 */
static inline void zread_ipset_entry(ZAPI_HANDLER_ARGS)
{
	struct zebra_pbr_ipset_entry zpi;
	struct zebra_pbr_ipset ipset;
	struct stream *s;
	uint32_t total, i;

	s = msg;
	STREAM_GETL(s, total);

	for (i = 0; i < total; i++) {
		memset(&zpi, 0, sizeof(zpi));
		memset(&ipset, 0, sizeof(ipset));

		zpi.sock = client->sock;
		STREAM_GETL(s, zpi.unique);
		STREAM_GET(&ipset.ipset_name, s, ZEBRA_IPSET_NAME_SIZE);
		STREAM_GETC(s, zpi.src.family);
		STREAM_GETC(s, zpi.src.prefixlen);
		STREAM_GET(&zpi.src.u.prefix, s, prefix_blen(&zpi.src));
		STREAM_GETC(s, zpi.dst.family);
		STREAM_GETC(s, zpi.dst.prefixlen);
		STREAM_GET(&zpi.dst.u.prefix, s, prefix_blen(&zpi.dst));

		/* Mark which filter prefixes were actually supplied. */
		if (!is_default_prefix(&zpi.src))
			zpi.filter_bm |= PBR_FILTER_SRC_IP;

		if (!is_default_prefix(&zpi.dst))
			zpi.filter_bm |= PBR_FILTER_DST_IP;

		/* calculate backpointer */
		zpi.backpointer = zebra_pbr_lookup_ipset_pername(
			zvrf->zns, ipset.ipset_name);
		if (hdr->command == ZEBRA_IPSET_ENTRY_ADD)
			zebra_pbr_add_ipset_entry(zvrf->zns, &zpi);
		else
			zebra_pbr_del_ipset_entry(zvrf->zns, &zpi);
	}

stream_failure:
	return;
}
+
/*
 * Handle ZEBRA_IPTABLE_ADD / ZEBRA_IPTABLE_DELETE: parse a single
 * iptable definition and install or remove it.
 */
static inline void zread_iptable(ZAPI_HANDLER_ARGS)
{
	struct zebra_pbr_iptable zpi;
	struct stream *s;

	s = msg;

	memset(&zpi, 0, sizeof(zpi));

	zpi.sock = client->sock;
	STREAM_GETL(s, zpi.unique);
	STREAM_GETL(s, zpi.type);
	STREAM_GETL(s, zpi.filter_bm);
	STREAM_GETL(s, zpi.action);
	STREAM_GETL(s, zpi.fwmark);
	STREAM_GET(&zpi.ipset_name, s, ZEBRA_IPSET_NAME_SIZE);

	if (hdr->command == ZEBRA_IPTABLE_ADD)
		zebra_pbr_add_iptable(zvrf->zns, &zpi);
	else
		zebra_pbr_del_iptable(zvrf->zns, &zpi);
stream_failure:
	return;
}
+
/*
 * ZAPI message dispatch table, indexed by ZEBRA_* command number.
 * Commands without an entry (or with a NULL entry, e.g. the RADV
 * commands when built without rtadv support) are rejected by
 * zserv_handle_commands().
 */
void (*zserv_handlers[])(ZAPI_HANDLER_ARGS) = {
	[ZEBRA_ROUTER_ID_ADD] = zread_router_id_add,
	[ZEBRA_ROUTER_ID_DELETE] = zread_router_id_delete,
	[ZEBRA_INTERFACE_ADD] = zread_interface_add,
	[ZEBRA_INTERFACE_DELETE] = zread_interface_delete,
	[ZEBRA_ROUTE_ADD] = zread_route_add,
	[ZEBRA_ROUTE_DELETE] = zread_route_del,
	[ZEBRA_IPV4_ROUTE_ADD] = zread_ipv4_add,
	[ZEBRA_IPV4_ROUTE_DELETE] = zread_ipv4_delete,
	[ZEBRA_IPV4_ROUTE_IPV6_NEXTHOP_ADD] = zread_ipv4_route_ipv6_nexthop_add,
	[ZEBRA_IPV6_ROUTE_ADD] = zread_ipv6_add,
	[ZEBRA_IPV6_ROUTE_DELETE] = zread_ipv6_delete,
	[ZEBRA_REDISTRIBUTE_ADD] = zebra_redistribute_add,
	[ZEBRA_REDISTRIBUTE_DELETE] = zebra_redistribute_delete,
	[ZEBRA_REDISTRIBUTE_DEFAULT_ADD] = zebra_redistribute_default_add,
	[ZEBRA_REDISTRIBUTE_DEFAULT_DELETE] = zebra_redistribute_default_delete,
	[ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB] = zread_ipv4_nexthop_lookup_mrib,
	[ZEBRA_HELLO] = zread_hello,
	[ZEBRA_NEXTHOP_REGISTER] = zread_rnh_register,
	[ZEBRA_NEXTHOP_UNREGISTER] = zread_rnh_unregister,
	[ZEBRA_IMPORT_ROUTE_REGISTER] = zread_rnh_register,
	[ZEBRA_IMPORT_ROUTE_UNREGISTER] = zread_rnh_unregister,
	[ZEBRA_BFD_DEST_UPDATE] = zebra_ptm_bfd_dst_register,
	[ZEBRA_BFD_DEST_REGISTER] = zebra_ptm_bfd_dst_register,
	[ZEBRA_BFD_DEST_DEREGISTER] = zebra_ptm_bfd_dst_deregister,
	[ZEBRA_VRF_UNREGISTER] = zread_vrf_unregister,
	[ZEBRA_VRF_LABEL] = zread_vrf_label,
	[ZEBRA_BFD_CLIENT_REGISTER] = zebra_ptm_bfd_client_register,
#if defined(HAVE_RTADV)
	[ZEBRA_INTERFACE_ENABLE_RADV] = zebra_interface_radv_enable,
	[ZEBRA_INTERFACE_DISABLE_RADV] = zebra_interface_radv_disable,
#else
	[ZEBRA_INTERFACE_ENABLE_RADV] = NULL,
	[ZEBRA_INTERFACE_DISABLE_RADV] = NULL,
#endif
	[ZEBRA_MPLS_LABELS_ADD] = zread_mpls_labels,
	[ZEBRA_MPLS_LABELS_DELETE] = zread_mpls_labels,
	[ZEBRA_IPMR_ROUTE_STATS] = zebra_ipmr_route_stats,
	[ZEBRA_LABEL_MANAGER_CONNECT] = zread_label_manager_request,
	[ZEBRA_GET_LABEL_CHUNK] = zread_label_manager_request,
	[ZEBRA_RELEASE_LABEL_CHUNK] = zread_label_manager_request,
	[ZEBRA_FEC_REGISTER] = zread_fec_register,
	[ZEBRA_FEC_UNREGISTER] = zread_fec_unregister,
	[ZEBRA_ADVERTISE_DEFAULT_GW] = zebra_vxlan_advertise_gw_macip,
	[ZEBRA_ADVERTISE_SUBNET] = zebra_vxlan_advertise_subnet,
	[ZEBRA_ADVERTISE_ALL_VNI] = zebra_vxlan_advertise_all_vni,
	[ZEBRA_REMOTE_VTEP_ADD] = zebra_vxlan_remote_vtep_add,
	[ZEBRA_REMOTE_VTEP_DEL] = zebra_vxlan_remote_vtep_del,
	[ZEBRA_REMOTE_MACIP_ADD] = zebra_vxlan_remote_macip_add,
	[ZEBRA_REMOTE_MACIP_DEL] = zebra_vxlan_remote_macip_del,
	[ZEBRA_INTERFACE_SET_MASTER] = zread_interface_set_master,
	[ZEBRA_PW_ADD] = zread_pseudowire,
	[ZEBRA_PW_DELETE] = zread_pseudowire,
	[ZEBRA_PW_SET] = zread_pseudowire,
	[ZEBRA_PW_UNSET] = zread_pseudowire,
	[ZEBRA_RULE_ADD] = zread_rule,
	[ZEBRA_RULE_DELETE] = zread_rule,
	[ZEBRA_TABLE_MANAGER_CONNECT] = zread_table_manager_request,
	[ZEBRA_GET_TABLE_CHUNK] = zread_table_manager_request,
	[ZEBRA_RELEASE_TABLE_CHUNK] = zread_table_manager_request,
	[ZEBRA_IPSET_CREATE] = zread_ipset,
	[ZEBRA_IPSET_DESTROY] = zread_ipset,
	[ZEBRA_IPSET_ENTRY_ADD] = zread_ipset_entry,
	[ZEBRA_IPSET_ENTRY_DELETE] = zread_ipset_entry,
	[ZEBRA_IPTABLE_ADD] = zread_iptable,
	[ZEBRA_IPTABLE_DELETE] = zread_iptable,
};
+
+void zserv_handle_commands(struct zserv *client, struct zmsghdr *hdr,
+ struct stream *msg, struct zebra_vrf *zvrf)
+{
+ if (hdr->command > array_size(zserv_handlers)
+ || zserv_handlers[hdr->command] == NULL)
+ zlog_info("Zebra received unknown command %d", hdr->command);
+ else
+ zserv_handlers[hdr->command](client, hdr, msg, zvrf);
+
+ stream_free(msg);
+}
diff --git a/zebra/zapi_msg.h b/zebra/zapi_msg.h
new file mode 100644
index 0000000000..1658c9852d
--- /dev/null
+++ b/zebra/zapi_msg.h
@@ -0,0 +1,88 @@
+/*
+ * Zebra API message creation & consumption.
+ * Portions:
+ * Copyright (C) 1997-1999 Kunihiro Ishiguro
+ * Copyright (C) 2015-2018 Cumulus Networks, Inc.
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "lib/if.h"
+#include "lib/vrf.h"
+#include "lib/zclient.h"
+#include "lib/pbr.h"
+
+#include "zebra/rib.h"
+#include "zebra/zserv.h"
+#include "zebra/zebra_pbr.h"
+
+/*
+ * This is called to process inbound ZAPI messages.
+ *
+ * client
+ * the client datastructure
+ *
+ * hdr
+ * the message header
+ *
+ * msg
+ * the message contents, without the header
+ *
+ * zvrf
+ * the vrf
+ */
+extern void zserv_handle_commands(struct zserv *client, struct zmsghdr *hdr,
+ struct stream *msg, struct zebra_vrf *zvrf);
+
+extern int zsend_vrf_add(struct zserv *zclient, struct zebra_vrf *zvrf);
+extern int zsend_vrf_delete(struct zserv *zclient, struct zebra_vrf *zvrf);
+extern int zsend_interface_add(struct zserv *zclient, struct interface *ifp);
+extern int zsend_interface_delete(struct zserv *zclient, struct interface *ifp);
+extern int zsend_interface_addresses(struct zserv *zclient,
+ struct interface *ifp);
+extern int zsend_interface_address(int cmd, struct zserv *zclient,
+ struct interface *ifp,
+ struct connected *ifc);
+extern void nbr_connected_add_ipv6(struct interface *ifp,
+ struct in6_addr *address);
+extern void nbr_connected_delete_ipv6(struct interface *ifp,
+ struct in6_addr *address);
+extern int zsend_interface_update(int cmd, struct zserv *client,
+ struct interface *ifp);
+extern int zsend_redistribute_route(int cmd, struct zserv *zclient,
+ struct prefix *p, struct prefix *src_p,
+ struct route_entry *re);
+extern int zsend_router_id_update(struct zserv *zclient, struct prefix *p,
+ vrf_id_t vrf_id);
+extern int zsend_interface_vrf_update(struct zserv *zclient,
+ struct interface *ifp, vrf_id_t vrf_id);
+extern int zsend_interface_link_params(struct zserv *zclient,
+ struct interface *ifp);
+extern int zsend_pw_update(struct zserv *client, struct zebra_pw *pw);
+extern int zsend_route_notify_owner(struct route_entry *re, struct prefix *p,
+ enum zapi_route_notify_owner note);
+
+extern void zsend_rule_notify_owner(struct zebra_pbr_rule *rule,
+ enum zapi_rule_notify_owner note);
+extern void zsend_ipset_notify_owner(struct zebra_pbr_ipset *ipset,
+ enum zapi_ipset_notify_owner note);
+extern void
+zsend_ipset_entry_notify_owner(struct zebra_pbr_ipset_entry *ipset,
+ enum zapi_ipset_entry_notify_owner note);
+extern void zsend_iptable_notify_owner(struct zebra_pbr_iptable *iptable,
+ enum zapi_iptable_notify_owner note);
+extern void zserv_nexthop_num_warn(const char *caller, const struct prefix *p,
+ const unsigned int nexthop_num);
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index 7df03efc10..3ad640653f 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -1913,9 +1913,9 @@ int zebra_mpls_fec_unregister(struct zebra_vrf *zvrf, struct prefix *p,
/*
* Cleanup any FECs registered by this client.
*/
-int zebra_mpls_cleanup_fecs_for_client(struct zebra_vrf *zvrf,
- struct zserv *client)
+static int zebra_mpls_cleanup_fecs_for_client(struct zserv *client)
{
+ struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
struct route_node *rn;
zebra_fec_t *fec;
struct listnode *node;
@@ -2915,4 +2915,6 @@ void zebra_mpls_init(void)
if (!mpls_processq_init(&zebrad))
mpls_enabled = 1;
+
+ hook_register(zapi_client_close, zebra_mpls_cleanup_fecs_for_client);
}
diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h
index 98905a2831..65204a67dc 100644
--- a/zebra/zebra_mpls.h
+++ b/zebra/zebra_mpls.h
@@ -211,12 +211,6 @@ int zebra_mpls_fec_unregister(struct zebra_vrf *zvrf, struct prefix *p,
struct zserv *client);
/*
- * Cleanup any FECs registered by this client.
- */
-int zebra_mpls_cleanup_fecs_for_client(struct zebra_vrf *zvrf,
- struct zserv *client);
-
-/*
* Return FEC (if any) to which this label is bound.
* Note: Only works for per-prefix binding and when the label is not
* implicit-null.
diff --git a/zebra/zebra_ns.h b/zebra/zebra_ns.h
index fbf88ae6ea..c93db2a764 100644
--- a/zebra/zebra_ns.h
+++ b/zebra/zebra_ns.h
@@ -25,6 +25,9 @@
#include <lib/ns.h>
#include <lib/vrf.h>
+#include "zebra/rib.h"
+#include "zebra/zebra_vrf.h"
+
#ifdef HAVE_NETLINK
/* Socket interface to kernel */
struct nlsock {
diff --git a/zebra/zebra_pbr.c b/zebra/zebra_pbr.c
index 758365d716..93c523bf50 100644
--- a/zebra/zebra_pbr.c
+++ b/zebra/zebra_pbr.c
@@ -26,6 +26,7 @@
#include "zebra/zebra_pbr.h"
#include "zebra/rt.h"
+#include "zebra/zapi_msg.h"
/* definitions */
diff --git a/zebra/zebra_ptm.c b/zebra/zebra_ptm.c
index 2fe6797eef..07e81aa020 100644
--- a/zebra/zebra_ptm.c
+++ b/zebra/zebra_ptm.c
@@ -91,6 +91,7 @@ static int zebra_ptm_handle_msg_cb(void *arg, void *in_ctxt);
void zebra_bfd_peer_replay_req(void);
void zebra_ptm_send_status_req(void);
void zebra_ptm_reset_status(int ptm_disable);
+static int zebra_ptm_bfd_client_deregister(struct zserv *client);
const char ZEBRA_PTM_SOCK_NAME[] = "\0/var/run/ptmd.socket";
@@ -124,17 +125,12 @@ void zebra_ptm_init(void)
ptm_cb.reconnect_time = ZEBRA_PTM_RECONNECT_TIME_INITIAL;
ptm_cb.ptm_sock = -1;
+
+ hook_register(zapi_client_close, zebra_ptm_bfd_client_deregister);
}
void zebra_ptm_finish(void)
{
- int proto;
-
- for (proto = 0; proto < ZEBRA_ROUTE_MAX; proto++)
- if (CHECK_FLAG(ptm_cb.client_flags[proto],
- ZEBRA_PTM_BFD_CLIENT_FLAG_REG))
- zebra_ptm_bfd_client_deregister(proto);
-
buffer_flush_all(ptm_cb.wb, ptm_cb.ptm_sock);
free(ptm_hdl);
@@ -1013,15 +1009,16 @@ stream_failure:
}
/* BFD client deregister */
-void zebra_ptm_bfd_client_deregister(int proto)
+int zebra_ptm_bfd_client_deregister(struct zserv *client)
{
+ uint8_t proto = client->proto;
void *out_ctxt;
char tmp_buf[64];
int data_len = ZEBRA_PTM_SEND_MAX_SOCKBUF;
if (proto != ZEBRA_ROUTE_OSPF && proto != ZEBRA_ROUTE_BGP
&& proto != ZEBRA_ROUTE_OSPF6 && proto != ZEBRA_ROUTE_PIM)
- return;
+ return 0;
if (IS_ZEBRA_DEBUG_EVENT)
zlog_err("bfd_client_deregister msg for client %s",
@@ -1031,7 +1028,7 @@ void zebra_ptm_bfd_client_deregister(int proto)
ptm_cb.t_timer = NULL;
thread_add_timer(zebrad.master, zebra_ptm_connect, NULL,
ptm_cb.reconnect_time, &ptm_cb.t_timer);
- return;
+ return 0;
}
ptm_lib_init_msg(ptm_hdl, 0, PTMLIB_MSG_TYPE_CMD, NULL, &out_ctxt);
@@ -1051,6 +1048,8 @@ void zebra_ptm_bfd_client_deregister(int proto)
zebra_ptm_send_message(ptm_cb.out_data, data_len);
UNSET_FLAG(ptm_cb.client_flags[proto], ZEBRA_PTM_BFD_CLIENT_FLAG_REG);
+
+ return 0;
}
int zebra_ptm_get_enable_state(void)
diff --git a/zebra/zebra_ptm.h b/zebra/zebra_ptm.h
index 937c2584f5..0e55574a02 100644
--- a/zebra/zebra_ptm.h
+++ b/zebra/zebra_ptm.h
@@ -29,6 +29,7 @@ extern const char ZEBRA_PTM_SOCK_NAME[];
#define ZEBRA_PTM_BFD_CLIENT_FLAG_REG (1 << 1) /* client registered with BFD */
#include "zebra/zserv.h"
+#include "zebra/interface.h"
/* Zebra ptm context block */
struct zebra_ptm_cb {
@@ -74,5 +75,4 @@ void zebra_ptm_if_init(struct zebra_if *zebra_ifp);
void zebra_ptm_if_set_ptm_state(struct interface *ifp,
struct zebra_if *zebra_ifp);
void zebra_ptm_if_write(struct vty *vty, struct zebra_if *zebra_ifp);
-void zebra_ptm_bfd_client_deregister(int proto);
#endif
diff --git a/zebra/zebra_ptm_redistribute.c b/zebra/zebra_ptm_redistribute.c
index fe788ac4d7..7477147698 100644
--- a/zebra/zebra_ptm_redistribute.c
+++ b/zebra/zebra_ptm_redistribute.c
@@ -23,6 +23,7 @@
#include "vty.h"
#include "stream.h"
#include "zebra/zserv.h"
+#include "zebra/zapi_msg.h"
#include "zebra/zebra_ptm_redistribute.h"
#include "zebra/zebra_memory.h"
diff --git a/zebra/zebra_pw.c b/zebra/zebra_pw.c
index 68ad69397f..28e09fe193 100644
--- a/zebra/zebra_pw.c
+++ b/zebra/zebra_pw.c
@@ -28,6 +28,7 @@
#include "zebra/debug.h"
#include "zebra/rib.h"
#include "zebra/zserv.h"
+#include "zebra/zapi_msg.h"
#include "zebra/zebra_rnh.h"
#include "zebra/zebra_vrf.h"
#include "zebra/zebra_pw.h"
@@ -268,7 +269,7 @@ static int zebra_pw_check_reachability(struct zebra_pw *pw)
return 0;
}
-void zebra_pw_client_close(struct zserv *client)
+static int zebra_pw_client_close(struct zserv *client)
{
struct vrf *vrf;
struct zebra_vrf *zvrf;
@@ -282,12 +283,16 @@ void zebra_pw_client_close(struct zserv *client)
zebra_pw_del(zvrf, pw);
}
}
+
+ return 0;
}
void zebra_pw_init(struct zebra_vrf *zvrf)
{
RB_INIT(zebra_pw_head, &zvrf->pseudowires);
RB_INIT(zebra_static_pw_head, &zvrf->static_pseudowires);
+
+ hook_register(zapi_client_close, zebra_pw_client_close);
}
void zebra_pw_exit(struct zebra_vrf *zvrf)
diff --git a/zebra/zebra_pw.h b/zebra/zebra_pw.h
index 417d26fe65..e6e0a22c21 100644
--- a/zebra/zebra_pw.h
+++ b/zebra/zebra_pw.h
@@ -23,8 +23,11 @@
#include <net/if.h>
#include <netinet/in.h>
-#include "hook.h"
-#include "qobj.h"
+#include "lib/hook.h"
+#include "lib/qobj.h"
+#include "lib/pw.h"
+
+#include "zebra/zebra_vrf.h"
#define PW_INSTALL_RETRY_INTERVAL 30
@@ -67,7 +70,6 @@ void zebra_pw_change(struct zebra_pw *, ifindex_t, int, int, union g_addr *,
struct zebra_pw *zebra_pw_find(struct zebra_vrf *, const char *);
void zebra_pw_update(struct zebra_pw *);
void zebra_pw_install_failure(struct zebra_pw *);
-void zebra_pw_client_close(struct zserv *);
void zebra_pw_init(struct zebra_vrf *);
void zebra_pw_exit(struct zebra_vrf *);
void zebra_pw_vty_init(void);
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 6ad60a6fff..67832f2d3f 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -42,7 +42,6 @@
#include "zebra/rib.h"
#include "zebra/rt.h"
#include "zebra/zebra_ns.h"
-#include "zebra/zserv.h"
#include "zebra/zebra_vrf.h"
#include "zebra/redistribute.h"
#include "zebra/zebra_routemap.h"
@@ -51,6 +50,7 @@
#include "zebra/interface.h"
#include "zebra/connected.h"
#include "zebra/zebra_vxlan.h"
+#include "zebra/zapi_msg.h"
DEFINE_HOOK(rib_update, (struct route_node * rn, const char *reason),
(rn, reason))
diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c
index 22a04ee23d..90c39bcc6f 100644
--- a/zebra/zebra_rnh.c
+++ b/zebra/zebra_rnh.c
@@ -66,10 +66,16 @@ static int compare_state(struct route_entry *r1, struct route_entry *r2);
static int send_client(struct rnh *rnh, struct zserv *client, rnh_type_t type,
vrf_id_t vrf_id);
static void print_rnh(struct route_node *rn, struct vty *vty);
+static int zebra_client_cleanup_rnh(struct zserv *client);
int zebra_rnh_ip_default_route = 0;
int zebra_rnh_ipv6_default_route = 0;
+/* Hook RNH cleanup to run whenever a ZAPI client disconnects. */
+void zebra_rnh_init(void)
+{
+	hook_register(zapi_client_close, zebra_client_cleanup_rnh);
+}
+
static inline struct route_table *get_rnh_table(vrf_id_t vrfid, int family,
rnh_type_t type)
{
@@ -945,34 +951,6 @@ void zebra_print_rnh_table(vrf_id_t vrfid, int af, struct vty *vty,
print_rnh(rn, vty);
}
-int zebra_cleanup_rnh_client(vrf_id_t vrf_id, int family, struct zserv *client,
- rnh_type_t type)
-{
- struct route_table *ntable;
- struct route_node *nrn;
- struct rnh *rnh;
-
- if (IS_ZEBRA_DEBUG_NHT)
- zlog_debug("%u: Client %s RNH cleanup for family %d type %d",
- vrf_id, zebra_route_string(client->proto), family,
- type);
-
- ntable = get_rnh_table(vrf_id, family, type);
- if (!ntable) {
- zlog_debug("cleanup_rnh_client: rnh table not found\n");
- return -1;
- }
-
- for (nrn = route_top(ntable); nrn; nrn = route_next(nrn)) {
- if (!nrn->info)
- continue;
-
- rnh = nrn->info;
- zebra_remove_rnh_client(rnh, client, type);
- }
- return 1;
-}
-
/**
* free_state - free up the re structure associated with the rnh.
*/
@@ -1202,3 +1180,61 @@ static void print_rnh(struct route_node *rn, struct vty *vty)
vty_out(vty, " zebra[pseudowires]");
vty_out(vty, "\n");
}
+
+/*
+ * Remove every nexthop-tracking registration held by 'client' in one
+ * (vrf, family, type) RNH table.
+ *
+ * Returns 1 on success, -1 if the table does not exist.
+ */
+static int zebra_cleanup_rnh_client(vrf_id_t vrf_id, int family,
+				    struct zserv *client, rnh_type_t type)
+{
+	struct route_table *ntable;
+	struct route_node *nrn;
+	struct rnh *rnh;
+
+	if (IS_ZEBRA_DEBUG_NHT)
+		zlog_debug("%u: Client %s RNH cleanup for family %d type %d",
+			   vrf_id, zebra_route_string(client->proto), family,
+			   type);
+
+	ntable = get_rnh_table(vrf_id, family, type);
+	if (!ntable) {
+		/* zlog appends its own newline; no trailing '\n' in msgs */
+		zlog_debug("cleanup_rnh_client: rnh table not found");
+		return -1;
+	}
+
+	for (nrn = route_top(ntable); nrn; nrn = route_next(nrn)) {
+		if (!nrn->info)
+			continue;
+
+		rnh = nrn->info;
+		zebra_remove_rnh_client(rnh, client, type);
+	}
+	return 1;
+}
+
+/*
+ * Cleanup registered nexthops (across VRFs) upon client disconnect.
+ *
+ * Registered as a zapi_client_close hook (see zebra_rnh_init).  Walks
+ * every VRF and drops the client's registrations for both address
+ * families and both registration types; for LDP clients it also tears
+ * down the MPLS LSP/FTN state the client had installed.  Always
+ * returns 0 (hook convention).
+ */
+static int zebra_client_cleanup_rnh(struct zserv *client)
+{
+	struct vrf *vrf;
+	struct zebra_vrf *zvrf;
+
+	RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
+		zvrf = vrf->info;
+		if (zvrf) {
+			/* Both families, both registration types. */
+			zebra_cleanup_rnh_client(zvrf_id(zvrf), AF_INET, client,
+						 RNH_NEXTHOP_TYPE);
+			zebra_cleanup_rnh_client(zvrf_id(zvrf), AF_INET6,
+						 client, RNH_NEXTHOP_TYPE);
+			zebra_cleanup_rnh_client(zvrf_id(zvrf), AF_INET, client,
+						 RNH_IMPORT_CHECK_TYPE);
+			zebra_cleanup_rnh_client(zvrf_id(zvrf), AF_INET6,
+						 client, RNH_IMPORT_CHECK_TYPE);
+			/* LDP-installed MPLS state must go with the client. */
+			if (client->proto == ZEBRA_ROUTE_LDP) {
+				hash_iterate(zvrf->lsp_table,
+					     mpls_ldp_lsp_uninstall_all,
+					     zvrf->lsp_table);
+				mpls_ldp_ftn_uninstall_all(zvrf, AFI_IP);
+				mpls_ldp_ftn_uninstall_all(zvrf, AFI_IP6);
+			}
+		}
+	}
+
+	return 0;
+}
diff --git a/zebra/zebra_rnh.h b/zebra/zebra_rnh.h
index f783696c0e..ea7d5545e8 100644
--- a/zebra/zebra_rnh.h
+++ b/zebra/zebra_rnh.h
@@ -54,6 +54,8 @@ typedef enum { RNH_NEXTHOP_TYPE, RNH_IMPORT_CHECK_TYPE } rnh_type_t;
extern int zebra_rnh_ip_default_route;
extern int zebra_rnh_ipv6_default_route;
+extern void zebra_rnh_init(void);
+
static inline int rnh_resolve_via_default(int family)
{
if (((family == AF_INET) && zebra_rnh_ip_default_route)
@@ -87,6 +89,4 @@ extern void zebra_evaluate_rnh(vrf_id_t vrfid, int family, int force,
extern void zebra_print_rnh_table(vrf_id_t vrfid, int family, struct vty *vty,
rnh_type_t);
extern char *rnh_str(struct rnh *rnh, char *buf, int size);
-extern int zebra_cleanup_rnh_client(vrf_id_t vrf, int family,
- struct zserv *client, rnh_type_t type);
#endif /*_ZEBRA_RNH_H */
diff --git a/zebra/zebra_routemap.h b/zebra/zebra_routemap.h
index 60bf7c3f59..14c7c58848 100644
--- a/zebra/zebra_routemap.h
+++ b/zebra/zebra_routemap.h
@@ -22,6 +22,8 @@
#ifndef __ZEBRA_ROUTEMAP_H__
#define __ZEBRA_ROUTEMAP_H__
+#include "lib/routemap.h"
+
extern void zebra_route_map_init(void);
extern void zebra_routemap_config_write_protocol(struct vty *vty);
extern char *zebra_get_import_table_route_map(afi_t afi, uint32_t table);
diff --git a/zebra/zebra_vrf.c b/zebra/zebra_vrf.c
index 18d53292b1..46443dec4f 100644
--- a/zebra/zebra_vrf.c
+++ b/zebra/zebra_vrf.c
@@ -29,7 +29,7 @@
#include "vty.h"
#include "zebra/debug.h"
-#include "zebra/zserv.h"
+#include "zebra/zapi_msg.h"
#include "zebra/rib.h"
#include "zebra/zebra_vrf.h"
#include "zebra/zebra_rnh.h"
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 16aece9747..2ae9ac5082 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -1215,6 +1215,10 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
json_object_int_add(json_route, "metric", re->metric);
}
+ json_object_int_add(json_route, "internalStatus",
+ re->status);
+ json_object_int_add(json_route, "internalFlags",
+ re->flags);
if (uptime < ONE_DAY_SECOND)
sprintf(buf, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min,
tm->tm_sec);
@@ -1231,6 +1235,9 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
for (ALL_NEXTHOPS(re->ng, nexthop)) {
json_nexthop = json_object_new_object();
+ json_object_int_add(json_nexthop, "flags",
+ nexthop->flags);
+
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE))
json_object_boolean_true_add(json_nexthop,
"duplicate");
diff --git a/zebra/zserv.c b/zebra/zserv.c
index fa1679b387..7dcd654240 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -1,17 +1,19 @@
-/* Zebra daemon server routine.
- * Copyright (C) 1997, 98, 99 Kunihiro Ishiguro
- *
- * This file is part of GNU Zebra.
+/*
+ * Zebra API server.
+ * Portions:
+ * Copyright (C) 1997-1999 Kunihiro Ishiguro
+ * Copyright (C) 2015-2018 Cumulus Networks, Inc.
+ * et al.
*
- * GNU Zebra is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
*
- * GNU Zebra is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; see the file COPYING; if not, write to the Free Software
@@ -19,49 +21,44 @@
*/
#include <zebra.h>
-#include <sys/un.h>
-/* for basename */
-#include <libgen.h>
-
-#include "prefix.h"
-#include "command.h"
-#include "if.h"
-#include "thread.h"
-#include "stream.h"
-#include "memory.h"
-#include "zebra_memory.h"
-#include "table.h"
-#include "rib.h"
-#include "network.h"
-#include "sockunion.h"
-#include "log.h"
-#include "zclient.h"
-#include "privs.h"
-#include "network.h"
-#include "buffer.h"
-#include "nexthop.h"
-#include "vrf.h"
-#include "libfrr.h"
-#include "sockopt.h"
-
-#include "zebra/zserv.h"
-#include "zebra/zebra_ns.h"
-#include "zebra/zebra_vrf.h"
-#include "zebra/router-id.h"
-#include "zebra/redistribute.h"
-#include "zebra/debug.h"
-#include "zebra/zebra_rnh.h"
-#include "zebra/rt_netlink.h"
-#include "zebra/interface.h"
-#include "zebra/zebra_ptm.h"
-#include "zebra/rtadv.h"
-#include "zebra/zebra_mpls.h"
-#include "zebra/zebra_mroute.h"
-#include "zebra/label_manager.h"
-#include "zebra/zebra_vxlan.h"
-#include "zebra/rt.h"
-#include "zebra/zebra_pbr.h"
-#include "zebra/table_manager.h"
+
+/* clang-format off */
+#include <errno.h> /* for errno */
+#include <netinet/in.h> /* for sockaddr_in */
+#include <stdint.h> /* for uint8_t */
+#include <stdio.h> /* for snprintf */
+#include <sys/socket.h> /* for sockaddr_storage, AF_UNIX, accept... */
+#include <sys/stat.h> /* for umask, mode_t */
+#include <sys/un.h> /* for sockaddr_un */
+#include <time.h> /* for NULL, tm, gmtime, time_t */
+#include <unistd.h> /* for close, unlink, ssize_t */
+
+#include "lib/buffer.h" /* for BUFFER_EMPTY, BUFFER_ERROR, BUFFE... */
+#include "lib/command.h" /* for vty, install_element, CMD_SUCCESS... */
+#include "lib/hook.h" /* for DEFINE_HOOK, DEFINE_KOOH, hook_call */
+#include "lib/linklist.h" /* for ALL_LIST_ELEMENTS_RO, ALL_LIST_EL... */
+#include "lib/libfrr.h" /* for frr_zclient_addr */
+#include "lib/log.h" /* for zlog_warn, zlog_debug, safe_strerror */
+#include "lib/memory.h" /* for MTYPE_TMP, XCALLOC, XFREE */
+#include "lib/monotime.h" /* for monotime, ONE_DAY_SECOND, ONE_WEE... */
+#include "lib/network.h" /* for set_nonblocking */
+#include "lib/privs.h" /* for zebra_privs_t, ZPRIVS_LOWER, ZPRI... */
+#include "lib/route_types.h" /* for ZEBRA_ROUTE_MAX */
+#include "lib/sockopt.h" /* for setsockopt_so_recvbuf, setsockopt... */
+#include "lib/sockunion.h" /* for sockopt_reuseaddr, sockopt_reuseport */
+#include "lib/stream.h" /* for STREAM_SIZE, stream (ptr only), ... */
+#include "lib/thread.h" /* for thread (ptr only), THREAD_ARG, ... */
+#include "lib/vrf.h" /* for vrf_info_lookup, VRF_DEFAULT */
+#include "lib/vty.h" /* for vty_out, vty (ptr only) */
+#include "lib/zassert.h" /* for assert */
+#include "lib/zclient.h" /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */
+
+#include "zebra/debug.h" /* for various debugging macros */
+#include "zebra/rib.h" /* for rib_score_proto */
+#include "zebra/zapi_msg.h" /* for zserv_handle_commands */
+#include "zebra/zebra_vrf.h" /* for zebra_vrf_lookup_by_id, zvrf */
+#include "zebra/zserv.h" /* for zserv */
+/* clang-format on */
/* Event list of zebra. */
enum event { ZEBRA_READ, ZEBRA_WRITE };
@@ -71,7 +68,7 @@ extern struct zebra_privs_t zserv_privs;
static void zebra_event(struct zserv *client, enum event event);
-/* Public interface ======================================================== */
+/* Public interface --------------------------------------------------------- */
int zebra_server_send_message(struct zserv *client, struct stream *msg)
{
@@ -80,2957 +77,16 @@ int zebra_server_send_message(struct zserv *client, struct stream *msg)
return 0;
}
-/* Encoding helpers -------------------------------------------------------- */
-
-static void zserv_encode_interface(struct stream *s, struct interface *ifp)
-{
- /* Interface information. */
- stream_put(s, ifp->name, INTERFACE_NAMSIZ);
- stream_putl(s, ifp->ifindex);
- stream_putc(s, ifp->status);
- stream_putq(s, ifp->flags);
- stream_putc(s, ifp->ptm_enable);
- stream_putc(s, ifp->ptm_status);
- stream_putl(s, ifp->metric);
- stream_putl(s, ifp->speed);
- stream_putl(s, ifp->mtu);
- stream_putl(s, ifp->mtu6);
- stream_putl(s, ifp->bandwidth);
- stream_putl(s, ifp->ll_type);
- stream_putl(s, ifp->hw_addr_len);
- if (ifp->hw_addr_len)
- stream_put(s, ifp->hw_addr, ifp->hw_addr_len);
-
- /* Then, Traffic Engineering parameters if any */
- if (HAS_LINK_PARAMS(ifp) && IS_LINK_PARAMS_SET(ifp->link_params)) {
- stream_putc(s, 1);
- zebra_interface_link_params_write(s, ifp);
- } else
- stream_putc(s, 0);
-
- /* Write packet size. */
- stream_putw_at(s, 0, stream_get_endp(s));
-}
-
-static void zserv_encode_vrf(struct stream *s, struct zebra_vrf *zvrf)
-{
- struct vrf_data data;
- const char *netns_name = zvrf_ns_name(zvrf);
-
- data.l.table_id = zvrf->table_id;
-
- if (netns_name)
- strlcpy(data.l.netns_name, basename((char *)netns_name),
- NS_NAMSIZ);
- else
- memset(data.l.netns_name, 0, NS_NAMSIZ);
- /* Pass the tableid and the netns NAME */
- stream_put(s, &data, sizeof(struct vrf_data));
- /* Interface information. */
- stream_put(s, zvrf_name(zvrf), VRF_NAMSIZ);
- /* Write packet size. */
- stream_putw_at(s, 0, stream_get_endp(s));
-}
-
-static int zserv_encode_nexthop(struct stream *s, struct nexthop *nexthop)
-{
- stream_putc(s, nexthop->type);
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- stream_put_in_addr(s, &nexthop->gate.ipv4);
- stream_putl(s, nexthop->ifindex);
- break;
- case NEXTHOP_TYPE_IPV6:
- stream_put(s, &nexthop->gate.ipv6, 16);
- break;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- stream_put(s, &nexthop->gate.ipv6, 16);
- stream_putl(s, nexthop->ifindex);
- break;
- case NEXTHOP_TYPE_IFINDEX:
- stream_putl(s, nexthop->ifindex);
- break;
- default:
- /* do nothing */
- break;
- }
- return 1;
-}
-
-/* Send handlers ----------------------------------------------------------- */
-
-/* Interface is added. Send ZEBRA_INTERFACE_ADD to client. */
-/*
- * This function is called in the following situations:
- * - in response to a 3-byte ZEBRA_INTERFACE_ADD request
- * from the client.
- * - at startup, when zebra figures out the available interfaces
- * - when an interface is added (where support for
- * RTM_IFANNOUNCE or AF_NETLINK sockets is available), or when
- * an interface is marked IFF_UP (i.e., an RTM_IFINFO message is
- * received)
- */
-int zsend_interface_add(struct zserv *client, struct interface *ifp)
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_INTERFACE_ADD, ifp->vrf_id);
- zserv_encode_interface(s, ifp);
-
- client->ifadd_cnt++;
- return zebra_server_send_message(client, s);
-}
-
-/* Interface deletion from zebra daemon. */
-int zsend_interface_delete(struct zserv *client, struct interface *ifp)
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_INTERFACE_DELETE, ifp->vrf_id);
- zserv_encode_interface(s, ifp);
-
- client->ifdel_cnt++;
- return zebra_server_send_message(client, s);
-}
-
-int zsend_vrf_add(struct zserv *client, struct zebra_vrf *zvrf)
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_VRF_ADD, zvrf_id(zvrf));
- zserv_encode_vrf(s, zvrf);
-
- client->vrfadd_cnt++;
- return zebra_server_send_message(client, s);
-}
-
-/* VRF deletion from zebra daemon. */
-int zsend_vrf_delete(struct zserv *client, struct zebra_vrf *zvrf)
-
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_VRF_DELETE, zvrf_id(zvrf));
- zserv_encode_vrf(s, zvrf);
-
- client->vrfdel_cnt++;
- return zebra_server_send_message(client, s);
-}
-
-int zsend_interface_link_params(struct zserv *client, struct interface *ifp)
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- /* Check this client need interface information. */
- if (!client->ifinfo) {
- stream_free(s);
- return 0;
- }
-
- if (!ifp->link_params) {
- stream_free(s);
- return 0;
- }
-
- zclient_create_header(s, ZEBRA_INTERFACE_LINK_PARAMS, ifp->vrf_id);
-
- /* Add Interface Index */
- stream_putl(s, ifp->ifindex);
-
- /* Then TE Link Parameters */
- if (zebra_interface_link_params_write(s, ifp) == 0) {
- stream_free(s);
- return 0;
- }
-
- /* Write packet size. */
- stream_putw_at(s, 0, stream_get_endp(s));
-
- return zebra_server_send_message(client, s);
-}
-
-/* Interface address is added/deleted. Send ZEBRA_INTERFACE_ADDRESS_ADD or
- * ZEBRA_INTERFACE_ADDRESS_DELETE to the client.
- *
- * A ZEBRA_INTERFACE_ADDRESS_ADD is sent in the following situations:
- * - in response to a 3-byte ZEBRA_INTERFACE_ADD request
- * from the client, after the ZEBRA_INTERFACE_ADD has been
- * sent from zebra to the client
- * - redistribute new address info to all clients in the following situations
- * - at startup, when zebra figures out the available interfaces
- * - when an interface is added (where support for
- * RTM_IFANNOUNCE or AF_NETLINK sockets is available), or when
- * an interface is marked IFF_UP (i.e., an RTM_IFINFO message is
- * received)
- * - for the vty commands "ip address A.B.C.D/M [<secondary>|<label LINE>]"
- * and "no bandwidth <1-10000000>", "ipv6 address X:X::X:X/M"
- * - when an RTM_NEWADDR message is received from the kernel,
- *
- * The call tree that triggers ZEBRA_INTERFACE_ADDRESS_DELETE:
- *
- * zsend_interface_address(DELETE)
- * ^
- * |
- * zebra_interface_address_delete_update
- * ^ ^ ^
- * | | if_delete_update
- * | |
- * ip_address_uninstall connected_delete_ipv4
- * [ipv6_addresss_uninstall] [connected_delete_ipv6]
- * ^ ^
- * | |
- * | RTM_NEWADDR on routing/netlink socket
- * |
- * vty commands:
- * "no ip address A.B.C.D/M [label LINE]"
- * "no ip address A.B.C.D/M secondary"
- * ["no ipv6 address X:X::X:X/M"]
- *
- */
-int zsend_interface_address(int cmd, struct zserv *client,
- struct interface *ifp, struct connected *ifc)
-{
- int blen;
- struct prefix *p;
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, cmd, ifp->vrf_id);
- stream_putl(s, ifp->ifindex);
-
- /* Interface address flag. */
- stream_putc(s, ifc->flags);
-
- /* Prefix information. */
- p = ifc->address;
- stream_putc(s, p->family);
- blen = prefix_blen(p);
- stream_put(s, &p->u.prefix, blen);
-
- /*
- * XXX gnu version does not send prefixlen for
- * ZEBRA_INTERFACE_ADDRESS_DELETE
- * but zebra_interface_address_delete_read() in the gnu version
- * expects to find it
- */
- stream_putc(s, p->prefixlen);
-
- /* Destination. */
- p = ifc->destination;
- if (p)
- stream_put(s, &p->u.prefix, blen);
- else
- stream_put(s, NULL, blen);
-
- /* Write packet size. */
- stream_putw_at(s, 0, stream_get_endp(s));
-
- client->connected_rt_add_cnt++;
- return zebra_server_send_message(client, s);
-}
-
-static int zsend_interface_nbr_address(int cmd, struct zserv *client,
- struct interface *ifp,
- struct nbr_connected *ifc)
-{
- int blen;
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
- struct prefix *p;
-
- zclient_create_header(s, cmd, ifp->vrf_id);
- stream_putl(s, ifp->ifindex);
-
- /* Prefix information. */
- p = ifc->address;
- stream_putc(s, p->family);
- blen = prefix_blen(p);
- stream_put(s, &p->u.prefix, blen);
-
- /*
- * XXX gnu version does not send prefixlen for
- * ZEBRA_INTERFACE_ADDRESS_DELETE
- * but zebra_interface_address_delete_read() in the gnu version
- * expects to find it
- */
- stream_putc(s, p->prefixlen);
-
- /* Write packet size. */
- stream_putw_at(s, 0, stream_get_endp(s));
-
- return zebra_server_send_message(client, s);
-}
-
-/* Interface address addition. */
-static void zebra_interface_nbr_address_add_update(struct interface *ifp,
- struct nbr_connected *ifc)
-{
- struct listnode *node, *nnode;
- struct zserv *client;
- struct prefix *p;
-
- if (IS_ZEBRA_DEBUG_EVENT) {
- char buf[INET6_ADDRSTRLEN];
-
- p = ifc->address;
- zlog_debug(
- "MESSAGE: ZEBRA_INTERFACE_NBR_ADDRESS_ADD %s/%d on %s",
- inet_ntop(p->family, &p->u.prefix, buf,
- INET6_ADDRSTRLEN),
- p->prefixlen, ifc->ifp->name);
- }
-
- for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client))
- zsend_interface_nbr_address(ZEBRA_INTERFACE_NBR_ADDRESS_ADD,
- client, ifp, ifc);
-}
-
-/* Interface address deletion. */
-static void zebra_interface_nbr_address_delete_update(struct interface *ifp,
- struct nbr_connected *ifc)
-{
- struct listnode *node, *nnode;
- struct zserv *client;
- struct prefix *p;
-
- if (IS_ZEBRA_DEBUG_EVENT) {
- char buf[INET6_ADDRSTRLEN];
-
- p = ifc->address;
- zlog_debug(
- "MESSAGE: ZEBRA_INTERFACE_NBR_ADDRESS_DELETE %s/%d on %s",
- inet_ntop(p->family, &p->u.prefix, buf,
- INET6_ADDRSTRLEN),
- p->prefixlen, ifc->ifp->name);
- }
-
- for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client))
- zsend_interface_nbr_address(ZEBRA_INTERFACE_NBR_ADDRESS_DELETE,
- client, ifp, ifc);
-}
-
-/* Send addresses on interface to client */
-int zsend_interface_addresses(struct zserv *client, struct interface *ifp)
-{
- struct listnode *cnode, *cnnode;
- struct connected *c;
- struct nbr_connected *nc;
-
- /* Send interface addresses. */
- for (ALL_LIST_ELEMENTS(ifp->connected, cnode, cnnode, c)) {
- if (!CHECK_FLAG(c->conf, ZEBRA_IFC_REAL))
- continue;
-
- if (zsend_interface_address(ZEBRA_INTERFACE_ADDRESS_ADD, client,
- ifp, c)
- < 0)
- return -1;
- }
-
- /* Send interface neighbors. */
- for (ALL_LIST_ELEMENTS(ifp->nbr_connected, cnode, cnnode, nc)) {
- if (zsend_interface_nbr_address(ZEBRA_INTERFACE_NBR_ADDRESS_ADD,
- client, ifp, nc)
- < 0)
- return -1;
- }
-
- return 0;
-}
-
-/* Notify client about interface moving from one VRF to another.
- * Whether client is interested in old and new VRF is checked by caller.
- */
-int zsend_interface_vrf_update(struct zserv *client, struct interface *ifp,
- vrf_id_t vrf_id)
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_INTERFACE_VRF_UPDATE, ifp->vrf_id);
-
- /* Fill in the ifIndex of the interface and its new VRF (id) */
- stream_putl(s, ifp->ifindex);
- stream_putl(s, vrf_id);
-
- /* Write packet size. */
- stream_putw_at(s, 0, stream_get_endp(s));
-
- client->if_vrfchg_cnt++;
- return zebra_server_send_message(client, s);
-}
-
-/* Add new nbr connected IPv6 address */
-void nbr_connected_add_ipv6(struct interface *ifp, struct in6_addr *address)
-{
- struct nbr_connected *ifc;
- struct prefix p;
-
- p.family = AF_INET6;
- IPV6_ADDR_COPY(&p.u.prefix, address);
- p.prefixlen = IPV6_MAX_PREFIXLEN;
-
- if (!(ifc = listnode_head(ifp->nbr_connected))) {
- /* new addition */
- ifc = nbr_connected_new();
- ifc->address = prefix_new();
- ifc->ifp = ifp;
- listnode_add(ifp->nbr_connected, ifc);
- }
-
- prefix_copy(ifc->address, &p);
-
- zebra_interface_nbr_address_add_update(ifp, ifc);
-
- if_nbr_ipv6ll_to_ipv4ll_neigh_update(ifp, address, 1);
-}
-
-void nbr_connected_delete_ipv6(struct interface *ifp, struct in6_addr *address)
-{
- struct nbr_connected *ifc;
- struct prefix p;
-
- p.family = AF_INET6;
- IPV6_ADDR_COPY(&p.u.prefix, address);
- p.prefixlen = IPV6_MAX_PREFIXLEN;
-
- ifc = nbr_connected_check(ifp, &p);
- if (!ifc)
- return;
-
- listnode_delete(ifp->nbr_connected, ifc);
-
- zebra_interface_nbr_address_delete_update(ifp, ifc);
-
- if_nbr_ipv6ll_to_ipv4ll_neigh_update(ifp, address, 0);
-
- nbr_connected_free(ifc);
-}
-
-/*
- * The cmd passed to zsend_interface_update may be ZEBRA_INTERFACE_UP or
- * ZEBRA_INTERFACE_DOWN.
- *
- * The ZEBRA_INTERFACE_UP message is sent from the zebra server to
- * the clients in one of 2 situations:
- * - an if_up is detected e.g., as a result of an RTM_IFINFO message
- * - a vty command modifying the bandwidth of an interface is received.
- * The ZEBRA_INTERFACE_DOWN message is sent when an if_down is detected.
- */
-int zsend_interface_update(int cmd, struct zserv *client, struct interface *ifp)
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, cmd, ifp->vrf_id);
- zserv_encode_interface(s, ifp);
-
- if (cmd == ZEBRA_INTERFACE_UP)
- client->ifup_cnt++;
- else
- client->ifdown_cnt++;
-
- return zebra_server_send_message(client, s);
-}
-
-int zsend_redistribute_route(int cmd, struct zserv *client, struct prefix *p,
- struct prefix *src_p, struct route_entry *re)
-{
- struct zapi_route api;
- struct zapi_nexthop *api_nh;
- struct nexthop *nexthop;
- int count = 0;
-
- memset(&api, 0, sizeof(api));
- api.vrf_id = re->vrf_id;
- api.type = re->type;
- api.instance = re->instance;
- api.flags = re->flags;
-
- /* Prefix. */
- api.prefix = *p;
- if (src_p) {
- SET_FLAG(api.message, ZAPI_MESSAGE_SRCPFX);
- memcpy(&api.src_prefix, src_p, sizeof(api.src_prefix));
- }
-
- /* Nexthops. */
- if (re->nexthop_active_num) {
- SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
- api.nexthop_num = re->nexthop_active_num;
- }
- for (nexthop = re->ng.nexthop; nexthop; nexthop = nexthop->next) {
- if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
- continue;
-
- api_nh = &api.nexthops[count];
- api_nh->vrf_id = nexthop->vrf_id;
- api_nh->type = nexthop->type;
- switch (nexthop->type) {
- case NEXTHOP_TYPE_BLACKHOLE:
- api_nh->bh_type = nexthop->bh_type;
- break;
- case NEXTHOP_TYPE_IPV4:
- api_nh->gate.ipv4 = nexthop->gate.ipv4;
- break;
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- api_nh->gate.ipv4 = nexthop->gate.ipv4;
- api_nh->ifindex = nexthop->ifindex;
- break;
- case NEXTHOP_TYPE_IFINDEX:
- api_nh->ifindex = nexthop->ifindex;
- break;
- case NEXTHOP_TYPE_IPV6:
- api_nh->gate.ipv6 = nexthop->gate.ipv6;
- break;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- api_nh->gate.ipv6 = nexthop->gate.ipv6;
- api_nh->ifindex = nexthop->ifindex;
- }
- count++;
- }
-
- /* Attributes. */
- SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE);
- api.distance = re->distance;
- SET_FLAG(api.message, ZAPI_MESSAGE_METRIC);
- api.metric = re->metric;
- if (re->tag) {
- SET_FLAG(api.message, ZAPI_MESSAGE_TAG);
- api.tag = re->tag;
- }
- SET_FLAG(api.message, ZAPI_MESSAGE_MTU);
- api.mtu = re->mtu;
-
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- /* Encode route and send. */
- if (zapi_route_encode(cmd, s, &api) < 0) {
- stream_free(s);
- return -1;
- }
-
- if (IS_ZEBRA_DEBUG_SEND) {
- char buf_prefix[PREFIX_STRLEN];
- prefix2str(&api.prefix, buf_prefix, sizeof(buf_prefix));
-
- zlog_debug("%s: %s to client %s: type %s, vrf_id %d, p %s",
- __func__, zserv_command_string(cmd),
- zebra_route_string(client->proto),
- zebra_route_string(api.type), api.vrf_id,
- buf_prefix);
- }
- return zebra_server_send_message(client, s);
-}
-
-/*
- * Modified version of zsend_ipv4_nexthop_lookup(): Query unicast rib if
- * nexthop is not found on mrib. Returns both route metric and protocol
- * distance.
- */
-static int zsend_ipv4_nexthop_lookup_mrib(struct zserv *client,
- struct in_addr addr,
- struct route_entry *re,
- struct zebra_vrf *zvrf)
-{
- struct stream *s;
- unsigned long nump;
- uint8_t num;
- struct nexthop *nexthop;
-
- /* Get output stream. */
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
- stream_reset(s);
-
- /* Fill in result. */
- zclient_create_header(s, ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB, zvrf_id(zvrf));
- stream_put_in_addr(s, &addr);
-
- if (re) {
- stream_putc(s, re->distance);
- stream_putl(s, re->metric);
- num = 0;
- nump = stream_get_endp(
- s); /* remember position for nexthop_num */
- stream_putc(s, 0); /* reserve room for nexthop_num */
- /* Only non-recursive routes are elegible to resolve the nexthop
- * we
- * are looking up. Therefore, we will just iterate over the top
- * chain of nexthops. */
- for (nexthop = re->ng.nexthop; nexthop; nexthop = nexthop->next)
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
- num += zserv_encode_nexthop(s, nexthop);
-
- stream_putc_at(s, nump, num); /* store nexthop_num */
- } else {
- stream_putc(s, 0); /* distance */
- stream_putl(s, 0); /* metric */
- stream_putc(s, 0); /* nexthop_num */
- }
-
- stream_putw_at(s, 0, stream_get_endp(s));
-
- return zebra_server_send_message(client, s);
-}
-
-int zsend_route_notify_owner(struct route_entry *re, struct prefix *p,
- enum zapi_route_notify_owner note)
-{
- struct zserv *client;
- struct stream *s;
- uint8_t blen;
-
- client = zebra_find_client(re->type, re->instance);
- if (!client || !client->notify_owner) {
- if (IS_ZEBRA_DEBUG_PACKET) {
- char buff[PREFIX_STRLEN];
-
- zlog_debug(
- "Not Notifying Owner: %u about prefix %s(%u) %d vrf: %u",
- re->type, prefix2str(p, buff, sizeof(buff)),
- re->table, note, re->vrf_id);
- }
- return 0;
- }
-
- if (IS_ZEBRA_DEBUG_PACKET) {
- char buff[PREFIX_STRLEN];
-
- zlog_debug("Notifying Owner: %u about prefix %s(%u) %d vrf: %u",
- re->type, prefix2str(p, buff, sizeof(buff)),
- re->table, note, re->vrf_id);
- }
-
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
- stream_reset(s);
-
- zclient_create_header(s, ZEBRA_ROUTE_NOTIFY_OWNER, re->vrf_id);
-
- stream_put(s, &note, sizeof(note));
-
- stream_putc(s, p->family);
-
- blen = prefix_blen(p);
- stream_putc(s, p->prefixlen);
- stream_put(s, &p->u.prefix, blen);
-
- stream_putl(s, re->table);
-
- stream_putw_at(s, 0, stream_get_endp(s));
-
- return zebra_server_send_message(client, s);
-}
-
-void zsend_rule_notify_owner(struct zebra_pbr_rule *rule,
- enum zapi_rule_notify_owner note)
-{
- struct listnode *node;
- struct zserv *client;
- struct stream *s;
-
- if (IS_ZEBRA_DEBUG_PACKET)
- zlog_debug("%s: Notifying %u", __PRETTY_FUNCTION__,
- rule->rule.unique);
-
- for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client)) {
- if (rule->sock == client->sock)
- break;
- }
-
- if (!client)
- return;
-
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_RULE_NOTIFY_OWNER, VRF_DEFAULT);
- stream_put(s, &note, sizeof(note));
- stream_putl(s, rule->rule.seq);
- stream_putl(s, rule->rule.priority);
- stream_putl(s, rule->rule.unique);
- if (rule->ifp)
- stream_putl(s, rule->ifp->ifindex);
- else
- stream_putl(s, 0);
-
- stream_putw_at(s, 0, stream_get_endp(s));
-
- zebra_server_send_message(client, s);
-}
-
-void zsend_ipset_notify_owner(struct zebra_pbr_ipset *ipset,
- enum zapi_ipset_notify_owner note)
-{
- struct listnode *node;
- struct zserv *client;
- struct stream *s;
-
- if (IS_ZEBRA_DEBUG_PACKET)
- zlog_debug("%s: Notifying %u", __PRETTY_FUNCTION__,
- ipset->unique);
-
- for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client)) {
- if (ipset->sock == client->sock)
- break;
- }
-
- if (!client)
- return;
-
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_IPSET_NOTIFY_OWNER, VRF_DEFAULT);
- stream_put(s, &note, sizeof(note));
- stream_putl(s, ipset->unique);
- stream_put(s, ipset->ipset_name, ZEBRA_IPSET_NAME_SIZE);
- stream_putw_at(s, 0, stream_get_endp(s));
-
- zebra_server_send_message(client, s);
-}
-
-void zsend_ipset_entry_notify_owner(
- struct zebra_pbr_ipset_entry *ipset,
- enum zapi_ipset_entry_notify_owner note)
-{
- struct listnode *node;
- struct zserv *client;
- struct stream *s;
-
- if (IS_ZEBRA_DEBUG_PACKET)
- zlog_debug("%s: Notifying %u", __PRETTY_FUNCTION__,
- ipset->unique);
-
- for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client)) {
- if (ipset->sock == client->sock)
- break;
- }
-
- if (!client)
- return;
-
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_IPSET_ENTRY_NOTIFY_OWNER,
- VRF_DEFAULT);
- stream_put(s, &note, sizeof(note));
- stream_putl(s, ipset->unique);
- stream_put(s, ipset->backpointer->ipset_name,
- ZEBRA_IPSET_NAME_SIZE);
- stream_putw_at(s, 0, stream_get_endp(s));
-
- zebra_server_send_message(client, s);
-}
-
-void zsend_iptable_notify_owner(struct zebra_pbr_iptable *iptable,
- enum zapi_iptable_notify_owner note)
-{
- struct listnode *node;
- struct zserv *client;
- struct stream *s;
-
- if (IS_ZEBRA_DEBUG_PACKET)
- zlog_debug("%s: Notifying %u", __PRETTY_FUNCTION__,
- iptable->unique);
-
- for (ALL_LIST_ELEMENTS_RO(zebrad.client_list, node, client)) {
- if (iptable->sock == client->sock)
- break;
- }
-
- if (!client)
- return;
-
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_IPTABLE_NOTIFY_OWNER, VRF_DEFAULT);
- stream_put(s, &note, sizeof(note));
- stream_putl(s, iptable->unique);
- stream_putw_at(s, 0, stream_get_endp(s));
-
- zebra_server_send_message(client, s);
-}
-
-/* Router-id is updated. Send ZEBRA_ROUTER_ID_ADD to client. */
-int zsend_router_id_update(struct zserv *client, struct prefix *p,
- vrf_id_t vrf_id)
-{
- int blen;
-
- /* Check this client need interface information. */
- if (!vrf_bitmap_check(client->ridinfo, vrf_id))
- return 0;
-
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- /* Message type. */
- zclient_create_header(s, ZEBRA_ROUTER_ID_UPDATE, vrf_id);
-
- /* Prefix information. */
- stream_putc(s, p->family);
- blen = prefix_blen(p);
- stream_put(s, &p->u.prefix, blen);
- stream_putc(s, p->prefixlen);
-
- /* Write packet size. */
- stream_putw_at(s, 0, stream_get_endp(s));
-
- return zebra_server_send_message(client, s);
-}
-
-/*
- * Function used by Zebra to send a PW status update to LDP daemon
- */
-int zsend_pw_update(struct zserv *client, struct zebra_pw *pw)
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_PW_STATUS_UPDATE, pw->vrf_id);
- stream_write(s, pw->ifname, IF_NAMESIZE);
- stream_putl(s, pw->ifindex);
- stream_putl(s, pw->status);
-
- /* Put length at the first point of the stream. */
- stream_putw_at(s, 0, stream_get_endp(s));
-
- return zebra_server_send_message(client, s);
-}
-
-/* Send response to a get label chunk request to client */
-static int zsend_assign_label_chunk_response(struct zserv *client,
- vrf_id_t vrf_id,
- struct label_manager_chunk *lmc)
-{
- int ret;
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_GET_LABEL_CHUNK, vrf_id);
-
- if (lmc) {
- /* keep */
- stream_putc(s, lmc->keep);
- /* start and end labels */
- stream_putl(s, lmc->start);
- stream_putl(s, lmc->end);
- }
-
- /* Write packet size. */
- stream_putw_at(s, 0, stream_get_endp(s));
-
- ret = writen(client->sock, s->data, stream_get_endp(s));
- stream_free(s);
- return ret;
-}
-
-/* Send response to a label manager connect request to client */
-static int zsend_label_manager_connect_response(struct zserv *client,
- vrf_id_t vrf_id,
- unsigned short result)
-{
- int ret;
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_LABEL_MANAGER_CONNECT, vrf_id);
-
- /* result */
- stream_putc(s, result);
-
- /* Write packet size. */
- stream_putw_at(s, 0, stream_get_endp(s));
-
- ret = writen(client->sock, s->data, stream_get_endp(s));
- stream_free(s);
-
- return ret;
-}
-
-/* Inbound message handling ------------------------------------------------ */
-
-int cmd2type[] = {
- [ZEBRA_NEXTHOP_REGISTER] = RNH_NEXTHOP_TYPE,
- [ZEBRA_NEXTHOP_UNREGISTER] = RNH_NEXTHOP_TYPE,
- [ZEBRA_IMPORT_ROUTE_REGISTER] = RNH_IMPORT_CHECK_TYPE,
- [ZEBRA_IMPORT_ROUTE_UNREGISTER] = RNH_IMPORT_CHECK_TYPE,
-};
-
-/* Nexthop register */
-static void zread_rnh_register(ZAPI_HANDLER_ARGS)
-{
- struct rnh *rnh;
- struct stream *s;
- struct prefix p;
- unsigned short l = 0;
- uint8_t flags = 0;
- uint16_t type = cmd2type[hdr->command];
-
- if (IS_ZEBRA_DEBUG_NHT)
- zlog_debug(
- "rnh_register msg from client %s: hdr->length=%d, type=%s vrf=%u\n",
- zebra_route_string(client->proto), hdr->length,
- (type == RNH_NEXTHOP_TYPE) ? "nexthop" : "route",
- zvrf->vrf->vrf_id);
-
- s = msg;
-
- client->nh_reg_time = monotime(NULL);
-
- while (l < hdr->length) {
- STREAM_GETC(s, flags);
- STREAM_GETW(s, p.family);
- STREAM_GETC(s, p.prefixlen);
- l += 4;
- if (p.family == AF_INET) {
- if (p.prefixlen > IPV4_MAX_BITLEN) {
- zlog_warn(
- "%s: Specified prefix hdr->length %d is too large for a v4 address",
- __PRETTY_FUNCTION__, p.prefixlen);
- return;
- }
- STREAM_GET(&p.u.prefix4.s_addr, s, IPV4_MAX_BYTELEN);
- l += IPV4_MAX_BYTELEN;
- } else if (p.family == AF_INET6) {
- if (p.prefixlen > IPV6_MAX_BITLEN) {
- zlog_warn(
- "%s: Specified prefix hdr->length %d is to large for a v6 address",
- __PRETTY_FUNCTION__, p.prefixlen);
- return;
- }
- STREAM_GET(&p.u.prefix6, s, IPV6_MAX_BYTELEN);
- l += IPV6_MAX_BYTELEN;
- } else {
- zlog_err(
- "rnh_register: Received unknown family type %d\n",
- p.family);
- return;
- }
- rnh = zebra_add_rnh(&p, zvrf_id(zvrf), type);
- if (type == RNH_NEXTHOP_TYPE) {
- if (flags
- && !CHECK_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED))
- SET_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED);
- else if (!flags
- && CHECK_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED))
- UNSET_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED);
- } else if (type == RNH_IMPORT_CHECK_TYPE) {
- if (flags
- && !CHECK_FLAG(rnh->flags, ZEBRA_NHT_EXACT_MATCH))
- SET_FLAG(rnh->flags, ZEBRA_NHT_EXACT_MATCH);
- else if (!flags
- && CHECK_FLAG(rnh->flags,
- ZEBRA_NHT_EXACT_MATCH))
- UNSET_FLAG(rnh->flags, ZEBRA_NHT_EXACT_MATCH);
- }
-
- zebra_add_rnh_client(rnh, client, type, zvrf_id(zvrf));
- /* Anything not AF_INET/INET6 has been filtered out above */
- zebra_evaluate_rnh(zvrf_id(zvrf), p.family, 1, type, &p);
- }
-
-stream_failure:
- return;
-}
-
-/* Nexthop register */
-static void zread_rnh_unregister(ZAPI_HANDLER_ARGS)
-{
- struct rnh *rnh;
- struct stream *s;
- struct prefix p;
- unsigned short l = 0;
- uint16_t type = cmd2type[hdr->command];
-
- if (IS_ZEBRA_DEBUG_NHT)
- zlog_debug(
- "rnh_unregister msg from client %s: hdr->length=%d vrf: %u\n",
- zebra_route_string(client->proto), hdr->length,
- zvrf->vrf->vrf_id);
-
- s = msg;
-
- while (l < hdr->length) {
- uint8_t flags;
-
- STREAM_GETC(s, flags);
- if (flags != 0)
- goto stream_failure;
-
- STREAM_GETW(s, p.family);
- STREAM_GETC(s, p.prefixlen);
- l += 4;
- if (p.family == AF_INET) {
- if (p.prefixlen > IPV4_MAX_BITLEN) {
- zlog_warn(
- "%s: Specified prefix hdr->length %d is to large for a v4 address",
- __PRETTY_FUNCTION__, p.prefixlen);
- return;
- }
- STREAM_GET(&p.u.prefix4.s_addr, s, IPV4_MAX_BYTELEN);
- l += IPV4_MAX_BYTELEN;
- } else if (p.family == AF_INET6) {
- if (p.prefixlen > IPV6_MAX_BITLEN) {
- zlog_warn(
- "%s: Specified prefix hdr->length %d is to large for a v6 address",
- __PRETTY_FUNCTION__, p.prefixlen);
- return;
- }
- STREAM_GET(&p.u.prefix6, s, IPV6_MAX_BYTELEN);
- l += IPV6_MAX_BYTELEN;
- } else {
- zlog_err(
- "rnh_register: Received unknown family type %d\n",
- p.family);
- return;
- }
- rnh = zebra_lookup_rnh(&p, zvrf_id(zvrf), type);
- if (rnh) {
- client->nh_dereg_time = monotime(NULL);
- zebra_remove_rnh_client(rnh, client, type);
- }
- }
-stream_failure:
- return;
-}
-
-#define ZEBRA_MIN_FEC_LENGTH 5
-
-/* FEC register */
-static void zread_fec_register(ZAPI_HANDLER_ARGS)
-{
- struct stream *s;
- unsigned short l = 0;
- struct prefix p;
- uint16_t flags;
- uint32_t label_index = MPLS_INVALID_LABEL_INDEX;
-
- s = msg;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
- if (!zvrf)
- return; // unexpected
-
- /*
- * The minimum amount of data that can be sent for one fec
- * registration
- */
- if (hdr->length < ZEBRA_MIN_FEC_LENGTH) {
- zlog_err(
- "fec_register: Received a fec register of hdr->length %d, it is of insufficient size to properly decode",
- hdr->length);
- return;
- }
-
- while (l < hdr->length) {
- STREAM_GETW(s, flags);
- memset(&p, 0, sizeof(p));
- STREAM_GETW(s, p.family);
- if (p.family != AF_INET && p.family != AF_INET6) {
- zlog_err(
- "fec_register: Received unknown family type %d\n",
- p.family);
- return;
- }
- STREAM_GETC(s, p.prefixlen);
- if ((p.family == AF_INET && p.prefixlen > IPV4_MAX_BITLEN)
- || (p.family == AF_INET6
- && p.prefixlen > IPV6_MAX_BITLEN)) {
- zlog_warn(
- "%s: Specified prefix hdr->length: %d is to long for %d",
- __PRETTY_FUNCTION__, p.prefixlen, p.family);
- return;
- }
- l += 5;
- STREAM_GET(&p.u.prefix, s, PSIZE(p.prefixlen));
- l += PSIZE(p.prefixlen);
- if (flags & ZEBRA_FEC_REGISTER_LABEL_INDEX) {
- STREAM_GETL(s, label_index);
- l += 4;
- } else
- label_index = MPLS_INVALID_LABEL_INDEX;
- zebra_mpls_fec_register(zvrf, &p, label_index, client);
- }
-
-stream_failure:
- return;
-}
-
-/* FEC unregister */
-static void zread_fec_unregister(ZAPI_HANDLER_ARGS)
-{
- struct stream *s;
- unsigned short l = 0;
- struct prefix p;
- uint16_t flags;
-
- s = msg;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
- if (!zvrf)
- return; // unexpected
-
- /*
- * The minimum amount of data that can be sent for one
- * fec unregistration
- */
- if (hdr->length < ZEBRA_MIN_FEC_LENGTH) {
- zlog_err(
- "fec_unregister: Received a fec unregister of hdr->length %d, it is of insufficient size to properly decode",
- hdr->length);
- return;
- }
-
- while (l < hdr->length) {
- STREAM_GETW(s, flags);
- if (flags != 0)
- goto stream_failure;
-
- memset(&p, 0, sizeof(p));
- STREAM_GETW(s, p.family);
- if (p.family != AF_INET && p.family != AF_INET6) {
- zlog_err(
- "fec_unregister: Received unknown family type %d\n",
- p.family);
- return;
- }
- STREAM_GETC(s, p.prefixlen);
- if ((p.family == AF_INET && p.prefixlen > IPV4_MAX_BITLEN)
- || (p.family == AF_INET6
- && p.prefixlen > IPV6_MAX_BITLEN)) {
- zlog_warn(
- "%s: Received prefix hdr->length %d which is greater than %d can support",
- __PRETTY_FUNCTION__, p.prefixlen, p.family);
- return;
- }
- l += 5;
- STREAM_GET(&p.u.prefix, s, PSIZE(p.prefixlen));
- l += PSIZE(p.prefixlen);
- zebra_mpls_fec_unregister(zvrf, &p, client);
- }
-
-stream_failure:
- return;
-}
-
-
-/*
- * Register zebra server interface information.
- * Send current all interface and address information.
- */
-static void zread_interface_add(ZAPI_HANDLER_ARGS)
-{
- struct vrf *vrf;
- struct interface *ifp;
-
- /* Interface information is needed. */
- vrf_bitmap_set(client->ifinfo, zvrf_id(zvrf));
-
- RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
- FOR_ALL_INTERFACES (vrf, ifp) {
- /* Skip pseudo interface. */
- if (!CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE))
- continue;
-
- zsend_interface_add(client, ifp);
- zsend_interface_addresses(client, ifp);
- }
- }
-}
-
-/* Unregister zebra server interface information. */
-static void zread_interface_delete(ZAPI_HANDLER_ARGS)
-{
- vrf_bitmap_unset(client->ifinfo, zvrf_id(zvrf));
-}
-
-void zserv_nexthop_num_warn(const char *caller, const struct prefix *p,
- const unsigned int nexthop_num)
-{
- if (nexthop_num > multipath_num) {
- char buff[PREFIX2STR_BUFFER];
- prefix2str(p, buff, sizeof(buff));
- zlog_warn(
- "%s: Prefix %s has %d nexthops, but we can only use the first %d",
- caller, buff, nexthop_num, multipath_num);
- }
-}
-
-static void zread_route_add(ZAPI_HANDLER_ARGS)
-{
- struct stream *s;
- struct zapi_route api;
- struct zapi_nexthop *api_nh;
- afi_t afi;
- struct prefix_ipv6 *src_p = NULL;
- struct route_entry *re;
- struct nexthop *nexthop = NULL;
- int i, ret;
- vrf_id_t vrf_id = 0;
- struct ipaddr vtep_ip;
-
- s = msg;
- if (zapi_route_decode(s, &api) < 0) {
- if (IS_ZEBRA_DEBUG_RECV)
- zlog_debug("%s: Unable to decode zapi_route sent",
- __PRETTY_FUNCTION__);
- return;
- }
-
- if (IS_ZEBRA_DEBUG_RECV) {
- char buf_prefix[PREFIX_STRLEN];
- prefix2str(&api.prefix, buf_prefix, sizeof(buf_prefix));
- zlog_debug("%s: p=%s, ZAPI_MESSAGE_LABEL: %sset, flags=0x%x",
- __func__, buf_prefix,
- (CHECK_FLAG(api.message, ZAPI_MESSAGE_LABEL) ? ""
- : "un"),
- api.flags);
- }
-
- /* Allocate new route. */
- vrf_id = zvrf_id(zvrf);
- re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
- re->type = api.type;
- re->instance = api.instance;
- re->flags = api.flags;
- re->uptime = time(NULL);
- re->vrf_id = vrf_id;
- if (api.tableid && vrf_id == VRF_DEFAULT)
- re->table = api.tableid;
- else
- re->table = zvrf->table_id;
-
- /*
- * TBD should _all_ of the nexthop add operations use
- * api_nh->vrf_id instead of re->vrf_id ? I only changed
- * for cases NEXTHOP_TYPE_IPV4 and NEXTHOP_TYPE_IPV6.
- */
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP)) {
- for (i = 0; i < api.nexthop_num; i++) {
- api_nh = &api.nexthops[i];
- ifindex_t ifindex = 0;
-
- if (IS_ZEBRA_DEBUG_RECV) {
- zlog_debug("nh type %d", api_nh->type);
- }
-
- switch (api_nh->type) {
- case NEXTHOP_TYPE_IFINDEX:
- nexthop = route_entry_nexthop_ifindex_add(
- re, api_nh->ifindex, api_nh->vrf_id);
- break;
- case NEXTHOP_TYPE_IPV4:
- if (IS_ZEBRA_DEBUG_RECV) {
- char nhbuf[INET6_ADDRSTRLEN] = {0};
- inet_ntop(AF_INET, &api_nh->gate.ipv4,
- nhbuf, INET6_ADDRSTRLEN);
- zlog_debug("%s: nh=%s, vrf_id=%d",
- __func__, nhbuf,
- api_nh->vrf_id);
- }
- nexthop = route_entry_nexthop_ipv4_add(
- re, &api_nh->gate.ipv4, NULL,
- api_nh->vrf_id);
- break;
- case NEXTHOP_TYPE_IPV4_IFINDEX:
-
- memset(&vtep_ip, 0, sizeof(struct ipaddr));
- if (CHECK_FLAG(api.flags,
- ZEBRA_FLAG_EVPN_ROUTE)) {
- ifindex = get_l3vni_svi_ifindex(vrf_id);
- } else {
- ifindex = api_nh->ifindex;
- }
-
- if (IS_ZEBRA_DEBUG_RECV) {
- char nhbuf[INET6_ADDRSTRLEN] = {0};
- inet_ntop(AF_INET, &api_nh->gate.ipv4,
- nhbuf, INET6_ADDRSTRLEN);
- zlog_debug(
- "%s: nh=%s, vrf_id=%d (re->vrf_id=%d), ifindex=%d",
- __func__, nhbuf, api_nh->vrf_id,
- re->vrf_id, ifindex);
- }
- nexthop = route_entry_nexthop_ipv4_ifindex_add(
- re, &api_nh->gate.ipv4, NULL, ifindex,
- api_nh->vrf_id);
-
- /* if this an EVPN route entry,
- * program the nh as neigh
- */
- if (CHECK_FLAG(api.flags,
- ZEBRA_FLAG_EVPN_ROUTE)) {
- SET_FLAG(nexthop->flags,
- NEXTHOP_FLAG_EVPN_RVTEP);
- vtep_ip.ipa_type = IPADDR_V4;
- memcpy(&(vtep_ip.ipaddr_v4),
- &(api_nh->gate.ipv4),
- sizeof(struct in_addr));
- zebra_vxlan_evpn_vrf_route_add(
- vrf_id, &api.rmac, &vtep_ip,
- &api.prefix);
- }
- break;
- case NEXTHOP_TYPE_IPV6:
- nexthop = route_entry_nexthop_ipv6_add(
- re, &api_nh->gate.ipv6, api_nh->vrf_id);
- break;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- memset(&vtep_ip, 0, sizeof(struct ipaddr));
- if (CHECK_FLAG(api.flags,
- ZEBRA_FLAG_EVPN_ROUTE)) {
- ifindex =
- get_l3vni_svi_ifindex(vrf_id);
- } else {
- ifindex = api_nh->ifindex;
- }
-
- nexthop = route_entry_nexthop_ipv6_ifindex_add(
- re, &api_nh->gate.ipv6, ifindex,
- api_nh->vrf_id);
-
- /* if this an EVPN route entry,
- * program the nh as neigh
- */
- if (CHECK_FLAG(api.flags,
- ZEBRA_FLAG_EVPN_ROUTE)) {
- SET_FLAG(nexthop->flags,
- NEXTHOP_FLAG_EVPN_RVTEP);
- vtep_ip.ipa_type = IPADDR_V6;
- memcpy(&vtep_ip.ipaddr_v6,
- &(api_nh->gate.ipv6),
- sizeof(struct in6_addr));
- zebra_vxlan_evpn_vrf_route_add(
- vrf_id,
- &api.rmac,
- &vtep_ip,
- &api.prefix);
- }
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- nexthop = route_entry_nexthop_blackhole_add(
- re, api_nh->bh_type);
- break;
- }
-
- if (!nexthop) {
- zlog_warn(
- "%s: Nexthops Specified: %d but we failed to properly create one",
- __PRETTY_FUNCTION__, api.nexthop_num);
- nexthops_free(re->ng.nexthop);
- XFREE(MTYPE_RE, re);
- return;
- }
- /* MPLS labels for BGP-LU or Segment Routing */
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_LABEL)
- && api_nh->type != NEXTHOP_TYPE_IFINDEX
- && api_nh->type != NEXTHOP_TYPE_BLACKHOLE) {
- enum lsp_types_t label_type;
-
- label_type =
- lsp_type_from_re_type(client->proto);
-
- if (IS_ZEBRA_DEBUG_RECV) {
- zlog_debug(
- "%s: adding %d labels of type %d (1st=%u)",
- __func__, api_nh->label_num,
- label_type, api_nh->labels[0]);
- }
-
- nexthop_add_labels(nexthop, label_type,
- api_nh->label_num,
- &api_nh->labels[0]);
- }
- }
- }
-
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_DISTANCE))
- re->distance = api.distance;
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_METRIC))
- re->metric = api.metric;
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_TAG))
- re->tag = api.tag;
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_MTU))
- re->mtu = api.mtu;
-
- afi = family2afi(api.prefix.family);
- if (afi != AFI_IP6 && CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX)) {
- zlog_warn("%s: Received SRC Prefix but afi is not v6",
- __PRETTY_FUNCTION__);
- nexthops_free(re->ng.nexthop);
- XFREE(MTYPE_RE, re);
- return;
- }
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX))
- src_p = &api.src_prefix;
-
- ret = rib_add_multipath(afi, api.safi, &api.prefix, src_p, re);
-
- /* Stats */
- switch (api.prefix.family) {
- case AF_INET:
- if (ret > 0)
- client->v4_route_add_cnt++;
- else if (ret < 0)
- client->v4_route_upd8_cnt++;
- break;
- case AF_INET6:
- if (ret > 0)
- client->v6_route_add_cnt++;
- else if (ret < 0)
- client->v6_route_upd8_cnt++;
- break;
- }
-}
-
-static void zread_route_del(ZAPI_HANDLER_ARGS)
-{
- struct stream *s;
- struct zapi_route api;
- afi_t afi;
- struct prefix_ipv6 *src_p = NULL;
- uint32_t table_id;
-
- s = msg;
- if (zapi_route_decode(s, &api) < 0)
- return;
-
- afi = family2afi(api.prefix.family);
- if (afi != AFI_IP6 && CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX)) {
- zlog_warn("%s: Received a src prefix while afi is not v6",
- __PRETTY_FUNCTION__);
- return;
- }
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX))
- src_p = &api.src_prefix;
-
- if (api.vrf_id == VRF_DEFAULT && api.tableid != 0)
- table_id = api.tableid;
- else
- table_id = zvrf->table_id;
-
- rib_delete(afi, api.safi, zvrf_id(zvrf), api.type, api.instance,
- api.flags, &api.prefix, src_p, NULL, table_id,
- api.metric, false, &api.rmac);
-
- /* Stats */
- switch (api.prefix.family) {
- case AF_INET:
- client->v4_route_del_cnt++;
- break;
- case AF_INET6:
- client->v6_route_del_cnt++;
- break;
- }
-}
-
-/* This function support multiple nexthop. */
-/*
- * Parse the ZEBRA_IPV4_ROUTE_ADD sent from client. Update re and
- * add kernel route.
- */
-static void zread_ipv4_add(ZAPI_HANDLER_ARGS)
-{
- int i;
- struct route_entry *re;
- struct prefix p;
- uint8_t message;
- struct in_addr nhop_addr;
- uint8_t nexthop_num;
- uint8_t nexthop_type;
- struct stream *s;
- ifindex_t ifindex;
- safi_t safi;
- int ret;
- enum lsp_types_t label_type = ZEBRA_LSP_NONE;
- mpls_label_t label;
- struct nexthop *nexthop;
- enum blackhole_type bh_type = BLACKHOLE_NULL;
-
- /* Get input stream. */
- s = msg;
-
- /* Allocate new re. */
- re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
-
- /* Type, flags, message. */
- STREAM_GETC(s, re->type);
- if (re->type > ZEBRA_ROUTE_MAX) {
- zlog_warn("%s: Specified route type %d is not a legal value\n",
- __PRETTY_FUNCTION__, re->type);
- XFREE(MTYPE_RE, re);
- return;
- }
- STREAM_GETW(s, re->instance);
- STREAM_GETL(s, re->flags);
- STREAM_GETC(s, message);
- STREAM_GETW(s, safi);
- re->uptime = time(NULL);
-
- /* IPv4 prefix. */
- memset(&p, 0, sizeof(struct prefix_ipv4));
- p.family = AF_INET;
- STREAM_GETC(s, p.prefixlen);
- if (p.prefixlen > IPV4_MAX_BITLEN) {
- zlog_warn(
- "%s: Specified prefix length %d is greater than what v4 can be",
- __PRETTY_FUNCTION__, p.prefixlen);
- XFREE(MTYPE_RE, re);
- return;
- }
- STREAM_GET(&p.u.prefix4, s, PSIZE(p.prefixlen));
-
- /* VRF ID */
- re->vrf_id = zvrf_id(zvrf);
-
- /* Nexthop parse. */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_NEXTHOP)) {
- STREAM_GETC(s, nexthop_num);
- zserv_nexthop_num_warn(__func__, (const struct prefix *)&p,
- nexthop_num);
-
- if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL))
- label_type = lsp_type_from_re_type(client->proto);
-
- for (i = 0; i < nexthop_num; i++) {
- STREAM_GETC(s, nexthop_type);
-
- switch (nexthop_type) {
- case NEXTHOP_TYPE_IFINDEX:
- STREAM_GETL(s, ifindex);
- route_entry_nexthop_ifindex_add(re, ifindex,
- re->vrf_id);
- break;
- case NEXTHOP_TYPE_IPV4:
- STREAM_GET(&nhop_addr.s_addr, s,
- IPV4_MAX_BYTELEN);
- nexthop = route_entry_nexthop_ipv4_add(
- re, &nhop_addr, NULL, re->vrf_id);
- /* For labeled-unicast, each nexthop is followed
- * by label. */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL)) {
- STREAM_GETL(s, label);
- nexthop_add_labels(nexthop, label_type,
- 1, &label);
- }
- break;
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- STREAM_GET(&nhop_addr.s_addr, s,
- IPV4_MAX_BYTELEN);
- STREAM_GETL(s, ifindex);
- route_entry_nexthop_ipv4_ifindex_add(
- re, &nhop_addr, NULL, ifindex,
- re->vrf_id);
- break;
- case NEXTHOP_TYPE_IPV6:
- zlog_warn(
- "%s: Please use ZEBRA_ROUTE_ADD if you want to pass v6 nexthops",
- __PRETTY_FUNCTION__);
- nexthops_free(re->ng.nexthop);
- XFREE(MTYPE_RE, re);
- return;
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- route_entry_nexthop_blackhole_add(re, bh_type);
- break;
- default:
- zlog_warn(
- "%s: Specified nexthop type: %d does not exist",
- __PRETTY_FUNCTION__, nexthop_type);
- nexthops_free(re->ng.nexthop);
- XFREE(MTYPE_RE, re);
- return;
- }
- }
- }
-
- /* Distance. */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_DISTANCE))
- STREAM_GETC(s, re->distance);
-
- /* Metric. */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_METRIC))
- STREAM_GETL(s, re->metric);
-
- /* Tag */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_TAG))
- STREAM_GETL(s, re->tag);
- else
- re->tag = 0;
-
- if (CHECK_FLAG(message, ZAPI_MESSAGE_MTU))
- STREAM_GETL(s, re->mtu);
- else
- re->mtu = 0;
-
- /* Table */
- re->table = zvrf->table_id;
-
- ret = rib_add_multipath(AFI_IP, safi, &p, NULL, re);
-
- /* Stats */
- if (ret > 0)
- client->v4_route_add_cnt++;
- else if (ret < 0)
- client->v4_route_upd8_cnt++;
-
- return;
-
-stream_failure:
- nexthops_free(re->ng.nexthop);
- XFREE(MTYPE_RE, re);
-}
-
-/* Zebra server IPv4 prefix delete function. */
-static void zread_ipv4_delete(ZAPI_HANDLER_ARGS)
-{
- struct stream *s;
- struct zapi_ipv4 api;
- struct prefix p;
- uint32_t table_id;
-
- s = msg;
-
- /* Type, flags, message. */
- STREAM_GETC(s, api.type);
- STREAM_GETW(s, api.instance);
- STREAM_GETL(s, api.flags);
- STREAM_GETC(s, api.message);
- STREAM_GETW(s, api.safi);
-
- /* IPv4 prefix. */
- memset(&p, 0, sizeof(struct prefix));
- p.family = AF_INET;
- STREAM_GETC(s, p.prefixlen);
- if (p.prefixlen > IPV4_MAX_BITLEN) {
- zlog_warn("%s: Passed in prefixlen %d is impossible",
- __PRETTY_FUNCTION__, p.prefixlen);
- return;
- }
- STREAM_GET(&p.u.prefix4, s, PSIZE(p.prefixlen));
-
- table_id = zvrf->table_id;
-
- rib_delete(AFI_IP, api.safi, zvrf_id(zvrf), api.type, api.instance,
- api.flags, &p, NULL, NULL, table_id, 0, false, NULL);
- client->v4_route_del_cnt++;
-
-stream_failure:
- return;
-}
-
-/* MRIB Nexthop lookup for IPv4. */
-static void zread_ipv4_nexthop_lookup_mrib(ZAPI_HANDLER_ARGS)
-{
- struct in_addr addr;
- struct route_entry *re;
-
- STREAM_GET(&addr.s_addr, msg, IPV4_MAX_BYTELEN);
- re = rib_match_ipv4_multicast(zvrf_id(zvrf), addr, NULL);
- zsend_ipv4_nexthop_lookup_mrib(client, addr, re, zvrf);
-
-stream_failure:
- return;
-}
-
-/* Zebra server IPv6 prefix add function. */
-static void zread_ipv4_route_ipv6_nexthop_add(ZAPI_HANDLER_ARGS)
-{
- unsigned int i;
- struct stream *s;
- struct in6_addr nhop_addr;
- struct route_entry *re;
- uint8_t message;
- uint8_t nexthop_num;
- uint8_t nexthop_type;
- struct prefix p;
- safi_t safi;
- static struct in6_addr nexthops[MULTIPATH_NUM];
- static unsigned int ifindices[MULTIPATH_NUM];
- int ret;
- static mpls_label_t labels[MULTIPATH_NUM];
- enum lsp_types_t label_type = ZEBRA_LSP_NONE;
- mpls_label_t label;
- struct nexthop *nexthop;
- enum blackhole_type bh_type = BLACKHOLE_NULL;
-
- /* Get input stream. */
- s = msg;
-
- memset(&nhop_addr, 0, sizeof(struct in6_addr));
-
- /* Allocate new re. */
- re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
-
- /* Type, flags, message. */
- STREAM_GETC(s, re->type);
- if (re->type > ZEBRA_ROUTE_MAX) {
- zlog_warn("%s: Specified route type: %d is not a legal value\n",
- __PRETTY_FUNCTION__, re->type);
- XFREE(MTYPE_RE, re);
- return;
- }
- STREAM_GETW(s, re->instance);
- STREAM_GETL(s, re->flags);
- STREAM_GETC(s, message);
- STREAM_GETW(s, safi);
- re->uptime = time(NULL);
-
- /* IPv4 prefix. */
- memset(&p, 0, sizeof(struct prefix_ipv4));
- p.family = AF_INET;
- STREAM_GETC(s, p.prefixlen);
- if (p.prefixlen > IPV4_MAX_BITLEN) {
- zlog_warn(
- "%s: Prefix Length %d is greater than what a v4 address can use",
- __PRETTY_FUNCTION__, p.prefixlen);
- XFREE(MTYPE_RE, re);
- return;
- }
- STREAM_GET(&p.u.prefix4, s, PSIZE(p.prefixlen));
-
- /* VRF ID */
- re->vrf_id = zvrf_id(zvrf);
-
- /* We need to give nh-addr, nh-ifindex with the same next-hop object
- * to the re to ensure that IPv6 multipathing works; need to coalesce
- * these. Clients should send the same number of paired set of
- * next-hop-addr/next-hop-ifindices. */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_NEXTHOP)) {
- unsigned int nh_count = 0;
- unsigned int if_count = 0;
- unsigned int max_nh_if = 0;
-
- STREAM_GETC(s, nexthop_num);
- zserv_nexthop_num_warn(__func__, (const struct prefix *)&p,
- nexthop_num);
-
- if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL))
- label_type = lsp_type_from_re_type(client->proto);
-
- for (i = 0; i < nexthop_num; i++) {
- STREAM_GETC(s, nexthop_type);
-
- switch (nexthop_type) {
- case NEXTHOP_TYPE_IPV6:
- STREAM_GET(&nhop_addr, s, 16);
- if (nh_count < MULTIPATH_NUM) {
- /* For labeled-unicast, each nexthop is
- * followed by label. */
- if (CHECK_FLAG(message,
- ZAPI_MESSAGE_LABEL)) {
- STREAM_GETL(s, label);
- labels[nh_count] = label;
- }
- nexthops[nh_count] = nhop_addr;
- nh_count++;
- }
- break;
- case NEXTHOP_TYPE_IFINDEX:
- if (if_count < multipath_num) {
- STREAM_GETL(s, ifindices[if_count++]);
- }
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- route_entry_nexthop_blackhole_add(re, bh_type);
- break;
- default:
- zlog_warn(
- "%s: Please use ZEBRA_ROUTE_ADD if you want to pass non v6 nexthops",
- __PRETTY_FUNCTION__);
- nexthops_free(re->ng.nexthop);
- XFREE(MTYPE_RE, re);
- return;
- }
- }
-
- max_nh_if = (nh_count > if_count) ? nh_count : if_count;
- for (i = 0; i < max_nh_if; i++) {
- if ((i < nh_count)
- && !IN6_IS_ADDR_UNSPECIFIED(&nexthops[i])) {
- if ((i < if_count) && ifindices[i])
- nexthop =
- route_entry_nexthop_ipv6_ifindex_add(
- re, &nexthops[i],
- ifindices[i],
- re->vrf_id);
- else
- nexthop = route_entry_nexthop_ipv6_add(
- re, &nexthops[i], re->vrf_id);
-
- if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL))
- nexthop_add_labels(nexthop, label_type,
- 1, &labels[i]);
- } else {
- if ((i < if_count) && ifindices[i])
- route_entry_nexthop_ifindex_add(
- re, ifindices[i], re->vrf_id);
- }
- }
- }
-
- /* Distance. */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_DISTANCE))
- STREAM_GETC(s, re->distance);
-
- /* Metric. */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_METRIC))
- STREAM_GETL(s, re->metric);
-
- /* Tag */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_TAG))
- STREAM_GETL(s, re->tag);
- else
- re->tag = 0;
-
- if (CHECK_FLAG(message, ZAPI_MESSAGE_MTU))
- STREAM_GETL(s, re->mtu);
- else
- re->mtu = 0;
-
- /* Table */
- re->table = zvrf->table_id;
-
- ret = rib_add_multipath(AFI_IP6, safi, &p, NULL, re);
- /* Stats */
- if (ret > 0)
- client->v4_route_add_cnt++;
- else if (ret < 0)
- client->v4_route_upd8_cnt++;
-
- return;
-
-stream_failure:
- nexthops_free(re->ng.nexthop);
- XFREE(MTYPE_RE, re);
-}
-
-static void zread_ipv6_add(ZAPI_HANDLER_ARGS)
-{
- unsigned int i;
- struct stream *s;
- struct in6_addr nhop_addr;
- ifindex_t ifindex;
- struct route_entry *re;
- uint8_t message;
- uint8_t nexthop_num;
- uint8_t nexthop_type;
- struct prefix p;
- struct prefix_ipv6 src_p, *src_pp;
- safi_t safi;
- static struct in6_addr nexthops[MULTIPATH_NUM];
- static unsigned int ifindices[MULTIPATH_NUM];
- int ret;
- static mpls_label_t labels[MULTIPATH_NUM];
- enum lsp_types_t label_type = ZEBRA_LSP_NONE;
- mpls_label_t label;
- struct nexthop *nexthop;
- enum blackhole_type bh_type = BLACKHOLE_NULL;
-
- /* Get input stream. */
- s = msg;
-
- memset(&nhop_addr, 0, sizeof(struct in6_addr));
-
- /* Allocate new re. */
- re = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
-
- /* Type, flags, message. */
- STREAM_GETC(s, re->type);
- if (re->type > ZEBRA_ROUTE_MAX) {
- zlog_warn("%s: Specified route type: %d is not a legal value\n",
- __PRETTY_FUNCTION__, re->type);
- XFREE(MTYPE_RE, re);
- return;
- }
- STREAM_GETW(s, re->instance);
- STREAM_GETL(s, re->flags);
- STREAM_GETC(s, message);
- STREAM_GETW(s, safi);
- re->uptime = time(NULL);
-
- /* IPv6 prefix. */
- memset(&p, 0, sizeof(p));
- p.family = AF_INET6;
- STREAM_GETC(s, p.prefixlen);
- if (p.prefixlen > IPV6_MAX_BITLEN) {
- zlog_warn(
- "%s: Specified prefix length %d is to large for v6 prefix",
- __PRETTY_FUNCTION__, p.prefixlen);
- XFREE(MTYPE_RE, re);
- return;
- }
- STREAM_GET(&p.u.prefix6, s, PSIZE(p.prefixlen));
-
- if (CHECK_FLAG(message, ZAPI_MESSAGE_SRCPFX)) {
- memset(&src_p, 0, sizeof(src_p));
- src_p.family = AF_INET6;
- STREAM_GETC(s, src_p.prefixlen);
- if (src_p.prefixlen > IPV6_MAX_BITLEN) {
- zlog_warn(
- "%s: Specified src prefix length %d is to large for v6 prefix",
- __PRETTY_FUNCTION__, src_p.prefixlen);
- XFREE(MTYPE_RE, re);
- return;
- }
- STREAM_GET(&src_p.prefix, s, PSIZE(src_p.prefixlen));
- src_pp = &src_p;
- } else
- src_pp = NULL;
-
- /* VRF ID */
- re->vrf_id = zvrf_id(zvrf);
-
- /* We need to give nh-addr, nh-ifindex with the same next-hop object
- * to the re to ensure that IPv6 multipathing works; need to coalesce
- * these. Clients should send the same number of paired set of
- * next-hop-addr/next-hop-ifindices. */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_NEXTHOP)) {
- unsigned int nh_count = 0;
- unsigned int if_count = 0;
- unsigned int max_nh_if = 0;
-
- STREAM_GETC(s, nexthop_num);
- zserv_nexthop_num_warn(__func__, (const struct prefix *)&p,
- nexthop_num);
-
- if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL))
- label_type = lsp_type_from_re_type(client->proto);
-
- for (i = 0; i < nexthop_num; i++) {
- STREAM_GETC(s, nexthop_type);
-
- switch (nexthop_type) {
- case NEXTHOP_TYPE_IPV6:
- STREAM_GET(&nhop_addr, s, 16);
- if (nh_count < MULTIPATH_NUM) {
- /* For labeled-unicast, each nexthop is
- * followed by label. */
- if (CHECK_FLAG(message,
- ZAPI_MESSAGE_LABEL)) {
- STREAM_GETL(s, label);
- labels[nh_count] = label;
- }
- nexthops[nh_count++] = nhop_addr;
- }
- break;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- STREAM_GET(&nhop_addr, s, 16);
- STREAM_GETL(s, ifindex);
- route_entry_nexthop_ipv6_ifindex_add(
- re, &nhop_addr, ifindex, re->vrf_id);
- break;
- case NEXTHOP_TYPE_IFINDEX:
- if (if_count < multipath_num) {
- STREAM_GETL(s, ifindices[if_count++]);
- }
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- route_entry_nexthop_blackhole_add(re, bh_type);
- break;
- default:
- zlog_warn(
- "%s: Please use ZEBRA_ROUTE_ADD if you want to pass non v6 nexthops",
- __PRETTY_FUNCTION__);
- nexthops_free(re->ng.nexthop);
- XFREE(MTYPE_RE, re);
- return;
- }
- }
-
- max_nh_if = (nh_count > if_count) ? nh_count : if_count;
- for (i = 0; i < max_nh_if; i++) {
- if ((i < nh_count)
- && !IN6_IS_ADDR_UNSPECIFIED(&nexthops[i])) {
- if ((i < if_count) && ifindices[i])
- nexthop =
- route_entry_nexthop_ipv6_ifindex_add(
- re, &nexthops[i],
- ifindices[i],
- re->vrf_id);
- else
- nexthop = route_entry_nexthop_ipv6_add(
- re, &nexthops[i], re->vrf_id);
- if (CHECK_FLAG(message, ZAPI_MESSAGE_LABEL))
- nexthop_add_labels(nexthop, label_type,
- 1, &labels[i]);
- } else {
- if ((i < if_count) && ifindices[i])
- route_entry_nexthop_ifindex_add(
- re, ifindices[i], re->vrf_id);
- }
- }
- }
-
- /* Distance. */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_DISTANCE))
- STREAM_GETC(s, re->distance);
-
- /* Metric. */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_METRIC))
- STREAM_GETL(s, re->metric);
-
- /* Tag */
- if (CHECK_FLAG(message, ZAPI_MESSAGE_TAG))
- STREAM_GETL(s, re->tag);
- else
- re->tag = 0;
-
- if (CHECK_FLAG(message, ZAPI_MESSAGE_MTU))
- STREAM_GETL(s, re->mtu);
- else
- re->mtu = 0;
-
- re->table = zvrf->table_id;
-
- ret = rib_add_multipath(AFI_IP6, safi, &p, src_pp, re);
- /* Stats */
- if (ret > 0)
- client->v6_route_add_cnt++;
- else if (ret < 0)
- client->v6_route_upd8_cnt++;
-
- return;
-
-stream_failure:
- nexthops_free(re->ng.nexthop);
- XFREE(MTYPE_RE, re);
-}
-
-/* Zebra server IPv6 prefix delete function. */
-static void zread_ipv6_delete(ZAPI_HANDLER_ARGS)
-{
- struct stream *s;
- struct zapi_ipv6 api;
- struct prefix p;
- struct prefix_ipv6 src_p, *src_pp;
-
- s = msg;
-
- /* Type, flags, message. */
- STREAM_GETC(s, api.type);
- STREAM_GETW(s, api.instance);
- STREAM_GETL(s, api.flags);
- STREAM_GETC(s, api.message);
- STREAM_GETW(s, api.safi);
-
- /* IPv4 prefix. */
- memset(&p, 0, sizeof(struct prefix));
- p.family = AF_INET6;
- STREAM_GETC(s, p.prefixlen);
- STREAM_GET(&p.u.prefix6, s, PSIZE(p.prefixlen));
-
- if (CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX)) {
- memset(&src_p, 0, sizeof(struct prefix_ipv6));
- src_p.family = AF_INET6;
- STREAM_GETC(s, src_p.prefixlen);
- STREAM_GET(&src_p.prefix, s, PSIZE(src_p.prefixlen));
- src_pp = &src_p;
- } else
- src_pp = NULL;
-
- rib_delete(AFI_IP6, api.safi, zvrf_id(zvrf), api.type, api.instance,
- api.flags, &p, src_pp, NULL, client->rtm_table, 0, false,
- NULL);
-
- client->v6_route_del_cnt++;
-
-stream_failure:
- return;
-}
-
-/* Register zebra server router-id information. Send current router-id */
-static void zread_router_id_add(ZAPI_HANDLER_ARGS)
-{
- struct prefix p;
-
- /* Router-id information is needed. */
- vrf_bitmap_set(client->ridinfo, zvrf_id(zvrf));
-
- router_id_get(&p, zvrf_id(zvrf));
-
- zsend_router_id_update(client, &p, zvrf_id(zvrf));
-}
-
-/* Unregister zebra server router-id information. */
-static void zread_router_id_delete(ZAPI_HANDLER_ARGS)
-{
- vrf_bitmap_unset(client->ridinfo, zvrf_id(zvrf));
-}
-
-static void zsend_capabilities(struct zserv *client, struct zebra_vrf *zvrf)
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_CAPABILITIES, zvrf->vrf->vrf_id);
- stream_putc(s, mpls_enabled);
- stream_putl(s, multipath_num);
-
- stream_putw_at(s, 0, stream_get_endp(s));
- zebra_server_send_message(client, s);
-}
-
-/* Tie up route-type and client->sock */
-static void zread_hello(ZAPI_HANDLER_ARGS)
-{
- /* type of protocol (lib/zebra.h) */
- uint8_t proto;
- unsigned short instance;
- uint8_t notify;
-
- STREAM_GETC(msg, proto);
- STREAM_GETW(msg, instance);
- STREAM_GETC(msg, notify);
- if (notify)
- client->notify_owner = true;
-
- /* accept only dynamic routing protocols */
- if ((proto < ZEBRA_ROUTE_MAX) && (proto > ZEBRA_ROUTE_STATIC)) {
- zlog_notice(
- "client %d says hello and bids fair to announce only %s routes vrf=%u",
- client->sock, zebra_route_string(proto),
- zvrf->vrf->vrf_id);
- if (instance)
- zlog_notice("client protocol instance %d", instance);
-
- client->proto = proto;
- client->instance = instance;
- }
-
- zsend_capabilities(client, zvrf);
-stream_failure:
- return;
-}
-
-/* Unregister all information in a VRF. */
-static void zread_vrf_unregister(ZAPI_HANDLER_ARGS)
-{
- int i;
- afi_t afi;
-
- for (afi = AFI_IP; afi < AFI_MAX; afi++)
- for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
- vrf_bitmap_unset(client->redist[afi][i], zvrf_id(zvrf));
- vrf_bitmap_unset(client->redist_default, zvrf_id(zvrf));
- vrf_bitmap_unset(client->ifinfo, zvrf_id(zvrf));
- vrf_bitmap_unset(client->ridinfo, zvrf_id(zvrf));
-}
-
-static void zread_mpls_labels(ZAPI_HANDLER_ARGS)
-{
- struct stream *s;
- enum lsp_types_t type;
- struct prefix prefix;
- enum nexthop_types_t gtype;
- union g_addr gate;
- ifindex_t ifindex;
- mpls_label_t in_label, out_label;
- uint8_t distance;
-
- /* Get input stream. */
- s = msg;
-
- /* Get data. */
- STREAM_GETC(s, type);
- STREAM_GETL(s, prefix.family);
- switch (prefix.family) {
- case AF_INET:
- STREAM_GET(&prefix.u.prefix4.s_addr, s, IPV4_MAX_BYTELEN);
- STREAM_GETC(s, prefix.prefixlen);
- if (prefix.prefixlen > IPV4_MAX_BITLEN) {
- zlog_warn(
- "%s: Specified prefix length %d is greater than a v4 address can support",
- __PRETTY_FUNCTION__, prefix.prefixlen);
- return;
- }
- STREAM_GET(&gate.ipv4.s_addr, s, IPV4_MAX_BYTELEN);
- break;
- case AF_INET6:
- STREAM_GET(&prefix.u.prefix6, s, 16);
- STREAM_GETC(s, prefix.prefixlen);
- if (prefix.prefixlen > IPV6_MAX_BITLEN) {
- zlog_warn(
- "%s: Specified prefix length %d is greater than a v6 address can support",
- __PRETTY_FUNCTION__, prefix.prefixlen);
- return;
- }
- STREAM_GET(&gate.ipv6, s, 16);
- break;
- default:
- zlog_warn("%s: Specified AF %d is not supported for this call",
- __PRETTY_FUNCTION__, prefix.family);
- return;
- }
- STREAM_GETL(s, ifindex);
- STREAM_GETC(s, distance);
- STREAM_GETL(s, in_label);
- STREAM_GETL(s, out_label);
-
- switch (prefix.family) {
- case AF_INET:
- if (ifindex)
- gtype = NEXTHOP_TYPE_IPV4_IFINDEX;
- else
- gtype = NEXTHOP_TYPE_IPV4;
- break;
- case AF_INET6:
- if (ifindex)
- gtype = NEXTHOP_TYPE_IPV6_IFINDEX;
- else
- gtype = NEXTHOP_TYPE_IPV6;
- break;
- default:
- return;
- }
-
- if (!mpls_enabled)
- return;
-
- if (hdr->command == ZEBRA_MPLS_LABELS_ADD) {
- mpls_lsp_install(zvrf, type, in_label, out_label, gtype, &gate,
- ifindex);
- mpls_ftn_update(1, zvrf, type, &prefix, gtype, &gate, ifindex,
- distance, out_label);
- } else if (hdr->command == ZEBRA_MPLS_LABELS_DELETE) {
- mpls_lsp_uninstall(zvrf, type, in_label, gtype, &gate, ifindex);
- mpls_ftn_update(0, zvrf, type, &prefix, gtype, &gate, ifindex,
- distance, out_label);
- }
-stream_failure:
- return;
-}
-
-static int zsend_table_manager_connect_response(struct zserv *client,
- vrf_id_t vrf_id, uint16_t result)
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_TABLE_MANAGER_CONNECT, vrf_id);
-
- /* result */
- stream_putc(s, result);
-
- stream_putw_at(s, 0, stream_get_endp(s));
-
- return zebra_server_send_message(client, s);
-}
-
-/* Send response to a table manager connect request to client */
-static void zread_table_manager_connect(struct zserv *client,
- struct stream *msg,
- vrf_id_t vrf_id)
-{
- struct stream *s;
- uint8_t proto;
- uint16_t instance;
-
- s = msg;
-
- /* Get data. */
- STREAM_GETC(s, proto);
- STREAM_GETW(s, instance);
-
- /* accept only dynamic routing protocols */
- if ((proto >= ZEBRA_ROUTE_MAX) || (proto <= ZEBRA_ROUTE_STATIC)) {
- zlog_err("client %d has wrong protocol %s", client->sock,
- zebra_route_string(proto));
- zsend_table_manager_connect_response(client, vrf_id, 1);
- return;
- }
- zlog_notice("client %d with vrf %u instance %u connected as %s",
- client->sock, vrf_id, instance, zebra_route_string(proto));
- client->proto = proto;
- client->instance = instance;
-
- /*
- * Release previous labels of same protocol and instance.
- * This is done in case it restarted from an unexpected shutdown.
- */
- release_daemon_table_chunks(proto, instance);
-
- zsend_table_manager_connect_response(client, vrf_id, 0);
-
- stream_failure:
- return;
-}
-
-static void zread_label_manager_connect(struct zserv *client,
- struct stream *msg, vrf_id_t vrf_id)
-{
- struct stream *s;
- /* type of protocol (lib/zebra.h) */
- uint8_t proto;
- unsigned short instance;
-
- /* Get input stream. */
- s = msg;
-
- /* Get data. */
- STREAM_GETC(s, proto);
- STREAM_GETW(s, instance);
-
- /* accept only dynamic routing protocols */
- if ((proto >= ZEBRA_ROUTE_MAX) || (proto <= ZEBRA_ROUTE_STATIC)) {
- zlog_err("client %d has wrong protocol %s", client->sock,
- zebra_route_string(proto));
- zsend_label_manager_connect_response(client, vrf_id, 1);
- return;
- }
- zlog_notice("client %d with vrf %u instance %u connected as %s",
- client->sock, vrf_id, instance, zebra_route_string(proto));
- client->proto = proto;
- client->instance = instance;
-
- /*
- Release previous labels of same protocol and instance.
- This is done in case it restarted from an unexpected shutdown.
- */
- release_daemon_label_chunks(proto, instance);
-
- zlog_debug(
- " Label Manager client connected: sock %d, proto %s, vrf %u instance %u",
- client->sock, zebra_route_string(proto), vrf_id, instance);
- /* send response back */
- zsend_label_manager_connect_response(client, vrf_id, 0);
-
- stream_failure:
- return;
-}
-
-static void zread_get_label_chunk(struct zserv *client, struct stream *msg,
- vrf_id_t vrf_id)
-{
- struct stream *s;
- uint8_t keep;
- uint32_t size;
- struct label_manager_chunk *lmc;
-
- /* Get input stream. */
- s = msg;
-
- /* Get data. */
- STREAM_GETC(s, keep);
- STREAM_GETL(s, size);
-
- lmc = assign_label_chunk(client->proto, client->instance, keep, size);
- if (!lmc)
- zlog_err("%s: Unable to assign Label Chunk of size %u",
- __func__, size);
- else
- zlog_debug("Assigned Label Chunk %u - %u to %u", lmc->start,
- lmc->end, keep);
- /* send response back */
- zsend_assign_label_chunk_response(client, vrf_id, lmc);
-
-stream_failure:
- return;
-}
-
-static void zread_release_label_chunk(struct zserv *client, struct stream *msg)
-{
- struct stream *s;
- uint32_t start, end;
-
- /* Get input stream. */
- s = msg;
-
- /* Get data. */
- STREAM_GETL(s, start);
- STREAM_GETL(s, end);
-
- release_label_chunk(client->proto, client->instance, start, end);
-
-stream_failure:
- return;
-}
-static void zread_label_manager_request(ZAPI_HANDLER_ARGS)
-{
- /* to avoid sending other messages like ZERBA_INTERFACE_UP */
- if (hdr->command == ZEBRA_LABEL_MANAGER_CONNECT)
- client->is_synchronous = 1;
-
- /* external label manager */
- if (lm_is_external)
- zread_relay_label_manager_request(hdr->command, client,
- zvrf_id(zvrf));
- /* this is a label manager */
- else {
- if (hdr->command == ZEBRA_LABEL_MANAGER_CONNECT)
- zread_label_manager_connect(client, msg, zvrf_id(zvrf));
- else {
- /* Sanity: don't allow 'unidentified' requests */
- if (!client->proto) {
- zlog_err(
- "Got label request from an unidentified client");
- return;
- }
- if (hdr->command == ZEBRA_GET_LABEL_CHUNK)
- zread_get_label_chunk(client, msg,
- zvrf_id(zvrf));
- else if (hdr->command == ZEBRA_RELEASE_LABEL_CHUNK)
- zread_release_label_chunk(client, msg);
- }
- }
-}
-
-/* Send response to a get table chunk request to client */
-static int zsend_assign_table_chunk_response(struct zserv *client,
- vrf_id_t vrf_id,
- struct table_manager_chunk *tmc)
-{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
-
- zclient_create_header(s, ZEBRA_GET_TABLE_CHUNK, vrf_id);
-
- if (tmc) {
- /* start and end labels */
- stream_putl(s, tmc->start);
- stream_putl(s, tmc->end);
- }
-
- /* Write packet size. */
- stream_putw_at(s, 0, stream_get_endp(s));
-
- return zebra_server_send_message(client, s);
-}
-
-static void zread_get_table_chunk(struct zserv *client, struct stream *msg,
- vrf_id_t vrf_id)
-{
- struct stream *s;
- uint32_t size;
- struct table_manager_chunk *tmc;
-
- /* Get input stream. */
- s = msg;
-
- /* Get data. */
- STREAM_GETL(s, size);
-
- tmc = assign_table_chunk(client->proto, client->instance, size);
- if (!tmc)
- zlog_err("%s: Unable to assign Table Chunk of size %u",
- __func__, size);
- else
- zlog_debug("Assigned Table Chunk %u - %u", tmc->start,
- tmc->end);
- /* send response back */
- zsend_assign_table_chunk_response(client, vrf_id, tmc);
-
-stream_failure:
- return;
-}
-
-static void zread_release_table_chunk(struct zserv *client, struct stream *msg)
-{
- struct stream *s;
- uint32_t start, end;
-
- /* Get input stream. */
- s = msg;
-
- /* Get data. */
- STREAM_GETL(s, start);
- STREAM_GETL(s, end);
-
- release_table_chunk(client->proto, client->instance, start, end);
-
-stream_failure:
- return;
-}
-
-static void zread_table_manager_request(ZAPI_HANDLER_ARGS)
-{
- /* to avoid sending other messages like ZERBA_INTERFACE_UP */
- if (hdr->command == ZEBRA_TABLE_MANAGER_CONNECT)
- zread_table_manager_connect(client, msg, zvrf_id(zvrf));
- else {
- /* Sanity: don't allow 'unidentified' requests */
- if (!client->proto) {
- zlog_err(
- "Got table request from an unidentified client");
- return;
- }
- if (hdr->command == ZEBRA_GET_TABLE_CHUNK)
- zread_get_table_chunk(client, msg,
- zvrf_id(zvrf));
- else if (hdr->command == ZEBRA_RELEASE_TABLE_CHUNK)
- zread_release_table_chunk(client, msg);
- }
-}
-
-static void zread_pseudowire(ZAPI_HANDLER_ARGS)
-{
- struct stream *s;
- char ifname[IF_NAMESIZE];
- ifindex_t ifindex;
- int type;
- int af;
- union g_addr nexthop;
- uint32_t local_label;
- uint32_t remote_label;
- uint8_t flags;
- union pw_protocol_fields data;
- uint8_t protocol;
- struct zebra_pw *pw;
-
- /* Get input stream. */
- s = msg;
-
- /* Get data. */
- STREAM_GET(ifname, s, IF_NAMESIZE);
- STREAM_GETL(s, ifindex);
- STREAM_GETL(s, type);
- STREAM_GETL(s, af);
- switch (af) {
- case AF_INET:
- STREAM_GET(&nexthop.ipv4.s_addr, s, IPV4_MAX_BYTELEN);
- break;
- case AF_INET6:
- STREAM_GET(&nexthop.ipv6, s, 16);
- break;
- default:
- return;
- }
- STREAM_GETL(s, local_label);
- STREAM_GETL(s, remote_label);
- STREAM_GETC(s, flags);
- STREAM_GET(&data, s, sizeof(data));
- protocol = client->proto;
-
- pw = zebra_pw_find(zvrf, ifname);
- switch (hdr->command) {
- case ZEBRA_PW_ADD:
- if (pw) {
- zlog_warn("%s: pseudowire %s already exists [%s]",
- __func__, ifname,
- zserv_command_string(hdr->command));
- return;
- }
-
- zebra_pw_add(zvrf, ifname, protocol, client);
- break;
- case ZEBRA_PW_DELETE:
- if (!pw) {
- zlog_warn("%s: pseudowire %s not found [%s]", __func__,
- ifname, zserv_command_string(hdr->command));
- return;
- }
-
- zebra_pw_del(zvrf, pw);
- break;
- case ZEBRA_PW_SET:
- case ZEBRA_PW_UNSET:
- if (!pw) {
- zlog_warn("%s: pseudowire %s not found [%s]", __func__,
- ifname, zserv_command_string(hdr->command));
- return;
- }
-
- switch (hdr->command) {
- case ZEBRA_PW_SET:
- pw->enabled = 1;
- break;
- case ZEBRA_PW_UNSET:
- pw->enabled = 0;
- break;
- }
-
- zebra_pw_change(pw, ifindex, type, af, &nexthop, local_label,
- remote_label, flags, &data);
- break;
- }
-
-stream_failure:
- return;
-}
-
-/* Cleanup registered nexthops (across VRFs) upon client disconnect. */
-static void zebra_client_close_cleanup_rnh(struct zserv *client)
-{
- struct vrf *vrf;
- struct zebra_vrf *zvrf;
-
- RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
- if ((zvrf = vrf->info) != NULL) {
- zebra_cleanup_rnh_client(zvrf_id(zvrf), AF_INET, client,
- RNH_NEXTHOP_TYPE);
- zebra_cleanup_rnh_client(zvrf_id(zvrf), AF_INET6,
- client, RNH_NEXTHOP_TYPE);
- zebra_cleanup_rnh_client(zvrf_id(zvrf), AF_INET, client,
- RNH_IMPORT_CHECK_TYPE);
- zebra_cleanup_rnh_client(zvrf_id(zvrf), AF_INET6,
- client, RNH_IMPORT_CHECK_TYPE);
- if (client->proto == ZEBRA_ROUTE_LDP) {
- hash_iterate(zvrf->lsp_table,
- mpls_ldp_lsp_uninstall_all,
- zvrf->lsp_table);
- mpls_ldp_ftn_uninstall_all(zvrf, AFI_IP);
- mpls_ldp_ftn_uninstall_all(zvrf, AFI_IP6);
- }
- }
- }
-}
-
-static void zread_interface_set_master(ZAPI_HANDLER_ARGS)
-{
- struct interface *master;
- struct interface *slave;
- struct stream *s = msg;
- int ifindex;
- vrf_id_t vrf_id;
-
- STREAM_GETL(s, vrf_id);
- STREAM_GETL(s, ifindex);
- master = if_lookup_by_index(ifindex, vrf_id);
-
- STREAM_GETL(s, vrf_id);
- STREAM_GETL(s, ifindex);
- slave = if_lookup_by_index(ifindex, vrf_id);
-
- if (!master || !slave)
- return;
-
- kernel_interface_set_master(master, slave);
-
-stream_failure:
- return;
-}
-
-
-static void zread_vrf_label(ZAPI_HANDLER_ARGS)
-{
- struct interface *ifp;
- mpls_label_t nlabel;
- afi_t afi;
- struct stream *s;
- struct zebra_vrf *def_zvrf;
- enum lsp_types_t ltype;
-
- s = msg;
- STREAM_GETL(s, nlabel);
- STREAM_GETC(s, afi);
- if (nlabel == zvrf->label[afi]) {
- /*
- * Nothing to do here move along
- */
- return;
- }
-
- STREAM_GETC(s, ltype);
-
- if (zvrf->vrf->vrf_id != VRF_DEFAULT)
- ifp = if_lookup_by_name(zvrf->vrf->name, zvrf->vrf->vrf_id);
- else
- ifp = if_lookup_by_name("lo", VRF_DEFAULT);
-
- if (!ifp) {
- zlog_debug("Unable to find specified Interface for %s",
- zvrf->vrf->name);
- return;
- }
-
- def_zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
-
- if (zvrf->label[afi] != MPLS_LABEL_NONE) {
- afi_t scrubber;
- bool really_remove;
-
- really_remove = true;
- for (scrubber = AFI_IP; scrubber < AFI_MAX; scrubber++) {
- if (scrubber == afi)
- continue;
-
- if (zvrf->label[scrubber] == MPLS_LABEL_NONE)
- continue;
-
- if (zvrf->label[afi] == zvrf->label[scrubber]) {
- really_remove = false;
- break;
- }
- }
-
- if (really_remove)
- mpls_lsp_uninstall(def_zvrf, ltype, zvrf->label[afi],
- NEXTHOP_TYPE_IFINDEX, NULL,
- ifp->ifindex);
- }
-
- if (nlabel != MPLS_LABEL_NONE)
- mpls_lsp_install(def_zvrf, ltype, nlabel,
- MPLS_LABEL_IMPLICIT_NULL, NEXTHOP_TYPE_IFINDEX,
- NULL, ifp->ifindex);
-
- zvrf->label[afi] = nlabel;
-stream_failure:
- return;
-}
-
-static inline void zread_rule(ZAPI_HANDLER_ARGS)
-{
- struct zebra_pbr_rule zpr;
- struct stream *s;
- uint32_t total, i;
- ifindex_t ifindex;
-
- s = msg;
- STREAM_GETL(s, total);
-
- for (i = 0; i < total; i++) {
- memset(&zpr, 0, sizeof(zpr));
-
- zpr.sock = client->sock;
- zpr.rule.vrf_id = hdr->vrf_id;
- STREAM_GETL(s, zpr.rule.seq);
- STREAM_GETL(s, zpr.rule.priority);
- STREAM_GETL(s, zpr.rule.unique);
- STREAM_GETC(s, zpr.rule.filter.src_ip.family);
- STREAM_GETC(s, zpr.rule.filter.src_ip.prefixlen);
- STREAM_GET(&zpr.rule.filter.src_ip.u.prefix, s,
- prefix_blen(&zpr.rule.filter.src_ip));
- STREAM_GETW(s, zpr.rule.filter.src_port);
- STREAM_GETC(s, zpr.rule.filter.dst_ip.family);
- STREAM_GETC(s, zpr.rule.filter.dst_ip.prefixlen);
- STREAM_GET(&zpr.rule.filter.dst_ip.u.prefix, s,
- prefix_blen(&zpr.rule.filter.dst_ip));
- STREAM_GETW(s, zpr.rule.filter.dst_port);
- STREAM_GETL(s, zpr.rule.filter.fwmark);
- STREAM_GETL(s, zpr.rule.action.table);
- STREAM_GETL(s, ifindex);
-
- if (ifindex) {
- zpr.ifp = if_lookup_by_index(ifindex, VRF_UNKNOWN);
- if (!zpr.ifp) {
- zlog_debug("Failed to lookup ifindex: %u",
- ifindex);
- return;
- }
- }
-
- if (!is_default_prefix(&zpr.rule.filter.src_ip))
- zpr.rule.filter.filter_bm |= PBR_FILTER_SRC_IP;
-
- if (!is_default_prefix(&zpr.rule.filter.dst_ip))
- zpr.rule.filter.filter_bm |= PBR_FILTER_DST_IP;
-
- if (zpr.rule.filter.src_port)
- zpr.rule.filter.filter_bm |= PBR_FILTER_SRC_PORT;
-
- if (zpr.rule.filter.dst_port)
- zpr.rule.filter.filter_bm |= PBR_FILTER_DST_PORT;
-
- if (zpr.rule.filter.fwmark)
- zpr.rule.filter.filter_bm |= PBR_FILTER_FWMARK;
-
- if (hdr->command == ZEBRA_RULE_ADD)
- zebra_pbr_add_rule(zvrf->zns, &zpr);
- else
- zebra_pbr_del_rule(zvrf->zns, &zpr);
- }
-
-stream_failure:
- return;
-}
-
-
-static inline void zread_ipset(ZAPI_HANDLER_ARGS)
-{
- struct zebra_pbr_ipset zpi;
- struct stream *s;
- uint32_t total, i;
-
- s = msg;
- STREAM_GETL(s, total);
-
- for (i = 0; i < total; i++) {
- memset(&zpi, 0, sizeof(zpi));
-
- zpi.sock = client->sock;
- STREAM_GETL(s, zpi.unique);
- STREAM_GETL(s, zpi.type);
- STREAM_GET(&zpi.ipset_name, s,
- ZEBRA_IPSET_NAME_SIZE);
-
- if (hdr->command == ZEBRA_IPSET_CREATE)
- zebra_pbr_create_ipset(zvrf->zns, &zpi);
- else
- zebra_pbr_destroy_ipset(zvrf->zns, &zpi);
- }
-
-stream_failure:
- return;
-}
-
-static inline void zread_ipset_entry(ZAPI_HANDLER_ARGS)
-{
- struct zebra_pbr_ipset_entry zpi;
- struct zebra_pbr_ipset ipset;
- struct stream *s;
- uint32_t total, i;
-
- s = msg;
- STREAM_GETL(s, total);
-
- for (i = 0; i < total; i++) {
- memset(&zpi, 0, sizeof(zpi));
- memset(&ipset, 0, sizeof(ipset));
-
- zpi.sock = client->sock;
- STREAM_GETL(s, zpi.unique);
- STREAM_GET(&ipset.ipset_name, s,
- ZEBRA_IPSET_NAME_SIZE);
- STREAM_GETC(s, zpi.src.family);
- STREAM_GETC(s, zpi.src.prefixlen);
- STREAM_GET(&zpi.src.u.prefix, s,
- prefix_blen(&zpi.src));
- STREAM_GETC(s, zpi.dst.family);
- STREAM_GETC(s, zpi.dst.prefixlen);
- STREAM_GET(&zpi.dst.u.prefix, s,
- prefix_blen(&zpi.dst));
-
- if (!is_default_prefix(&zpi.src))
- zpi.filter_bm |= PBR_FILTER_SRC_IP;
-
- if (!is_default_prefix(&zpi.dst))
- zpi.filter_bm |= PBR_FILTER_DST_IP;
-
- /* calculate backpointer */
- zpi.backpointer = zebra_pbr_lookup_ipset_pername(zvrf->zns,
- ipset.ipset_name);
- if (hdr->command == ZEBRA_IPSET_ENTRY_ADD)
- zebra_pbr_add_ipset_entry(zvrf->zns, &zpi);
- else
- zebra_pbr_del_ipset_entry(zvrf->zns, &zpi);
- }
-
-stream_failure:
- return;
-}
-
-static inline void zread_iptable(ZAPI_HANDLER_ARGS)
-{
- struct zebra_pbr_iptable zpi;
- struct stream *s;
-
- s = msg;
-
- memset(&zpi, 0, sizeof(zpi));
-
- zpi.sock = client->sock;
- STREAM_GETL(s, zpi.unique);
- STREAM_GETL(s, zpi.type);
- STREAM_GETL(s, zpi.filter_bm);
- STREAM_GETL(s, zpi.action);
- STREAM_GETL(s, zpi.fwmark);
- STREAM_GET(&zpi.ipset_name, s,
- ZEBRA_IPSET_NAME_SIZE);
-
- if (hdr->command == ZEBRA_IPTABLE_ADD)
- zebra_pbr_add_iptable(zvrf->zns, &zpi);
- else
- zebra_pbr_del_iptable(zvrf->zns, &zpi);
-stream_failure:
- return;
-}
-
-void (*zserv_handlers[])(ZAPI_HANDLER_ARGS) = {
- [ZEBRA_ROUTER_ID_ADD] = zread_router_id_add,
- [ZEBRA_ROUTER_ID_DELETE] = zread_router_id_delete,
- [ZEBRA_INTERFACE_ADD] = zread_interface_add,
- [ZEBRA_INTERFACE_DELETE] = zread_interface_delete,
- [ZEBRA_ROUTE_ADD] = zread_route_add,
- [ZEBRA_ROUTE_DELETE] = zread_route_del,
- [ZEBRA_IPV4_ROUTE_ADD] = zread_ipv4_add,
- [ZEBRA_IPV4_ROUTE_DELETE] = zread_ipv4_delete,
- [ZEBRA_IPV4_ROUTE_IPV6_NEXTHOP_ADD] = zread_ipv4_route_ipv6_nexthop_add,
- [ZEBRA_IPV6_ROUTE_ADD] = zread_ipv6_add,
- [ZEBRA_IPV6_ROUTE_DELETE] = zread_ipv6_delete,
- [ZEBRA_REDISTRIBUTE_ADD] = zebra_redistribute_add,
- [ZEBRA_REDISTRIBUTE_DELETE] = zebra_redistribute_delete,
- [ZEBRA_REDISTRIBUTE_DEFAULT_ADD] = zebra_redistribute_default_add,
- [ZEBRA_REDISTRIBUTE_DEFAULT_DELETE] = zebra_redistribute_default_delete,
- [ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB] = zread_ipv4_nexthop_lookup_mrib,
- [ZEBRA_HELLO] = zread_hello,
- [ZEBRA_NEXTHOP_REGISTER] = zread_rnh_register,
- [ZEBRA_NEXTHOP_UNREGISTER] = zread_rnh_unregister,
- [ZEBRA_IMPORT_ROUTE_REGISTER] = zread_rnh_register,
- [ZEBRA_IMPORT_ROUTE_UNREGISTER] = zread_rnh_unregister,
- [ZEBRA_BFD_DEST_UPDATE] = zebra_ptm_bfd_dst_register,
- [ZEBRA_BFD_DEST_REGISTER] = zebra_ptm_bfd_dst_register,
- [ZEBRA_BFD_DEST_DEREGISTER] = zebra_ptm_bfd_dst_deregister,
- [ZEBRA_VRF_UNREGISTER] = zread_vrf_unregister,
- [ZEBRA_VRF_LABEL] = zread_vrf_label,
- [ZEBRA_BFD_CLIENT_REGISTER] = zebra_ptm_bfd_client_register,
-#if defined(HAVE_RTADV)
- [ZEBRA_INTERFACE_ENABLE_RADV] = zebra_interface_radv_enable,
- [ZEBRA_INTERFACE_DISABLE_RADV] = zebra_interface_radv_disable,
-#else
- [ZEBRA_INTERFACE_ENABLE_RADV] = NULL,
- [ZEBRA_INTERFACE_DISABLE_RADV] = NULL,
-#endif
- [ZEBRA_MPLS_LABELS_ADD] = zread_mpls_labels,
- [ZEBRA_MPLS_LABELS_DELETE] = zread_mpls_labels,
- [ZEBRA_IPMR_ROUTE_STATS] = zebra_ipmr_route_stats,
- [ZEBRA_LABEL_MANAGER_CONNECT] = zread_label_manager_request,
- [ZEBRA_GET_LABEL_CHUNK] = zread_label_manager_request,
- [ZEBRA_RELEASE_LABEL_CHUNK] = zread_label_manager_request,
- [ZEBRA_FEC_REGISTER] = zread_fec_register,
- [ZEBRA_FEC_UNREGISTER] = zread_fec_unregister,
- [ZEBRA_ADVERTISE_DEFAULT_GW] = zebra_vxlan_advertise_gw_macip,
- [ZEBRA_ADVERTISE_SUBNET] = zebra_vxlan_advertise_subnet,
- [ZEBRA_ADVERTISE_ALL_VNI] = zebra_vxlan_advertise_all_vni,
- [ZEBRA_REMOTE_VTEP_ADD] = zebra_vxlan_remote_vtep_add,
- [ZEBRA_REMOTE_VTEP_DEL] = zebra_vxlan_remote_vtep_del,
- [ZEBRA_REMOTE_MACIP_ADD] = zebra_vxlan_remote_macip_add,
- [ZEBRA_REMOTE_MACIP_DEL] = zebra_vxlan_remote_macip_del,
- [ZEBRA_INTERFACE_SET_MASTER] = zread_interface_set_master,
- [ZEBRA_PW_ADD] = zread_pseudowire,
- [ZEBRA_PW_DELETE] = zread_pseudowire,
- [ZEBRA_PW_SET] = zread_pseudowire,
- [ZEBRA_PW_UNSET] = zread_pseudowire,
- [ZEBRA_RULE_ADD] = zread_rule,
- [ZEBRA_RULE_DELETE] = zread_rule,
- [ZEBRA_TABLE_MANAGER_CONNECT] = zread_table_manager_request,
- [ZEBRA_GET_TABLE_CHUNK] = zread_table_manager_request,
- [ZEBRA_RELEASE_TABLE_CHUNK] = zread_table_manager_request,
- [ZEBRA_IPSET_CREATE] = zread_ipset,
- [ZEBRA_IPSET_DESTROY] = zread_ipset,
- [ZEBRA_IPSET_ENTRY_ADD] = zread_ipset_entry,
- [ZEBRA_IPSET_ENTRY_DELETE] = zread_ipset_entry,
- [ZEBRA_IPTABLE_ADD] = zread_iptable,
- [ZEBRA_IPTABLE_DELETE] = zread_iptable,
-};
-
-static inline void zserv_handle_commands(struct zserv *client,
- struct zmsghdr *hdr,
- struct stream *msg,
- struct zebra_vrf *zvrf)
-{
- if (hdr->command > array_size(zserv_handlers)
- || zserv_handlers[hdr->command] == NULL)
- zlog_info("Zebra received unknown command %d", hdr->command);
- else
- zserv_handlers[hdr->command](client, hdr, msg, zvrf);
-
- stream_free(msg);
-}
-
/* Lifecycle ---------------------------------------------------------------- */
+/* Hooks for client connect / disconnect */
+DEFINE_HOOK(zapi_client_connect, (struct zserv *client), (client));
+DEFINE_KOOH(zapi_client_close, (struct zserv *client), (client));
+
/* free zebra client information. */
static void zebra_client_free(struct zserv *client)
{
- /* Send client de-registration to BFD */
- zebra_ptm_bfd_client_deregister(client->proto);
-
- /* Cleanup any rules installed from this client */
- zebra_pbr_client_close_cleanup(client->sock);
-
- /* Cleanup any registered nexthops - across all VRFs. */
- zebra_client_close_cleanup_rnh(client);
-
- /* Release Label Manager chunks */
- release_daemon_label_chunks(client->proto, client->instance);
-
- /* Release Table Manager chunks */
- release_daemon_table_chunks(client->proto, client->instance);
-
- /* Cleanup any FECs registered by this client. */
- zebra_mpls_cleanup_fecs_for_client(vrf_info_lookup(VRF_DEFAULT),
- client);
-
- /* Remove pseudowires associated with this client */
- zebra_pw_client_close(client);
+ hook_call(zapi_client_close, client);
/* Close file descriptor. */
if (client->sock) {
@@ -3123,6 +179,8 @@ static void zebra_client_create(int sock)
zebra_vrf_update_all(client);
+ hook_call(zapi_client_connect, client);
+
/* start read loop */
zebra_event(client, ZEBRA_READ);
}
diff --git a/zebra/zserv.h b/zebra/zserv.h
index 503d85f5b6..a5b5acbb33 100644
--- a/zebra/zserv.h
+++ b/zebra/zserv.h
@@ -1,17 +1,19 @@
-/* Zebra daemon server header.
- * Copyright (C) 1997, 98 Kunihiro Ishiguro
+/*
+ * Zebra API server.
+ * Portions:
+ * Copyright (C) 1997-1999 Kunihiro Ishiguro
+ * Copyright (C) 2015-2018 Cumulus Networks, Inc.
+ * et al.
*
- * This file is part of GNU Zebra.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
*
- * GNU Zebra is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * GNU Zebra is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; see the file COPYING; if not, write to the Free Software
@@ -21,18 +23,22 @@
#ifndef _ZEBRA_ZSERV_H
#define _ZEBRA_ZSERV_H
-#include "rib.h"
-#include "if.h"
-#include "workqueue.h"
-#include "vrf.h"
-#include "routemap.h"
-#include "vty.h"
-#include "zclient.h"
-#include "pbr.h"
+/* clang-format off */
+#include <stdint.h> /* for uint32_t, uint8_t */
+#include <time.h> /* for time_t */
+
+#include "lib/route_types.h" /* for ZEBRA_ROUTE_MAX */
+#include "lib/zebra.h" /* for AFI_MAX */
+#include "lib/vrf.h" /* for vrf_bitmap_t */
+#include "lib/zclient.h" /* for redist_proto */
+#include "lib/stream.h" /* for stream, stream_fifo */
+#include "lib/thread.h" /* for thread, thread_master */
+#include "lib/linklist.h" /* for list */
+#include "lib/workqueue.h" /* for work_queue */
+#include "lib/hook.h" /* for DECLARE_HOOK, DECLARE_KOOH */
-#include "zebra/zebra_ns.h"
-#include "zebra/zebra_pw.h"
-//#include "zebra/zebra_pbr.h"
+#include "zebra/zebra_vrf.h" /* for zebra_vrf */
+/* clang-format on */
/* Default port information. */
#define ZEBRA_VTY_PORT 2601
@@ -85,7 +91,7 @@ struct zserv {
/* client's protocol */
uint8_t proto;
- unsigned short instance;
+ uint16_t instance;
uint8_t is_synchronous;
/* Statistics */
@@ -138,6 +144,10 @@ struct zserv {
struct zserv *client, struct zmsghdr *hdr, struct stream *msg, \
struct zebra_vrf *zvrf
+/* Hooks for client connect / disconnect */
+DECLARE_HOOK(zapi_client_connect, (struct zserv *client), (client));
+DECLARE_KOOH(zapi_client_close, (struct zserv *client), (client));
+
/* Zebra instance */
struct zebra_t {
/* Thread master */
@@ -164,48 +174,6 @@ extern unsigned int multipath_num;
/* Prototypes. */
extern void zserv_init(void);
extern void zebra_zserv_socket_init(char *path);
-
-extern int zsend_vrf_add(struct zserv *, struct zebra_vrf *);
-extern int zsend_vrf_delete(struct zserv *, struct zebra_vrf *);
-
-extern int zsend_interface_add(struct zserv *, struct interface *);
-extern int zsend_interface_delete(struct zserv *, struct interface *);
-extern int zsend_interface_addresses(struct zserv *, struct interface *);
-extern int zsend_interface_address(int, struct zserv *, struct interface *,
- struct connected *);
-extern void nbr_connected_add_ipv6(struct interface *, struct in6_addr *);
-extern void nbr_connected_delete_ipv6(struct interface *, struct in6_addr *);
-extern int zsend_interface_update(int, struct zserv *, struct interface *);
-extern int zsend_redistribute_route(int, struct zserv *, struct prefix *,
- struct prefix *, struct route_entry *);
-extern int zsend_router_id_update(struct zserv *, struct prefix *, vrf_id_t);
-extern int zsend_interface_vrf_update(struct zserv *, struct interface *,
- vrf_id_t);
-
-extern int zsend_interface_link_params(struct zserv *, struct interface *);
-extern int zsend_pw_update(struct zserv *, struct zebra_pw *);
-
-extern int zsend_route_notify_owner(struct route_entry *re, struct prefix *p,
- enum zapi_route_notify_owner note);
-
-struct zebra_pbr_ipset;
-struct zebra_pbr_ipset_entry;
-struct zebra_pbr_iptable;
-struct zebra_pbr_rule;
-extern void zsend_rule_notify_owner(struct zebra_pbr_rule *rule,
- enum zapi_rule_notify_owner note);
-extern void zsend_ipset_notify_owner(
- struct zebra_pbr_ipset *ipset,
- enum zapi_ipset_notify_owner note);
-extern void zsend_ipset_entry_notify_owner(
- struct zebra_pbr_ipset_entry *ipset,
- enum zapi_ipset_entry_notify_owner note);
-extern void zsend_iptable_notify_owner(
- struct zebra_pbr_iptable *iptable,
- enum zapi_iptable_notify_owner note);
-
-extern void zserv_nexthop_num_warn(const char *, const struct prefix *,
- const unsigned int);
extern int zebra_server_send_message(struct zserv *client, struct stream *msg);
extern struct zserv *zebra_find_client(uint8_t proto, unsigned short instance);