-rw-r--r--  bgpd/bgp_evpn_vty.c        |   6
-rw-r--r--  bgpd/bgp_nht.c             |  13
-rw-r--r--  bgpd/bgp_routemap.c        |  27
-rw-r--r--  doc/developer/zebra.rst    |  12
-rw-r--r--  lib/filter.c               |   4
-rw-r--r--  lib/log.c                  |   5
-rw-r--r--  lib/stream.c               |  47
-rw-r--r--  lib/stream.h               |  30
-rw-r--r--  lib/zclient.c              | 124
-rw-r--r--  lib/zclient.h              |  47
-rw-r--r--  sharpd/sharp_vty.c         |  42
-rw-r--r--  sharpd/sharp_zebra.c       |  78
-rw-r--r--  sharpd/sharp_zebra.h       |   7
-rw-r--r--  zebra/main.c               |  12
-rw-r--r--  zebra/subdir.am            |  46
-rw-r--r--  zebra/zapi_msg.c           |  91
-rw-r--r--  zebra/zapi_msg.h           |   7
-rw-r--r--  zebra/zebra_dplane.c       |   1
-rw-r--r--  zebra/zebra_fpm_netlink.c  |   2
-rw-r--r--  zebra/zebra_opaque.c       | 699
-rw-r--r--  zebra/zebra_opaque.h       |  63
-rw-r--r--  zebra/zserv.c              | 218
-rw-r--r--  zebra/zserv.h              |  42
23 files changed, 1491 insertions, 132 deletions
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index 85604d856d..eac1af2ea9 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -5657,6 +5657,9 @@ void bgp_config_write_evpn_info(struct vty *vty, struct bgp *bgp, afi_t afi,
char buf1[RD_ADDRSTRLEN];
char buf2[INET6_ADDRSTRLEN];
+ if (bgp->advertise_all_vni)
+ vty_out(vty, " advertise-all-vni\n");
+
if (bgp->vnihash) {
struct list *vnilist = hash_to_list(bgp->vnihash);
struct listnode *ln;
@@ -5669,9 +5672,6 @@ void bgp_config_write_evpn_info(struct vty *vty, struct bgp *bgp, afi_t afi,
list_delete(&vnilist);
}
- if (bgp->advertise_all_vni)
- vty_out(vty, " advertise-all-vni\n");
-
if (bgp->advertise_autort_rfc8365)
vty_out(vty, " autort rfc8365-compatible\n");
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index e3b05e8b79..0c2195527b 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -559,7 +559,18 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
p->u.prefix6 = p_orig->u.prefix6;
p->prefixlen = p_orig->prefixlen;
} else {
- p->u.prefix6 = pi->attr->mp_nexthop_global;
+ /* If we receive MP_REACH nexthop with ::(LL)
+ * or LL(LL), use LL address as nexthop cache.
+ */
+ if (pi->attr->mp_nexthop_len
+ == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
+ && (IN6_IS_ADDR_UNSPECIFIED(
+ &pi->attr->mp_nexthop_global)
+ || IN6_IS_ADDR_LINKLOCAL(
+ &pi->attr->mp_nexthop_global)))
+ p->u.prefix6 = pi->attr->mp_nexthop_local;
+ else
+ p->u.prefix6 = pi->attr->mp_nexthop_global;
p->prefixlen = IPV6_MAX_BITLEN;
}
break;
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index 6b57afc5c1..d096859322 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -5359,8 +5359,7 @@ DEFUN (no_match_ipv6_next_hop,
DEFPY (match_ipv4_next_hop,
match_ipv4_next_hop_cmd,
- "[no$no] match ip next-hop address [A.B.C.D]",
- NO_STR
+ "match ip next-hop address A.B.C.D",
MATCH_STR
IP_STR
"Match IP next-hop address of route\n"
@@ -5369,15 +5368,22 @@ DEFPY (match_ipv4_next_hop,
{
int idx_ipv4 = 4;
- if (no)
- return bgp_route_match_delete(vty, "ip next-hop address", NULL,
- RMAP_EVENT_MATCH_DELETED);
+ return bgp_route_match_add(vty, "ip next-hop address",
+ argv[idx_ipv4]->arg, RMAP_EVENT_MATCH_ADDED);
+}
- if (argv[idx_ipv4]->arg)
- return bgp_route_match_add(vty, "ip next-hop address",
- argv[idx_ipv4]->arg,
- RMAP_EVENT_MATCH_ADDED);
- return CMD_SUCCESS;
+DEFPY (no_match_ipv4_next_hop,
+ no_match_ipv4_next_hop_cmd,
+ "no match ip next-hop address [A.B.C.D]",
+ NO_STR
+ MATCH_STR
+ IP_STR
+ "Match IP next-hop address of route\n"
+ "IP address\n"
+ "IP address of next-hop\n")
+{
+ return bgp_route_match_delete(vty, "ip next-hop address", NULL,
+ RMAP_EVENT_MATCH_DELETED);
}
DEFUN (set_ipv6_nexthop_peer,
@@ -5847,6 +5853,7 @@ void bgp_route_map_init(void)
install_element(RMAP_NODE, &match_ipv6_next_hop_cmd);
install_element(RMAP_NODE, &no_match_ipv6_next_hop_cmd);
install_element(RMAP_NODE, &match_ipv4_next_hop_cmd);
+ install_element(RMAP_NODE, &no_match_ipv4_next_hop_cmd);
install_element(RMAP_NODE, &set_ipv6_nexthop_global_cmd);
install_element(RMAP_NODE, &no_set_ipv6_nexthop_global_cmd);
install_element(RMAP_NODE, &set_ipv6_nexthop_prefer_global_cmd);
diff --git a/doc/developer/zebra.rst b/doc/developer/zebra.rst
index e2f887ef28..6a73803d01 100644
--- a/doc/developer/zebra.rst
+++ b/doc/developer/zebra.rst
@@ -250,7 +250,7 @@ Zebra Protocol Commands
+------------------------------------+-------+
| ZEBRA_INTERFACE_DISABLE_RADV | 43 |
+------------------------------------+-------+
-| ZEBRA_IPV3_NEXTHOP_LOOKUP_MRIB | 44 |
+| ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB | 44 |
+------------------------------------+-------+
| ZEBRA_INTERFACE_LINK_PARAMS | 45 |
+------------------------------------+-------+
@@ -372,5 +372,13 @@ Zebra Protocol Commands
+------------------------------------+-------+
| ZEBRA_MLAG_FORWARD_MSG | 104 |
+------------------------------------+-------+
-| ZEBRA_CLIENT_CAPABILITIES | 105 |
+| ZEBRA_ERROR | 105 |
++------------------------------------+-------+
+| ZEBRA_CLIENT_CAPABILITIES | 106 |
++------------------------------------+-------+
+| ZEBRA_OPAQUE_MESSAGE | 107 |
++------------------------------------+-------+
+| ZEBRA_OPAQUE_REGISTER | 108 |
++------------------------------------+-------+
+| ZEBRA_OPAQUE_UNREGISTER | 109 |
+------------------------------------+-------+
diff --git a/lib/filter.c b/lib/filter.c
index 4a83b8b043..d61b03cbe2 100644
--- a/lib/filter.c
+++ b/lib/filter.c
@@ -868,7 +868,7 @@ DEFUN (no_access_list_standard,
"Address to match\n"
"Wildcard bits\n")
{
- int idx_acl = 1;
+ int idx_acl = 2;
int idx = 0;
char *seq = NULL;
char *permit_deny = NULL;
@@ -1949,7 +1949,7 @@ DEFUN (no_mac_access_list,
mac = argv[idx]->arg;
assert(mac);
- return filter_set_zebra(vty, argv[2]->arg, seq, permit_deny, AFI_L2VPN,
+ return filter_set_zebra(vty, argv[3]->arg, seq, permit_deny, AFI_L2VPN,
mac, 0, 0);
}
diff --git a/lib/log.c b/lib/log.c
index 9b0f5b3d85..089a3e3a07 100644
--- a/lib/log.c
+++ b/lib/log.c
@@ -443,7 +443,10 @@ static const struct zebra_desc_table command_types[] = {
DESC_ENTRY(ZEBRA_MLAG_CLIENT_UNREGISTER),
DESC_ENTRY(ZEBRA_MLAG_FORWARD_MSG),
DESC_ENTRY(ZEBRA_ERROR),
- DESC_ENTRY(ZEBRA_CLIENT_CAPABILITIES)};
+ DESC_ENTRY(ZEBRA_CLIENT_CAPABILITIES),
+ DESC_ENTRY(ZEBRA_OPAQUE_MESSAGE),
+ DESC_ENTRY(ZEBRA_OPAQUE_REGISTER),
+ DESC_ENTRY(ZEBRA_OPAQUE_UNREGISTER)};
#undef DESC_ENTRY
static const struct zebra_desc_table unknown = {0, "unknown", '?'};
diff --git a/lib/stream.c b/lib/stream.c
index fcfbc78e3d..17520f978e 100644
--- a/lib/stream.c
+++ b/lib/stream.c
@@ -120,34 +120,34 @@ void stream_free(struct stream *s)
XFREE(MTYPE_STREAM, s);
}
-struct stream *stream_copy(struct stream *new, struct stream *src)
+struct stream *stream_copy(struct stream *dest, const struct stream *src)
{
STREAM_VERIFY_SANE(src);
- assert(new != NULL);
- assert(STREAM_SIZE(new) >= src->endp);
+ assert(dest != NULL);
+ assert(STREAM_SIZE(dest) >= src->endp);
- new->endp = src->endp;
- new->getp = src->getp;
+ dest->endp = src->endp;
+ dest->getp = src->getp;
- memcpy(new->data, src->data, src->endp);
+ memcpy(dest->data, src->data, src->endp);
- return new;
+ return dest;
}
-struct stream *stream_dup(struct stream *s)
+struct stream *stream_dup(const struct stream *s)
{
- struct stream *new;
+ struct stream *snew;
STREAM_VERIFY_SANE(s);
- if ((new = stream_new(s->endp)) == NULL)
+ if ((snew = stream_new(s->endp)) == NULL)
return NULL;
- return (stream_copy(new, s));
+ return (stream_copy(snew, s));
}
-struct stream *stream_dupcat(struct stream *s1, struct stream *s2,
+struct stream *stream_dupcat(const struct stream *s1, const struct stream *s2,
size_t offset)
{
struct stream *new;
@@ -187,19 +187,19 @@ size_t stream_resize_inplace(struct stream **sptr, size_t newsize)
return orig->size;
}
-size_t stream_get_getp(struct stream *s)
+size_t stream_get_getp(const struct stream *s)
{
STREAM_VERIFY_SANE(s);
return s->getp;
}
-size_t stream_get_endp(struct stream *s)
+size_t stream_get_endp(const struct stream *s)
{
STREAM_VERIFY_SANE(s);
return s->endp;
}
-size_t stream_get_size(struct stream *s)
+size_t stream_get_size(const struct stream *s)
{
STREAM_VERIFY_SANE(s);
return s->size;
@@ -1133,11 +1133,17 @@ struct stream_fifo *stream_fifo_new(void)
{
struct stream_fifo *new;
- new = XCALLOC(MTYPE_STREAM_FIFO, sizeof(struct stream_fifo));
- pthread_mutex_init(&new->mtx, NULL);
+ new = XMALLOC(MTYPE_STREAM_FIFO, sizeof(struct stream_fifo));
+ stream_fifo_init(new);
return new;
}
+void stream_fifo_init(struct stream_fifo *fifo)
+{
+ memset(fifo, 0, sizeof(struct stream_fifo));
+ pthread_mutex_init(&fifo->mtx, NULL);
+}
+
/* Add new stream to fifo. */
void stream_fifo_push(struct stream_fifo *fifo, struct stream *s)
{
@@ -1245,9 +1251,14 @@ size_t stream_fifo_count_safe(struct stream_fifo *fifo)
return atomic_load_explicit(&fifo->count, memory_order_acquire);
}
-void stream_fifo_free(struct stream_fifo *fifo)
+void stream_fifo_deinit(struct stream_fifo *fifo)
{
stream_fifo_clean(fifo);
pthread_mutex_destroy(&fifo->mtx);
+}
+
+void stream_fifo_free(struct stream_fifo *fifo)
+{
+ stream_fifo_deinit(fifo);
XFREE(MTYPE_STREAM_FIFO, fifo);
}
diff --git a/lib/stream.h b/lib/stream.h
index 23d83bf930..7cacf57d27 100644
--- a/lib/stream.h
+++ b/lib/stream.h
@@ -150,23 +150,25 @@ struct stream_fifo {
*/
extern struct stream *stream_new(size_t);
extern void stream_free(struct stream *);
-extern struct stream *stream_copy(struct stream *, struct stream *src);
-extern struct stream *stream_dup(struct stream *);
+/* Copy 'src' into 'dest', returns 'dest' */
+extern struct stream *stream_copy(struct stream *dest,
+ const struct stream *src);
+extern struct stream *stream_dup(const struct stream *s);
extern size_t stream_resize_inplace(struct stream **sptr, size_t newsize);
-extern size_t stream_get_getp(struct stream *);
-extern size_t stream_get_endp(struct stream *);
-extern size_t stream_get_size(struct stream *);
-extern uint8_t *stream_get_data(struct stream *);
+extern size_t stream_get_getp(const struct stream *s);
+extern size_t stream_get_endp(const struct stream *s);
+extern size_t stream_get_size(const struct stream *s);
+extern uint8_t *stream_get_data(struct stream *s);
/**
* Create a new stream structure; copy offset bytes from s1 to the new
* stream; copy s2 data to the new stream; copy rest of s1 data to the
* new stream.
*/
-extern struct stream *stream_dupcat(struct stream *s1, struct stream *s2,
- size_t offset);
+extern struct stream *stream_dupcat(const struct stream *s1,
+ const struct stream *s2, size_t offset);
extern void stream_set_getp(struct stream *, size_t);
extern void stream_set_endp(struct stream *, size_t);
@@ -283,6 +285,18 @@ extern uint8_t *stream_pnt(struct stream *);
extern struct stream_fifo *stream_fifo_new(void);
/*
+ * Init or re-init an on-stack fifo. This allows use of a fifo struct without
+ * requiring a malloc/free cycle.
+ * Note well that the fifo must be de-inited with the 'fifo_deinit' api.
+ */
+void stream_fifo_init(struct stream_fifo *fifo);
+
+/*
+ * Deinit an on-stack fifo.
+ */
+void stream_fifo_deinit(struct stream_fifo *fifo);
+
+/*
* Push a stream onto a stream_fifo.
*
* fifo
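
The stream.h hunk above adds an embedded-fifo API (stream_fifo_init/stream_fifo_deinit) alongside the existing heap-based stream_fifo_new/stream_fifo_free, so a fifo can live on the stack or inside another struct without a malloc/free cycle for the container. A minimal usage sketch, assuming only the stream APIs shown in this diff; the helper name and payload values are illustrative:

#include <zebra.h>
#include "lib/stream.h"

/* Hypothetical helper: queue a few streams on an on-stack fifo. */
static void example_onstack_fifo(void)
{
	struct stream_fifo fifo;
	struct stream *s;

	/* Init the embedded fifo; no allocation of the fifo struct itself */
	stream_fifo_init(&fifo);

	/* Queue a couple of small streams */
	s = stream_new(32);
	stream_putl(s, 1);
	stream_fifo_push(&fifo, s);

	s = stream_new(32);
	stream_putl(s, 2);
	stream_fifo_push(&fifo, s);

	/* Drain and free the queued streams */
	while ((s = stream_fifo_pop(&fifo)) != NULL)
		stream_free(s);

	/* De-init with the matching api; this also frees any streams
	 * still left on the fifo.
	 */
	stream_fifo_deinit(&fifo);
}
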
diff --git a/lib/zclient.c b/lib/zclient.c
index 02532e7069..9b1ff7237f 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -3112,6 +3112,114 @@ static void zclient_mlag_handle_msg(ZAPI_CALLBACK_ARGS)
(*zclient->mlag_handle_msg)(zclient->ibuf, length);
}
+/*
+ * Send an OPAQUE message, contents opaque to zebra. The message header
+ * is a message subtype.
+ */
+int zclient_send_opaque(struct zclient *zclient, uint32_t type,
+ const uint8_t *data, size_t datasize)
+{
+ int ret;
+ struct stream *s;
+
+ /* Check buffer size */
+ if (STREAM_SIZE(zclient->obuf) <
+ (ZEBRA_HEADER_SIZE + sizeof(type) + datasize))
+ return -1;
+
+ s = zclient->obuf;
+ stream_reset(s);
+
+ zclient_create_header(s, ZEBRA_OPAQUE_MESSAGE, VRF_DEFAULT);
+
+ /* Send sub-type */
+ stream_putl(s, type);
+
+ /* Send opaque data */
+ stream_write(s, data, datasize);
+
+ /* Put length into the header at the start of the stream. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ ret = zclient_send_message(zclient);
+
+ return ret;
+}
+
+/*
+ * Send a registration request for opaque messages with a specified subtype.
+ */
+int zclient_register_opaque(struct zclient *zclient, uint32_t type)
+{
+ int ret;
+ struct stream *s;
+
+ s = zclient->obuf;
+ stream_reset(s);
+
+ zclient_create_header(s, ZEBRA_OPAQUE_REGISTER, VRF_DEFAULT);
+
+ /* Send sub-type */
+ stream_putl(s, type);
+
+ /* Add zclient info */
+ stream_putc(s, zclient->redist_default);
+ stream_putw(s, zclient->instance);
+ stream_putl(s, zclient->session_id);
+
+ /* Put length at the first point of the stream. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ ret = zclient_send_message(zclient);
+
+ return ret;
+}
+
+/*
+ * Send an un-registration request for a specified opaque subtype.
+ */
+int zclient_unregister_opaque(struct zclient *zclient, uint32_t type)
+{
+ int ret;
+ struct stream *s;
+
+ s = zclient->obuf;
+ stream_reset(s);
+
+ zclient_create_header(s, ZEBRA_OPAQUE_UNREGISTER, VRF_DEFAULT);
+
+ /* Send sub-type */
+ stream_putl(s, type);
+
+ /* Add zclient info */
+ stream_putc(s, zclient->redist_default);
+ stream_putw(s, zclient->instance);
+ stream_putl(s, zclient->session_id);
+
+ /* Put length at the first point of the stream. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ ret = zclient_send_message(zclient);
+
+ return ret;
+}
+
+/* Utility to parse opaque registration info */
+int zapi_parse_opaque_reg(struct stream *s,
+ struct zapi_opaque_reg_info *info)
+{
+ STREAM_GETL(s, info->type);
+ STREAM_GETC(s, info->proto);
+ STREAM_GETW(s, info->instance);
+ STREAM_GETL(s, info->session_id);
+
+ return 0;
+
+stream_failure:
+
+ return -1;
+}
+
/* Zebra client message read function. */
static int zclient_read(struct thread *thread)
{
@@ -3417,6 +3525,22 @@ static int zclient_read(struct thread *thread)
break;
case ZEBRA_ERROR:
zclient_handle_error(command, zclient, length, vrf_id);
+ break;
+ case ZEBRA_OPAQUE_MESSAGE:
+ if (zclient->opaque_msg_handler)
+ (*zclient->opaque_msg_handler)(command, zclient, length,
+ vrf_id);
+ break;
+ case ZEBRA_OPAQUE_REGISTER:
+ if (zclient->opaque_register_handler)
+ (*zclient->opaque_register_handler)(command, zclient,
+ length, vrf_id);
+ break;
+ case ZEBRA_OPAQUE_UNREGISTER:
+ if (zclient->opaque_unregister_handler)
+ (*zclient->opaque_unregister_handler)(command, zclient,
+ length, vrf_id);
+ break;
default:
break;
}
diff --git a/lib/zclient.h b/lib/zclient.h
index feaabc0549..d99b6ddf8f 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -200,7 +200,10 @@ typedef enum {
ZEBRA_MLAG_CLIENT_UNREGISTER,
ZEBRA_MLAG_FORWARD_MSG,
ZEBRA_ERROR,
- ZEBRA_CLIENT_CAPABILITIES
+ ZEBRA_CLIENT_CAPABILITIES,
+ ZEBRA_OPAQUE_MESSAGE,
+ ZEBRA_OPAQUE_REGISTER,
+ ZEBRA_OPAQUE_UNREGISTER,
} zebra_message_types_t;
enum zebra_error_types {
@@ -339,6 +342,9 @@ struct zclient {
int (*mlag_process_down)(void);
int (*mlag_handle_msg)(struct stream *msg, int len);
int (*handle_error)(enum zebra_error_types error);
+ int (*opaque_msg_handler)(ZAPI_CALLBACK_ARGS);
+ int (*opaque_register_handler)(ZAPI_CALLBACK_ARGS);
+ int (*opaque_unregister_handler)(ZAPI_CALLBACK_ARGS);
};
/* Zebra API message flag. */
@@ -519,7 +525,6 @@ struct zapi_pw_status {
char ifname[IF_NAMESIZE];
ifindex_t ifindex;
uint32_t status;
- uint32_t pwstatus;
};
enum zapi_route_notify_owner {
@@ -833,6 +838,44 @@ extern void zclient_send_mlag_deregister(struct zclient *client);
extern void zclient_send_mlag_data(struct zclient *client,
struct stream *client_s);
+/*
+ * Send an OPAQUE message, contents opaque to zebra - but note that
+ * the length of the payload is restricted by the zclient's
+ * outgoing message buffer.
+ * The message header is a message subtype; please use the registry
+ * below to avoid sub-type collisions. Clients use the registration
+ * apis to manage the specific opaque subtypes they want to receive.
+ */
+int zclient_send_opaque(struct zclient *zclient, uint32_t type,
+ const uint8_t *data, size_t datasize);
+
+/* Simple struct to convey registration/unreg requests */
+struct zapi_opaque_reg_info {
+ /* Message subtype */
+ uint32_t type;
+
+ /* Client session tuple */
+ uint8_t proto;
+ uint16_t instance;
+ uint32_t session_id;
+};
+
+int zclient_register_opaque(struct zclient *zclient, uint32_t type);
+int zclient_unregister_opaque(struct zclient *zclient, uint32_t type);
+int zapi_parse_opaque_reg(struct stream *msg,
+ struct zapi_opaque_reg_info *info);
+
+/*
+ * Registry of opaque message types. Please do not reuse an in-use
+ * type code; some daemons are likely relying on it.
+ */
+enum zapi_opaque_registry {
+ /* Request link-state database dump, at restart for example */
+ LINK_STATE_REQUEST = 1,
+ /* Update containing link-state db info */
+ LINK_STATE_UPDATE = 2,
+};
+
/* Send the hello message.
* Returns 0 for success or -1 on an I/O error.
*/
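
Taken together, the zclient.h/zclient.c additions give a daemon a small opaque-message API: install an opaque_msg_handler callback, register for a subtype, and send payloads that zebra forwards without inspecting them. A rough sketch from a client daemon's point of view, assuming the zclient has already been created and connected; everything other than the zclient_*/stream_* calls and the LINK_STATE_UPDATE subtype shown in this diff is illustrative:

#include <zebra.h>
#include "lib/log.h"
#include "lib/stream.h"
#include "lib/zclient.h"

/* Hypothetical handler: zebra delivers ZEBRA_OPAQUE_MESSAGE here; the
 * first 4 bytes of the payload carry the opaque subtype.
 */
static int my_opaque_handler(ZAPI_CALLBACK_ARGS)
{
	struct stream *s = zclient->ibuf;
	uint32_t subtype;

	STREAM_GETL(s, subtype);
	zlog_debug("received opaque subtype %u", subtype);

stream_failure:
	return 0;
}

static void my_opaque_setup(struct zclient *zclient)
{
	const uint8_t payload[] = "example payload";

	/* Receive LINK_STATE_UPDATE messages published by other daemons */
	zclient->opaque_msg_handler = my_opaque_handler;
	zclient_register_opaque(zclient, LINK_STATE_UPDATE);

	/* Publish an opaque message of the same subtype; the payload is
	 * limited by the zclient's outgoing buffer size.
	 */
	zclient_send_opaque(zclient, LINK_STATE_UPDATE, payload,
			    sizeof(payload));
}
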
diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c
index 987588947e..77d0d02e7d 100644
--- a/sharpd/sharp_vty.c
+++ b/sharpd/sharp_vty.c
@@ -547,6 +547,46 @@ DEFPY (logpump,
return CMD_SUCCESS;
}
+DEFPY (send_opaque,
+ send_opaque_cmd,
+ "sharp send opaque type (1-255) (1-1000)$count",
+ "Sharp Routing Protocol\n"
+ "Send messages for testing\n"
+ "Send opaque messages\n"
+ "Type code to send\n"
+ "Type code to send\n"
+ "Number of messages to send\n")
+{
+ sharp_opaque_send(type, count);
+ return CMD_SUCCESS;
+}
+
+DEFPY (send_opaque_reg,
+ send_opaque_reg_cmd,
+ "sharp send opaque <reg$reg | unreg> \
+ " FRR_IP_REDIST_STR_ZEBRA "$proto_str \
+ [{instance (0-1000) | session (1-1000)}] type (1-1000)",
+ "Sharp Routing Protocol\n"
+ "Send messages for testing\n"
+ "Send opaque messages\n"
+ "Send opaque registration\n"
+ "Send opaque unregistration\n"
+ FRR_IP_REDIST_HELP_STR_ZEBRA
+ "Daemon instance\n"
+ "Daemon instance\n"
+ "Session ID\n"
+ "Session ID\n"
+ "Opaque sub-type code\n"
+ "Opaque sub-type code\n")
+{
+ int proto;
+
+ proto = proto_redistnum(AFI_IP, proto_str);
+
+ sharp_opaque_reg_send((reg != NULL), proto, instance, session, type);
+ return CMD_SUCCESS;
+}
+
void sharp_vty_init(void)
{
install_element(ENABLE_NODE, &install_routes_data_dump_cmd);
@@ -559,6 +599,8 @@ void sharp_vty_init(void)
install_element(ENABLE_NODE, &sharp_lsp_prefix_v4_cmd);
install_element(ENABLE_NODE, &sharp_remove_lsp_prefix_v4_cmd);
install_element(ENABLE_NODE, &logpump_cmd);
+ install_element(ENABLE_NODE, &send_opaque_cmd);
+ install_element(ENABLE_NODE, &send_opaque_reg_cmd);
install_element(VIEW_NODE, &show_debugging_sharpd_cmd);
diff --git a/sharpd/sharp_zebra.c b/sharpd/sharp_zebra.c
index 0795096440..34cc1a4b5a 100644
--- a/sharpd/sharp_zebra.c
+++ b/sharpd/sharp_zebra.c
@@ -480,6 +480,83 @@ static int sharp_redistribute_route(ZAPI_CALLBACK_ARGS)
return 0;
}
+/* Handler for opaque messages */
+static int sharp_opaque_handler(ZAPI_CALLBACK_ARGS)
+{
+ uint32_t type;
+ struct stream *s;
+
+ s = zclient->ibuf;
+
+ STREAM_GETL(s, type);
+
+ zlog_debug("%s: received opaque type %u", __func__, type);
+
+stream_failure:
+
+ return 0;
+}
+
+/*
+ * Send OPAQUE messages, using subtype 'type'.
+ */
+void sharp_opaque_send(uint32_t type, uint32_t count)
+{
+ uint8_t buf[32];
+ int ret;
+ uint32_t i;
+
+ /* Prepare a small payload */
+ for (i = 0; i < sizeof(buf); i++) {
+ if (type < 255)
+ buf[i] = type;
+ else
+ buf[i] = 255;
+ }
+
+ /* Send some messages */
+ for (i = 0; i < count; i++) {
+ ret = zclient_send_opaque(zclient, type, buf, sizeof(buf));
+ if (ret < 0) {
+ zlog_debug("%s: send_opaque() failed => %d",
+ __func__, ret);
+ break;
+ }
+ }
+
+}
+
+/*
+ * Send OPAQUE registration messages, using subtype 'type'.
+ */
+void sharp_opaque_reg_send(bool is_reg, uint32_t proto, uint32_t instance,
+ uint32_t session_id, uint32_t type)
+{
+ struct stream *s;
+
+ s = zclient->obuf;
+ stream_reset(s);
+
+ if (is_reg)
+ zclient_create_header(s, ZEBRA_OPAQUE_REGISTER, VRF_DEFAULT);
+ else
+ zclient_create_header(s, ZEBRA_OPAQUE_UNREGISTER, VRF_DEFAULT);
+
+ /* Send sub-type */
+ stream_putl(s, type);
+
+ /* Add zclient info */
+ stream_putc(s, proto);
+ stream_putw(s, instance);
+ stream_putl(s, session_id);
+
+ /* Put length at the first point of the stream. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ (void)zclient_send_message(zclient);
+
+}
+
extern struct zebra_privs_t sharp_privs;
void sharp_zebra_init(void)
@@ -501,4 +578,5 @@ void sharp_zebra_init(void)
zclient->redistribute_route_add = sharp_redistribute_route;
zclient->redistribute_route_del = sharp_redistribute_route;
+ zclient->opaque_msg_handler = sharp_opaque_handler;
}
diff --git a/sharpd/sharp_zebra.h b/sharpd/sharp_zebra.h
index 2b8e19dd97..7c714b52d3 100644
--- a/sharpd/sharp_zebra.h
+++ b/sharpd/sharp_zebra.h
@@ -45,4 +45,11 @@ int sharp_install_lsps_helper(bool install_p, const struct prefix *p,
const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg);
+/* Send OPAQUE messages, using subtype 'type'. */
+void sharp_opaque_send(uint32_t type, uint32_t count);
+
+/* Send OPAQUE registration messages, using subtype 'type'. */
+void sharp_opaque_reg_send(bool is_reg, uint32_t proto, uint32_t instance,
+ uint32_t session_id, uint32_t type);
+
#endif
diff --git a/zebra/main.c b/zebra/main.c
index f447e9aa07..05dd70ff7a 100644
--- a/zebra/main.c
+++ b/zebra/main.c
@@ -55,6 +55,7 @@
#include "zebra/zebra_vxlan.h"
#include "zebra/zebra_routemap.h"
#include "zebra/zebra_nb.h"
+#include "zebra/zebra_opaque.h"
#if defined(HANDLE_NETLINK_FUZZING)
#include "zebra/kernel_netlink.h"
@@ -151,18 +152,25 @@ static void sigint(void)
frr_early_fini();
+ /* Stop the opaque module pthread */
+ zebra_opaque_stop();
+
zebra_dplane_pre_finish();
/* Clean up GR related info. */
zebra_gr_stale_client_cleanup(zrouter.stale_client_list);
list_delete_all_node(zrouter.stale_client_list);
+ /* Clean up zapi clients and server module */
for (ALL_LIST_ELEMENTS(zrouter.client_list, ln, nn, client))
zserv_close_client(client);
zserv_close();
list_delete_all_node(zrouter.client_list);
+ /* Once all the zclients are cleaned up, clean up the opaque module */
+ zebra_opaque_finish();
+
zebra_ptm_finish();
if (retain_mode)
@@ -427,6 +435,7 @@ int main(int argc, char **argv)
zebra_mpls_vty_init();
zebra_pw_vty_init();
zebra_pbr_init();
+ zebra_opaque_init();
/* For debug purpose. */
/* SET_FLAG (zebra_debug_event, ZEBRA_DEBUG_EVENT); */
@@ -458,6 +467,9 @@ int main(int argc, char **argv)
/* Start dataplane system */
zebra_dplane_start();
+ /* Start the ted module, before zserv */
+ zebra_opaque_start();
+
/* Start Zebra API server */
zserv_start(zserv_path);
diff --git a/zebra/subdir.am b/zebra/subdir.am
index 5601b4c379..d98ef52571 100644
--- a/zebra/subdir.am
+++ b/zebra/subdir.am
@@ -10,6 +10,7 @@ vtysh_scan += \
zebra/interface.c \
zebra/router-id.c \
zebra/rtadv.c \
+ zebra/zebra_gr.c \
zebra/zebra_mlag_vty.c \
zebra/zebra_mpls_vty.c \
zebra/zebra_ptm.c \
@@ -17,7 +18,6 @@ vtysh_scan += \
zebra/zebra_routemap.c \
zebra/zebra_vty.c \
zebra/zserv.c \
- zebra/zebra_gr.c \
# end
# can be loaded as DSO - always include for vtysh
@@ -72,19 +72,30 @@ zebra_zebra_SOURCES = \
zebra/rtread_sysctl.c \
zebra/rule_netlink.c \
zebra/rule_socket.c \
+ zebra/table_manager.c \
+ zebra/zapi_msg.c \
+ zebra/zebra_dplane.c \
+ zebra/zebra_errors.c \
+ zebra/zebra_gr.c \
+ zebra/zebra_l2.c \
zebra/zebra_mlag.c \
zebra/zebra_mlag_vty.c \
- zebra/zebra_l2.c \
zebra/zebra_memory.c \
- zebra/zebra_dplane.c \
zebra/zebra_mpls.c \
zebra/zebra_mpls_netlink.c \
zebra/zebra_mpls_openbsd.c \
zebra/zebra_mpls_null.c \
zebra/zebra_mpls_vty.c \
zebra/zebra_mroute.c \
+ zebra/zebra_nb.c \
+ zebra/zebra_nb_config.c \
+ zebra/zebra_nb_rpcs.c \
+ zebra/zebra_nb_state.c \
+ zebra/zebra_netns_id.c \
+ zebra/zebra_netns_notify.c \
zebra/zebra_nhg.c \
zebra/zebra_ns.c \
+ zebra/zebra_opaque.c \
zebra/zebra_pbr.c \
zebra/zebra_ptm.c \
zebra/zebra_ptm_redistribute.c \
@@ -97,16 +108,6 @@ zebra_zebra_SOURCES = \
zebra/zebra_vty.c \
zebra/zebra_vxlan.c \
zebra/zserv.c \
- zebra/zebra_netns_id.c \
- zebra/zebra_netns_notify.c \
- zebra/table_manager.c \
- zebra/zapi_msg.c \
- zebra/zebra_nb.c \
- zebra/zebra_nb_config.c \
- zebra/zebra_nb_rpcs.c \
- zebra/zebra_nb_state.c \
- zebra/zebra_errors.c \
- zebra/zebra_gr.c \
# end
clippy_scan += \
@@ -137,17 +138,24 @@ noinst_HEADERS += \
zebra/rt_netlink.h \
zebra/rtadv.h \
zebra/rule_netlink.h \
- zebra/zebra_mlag.h \
- zebra/zebra_mlag_vty.h \
+ zebra/table_manager.h \
+ zebra/zapi_msg.h \
+ zebra/zebra_dplane.h \
+ zebra/zebra_errors.h \
zebra/zebra_fpm_private.h \
zebra/zebra_l2.h \
- zebra/zebra_dplane.h \
zebra/zebra_memory.h \
+ zebra/zebra_mlag.h \
+ zebra/zebra_mlag_vty.h \
zebra/zebra_mpls.h \
zebra/zebra_mroute.h \
+ zebra/zebra_nb.h \
+ zebra/zebra_netns_id.h \
+ zebra/zebra_netns_notify.h \
zebra/zebra_nhg.h \
zebra/zebra_nhg_private.h \
zebra/zebra_ns.h \
+ zebra/zebra_opaque.h \
zebra/zebra_pbr.h \
zebra/zebra_ptm.h \
zebra/zebra_ptm_redistribute.h \
@@ -159,12 +167,6 @@ noinst_HEADERS += \
zebra/zebra_vxlan.h \
zebra/zebra_vxlan_private.h \
zebra/zserv.h \
- zebra/zebra_netns_id.h \
- zebra/zebra_netns_notify.h \
- zebra/table_manager.h \
- zebra/zapi_msg.h \
- zebra/zebra_nb.h \
- zebra/zebra_errors.h \
# end
zebra_zebra_irdp_la_SOURCES = \
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 16714acc6e..cea8edf752 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -24,23 +24,16 @@
#include <libgen.h>
#include "lib/prefix.h"
-#include "lib/command.h"
-#include "lib/if.h"
-#include "lib/thread.h"
#include "lib/stream.h"
#include "lib/memory.h"
#include "lib/table.h"
#include "lib/network.h"
-#include "lib/sockunion.h"
#include "lib/log.h"
#include "lib/zclient.h"
#include "lib/privs.h"
-#include "lib/network.h"
-#include "lib/buffer.h"
#include "lib/nexthop.h"
#include "lib/vrf.h"
#include "lib/libfrr.h"
-#include "lib/sockopt.h"
#include "lib/lib_errors.h"
#include "zebra/zebra_router.h"
@@ -52,7 +45,6 @@
#include "zebra/redistribute.h"
#include "zebra/debug.h"
#include "zebra/zebra_rnh.h"
-#include "zebra/rt_netlink.h"
#include "zebra/interface.h"
#include "zebra/zebra_ptm.h"
#include "zebra/rtadv.h"
@@ -66,6 +58,7 @@
#include "zebra/zebra_errors.h"
#include "zebra/zebra_mlag.h"
#include "zebra/connected.h"
+#include "zebra/zebra_opaque.h"
/* Encoding helpers -------------------------------------------------------- */
@@ -2870,38 +2863,78 @@ static void zserv_write_incoming(struct stream *orig, uint16_t command)
}
#endif
-void zserv_handle_commands(struct zserv *client, struct stream *msg)
+/*
+ * Process a batch of zapi messages.
+ */
+void zserv_handle_commands(struct zserv *client, struct stream_fifo *fifo)
{
struct zmsghdr hdr;
struct zebra_vrf *zvrf;
+ struct stream *msg;
+ struct stream_fifo temp_fifo;
- if (STREAM_READABLE(msg) > ZEBRA_MAX_PACKET_SIZ) {
- if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
- zlog_debug(
- "ZAPI message is %zu bytes long but the maximum packet size is %u; dropping",
- STREAM_READABLE(msg), ZEBRA_MAX_PACKET_SIZ);
- return;
- }
+ stream_fifo_init(&temp_fifo);
- zapi_parse_header(msg, &hdr);
+ while (stream_fifo_head(fifo)) {
+ msg = stream_fifo_pop(fifo);
- if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
- zserv_log_message(NULL, msg, &hdr);
+ if (STREAM_READABLE(msg) > ZEBRA_MAX_PACKET_SIZ) {
+ if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
+ zlog_debug(
+ "ZAPI message is %zu bytes long but the maximum packet size is %u; dropping",
+ STREAM_READABLE(msg),
+ ZEBRA_MAX_PACKET_SIZ);
+ goto continue_loop;
+ }
+
+ zapi_parse_header(msg, &hdr);
+
+ if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
+ zserv_log_message(NULL, msg, &hdr);
#if defined(HANDLE_ZAPI_FUZZING)
- zserv_write_incoming(msg, hdr.command);
+ zserv_write_incoming(msg, hdr.command);
#endif
- hdr.length -= ZEBRA_HEADER_SIZE;
+ hdr.length -= ZEBRA_HEADER_SIZE;
- /* lookup vrf */
- zvrf = zebra_vrf_lookup_by_id(hdr.vrf_id);
- if (!zvrf)
- return zserv_error_no_vrf(client, &hdr, msg, zvrf);
+ /* Before checking for a handler function, check for
+ * special messages that are handled in another module;
+ * we'll treat these as opaque.
+ */
+ if (zebra_opaque_handles_msgid(hdr.command)) {
+ /* Reset message buffer */
+ stream_set_getp(msg, 0);
+
+ stream_fifo_push(&temp_fifo, msg);
+
+ /* Continue without freeing the message */
+ msg = NULL;
+ goto continue_loop;
+ }
+
+ /* lookup vrf */
+ zvrf = zebra_vrf_lookup_by_id(hdr.vrf_id);
+ if (!zvrf) {
+ zserv_error_no_vrf(client, &hdr, msg, zvrf);
+ goto continue_loop;
+ }
+
+ if (hdr.command >= array_size(zserv_handlers)
+ || zserv_handlers[hdr.command] == NULL) {
+ zserv_error_invalid_msg_type(client, &hdr, msg, zvrf);
+ goto continue_loop;
+ }
+
+ zserv_handlers[hdr.command](client, &hdr, msg, zvrf);
+
+continue_loop:
+ stream_free(msg);
+ }
- if (hdr.command >= array_size(zserv_handlers)
- || zserv_handlers[hdr.command] == NULL)
- return zserv_error_invalid_msg_type(client, &hdr, msg, zvrf);
+ /* Dispatch any special messages from the temp fifo */
+ if (stream_fifo_head(&temp_fifo) != NULL)
+ zebra_opaque_enqueue_batch(&temp_fifo);
- zserv_handlers[hdr.command](client, &hdr, msg, zvrf);
+ stream_fifo_deinit(&temp_fifo);
}
diff --git a/zebra/zapi_msg.h b/zebra/zapi_msg.h
index a4f5e74e4d..6d655e11aa 100644
--- a/zebra/zapi_msg.h
+++ b/zebra/zapi_msg.h
@@ -42,10 +42,11 @@ extern "C" {
* client
* the client datastructure
*
- * msg
- * the message
+ * fifo
+ * a batch of messages
*/
-extern void zserv_handle_commands(struct zserv *client, struct stream *msg);
+extern void zserv_handle_commands(struct zserv *client,
+ struct stream_fifo *fifo);
extern int zsend_vrf_add(struct zserv *zclient, struct zebra_vrf *zvrf);
extern int zsend_vrf_delete(struct zserv *zclient, struct zebra_vrf *zvrf);
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 2c8ef37cbe..568b398924 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -28,7 +28,6 @@
#include "lib/memory.h"
#include "lib/queue.h"
#include "lib/zebra.h"
-#include "zebra/zebra_router.h"
#include "zebra/zebra_memory.h"
#include "zebra/zebra_router.h"
#include "zebra/zebra_dplane.h"
diff --git a/zebra/zebra_fpm_netlink.c b/zebra/zebra_fpm_netlink.c
index c580fe40d5..b194f80fc7 100644
--- a/zebra/zebra_fpm_netlink.c
+++ b/zebra/zebra_fpm_netlink.c
@@ -339,7 +339,7 @@ static int netlink_route_info_fill(struct netlink_route_info *ri, int cmd,
}
/* If there is no useful nexthop then return. */
- if (ri->num_nhs == 0) {
+ if (ri->rtm_type != RTN_BLACKHOLE && ri->num_nhs == 0) {
zfpm_debug("netlink_encode_route(): No useful nexthop.");
return 0;
}
diff --git a/zebra/zebra_opaque.c b/zebra/zebra_opaque.c
new file mode 100644
index 0000000000..d1e0497154
--- /dev/null
+++ b/zebra/zebra_opaque.c
@@ -0,0 +1,699 @@
+/*
+ * Zebra opaque message handler module
+ * Copyright (c) 2020 Volta Networks, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include <zebra.h>
+#include "lib/debug.h"
+#include "lib/frr_pthread.h"
+#include "lib/stream.h"
+#include "zebra/debug.h"
+#include "zebra/zserv.h"
+#include "zebra/zebra_memory.h"
+#include "zebra/zebra_opaque.h"
+
+/* Mem type */
+DEFINE_MTYPE_STATIC(ZEBRA, OPQ, "ZAPI Opaque Information");
+
+/* Hash to hold message registration info from zapi clients */
+PREDECL_HASH(opq_regh);
+
+/* Registered client info */
+struct opq_client_reg {
+ int proto;
+ int instance;
+ uint32_t session_id;
+
+ struct opq_client_reg *next;
+ struct opq_client_reg *prev;
+};
+
+/* Opaque message registration info */
+struct opq_msg_reg {
+ struct opq_regh_item item;
+
+ /* Message type */
+ uint32_t type;
+
+ struct opq_client_reg *clients;
+};
+
+/* Registration helper prototypes */
+static uint32_t registration_hash(const struct opq_msg_reg *reg);
+static int registration_compare(const struct opq_msg_reg *reg1,
+ const struct opq_msg_reg *reg2);
+
+DECLARE_HASH(opq_regh, struct opq_msg_reg, item, registration_compare,
+ registration_hash);
+
+static struct opq_regh_head opq_reg_hash;
+
+/*
+ * Globals
+ */
+static struct zebra_opaque_globals {
+
+ /* Sentinel for run or start of shutdown */
+ _Atomic uint32_t run;
+
+ /* Limit number of pending, unprocessed updates */
+ _Atomic uint32_t max_queued_updates;
+
+ /* Limit number of new messages dequeued at once, to pace an
+ * incoming burst.
+ */
+ uint32_t msgs_per_cycle;
+
+ /* Stats: counters of incoming messages, errors, and yields (when
+ * the limit has been reached.)
+ */
+ _Atomic uint32_t msgs_in;
+ _Atomic uint32_t msg_errors;
+ _Atomic uint32_t yields;
+
+ /* pthread */
+ struct frr_pthread *pthread;
+
+ /* Event-delivery context 'master' for the module */
+ struct thread_master *master;
+
+ /* Event/'thread' pointer for queued zapi messages */
+ struct thread *t_msgs;
+
+ /* Input fifo queue to the module, and lock to protect it. */
+ pthread_mutex_t mutex;
+ struct stream_fifo in_fifo;
+
+} zo_info;
+
+/* Name string for debugs/logs */
+static const char LOG_NAME[] = "Zebra Opaque";
+
+/* Prototypes */
+
+/* Main event loop, processing incoming message queue */
+static int process_messages(struct thread *event);
+static int handle_opq_registration(const struct zmsghdr *hdr,
+ struct stream *msg);
+static int handle_opq_unregistration(const struct zmsghdr *hdr,
+ struct stream *msg);
+static int dispatch_opq_messages(struct stream_fifo *msg_fifo);
+static struct opq_msg_reg *opq_reg_lookup(uint32_t type);
+static bool opq_client_match(const struct opq_client_reg *client,
+ const struct zapi_opaque_reg_info *info);
+static struct opq_msg_reg *opq_reg_alloc(uint32_t type);
+static void opq_reg_free(struct opq_msg_reg **reg);
+static struct opq_client_reg *opq_client_alloc(
+ const struct zapi_opaque_reg_info *info);
+static void opq_client_free(struct opq_client_reg **client);
+static const char *opq_client2str(char *buf, size_t buflen,
+ const struct opq_client_reg *client);
+
+/*
+ * Initialize the module at startup
+ */
+void zebra_opaque_init(void)
+{
+ memset(&zo_info, 0, sizeof(zo_info));
+
+ pthread_mutex_init(&zo_info.mutex, NULL);
+ stream_fifo_init(&zo_info.in_fifo);
+
+ zo_info.msgs_per_cycle = ZEBRA_OPAQUE_MSG_LIMIT;
+}
+
+/*
+ * Start the module pthread. This step is run later than the
+ * 'init' step, in case zebra has fork-ed.
+ */
+void zebra_opaque_start(void)
+{
+ struct frr_pthread_attr pattr = {
+ .start = frr_pthread_attr_default.start,
+ .stop = frr_pthread_attr_default.stop
+ };
+
+ if (IS_ZEBRA_DEBUG_EVENT)
+ zlog_debug("%s module starting", LOG_NAME);
+
+ /* Start pthread */
+ zo_info.pthread = frr_pthread_new(&pattr, "Zebra Opaque thread",
+ "zebra_opaque");
+
+ /* Associate event 'master' */
+ zo_info.master = zo_info.pthread->master;
+
+ atomic_store_explicit(&zo_info.run, 1, memory_order_relaxed);
+
+ /* Enqueue an initial event for the pthread */
+ thread_add_event(zo_info.master, process_messages, NULL, 0,
+ &zo_info.t_msgs);
+
+ /* And start the pthread */
+ frr_pthread_run(zo_info.pthread, NULL);
+}
+
+/*
+ * Module stop, halting the dedicated pthread; called from the main pthread.
+ */
+void zebra_opaque_stop(void)
+{
+ if (IS_ZEBRA_DEBUG_EVENT)
+ zlog_debug("%s module stop", LOG_NAME);
+
+ atomic_store_explicit(&zo_info.run, 0, memory_order_relaxed);
+
+ frr_pthread_stop(zo_info.pthread, NULL);
+
+ frr_pthread_destroy(zo_info.pthread);
+
+ if (IS_ZEBRA_DEBUG_EVENT)
+ zlog_debug("%s module stop complete", LOG_NAME);
+}
+
+/*
+ * Module final cleanup, called from the zebra main pthread.
+ */
+void zebra_opaque_finish(void)
+{
+ struct opq_msg_reg *reg;
+ struct opq_client_reg *client;
+
+ if (IS_ZEBRA_DEBUG_EVENT)
+ zlog_debug("%s module shutdown", LOG_NAME);
+
+ /* Clear out registration info */
+ while ((reg = opq_regh_pop(&opq_reg_hash)) != NULL) {
+ client = reg->clients;
+ while (client) {
+ reg->clients = client->next;
+ opq_client_free(&client);
+ client = reg->clients;
+ }
+
+ opq_reg_free(&reg);
+ }
+
+ opq_regh_fini(&opq_reg_hash);
+
+ pthread_mutex_destroy(&zo_info.mutex);
+ stream_fifo_deinit(&zo_info.in_fifo);
+}
+
+/*
+ * Does this module handle (intercept) the specified zapi message type?
+ */
+bool zebra_opaque_handles_msgid(uint16_t id)
+{
+ bool ret = false;
+
+ switch (id) {
+ case ZEBRA_OPAQUE_MESSAGE:
+ case ZEBRA_OPAQUE_REGISTER:
+ case ZEBRA_OPAQUE_UNREGISTER:
+ ret = true;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Enqueue a batch of messages for processing - this is the public api
+ * used from the zapi processing threads.
+ */
+uint32_t zebra_opaque_enqueue_batch(struct stream_fifo *batch)
+{
+ uint32_t counter = 0;
+ struct stream *msg;
+
+ /* Dequeue messages from the incoming batch, and save them
+ * on the module fifo.
+ */
+ frr_with_mutex(&zo_info.mutex) {
+ msg = stream_fifo_pop(batch);
+ while (msg) {
+ stream_fifo_push(&zo_info.in_fifo, msg);
+ counter++;
+ msg = stream_fifo_pop(batch);
+ }
+ }
+
+ /* Schedule module pthread to process the batch */
+ if (counter > 0) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: received %u messages",
+ __func__, counter);
+ thread_add_event(zo_info.master, process_messages, NULL, 0,
+ &zo_info.t_msgs);
+ }
+
+ return counter;
+}
+
+/*
+ * Pthread event loop, process the incoming message queue.
+ */
+static int process_messages(struct thread *event)
+{
+ struct stream_fifo fifo;
+ struct stream *msg;
+ uint32_t i;
+ bool need_resched = false;
+
+ stream_fifo_init(&fifo);
+
+ /* Check for zebra shutdown */
+ if (atomic_load_explicit(&zo_info.run, memory_order_relaxed) == 0)
+ goto done;
+
+ /*
+ * Dequeue some messages from the incoming queue, temporarily
+ * save them on the local fifo
+ */
+ frr_with_mutex(&zo_info.mutex) {
+
+ for (i = 0; i < zo_info.msgs_per_cycle; i++) {
+ msg = stream_fifo_pop(&zo_info.in_fifo);
+ if (msg == NULL)
+ break;
+
+ stream_fifo_push(&fifo, msg);
+ }
+
+ /*
+ * We may need to reschedule, if there are still
+ * queued messages
+ */
+ if (stream_fifo_head(&zo_info.in_fifo) != NULL)
+ need_resched = true;
+ }
+
+ /* Update stats */
+ atomic_fetch_add_explicit(&zo_info.msgs_in, i, memory_order_relaxed);
+
+ /* Check for zebra shutdown */
+ if (atomic_load_explicit(&zo_info.run, memory_order_relaxed) == 0) {
+ need_resched = false;
+ goto done;
+ }
+
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: processing %u messages", __func__, i);
+
+ /*
+ * Process the messages from the temporary fifo. We send the whole
+ * fifo so that we can take advantage of batching internally. Note
+ * that registration/deregistration messages are handled here also.
+ */
+ dispatch_opq_messages(&fifo);
+
+done:
+
+ if (need_resched) {
+ atomic_fetch_add_explicit(&zo_info.yields, 1,
+ memory_order_relaxed);
+ thread_add_event(zo_info.master, process_messages, NULL, 0,
+ &zo_info.t_msgs);
+ }
+
+ /* This will also free any leftover messages, in the shutdown case */
+ stream_fifo_deinit(&fifo);
+
+ return 0;
+}
+
+/*
+ * Process (dispatch) or drop opaque messages.
+ */
+static int dispatch_opq_messages(struct stream_fifo *msg_fifo)
+{
+ struct stream *msg, *dup;
+ struct zmsghdr hdr;
+ struct opq_msg_reg *reg;
+ uint32_t type;
+ struct opq_client_reg *client;
+ struct zserv *zclient;
+ char buf[50];
+
+ while ((msg = stream_fifo_pop(msg_fifo)) != NULL) {
+ zapi_parse_header(msg, &hdr);
+ hdr.length -= ZEBRA_HEADER_SIZE;
+
+ /* Handle client registration messages */
+ if (hdr.command == ZEBRA_OPAQUE_REGISTER) {
+ handle_opq_registration(&hdr, msg);
+ continue;
+ } else if (hdr.command == ZEBRA_OPAQUE_UNREGISTER) {
+ handle_opq_unregistration(&hdr, msg);
+ continue;
+ }
+
+ /* We only process OPAQUE messages - drop anything else */
+ if (hdr.command != ZEBRA_OPAQUE_MESSAGE)
+ goto drop_it;
+
+ /* Dispatch to any registered ZAPI client(s) */
+
+ /* Extract subtype */
+ STREAM_GETL(msg, type);
+
+ /* Look up registered ZAPI client(s) */
+ reg = opq_reg_lookup(type);
+ if (reg == NULL) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: no registrations for opaque type %u",
+ __func__, type);
+ goto drop_it;
+ }
+
+ /* Reset read pointer, since we'll be re-sending message */
+ stream_set_getp(msg, 0);
+
+ /* Send a copy of the message to all registered clients */
+ for (client = reg->clients; client; client = client->next) {
+ dup = NULL;
+
+ /* Copy message if necessary */
+ if (client->next)
+ dup = stream_dup(msg);
+
+ /*
+ * TODO -- this isn't ideal: we're going through an
+ * acquire/release cycle for each client for each
+ * message. Replace this with a batching version.
+ */
+ zclient = zserv_acquire_client(client->proto,
+ client->instance,
+ client->session_id);
+ if (zclient) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: sending %s to client %s",
+ __func__,
+ (dup ? "dup" : "msg"),
+ opq_client2str(buf,
+ sizeof(buf),
+ client));
+
+ /*
+ * Sending a message actually means enqueuing
+ * it for a zapi io pthread to send - so we
+ * don't touch the message after this call.
+ */
+ zserv_send_message(zclient, dup ? dup : msg);
+ if (dup)
+ dup = NULL;
+ else
+ msg = NULL;
+
+ zserv_release_client(zclient);
+ } else {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: type %u: no zclient for %s",
+ __func__, type,
+ opq_client2str(buf,
+ sizeof(buf),
+ client));
+ /* Registered but gone? */
+ if (dup)
+ stream_free(dup);
+ }
+ }
+
+drop_it:
+stream_failure:
+ if (msg)
+ stream_free(msg);
+ }
+
+ return 0;
+}
+
+/*
+ * Process a register/unregister message
+ */
+static int handle_opq_registration(const struct zmsghdr *hdr,
+ struct stream *msg)
+{
+ int ret = 0;
+ struct zapi_opaque_reg_info info;
+ struct opq_client_reg *client;
+ struct opq_msg_reg key, *reg;
+ char buf[50];
+
+ memset(&info, 0, sizeof(info));
+
+ if (zapi_parse_opaque_reg(msg, &info) < 0) {
+ ret = -1;
+ goto done;
+ }
+
+ memset(&key, 0, sizeof(key));
+
+ key.type = info.type;
+
+ reg = opq_regh_find(&opq_reg_hash, &key);
+ if (reg) {
+ /* Look for dup client */
+ for (client = reg->clients; client != NULL;
+ client = client->next) {
+ if (opq_client_match(client, &info))
+ break;
+ }
+
+ if (client) {
+ /* Oops - duplicate registration? */
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: duplicate opq reg for client %s",
+ __func__,
+ opq_client2str(buf, sizeof(buf),
+ client));
+ goto done;
+ }
+
+ client = opq_client_alloc(&info);
+
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: client %s registers for %u",
+ __func__,
+ opq_client2str(buf, sizeof(buf), client),
+ info.type);
+
+ /* Link client into registration */
+ client->next = reg->clients;
+ if (reg->clients)
+ reg->clients->prev = client;
+ reg->clients = client;
+ } else {
+ /*
+ * No existing registrations - create one, add the
+ * client, and add registration to hash.
+ */
+ reg = opq_reg_alloc(info.type);
+ client = opq_client_alloc(&info);
+
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: client %s registers for new reg %u",
+ __func__,
+ opq_client2str(buf, sizeof(buf), client),
+ info.type);
+
+ reg->clients = client;
+
+ opq_regh_add(&opq_reg_hash, reg);
+ }
+
+done:
+
+ stream_free(msg);
+ return ret;
+}
+
+/*
+ * Process a register/unregister message
+ */
+static int handle_opq_unregistration(const struct zmsghdr *hdr,
+ struct stream *msg)
+{
+ int ret = 0;
+ struct zapi_opaque_reg_info info;
+ struct opq_client_reg *client;
+ struct opq_msg_reg key, *reg;
+ char buf[50];
+
+ memset(&info, 0, sizeof(info));
+
+ if (zapi_parse_opaque_reg(msg, &info) < 0) {
+ ret = -1;
+ goto done;
+ }
+
+ memset(&key, 0, sizeof(key));
+
+ key.type = info.type;
+
+ reg = opq_regh_find(&opq_reg_hash, &key);
+ if (reg == NULL) {
+ /* Weird: unregister for unknown message? */
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: unknown client %s/%u/%u unregisters for unknown type %u",
+ __func__,
+ zebra_route_string(info.proto),
+ info.instance, info.session_id, info.type);
+ goto done;
+ }
+
+ /* Look for client */
+ for (client = reg->clients; client != NULL;
+ client = client->next) {
+ if (opq_client_match(client, &info))
+ break;
+ }
+
+ if (client == NULL) {
+ /* Oops - unregister for unknown client? */
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: unknown client %s/%u/%u unregisters for %u",
+ __func__, zebra_route_string(info.proto),
+ info.instance, info.session_id, info.type);
+ goto done;
+ }
+
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: client %s unregisters for %u",
+ __func__, opq_client2str(buf, sizeof(buf), client),
+ info.type);
+
+ if (client->prev)
+ client->prev->next = client->next;
+ if (client->next)
+ client->next->prev = client->prev;
+ if (reg->clients == client)
+ reg->clients = client->next;
+
+ opq_client_free(&client);
+
+ /* Is registration empty now? */
+ if (reg->clients == NULL) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: free empty reg %u", __func__,
+ reg->type);
+
+ opq_regh_del(&opq_reg_hash, reg);
+ opq_reg_free(&reg);
+ }
+
+done:
+
+ stream_free(msg);
+ return ret;
+}
+
+/* Compare utility for registered clients */
+static bool opq_client_match(const struct opq_client_reg *client,
+ const struct zapi_opaque_reg_info *info)
+{
+ if (client->proto == info->proto &&
+ client->instance == info->instance &&
+ client->session_id == info->session_id)
+ return true;
+ else
+ return false;
+}
+
+static struct opq_msg_reg *opq_reg_lookup(uint32_t type)
+{
+ struct opq_msg_reg key, *reg;
+
+ memset(&key, 0, sizeof(key));
+
+ key.type = type;
+
+ reg = opq_regh_find(&opq_reg_hash, &key);
+
+ return reg;
+}
+
+static struct opq_msg_reg *opq_reg_alloc(uint32_t type)
+{
+ struct opq_msg_reg *reg;
+
+ reg = XCALLOC(MTYPE_OPQ, sizeof(struct opq_msg_reg));
+
+ reg->type = type;
+ INIT_HASH(&reg->item);
+
+ return reg;
+}
+
+static void opq_reg_free(struct opq_msg_reg **reg)
+{
+ XFREE(MTYPE_OPQ, (*reg));
+}
+
+static struct opq_client_reg *opq_client_alloc(
+ const struct zapi_opaque_reg_info *info)
+{
+ struct opq_client_reg *client;
+
+ client = XCALLOC(MTYPE_OPQ, sizeof(struct opq_client_reg));
+
+ client->proto = info->proto;
+ client->instance = info->instance;
+ client->session_id = info->session_id;
+
+ return client;
+}
+
+static void opq_client_free(struct opq_client_reg **client)
+{
+ XFREE(MTYPE_OPQ, (*client));
+}
+
+static const char *opq_client2str(char *buf, size_t buflen,
+ const struct opq_client_reg *client)
+{
+ char sbuf[20];
+
+ snprintf(buf, buflen, "%s/%u", zebra_route_string(client->proto),
+ client->instance);
+ if (client->session_id > 0) {
+ snprintf(sbuf, sizeof(sbuf), "/%u", client->session_id);
+ strlcat(buf, sbuf, buflen);
+ }
+
+ return buf;
+}
+
+/* Hash function for clients registered for messages */
+static uint32_t registration_hash(const struct opq_msg_reg *reg)
+{
+ return reg->type;
+}
+
+/* Comparison function for client registrations */
+static int registration_compare(const struct opq_msg_reg *reg1,
+ const struct opq_msg_reg *reg2)
+{
+ if (reg1->type == reg2->type)
+ return 0;
+ else
+ return -1;
+}
diff --git a/zebra/zebra_opaque.h b/zebra/zebra_opaque.h
new file mode 100644
index 0000000000..a9610bfef5
--- /dev/null
+++ b/zebra/zebra_opaque.h
@@ -0,0 +1,63 @@
+/*
+ * Zebra opaque message zapi message handler
+ * Copyright (c) 2020 Volta Networks, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#ifndef _ZEBRA_OPAQUE_H
+#define _ZEBRA_OPAQUE_H 1
+
+/* Default for number of messages to dequeue per lock cycle */
+#define ZEBRA_OPAQUE_MSG_LIMIT 1000
+
+/*
+ * Initialize the module at startup
+ */
+void zebra_opaque_init(void);
+
+/*
+ * Start the module pthread. This step is run later than the
+ * 'init' step, in case zebra has fork-ed.
+ */
+void zebra_opaque_start(void);
+
+/*
+ * Does this module handle (intercept) the specified zapi message type?
+ */
+bool zebra_opaque_handles_msgid(uint16_t id);
+
+/*
+ * Module stop, called from the main pthread. This is synchronous:
+ * once it returns, the pthread has stopped and exited.
+ */
+void zebra_opaque_stop(void);
+
+/*
+ * Module cleanup, called from the zebra main pthread. When it returns,
+ * all module cleanup is complete.
+ */
+void zebra_opaque_finish(void);
+
+/*
+ * Enqueue a batch of messages for processing. Returns the number dequeued
+ * from the batch fifo.
+ */
+uint32_t zebra_opaque_enqueue_batch(struct stream_fifo *batch);
+
+
+#endif /* _ZEBRA_OPAQUE_H */
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 9d6ae2ec2e..cb863b258c 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -71,6 +71,14 @@ extern struct zebra_privs_t zserv_privs;
/* The listener socket for clients connecting to us */
static int zsock;
+/* The lock that protects access to zapi client objects */
+static pthread_mutex_t client_mutex;
+
+static struct zserv *find_client_internal(uint8_t proto,
+ unsigned short instance,
+ uint32_t session_id);
+
+
/*
* Client thread events.
*
@@ -512,11 +520,9 @@ static int zserv_process_messages(struct thread *thread)
need_resched = true;
}
- while (stream_fifo_head(cache)) {
- msg = stream_fifo_pop(cache);
- zserv_handle_commands(client, msg);
- stream_free(msg);
- }
+ /* Process the batch of messages */
+ if (stream_fifo_head(cache))
+ zserv_handle_commands(client, cache);
stream_fifo_free(cache);
@@ -538,6 +544,25 @@ int zserv_send_message(struct zserv *client, struct stream *msg)
return 0;
}
+/*
+ * Send a batch of messages to a connected Zebra API client.
+ */
+int zserv_send_batch(struct zserv *client, struct stream_fifo *fifo)
+{
+ struct stream *msg;
+
+ frr_with_mutex(&client->obuf_mtx) {
+ msg = stream_fifo_pop(fifo);
+ while (msg) {
+ stream_fifo_push(client->obuf_fifo, msg);
+ msg = stream_fifo_pop(fifo);
+ }
+ }
+
+ zserv_client_event(client, ZSERV_CLIENT_WRITE);
+
+ return 0;
+}
/* Hooks for client connect / disconnect */
DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client));
@@ -629,26 +654,47 @@ static void zserv_client_free(struct zserv *client)
void zserv_close_client(struct zserv *client)
{
- /* synchronously stop and join pthread */
- frr_pthread_stop(client->pthread, NULL);
+ bool free_p = true;
- if (IS_ZEBRA_DEBUG_EVENT)
- zlog_debug("Closing client '%s'",
- zebra_route_string(client->proto));
+ if (client->pthread) {
+ /* synchronously stop and join pthread */
+ frr_pthread_stop(client->pthread, NULL);
- thread_cancel_event(zrouter.master, client);
- THREAD_OFF(client->t_cleanup);
- THREAD_OFF(client->t_process);
+ if (IS_ZEBRA_DEBUG_EVENT)
+ zlog_debug("Closing client '%s'",
+ zebra_route_string(client->proto));
- /* destroy pthread */
- frr_pthread_destroy(client->pthread);
- client->pthread = NULL;
+ thread_cancel_event(zrouter.master, client);
+ THREAD_OFF(client->t_cleanup);
+ THREAD_OFF(client->t_process);
- /* remove from client list */
- listnode_delete(zrouter.client_list, client);
+ /* destroy pthread */
+ frr_pthread_destroy(client->pthread);
+ client->pthread = NULL;
+ }
+
+ /*
+ * Final check in case the client struct is in use in another
+ * pthread: if not in-use, continue and free the client
+ */
+ frr_with_mutex(&client_mutex) {
+ if (client->busy_count <= 0) {
+ /* remove from client list */
+ listnode_delete(zrouter.client_list, client);
+ } else {
+ /*
+ * The client session object may be in use, although
+ * the associated pthread is gone. Defer final
+ * cleanup.
+ */
+ client->is_closed = true;
+ free_p = false;
+ }
+ }
/* delete client */
- zserv_client_free(client);
+ if (free_p)
+ zserv_client_free(client);
}
/*
@@ -708,7 +754,9 @@ static struct zserv *zserv_client_create(int sock)
client->ridinfo = vrf_bitmap_init();
/* Add this client to linked list. */
- listnode_add(zrouter.client_list, client);
+ frr_with_mutex(&client_mutex) {
+ listnode_add(zrouter.client_list, client);
+ }
struct frr_pthread_attr zclient_pthr_attrs = {
.start = frr_pthread_attr_default.start,
@@ -731,6 +779,81 @@ static struct zserv *zserv_client_create(int sock)
}
/*
+ * Retrieve a client object by the complete tuple of
+ * {protocol, instance, session}. This version supports use
+ * from a different pthread: the object will be returned marked
+ * in-use. The caller *must* release the client object with the
+ * release_client() api, to ensure that the in-use marker is cleared properly.
+ */
+struct zserv *zserv_acquire_client(uint8_t proto, unsigned short instance,
+ uint32_t session_id)
+{
+ struct zserv *client = NULL;
+
+ frr_with_mutex(&client_mutex) {
+ client = find_client_internal(proto, instance, session_id);
+ if (client) {
+ /* Don't return a dead/closed client object */
+ if (client->is_closed)
+ client = NULL;
+ else
+ client->busy_count++;
+ }
+ }
+
+ return client;
+}
+
+/*
+ * Release a client object that was acquired with the acquire_client() api.
+ * After this has been called, the caller must not use the client pointer -
+ * it may be freed if the client has closed.
+ */
+void zserv_release_client(struct zserv *client)
+{
+ bool cleanup_p = false;
+ const char *proto_str;
+ uint16_t instance;
+
+ /* Capture some info for debugging */
+ proto_str = zebra_route_string(client->proto);
+ instance = client->instance;
+
+ /*
+ * Once we've decremented the client object's refcount, it's possible
+ * for it to be deleted as soon as we release the lock, so we won't
+ * touch the object again.
+ */
+ frr_with_mutex(&client_mutex) {
+ client->busy_count--;
+
+ if (client->busy_count <= 0) {
+ /*
+ * No more users of the client object. If the client
+ * session is closed, schedule cleanup on the zebra
+ * main pthread.
+ */
+ if (client->is_closed) {
+ thread_add_event(zrouter.master,
+ zserv_handle_client_fail,
+ client, 0, &client->t_cleanup);
+
+ cleanup_p = true;
+ }
+ }
+ }
+
+ /*
+ * Cleanup must take place on the zebra main pthread, so we've
+ * scheduled an event.
+ */
+ if (IS_ZEBRA_DEBUG_EVENT)
+ zlog_debug("%s: %s clean-up for client '%s'[%u]",
+ __func__, (cleanup_p ? "scheduled" : "NO"),
+ proto_str, instance);
+}
+
+/*
* Accept socket connection.
*/
static int zserv_accept(struct thread *thread)
@@ -773,6 +896,9 @@ void zserv_close(void)
*/
close(zsock);
zsock = -1;
+
+ /* Free client list's mutex */
+ pthread_mutex_destroy(&client_mutex);
}
void zserv_start(char *path)
@@ -1001,12 +1127,17 @@ static void zebra_show_stale_client_detail(struct vty *vty,
TAILQ_FOREACH (info, &client->gr_info_queue, gr_info) {
if (first_p) {
+ vty_out(vty, "Stale Client Information\n");
+ vty_out(vty, "------------------------\n");
+
if (client->instance)
vty_out(vty, " Instance: %u", client->instance);
if (client->session_id)
vty_out(vty, " [%u]", client->session_id);
+
first_p = false;
}
+
vty_out(vty, "VRF : %s\n", vrf_id_to_name(info->vrf_id));
vty_out(vty, "Capabilities : ");
switch (info->capabilities) {
@@ -1077,24 +1208,55 @@ static void zebra_show_client_brief(struct vty *vty, struct zserv *client)
client->v6_route_del_cnt);
}
-struct zserv *zserv_find_client_session(uint8_t proto, unsigned short instance,
- uint32_t session_id)
+/*
+ * Common logic that searches the client list for a zapi client; this
+ * MUST be called holding the client list mutex.
+ */
+static struct zserv *find_client_internal(uint8_t proto,
+ unsigned short instance,
+ uint32_t session_id)
{
struct listnode *node, *nnode;
- struct zserv *client;
+ struct zserv *client = NULL;
for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) {
if (client->proto == proto && client->instance == instance &&
client->session_id == session_id)
- return client;
+ break;
}
- return NULL;
+ return client;
}
+/*
+ * Public api that searches for a client session; this version is
+ * used from the zebra main pthread.
+ */
struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
{
- return zserv_find_client_session(proto, instance, 0);
+ struct zserv *client;
+
+ frr_with_mutex(&client_mutex) {
+ client = find_client_internal(proto, instance, 0);
+ }
+
+ return client;
+}
+
+/*
+ * Retrieve a client by its protocol, instance number, and session id.
+ */
+struct zserv *zserv_find_client_session(uint8_t proto, unsigned short instance,
+ uint32_t session_id)
+{
+ struct zserv *client;
+
+ frr_with_mutex(&client_mutex) {
+ client = find_client_internal(proto, instance, session_id);
+ }
+
+ return client;
+
}
/* This command is for debugging purpose. */
@@ -1110,8 +1272,7 @@ DEFUN (show_zebra_client,
for (ALL_LIST_ELEMENTS_RO(zrouter.client_list, node, client)) {
zebra_show_client_detail(vty, client);
- vty_out(vty, "Stale Client Information\n");
- vty_out(vty, "------------------------\n");
+ /* Show GR info if present */
zebra_show_stale_client_detail(vty, client);
}
@@ -1161,6 +1322,7 @@ void zserv_init(void)
/* Misc init. */
zsock = -1;
+ pthread_mutex_init(&client_mutex, NULL);
install_element(ENABLE_NODE, &show_zebra_client_cmd);
install_element(ENABLE_NODE, &show_zebra_client_summary_cmd);
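
The zserv.c changes above put the zapi client list behind a mutex and add an acquire/release pair so that another pthread (the opaque module in this commit) can safely hand a message to a client session. A minimal sketch of that pattern, mirroring the dispatch loop in zebra_opaque.c; the wrapper function and its parameter values are placeholders, the zserv_* and stream_free calls are the ones added or used in this diff:

#include <zebra.h>
#include "lib/stream.h"
#include "zebra/zserv.h"

/* Hypothetical caller on a non-main pthread: look up a client session,
 * hand it a message, then drop the in-use reference.
 */
static void example_forward(struct stream *msg, uint8_t proto,
			    unsigned short instance, uint32_t session_id)
{
	struct zserv *client;

	/* Returns the client marked in-use, or NULL if unknown/closed */
	client = zserv_acquire_client(proto, instance, session_id);
	if (client == NULL) {
		stream_free(msg);
		return;
	}

	/* Enqueues the stream for the client's zapi io pthread; the
	 * message must not be touched after this call.
	 */
	zserv_send_message(client, msg);

	/* Clear the in-use marker; the client may be freed once released */
	zserv_release_client(client);
}
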
diff --git a/zebra/zserv.h b/zebra/zserv.h
index 9d442899f1..f2a4523818 100644
--- a/zebra/zserv.h
+++ b/zebra/zserv.h
@@ -96,6 +96,14 @@ struct zserv {
/* Client file descriptor. */
int sock;
+ /* Attributes used to permit access to zapi clients from
+ * other pthreads: the client has a busy counter, and a
+ * 'closed' flag. These attributes are managed using a
+ * lock, via the acquire_client() and release_client() apis.
+ */
+ int busy_count;
+ bool is_closed;
+
/* Input/output buffer to the client. */
pthread_mutex_t ibuf_mtx;
struct stream_fifo *ibuf_fifo;
@@ -116,7 +124,7 @@ struct zserv {
/* Event for message processing, for the main pthread */
struct thread *t_process;
- /* Threads for the main pthread */
+ /* Event for the main pthread */
struct thread *t_cleanup;
/* This client's redistribute flag. */
@@ -274,6 +282,17 @@ extern void zserv_start(char *path);
extern int zserv_send_message(struct zserv *client, struct stream *msg);
/*
+ * Send a batch of messages to a connected Zebra API client.
+ *
+ * client
+ * the client to send to
+ *
+ * fifo
+ * the list of messages to send
+ */
+extern int zserv_send_batch(struct zserv *client, struct stream_fifo *fifo);
+
+/*
* Retrieve a client by its protocol and instance number.
*
* proto
@@ -306,6 +325,27 @@ struct zserv *zserv_find_client_session(uint8_t proto, unsigned short instance,
uint32_t session_id);
/*
+ * Retrieve a client object by the complete tuple of
+ * {protocol, instance, session}. This version supports use
+ * from a different pthread: the object will be returned marked
+ * in-use. The caller *must* release the client object with the
+ * release_client() api, to ensure that the in-use marker is cleared properly.
+ *
+ * Returns:
+ * The Zebra API client.
+ */
+extern struct zserv *zserv_acquire_client(uint8_t proto,
+ unsigned short instance,
+ uint32_t session_id);
+
+/*
+ * Release a client object that was acquired with the acquire_client() api.
+ * After this has been called, the pointer must not be used - it may be freed
+ * in another pthread if the client has closed.
+ */
+extern void zserv_release_client(struct zserv *client);
+
+/*
* Close a client.
*
* Kills a client's thread, removes the client from the client list and cleans