Diffstat (limited to 'zebra/zebra_dplane.c')
-rw-r--r--  zebra/zebra_dplane.c | 1617
1 file changed, 1402 insertions(+), 215 deletions(-)
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 61eba92c98..928169a862 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -24,8 +24,9 @@
#include "lib/memory.h"
#include "lib/queue.h"
#include "lib/zebra.h"
+#include "zebra/zebra_router.h"
#include "zebra/zebra_memory.h"
-#include "zebra/zserv.h"
+#include "zebra/zebra_router.h"
#include "zebra/zebra_dplane.h"
#include "zebra/rt.h"
#include "zebra/debug.h"
@@ -38,9 +39,14 @@ DEFINE_MTYPE(ZEBRA, DP_PROV, "Zebra DPlane Provider")
# define AOK 0
#endif
+/* Enable test dataplane provider */
+/*#define DPLANE_TEST_PROVIDER 1 */
+
/* Default value for max queued incoming updates */
const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
+/* Default value for new work per cycle */
+const uint32_t DPLANE_DEFAULT_NEW_WORK = 100;
/* Validation check macro for context blocks */
/* #define DPLANE_DEBUG 1 */
@@ -57,43 +63,25 @@ const uint32_t DPLANE_DEFAULT_MAX_QUEUED = 200;
#endif /* DPLANE_DEBUG */
/*
- * The context block used to exchange info about route updates across
- * the boundary between the zebra main context (and pthread) and the
- * dataplane layer (and pthread).
+ * Route information captured for route updates.
*/
-struct zebra_dplane_ctx {
-
- /* Operation code */
- enum dplane_op_e zd_op;
-
- /* Status on return */
- enum zebra_dplane_result zd_status;
-
- /* TODO -- internal/sub-operation status? */
- enum zebra_dplane_result zd_remote_status;
- enum zebra_dplane_result zd_kernel_status;
+struct dplane_route_info {
/* Dest and (optional) source prefixes */
struct prefix zd_dest;
struct prefix zd_src;
- bool zd_is_update;
-
- uint32_t zd_seq;
- uint32_t zd_old_seq;
- vrf_id_t zd_vrf_id;
- uint32_t zd_table_id;
+ afi_t zd_afi;
+ safi_t zd_safi;
int zd_type;
int zd_old_type;
- afi_t zd_afi;
- safi_t zd_safi;
-
route_tag_t zd_tag;
route_tag_t zd_old_tag;
uint32_t zd_metric;
uint32_t zd_old_metric;
+
uint16_t zd_instance;
uint16_t zd_old_instance;
@@ -103,9 +91,6 @@ struct zebra_dplane_ctx {
uint32_t zd_mtu;
uint32_t zd_nexthop_mtu;
- /* Namespace info */
- struct zebra_dplane_info zd_ns_info;
-
/* Nexthops */
struct nexthop_group zd_ng;
@@ -114,10 +99,76 @@ struct zebra_dplane_ctx {
/* TODO -- use fixed array of nexthops, to avoid mallocs? */
+};
+
+/*
+ * Pseudowire info for the dataplane
+ */
+struct dplane_pw_info {
+ char ifname[IF_NAMESIZE];
+ ifindex_t ifindex;
+ int type;
+ int af;
+ int status;
+ uint32_t flags;
+ union g_addr nexthop;
+ mpls_label_t local_label;
+ mpls_label_t remote_label;
+
+ union pw_protocol_fields fields;
+};
+
+/*
+ * The context block used to exchange info about route updates across
+ * the boundary between the zebra main context (and pthread) and the
+ * dataplane layer (and pthread).
+ */
+struct zebra_dplane_ctx {
+
+ /* Operation code */
+ enum dplane_op_e zd_op;
+
+ /* Status on return */
+ enum zebra_dplane_result zd_status;
+
+ /* Dplane provider id */
+ uint32_t zd_provider;
+
+ /* Flags - used by providers, e.g. */
+ int zd_flags;
+
+ bool zd_is_update;
+
+ uint32_t zd_seq;
+ uint32_t zd_old_seq;
+
+ /* TODO -- internal/sub-operation status? */
+ enum zebra_dplane_result zd_remote_status;
+ enum zebra_dplane_result zd_kernel_status;
+
+ vrf_id_t zd_vrf_id;
+ uint32_t zd_table_id;
+
+ /* Support info for either route or LSP update */
+ union {
+ struct dplane_route_info rinfo;
+ zebra_lsp_t lsp;
+ struct dplane_pw_info pw;
+ } u;
+
+ /* Namespace info, used especially for netlink kernel communication */
+ struct zebra_dplane_info zd_ns_info;
+
/* Embedded list linkage */
TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
};
+/* Flag that can be set by a pre-kernel provider as a signal that an update
+ * should bypass the kernel.
+ */
+#define DPLANE_CTX_FLAG_NO_KERNEL 0x01
+
+
/*
* Registration block for one dataplane provider.
*/
@@ -131,16 +182,37 @@ struct zebra_dplane_provider {
/* Id value */
uint32_t dp_id;
- dplane_provider_process_fp dp_fp;
+ /* Mutex */
+ pthread_mutex_t dp_mutex;
- dplane_provider_fini_fp dp_fini;
+ /* Plugin-provided extra data */
+ void *dp_data;
- _Atomic uint64_t dp_in_counter;
- _Atomic uint64_t dp_error_counter;
+ /* Flags */
+ int dp_flags;
- /* Embedded list linkage */
- TAILQ_ENTRY(zebra_dplane_provider) dp_q_providers;
+ int (*dp_fp)(struct zebra_dplane_provider *prov);
+
+ int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);
+ _Atomic uint32_t dp_in_counter;
+ _Atomic uint32_t dp_in_queued;
+ _Atomic uint32_t dp_in_max;
+ _Atomic uint32_t dp_out_counter;
+ _Atomic uint32_t dp_out_queued;
+ _Atomic uint32_t dp_out_max;
+ _Atomic uint32_t dp_error_counter;
+
+ /* Queue of contexts inbound to the provider */
+ struct dplane_ctx_q dp_ctx_in_q;
+
+ /* Queue of completed contexts outbound from the provider back
+ * towards the dataplane module.
+ */
+ struct dplane_ctx_q dp_ctx_out_q;
+
+ /* Embedded list linkage for provider objects */
+ TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
};
/*
@@ -151,7 +223,7 @@ static struct zebra_dplane_globals {
pthread_mutex_t dg_mutex;
/* Results callback registered by zebra 'core' */
- dplane_results_fp dg_results_cb;
+ int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);
/* Sentinel for beginning of shutdown */
volatile bool dg_is_shutdown;
@@ -171,10 +243,27 @@ static struct zebra_dplane_globals {
/* Limit number of pending, unprocessed updates */
_Atomic uint32_t dg_max_queued_updates;
- _Atomic uint64_t dg_routes_in;
+ /* Limit number of new updates dequeued at once, to pace an
+ * incoming burst.
+ */
+ uint32_t dg_updates_per_cycle;
+
+ _Atomic uint32_t dg_routes_in;
_Atomic uint32_t dg_routes_queued;
_Atomic uint32_t dg_routes_queued_max;
- _Atomic uint64_t dg_route_errors;
+ _Atomic uint32_t dg_route_errors;
+ _Atomic uint32_t dg_other_errors;
+
+ _Atomic uint32_t dg_lsps_in;
+ _Atomic uint32_t dg_lsp_errors;
+
+ _Atomic uint32_t dg_pws_in;
+ _Atomic uint32_t dg_pw_errors;
+
+ _Atomic uint32_t dg_update_yields;
+
+ /* Dataplane pthread */
+ struct frr_pthread *dg_pthread;
/* Event-delivery context 'master' for the dplane */
struct thread_master *dg_master;
@@ -188,19 +277,37 @@ static struct zebra_dplane_globals {
} zdplane_info;
/*
- * Lock and unlock for interactions with the zebra 'core'
+ * Lock and unlock for interactions with the zebra 'core' pthread
*/
#define DPLANE_LOCK() pthread_mutex_lock(&zdplane_info.dg_mutex)
-
#define DPLANE_UNLOCK() pthread_mutex_unlock(&zdplane_info.dg_mutex)
+
+/*
+ * Lock and unlock for individual providers
+ */
+#define DPLANE_PROV_LOCK(p) pthread_mutex_lock(&((p)->dp_mutex))
+#define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
+
/* Prototypes */
-static int dplane_route_process(struct thread *event);
+static int dplane_thread_loop(struct thread *event);
+static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
+ struct zebra_ns *zns);
+static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
+ enum dplane_op_e op);
+static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
+ enum dplane_op_e op);
/*
* Public APIs
*/
+/* Obtain thread_master for dataplane thread */
+struct thread_master *dplane_get_thread_master(void)
+{
+ return zdplane_info.dg_master;
+}
+
/*
* Allocate a dataplane update context
*/
@@ -221,27 +328,70 @@ static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
*/
static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
{
- if (pctx) {
- DPLANE_CTX_VALID(*pctx);
+ if (pctx == NULL)
+ return;
- /* TODO -- just freeing memory, but would like to maintain
- * a pool
- */
+ DPLANE_CTX_VALID(*pctx);
+
+ /* TODO -- just freeing memory, but would like to maintain
+ * a pool
+ */
+
+ /* Some internal allocations may need to be freed, depending on
+ * the type of info captured in the ctx.
+ */
+ switch ((*pctx)->zd_op) {
+ case DPLANE_OP_ROUTE_INSTALL:
+ case DPLANE_OP_ROUTE_UPDATE:
+ case DPLANE_OP_ROUTE_DELETE:
- /* Free embedded nexthops */
- if ((*pctx)->zd_ng.nexthop) {
+ /* Free allocated nexthops */
+ if ((*pctx)->u.rinfo.zd_ng.nexthop) {
/* This deals with recursive nexthops too */
- nexthops_free((*pctx)->zd_ng.nexthop);
+ nexthops_free((*pctx)->u.rinfo.zd_ng.nexthop);
+
+ (*pctx)->u.rinfo.zd_ng.nexthop = NULL;
}
- if ((*pctx)->zd_old_ng.nexthop) {
+ if ((*pctx)->u.rinfo.zd_old_ng.nexthop) {
/* This deals with recursive nexthops too */
- nexthops_free((*pctx)->zd_old_ng.nexthop);
+ nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop);
+
+ (*pctx)->u.rinfo.zd_old_ng.nexthop = NULL;
}
- XFREE(MTYPE_DP_CTX, *pctx);
- *pctx = NULL;
+ break;
+
+ case DPLANE_OP_LSP_INSTALL:
+ case DPLANE_OP_LSP_UPDATE:
+ case DPLANE_OP_LSP_DELETE:
+ {
+ zebra_nhlfe_t *nhlfe, *next;
+
+ /* Free allocated NHLFEs */
+ for (nhlfe = (*pctx)->u.lsp.nhlfe_list; nhlfe; nhlfe = next) {
+ next = nhlfe->next;
+
+ zebra_mpls_nhlfe_del(nhlfe);
+ }
+
+ /* Clear pointers in lsp struct, in case we're caching
+ * free context structs.
+ */
+ (*pctx)->u.lsp.nhlfe_list = NULL;
+ (*pctx)->u.lsp.best_nhlfe = NULL;
+
+ break;
+ }
+
+ case DPLANE_OP_PW_INSTALL:
+ case DPLANE_OP_PW_UNINSTALL:
+ case DPLANE_OP_NONE:
+ break;
}
+
+ XFREE(MTYPE_DP_CTX, *pctx);
+ *pctx = NULL;
}
/*
@@ -249,7 +399,7 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
*/
void dplane_ctx_fini(struct zebra_dplane_ctx **pctx)
{
- /* TODO -- enqueue for next provider; for now, just free */
+ /* TODO -- maintain pool; for now, just free */
dplane_ctx_free(pctx);
}
@@ -260,15 +410,27 @@ void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
}
+/* Append a list of context blocks to another list */
+void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
+ struct dplane_ctx_q *from_list)
+{
+ if (TAILQ_FIRST(from_list)) {
+ TAILQ_CONCAT(to_list, from_list, zd_q_entries);
+
+ /* And clear 'from' list */
+ TAILQ_INIT(from_list);
+ }
+}
+
/* Dequeue a context block from the head of a list */
-void dplane_ctx_dequeue(struct dplane_ctx_q *q, struct zebra_dplane_ctx **ctxp)
+struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
{
struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
if (ctx)
TAILQ_REMOVE(q, ctx, zd_q_entries);
- *ctxp = ctx;
+ return ctx;
}
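The dequeue API now returns the dequeued context directly instead of filling an out-parameter. A minimal consumer loop under that convention might look like the following sketch (the ctxlist variable is hypothetical):

	struct zebra_dplane_ctx *ctx;

	/* Drain a list of contexts, freeing each one when done */
	while ((ctx = dplane_ctx_dequeue(&ctxlist)) != NULL) {
		/* ... inspect dplane_ctx_get_op(ctx) and
		 * dplane_ctx_get_status(ctx) as needed ...
		 */
		dplane_ctx_fini(&ctx);
	}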
/*
@@ -282,6 +444,38 @@ enum zebra_dplane_result dplane_ctx_get_status(
return ctx->zd_status;
}
+void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
+ enum zebra_dplane_result status)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->zd_status = status;
+}
+
+/* Retrieve last/current provider id */
+uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return ctx->zd_provider;
+}
+
+/* Providers that run before the kernel can control whether a kernel
+ * update should be done.
+ */
+void dplane_ctx_set_skip_kernel(struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ SET_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
+}
+
+bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
+}
+
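A sketch of how this flag might be used by a pre-kernel provider; whether the kernel-facing code honors it is not shown in this change, so the check below is an assumption, and some_policy_says_skip() is a hypothetical predicate:

	/* In a pre-kernel provider's processing callback: decide that this
	 * update should not be sent to the kernel.
	 */
	if (some_policy_says_skip(ctx))		/* hypothetical predicate */
		dplane_ctx_set_skip_kernel(ctx);

	/* A kernel-facing provider would then be expected to test the flag */
	if (dplane_ctx_is_skip_kernel(ctx)) {
		/* ... skip the kernel update, report success ... */
		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
	}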
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -309,6 +503,23 @@ const char *dplane_op2str(enum dplane_op_e op)
ret = "ROUTE_DELETE";
break;
+ case DPLANE_OP_LSP_INSTALL:
+ ret = "LSP_INSTALL";
+ break;
+ case DPLANE_OP_LSP_UPDATE:
+ ret = "LSP_UPDATE";
+ break;
+ case DPLANE_OP_LSP_DELETE:
+ ret = "LSP_DELETE";
+ break;
+
+ case DPLANE_OP_PW_INSTALL:
+ ret = "PW_INSTALL";
+ break;
+ case DPLANE_OP_PW_UNINSTALL:
+ ret = "PW_UNINSTALL";
+ break;
+
};
return ret;
@@ -337,7 +548,7 @@ const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return &(ctx->zd_dest);
+ return &(ctx->u.rinfo.zd_dest);
}
/* Source prefix is a little special - return NULL for "no src prefix" */
@@ -345,11 +556,11 @@ const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- if (ctx->zd_src.prefixlen == 0 &&
- IN6_IS_ADDR_UNSPECIFIED(&(ctx->zd_src.u.prefix6))) {
+ if (ctx->u.rinfo.zd_src.prefixlen == 0 &&
+ IN6_IS_ADDR_UNSPECIFIED(&(ctx->u.rinfo.zd_src.u.prefix6))) {
return NULL;
} else {
- return &(ctx->zd_src);
+ return &(ctx->u.rinfo.zd_src);
}
}
@@ -385,28 +596,28 @@ int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_type;
+ return ctx->u.rinfo.zd_type;
}
int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_old_type;
+ return ctx->u.rinfo.zd_old_type;
}
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_afi;
+ return ctx->u.rinfo.zd_afi;
}
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_safi;
+ return ctx->u.rinfo.zd_safi;
}
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
@@ -420,70 +631,70 @@ route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_tag;
+ return ctx->u.rinfo.zd_tag;
}
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_old_tag;
+ return ctx->u.rinfo.zd_old_tag;
}
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_instance;
+ return ctx->u.rinfo.zd_instance;
}
uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_instance;
+ return ctx->u.rinfo.zd_old_instance;
}
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_metric;
+ return ctx->u.rinfo.zd_metric;
}
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_old_metric;
+ return ctx->u.rinfo.zd_old_metric;
}
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_mtu;
+ return ctx->u.rinfo.zd_mtu;
}
uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_nexthop_mtu;
+ return ctx->u.rinfo.zd_nexthop_mtu;
}
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_distance;
+ return ctx->u.rinfo.zd_distance;
}
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->zd_old_distance;
+ return ctx->u.rinfo.zd_old_distance;
}
const struct nexthop_group *dplane_ctx_get_ng(
@@ -491,7 +702,7 @@ const struct nexthop_group *dplane_ctx_get_ng(
{
DPLANE_CTX_VALID(ctx);
- return &(ctx->zd_ng);
+ return &(ctx->u.rinfo.zd_ng);
}
const struct nexthop_group *dplane_ctx_get_old_ng(
@@ -499,7 +710,7 @@ const struct nexthop_group *dplane_ctx_get_old_ng(
{
DPLANE_CTX_VALID(ctx);
- return &(ctx->zd_old_ng);
+ return &(ctx->u.rinfo.zd_old_ng);
}
const struct zebra_dplane_info *dplane_ctx_get_ns(
@@ -510,10 +721,120 @@ const struct zebra_dplane_info *dplane_ctx_get_ns(
return &(ctx->zd_ns_info);
}
+/* Accessors for LSP information */
+
+mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.lsp.ile.in_label;
+}
+
+uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.lsp.addr_family;
+}
+
+uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.lsp.flags;
+}
+
+zebra_nhlfe_t *dplane_ctx_get_nhlfe(struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.lsp.nhlfe_list;
+}
+
+zebra_nhlfe_t *dplane_ctx_get_best_nhlfe(struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.lsp.best_nhlfe;
+}
+
+uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.lsp.num_ecmp;
+}
+
+const char *dplane_ctx_get_pw_ifname(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.pw.ifname;
+}
+
+mpls_label_t dplane_ctx_get_pw_local_label(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.pw.local_label;
+}
+
+mpls_label_t dplane_ctx_get_pw_remote_label(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.pw.remote_label;
+}
+
+int dplane_ctx_get_pw_type(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.pw.type;
+}
+
+int dplane_ctx_get_pw_af(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.pw.af;
+}
+
+uint32_t dplane_ctx_get_pw_flags(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.pw.flags;
+}
+
+int dplane_ctx_get_pw_status(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.pw.status;
+}
+
+const union g_addr *dplane_ctx_get_pw_nexthop(
+ const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return &(ctx->u.pw.nexthop);
+}
+
+const union pw_protocol_fields *dplane_ctx_get_pw_proto(
+ const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return &(ctx->u.pw.fields);
+}
+
/*
* End of dplane context accessors
*/
+
/*
* Retrieve the limit on the number of pending, unprocessed updates.
*/
@@ -546,6 +867,28 @@ uint32_t dplane_get_in_queue_len(void)
}
/*
+ * Common dataplane context init with zebra namespace info.
+ */
+static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
+ struct zebra_ns *zns,
+ bool is_update)
+{
+ dplane_info_from_zns(&(ctx->zd_ns_info), zns);
+
+#if defined(HAVE_NETLINK)
+ /* Increment message counter after copying to context struct - may need
+ * two messages in some 'update' cases.
+ */
+ if (is_update)
+ zns->netlink_dplane.seq += 2;
+ else
+ zns->netlink_dplane.seq++;
+#endif /* HAVE_NETLINK */
+
+ return AOK;
+}
+
+/*
* Initialize a context block for a route update from zebra data structs.
*/
static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
@@ -565,67 +908,58 @@ static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
goto done;
ctx->zd_op = op;
+ ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
- ctx->zd_type = re->type;
- ctx->zd_old_type = re->type;
+ ctx->u.rinfo.zd_type = re->type;
+ ctx->u.rinfo.zd_old_type = re->type;
/* Prefixes: dest, and optional source */
srcdest_rnode_prefixes(rn, &p, &src_p);
- prefix_copy(&(ctx->zd_dest), p);
+ prefix_copy(&(ctx->u.rinfo.zd_dest), p);
if (src_p)
- prefix_copy(&(ctx->zd_src), src_p);
+ prefix_copy(&(ctx->u.rinfo.zd_src), src_p);
else
- memset(&(ctx->zd_src), 0, sizeof(ctx->zd_src));
+ memset(&(ctx->u.rinfo.zd_src), 0, sizeof(ctx->u.rinfo.zd_src));
ctx->zd_table_id = re->table;
- ctx->zd_metric = re->metric;
- ctx->zd_old_metric = re->metric;
+ ctx->u.rinfo.zd_metric = re->metric;
+ ctx->u.rinfo.zd_old_metric = re->metric;
ctx->zd_vrf_id = re->vrf_id;
- ctx->zd_mtu = re->mtu;
- ctx->zd_nexthop_mtu = re->nexthop_mtu;
- ctx->zd_instance = re->instance;
- ctx->zd_tag = re->tag;
- ctx->zd_old_tag = re->tag;
- ctx->zd_distance = re->distance;
+ ctx->u.rinfo.zd_mtu = re->mtu;
+ ctx->u.rinfo.zd_nexthop_mtu = re->nexthop_mtu;
+ ctx->u.rinfo.zd_instance = re->instance;
+ ctx->u.rinfo.zd_tag = re->tag;
+ ctx->u.rinfo.zd_old_tag = re->tag;
+ ctx->u.rinfo.zd_distance = re->distance;
table = srcdest_rnode_table(rn);
info = table->info;
- ctx->zd_afi = info->afi;
- ctx->zd_safi = info->safi;
+ ctx->u.rinfo.zd_afi = info->afi;
+ ctx->u.rinfo.zd_safi = info->safi;
/* Extract ns info - can't use pointers to 'core' structs */
zvrf = vrf_info_lookup(re->vrf_id);
zns = zvrf->zns;
- zebra_dplane_info_from_zns(&(ctx->zd_ns_info), zns, true /*is_cmd*/);
-
-#if defined(HAVE_NETLINK)
- /* Increment message counter after copying to context struct - may need
- * two messages in some 'update' cases.
- */
- if (op == DPLANE_OP_ROUTE_UPDATE)
- zns->netlink_cmd.seq += 2;
- else
- zns->netlink_cmd.seq++;
-#endif /* NETLINK*/
+ dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));
/* Copy nexthops; recursive info is included too */
- copy_nexthops(&(ctx->zd_ng.nexthop), re->ng.nexthop, NULL);
+ copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), re->ng.nexthop, NULL);
/* TODO -- maybe use array of nexthops to avoid allocs? */
- /* Ensure that the dplane's nexthop flag is clear. */
- for (ALL_NEXTHOPS(ctx->zd_ng, nexthop))
+ /* Ensure that the dplane's nexthops flags are clear. */
+ for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop))
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
/* Trying out the sequence number idea, so we can try to detect
* when a result is stale.
*/
- re->dplane_sequence++;
+ re->dplane_sequence = zebra_router_get_next_sequence();
ctx->zd_seq = re->dplane_sequence;
ret = AOK;
@@ -635,15 +969,120 @@ done:
}
/*
+ * Capture information for an LSP update in a dplane context.
+ */
+static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
+ enum dplane_op_e op,
+ zebra_lsp_t *lsp)
+{
+ int ret = AOK;
+ zebra_nhlfe_t *nhlfe, *new_nhlfe;
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
+ dplane_op2str(op), lsp->ile.in_label,
+ lsp->num_ecmp);
+
+ ctx->zd_op = op;
+ ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
+
+ /* Capture namespace info */
+ dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT),
+ (op == DPLANE_OP_LSP_UPDATE));
+
+ memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));
+
+ ctx->u.lsp.ile = lsp->ile;
+ ctx->u.lsp.addr_family = lsp->addr_family;
+ ctx->u.lsp.num_ecmp = lsp->num_ecmp;
+ ctx->u.lsp.flags = lsp->flags;
+
+ /* Copy source LSP's nhlfes, and capture 'best' nhlfe */
+ for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
+ /* Not sure if this is meaningful... */
+ if (nhlfe->nexthop == NULL)
+ continue;
+
+ new_nhlfe =
+ zebra_mpls_lsp_add_nhlfe(
+ &(ctx->u.lsp),
+ nhlfe->type,
+ nhlfe->nexthop->type,
+ &(nhlfe->nexthop->gate),
+ nhlfe->nexthop->ifindex,
+ nhlfe->nexthop->nh_label->label[0]);
+
+ if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
+ ret = ENOMEM;
+ break;
+ }
+
+ /* Need to copy flags too */
+ new_nhlfe->flags = nhlfe->flags;
+ new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
+
+ if (nhlfe == lsp->best_nhlfe)
+ ctx->u.lsp.best_nhlfe = new_nhlfe;
+ }
+
+ /* On error the ctx will be cleaned-up, so we don't need to
+ * deal with any allocated nhlfe or nexthop structs here.
+ */
+
+ return ret;
+}
+
+/*
+ * Capture information for a pseudowire update in a dplane context.
+ */
+static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
+ enum dplane_op_e op,
+ struct zebra_pw *pw)
+{
+ int ret = AOK;
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
+ dplane_op2str(op), pw->ifname, pw->local_label,
+ pw->remote_label);
+
+ ctx->zd_op = op;
+ ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
+
+ /* Capture namespace info: no netlink support as of 12/18,
+ * but just in case...
+ */
+ dplane_ctx_ns_init(ctx, zebra_ns_lookup(NS_DEFAULT), false);
+
+ memset(&ctx->u.pw, 0, sizeof(ctx->u.pw));
+
+ /* This name appears to be a C string, so we use string copy. */
+ strlcpy(ctx->u.pw.ifname, pw->ifname, sizeof(ctx->u.pw.ifname));
+ ctx->zd_vrf_id = pw->vrf_id;
+ ctx->u.pw.ifindex = pw->ifindex;
+ ctx->u.pw.type = pw->type;
+ ctx->u.pw.af = pw->af;
+ ctx->u.pw.local_label = pw->local_label;
+ ctx->u.pw.remote_label = pw->remote_label;
+ ctx->u.pw.flags = pw->flags;
+
+ ctx->u.pw.nexthop = pw->nexthop;
+
+ ctx->u.pw.fields = pw->data;
+
+ return ret;
+}
+
+/*
* Enqueue a new route update,
- * and ensure an event is active for the dataplane thread.
+ * and ensure an event is active for the dataplane pthread.
*/
static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
{
int ret = EINVAL;
uint32_t high, curr;
- /* Enqueue for processing by the dataplane thread */
+ /* Enqueue for processing by the dataplane pthread */
DPLANE_LOCK();
{
TAILQ_INSERT_TAIL(&zdplane_info.dg_route_ctx_q, ctx,
@@ -675,35 +1114,12 @@ static int dplane_route_enqueue(struct zebra_dplane_ctx *ctx)
}
/* Ensure that an event for the dataplane thread is active */
- thread_add_event(zdplane_info.dg_master, dplane_route_process, NULL, 0,
- &zdplane_info.dg_t_update);
-
- ret = AOK;
+ ret = dplane_provider_work_ready();
return ret;
}
/*
- * Attempt to dequeue a route-update block
- */
-static struct zebra_dplane_ctx *dplane_route_dequeue(void)
-{
- struct zebra_dplane_ctx *ctx = NULL;
-
- DPLANE_LOCK();
- {
- ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
- if (ctx) {
- TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q,
- ctx, zd_q_entries);
- }
- }
- DPLANE_UNLOCK();
-
- return ctx;
-}
-
-/*
* Utility that prepares a route update and enqueues it for processing
*/
static enum zebra_dplane_result
@@ -733,20 +1149,21 @@ dplane_route_update_internal(struct route_node *rn,
old_re && (old_re != re)) {
ctx->zd_is_update = true;
- old_re->dplane_sequence++;
+ old_re->dplane_sequence =
+ zebra_router_get_next_sequence();
ctx->zd_old_seq = old_re->dplane_sequence;
- ctx->zd_old_tag = old_re->tag;
- ctx->zd_old_type = old_re->type;
- ctx->zd_old_instance = old_re->instance;
- ctx->zd_old_distance = old_re->distance;
- ctx->zd_old_metric = old_re->metric;
+ ctx->u.rinfo.zd_old_tag = old_re->tag;
+ ctx->u.rinfo.zd_old_type = old_re->type;
+ ctx->u.rinfo.zd_old_instance = old_re->instance;
+ ctx->u.rinfo.zd_old_distance = old_re->distance;
+ ctx->u.rinfo.zd_old_metric = old_re->metric;
#ifndef HAVE_NETLINK
/* For bsd, capture previous re's nexthops too, sigh.
* We'll need these to do per-nexthop deletes.
*/
- copy_nexthops(&(ctx->zd_old_ng.nexthop),
+ copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
old_re->ng.nexthop, NULL);
#endif /* !HAVE_NETLINK */
}
@@ -762,10 +1179,11 @@ done:
if (ret == AOK)
result = ZEBRA_DPLANE_REQUEST_QUEUED;
- else if (ctx) {
+ else {
atomic_fetch_add_explicit(&zdplane_info.dg_route_errors, 1,
memory_order_relaxed);
- dplane_ctx_free(&ctx);
+ if (ctx)
+ dplane_ctx_free(&ctx);
}
return result;
@@ -826,56 +1244,131 @@ done:
}
/*
- * Event handler function for routing updates
+ * Enqueue LSP add for the dataplane.
*/
-static int dplane_route_process(struct thread *event)
+enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
{
- enum zebra_dplane_result res;
- struct zebra_dplane_ctx *ctx;
+ enum zebra_dplane_result ret =
+ lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
- while (1) {
- /* Check for shutdown */
- if (!zdplane_info.dg_run)
- break;
+ return ret;
+}
- /* TODO -- limit number of updates per cycle? */
- ctx = dplane_route_dequeue();
- if (ctx == NULL)
- break;
+/*
+ * Enqueue LSP update for the dataplane.
+ */
+enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp)
+{
+ enum zebra_dplane_result ret =
+ lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
- /* Update counter */
- atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, 1,
- memory_order_relaxed);
+ return ret;
+}
- if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
- char dest_str[PREFIX_STRLEN];
+/*
+ * Enqueue LSP delete for the dataplane.
+ */
+enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
+{
+ enum zebra_dplane_result ret =
+ lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
- prefix2str(dplane_ctx_get_dest(ctx),
- dest_str, sizeof(dest_str));
+ return ret;
+}
- zlog_debug("%u:%s Dplane route update ctx %p op %s",
- dplane_ctx_get_vrf(ctx), dest_str,
- ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
- }
+/*
+ * Enqueue pseudowire install for the dataplane.
+ */
+enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw)
+{
+ return pw_update_internal(pw, DPLANE_OP_PW_INSTALL);
+}
+
+/*
+ * Enqueue pseudowire un-install for the dataplane.
+ */
+enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw)
+{
+ return pw_update_internal(pw, DPLANE_OP_PW_UNINSTALL);
+}
+
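Callers of these wrappers receive a queued-or-failed result rather than a final status; a hypothetical call site might look like this sketch:

	/* Hypothetical call site in zebra's MPLS code */
	enum zebra_dplane_result res;

	res = dplane_lsp_add(lsp);
	if (res == ZEBRA_DPLANE_REQUEST_FAILURE) {
		/* Enqueue failed; handle the error immediately */
	} else {
		/* ZEBRA_DPLANE_REQUEST_QUEUED: the final status arrives
		 * later via the registered results callback.
		 */
	}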
+/*
+ * Common internal LSP update utility
+ */
+static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
+ enum dplane_op_e op)
+{
+ enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+ int ret = EINVAL;
+ struct zebra_dplane_ctx *ctx = NULL;
+
+ /* Obtain context block */
+ ctx = dplane_ctx_alloc();
+ if (ctx == NULL) {
+ ret = ENOMEM;
+ goto done;
+ }
- /* TODO -- support series of providers */
+ ret = dplane_ctx_lsp_init(ctx, op, lsp);
+ if (ret != AOK)
+ goto done;
- /* Initially, just doing kernel-facing update here */
- res = kernel_route_update(ctx);
+ ret = dplane_route_enqueue(ctx);
- if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
- atomic_fetch_add_explicit(&zdplane_info.dg_route_errors,
- 1, memory_order_relaxed);
+done:
+ /* Update counter */
+ atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
+ memory_order_relaxed);
- ctx->zd_status = res;
+ if (ret == AOK)
+ result = ZEBRA_DPLANE_REQUEST_QUEUED;
+ else {
+ atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
+ memory_order_relaxed);
+ if (ctx)
+ dplane_ctx_free(&ctx);
+ }
+
+ return result;
+}
- /* Enqueue result to zebra main context */
- zdplane_info.dg_results_cb(ctx);
+/*
+ * Internal, common handler for pseudowire updates.
+ */
+static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
+ enum dplane_op_e op)
+{
+ enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+ int ret;
+ struct zebra_dplane_ctx *ctx = NULL;
- ctx = NULL;
+ ctx = dplane_ctx_alloc();
+ if (ctx == NULL) {
+ ret = ENOMEM;
+ goto done;
}
- return 0;
+ ret = dplane_ctx_pw_init(ctx, op, pw);
+ if (ret != AOK)
+ goto done;
+
+ ret = dplane_route_enqueue(ctx);
+
+done:
+ /* Update counter */
+ atomic_fetch_add_explicit(&zdplane_info.dg_pws_in, 1,
+ memory_order_relaxed);
+
+ if (ret == AOK)
+ result = ZEBRA_DPLANE_REQUEST_QUEUED;
+ else {
+ atomic_fetch_add_explicit(&zdplane_info.dg_pw_errors, 1,
+ memory_order_relaxed);
+ if (ctx)
+ dplane_ctx_free(&ctx);
+ }
+
+ return result;
}
/*
@@ -883,10 +1376,11 @@ static int dplane_route_process(struct thread *event)
*/
int dplane_show_helper(struct vty *vty, bool detailed)
{
- uint64_t queued, limit, queue_max, errs, incoming;
+ uint64_t queued, queue_max, limit, errs, incoming, yields,
+ other_errs;
/* Using atomics because counters are being changed in different
- * contexts.
+ * pthread contexts.
*/
incoming = atomic_load_explicit(&zdplane_info.dg_routes_in,
memory_order_relaxed);
@@ -898,12 +1392,19 @@ int dplane_show_helper(struct vty *vty, bool detailed)
memory_order_relaxed);
errs = atomic_load_explicit(&zdplane_info.dg_route_errors,
memory_order_relaxed);
+ yields = atomic_load_explicit(&zdplane_info.dg_update_yields,
+ memory_order_relaxed);
+ other_errs = atomic_load_explicit(&zdplane_info.dg_other_errors,
+ memory_order_relaxed);
- vty_out(vty, "Route updates: %"PRIu64"\n", incoming);
+ vty_out(vty, "Zebra dataplane:\nRoute updates: %"PRIu64"\n",
+ incoming);
vty_out(vty, "Route update errors: %"PRIu64"\n", errs);
+ vty_out(vty, "Other errors : %"PRIu64"\n", other_errs);
vty_out(vty, "Route update queue limit: %"PRIu64"\n", limit);
vty_out(vty, "Route update queue depth: %"PRIu64"\n", queued);
vty_out(vty, "Route update queue max: %"PRIu64"\n", queue_max);
+ vty_out(vty, "Dplane update yields: %"PRIu64"\n", yields);
return CMD_SUCCESS;
}
@@ -913,8 +1414,35 @@ int dplane_show_helper(struct vty *vty, bool detailed)
*/
int dplane_show_provs_helper(struct vty *vty, bool detailed)
{
- vty_out(vty, "Zebra dataplane providers:%s\n",
- (detailed ? " (detailed)" : ""));
+ struct zebra_dplane_provider *prov;
+ uint64_t in, in_max, out, out_max;
+
+ vty_out(vty, "Zebra dataplane providers:\n");
+
+ DPLANE_LOCK();
+ prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
+ DPLANE_UNLOCK();
+
+ /* Show counters, useful info from each registered provider */
+ while (prov) {
+
+ in = atomic_load_explicit(&prov->dp_in_counter,
+ memory_order_relaxed);
+ in_max = atomic_load_explicit(&prov->dp_in_max,
+ memory_order_relaxed);
+ out = atomic_load_explicit(&prov->dp_out_counter,
+ memory_order_relaxed);
+ out_max = atomic_load_explicit(&prov->dp_out_max,
+ memory_order_relaxed);
+
+ vty_out(vty, "%s (%u): in: %"PRIu64", q_max: %"PRIu64", "
+ "out: %"PRIu64", q_max: %"PRIu64"\n",
+ prov->dp_name, prov->dp_id, in, in_max, out, out_max);
+
+ DPLANE_LOCK();
+ prov = TAILQ_NEXT(prov, dp_prov_link);
+ DPLANE_UNLOCK();
+ }
return CMD_SUCCESS;
}
@@ -923,12 +1451,16 @@ int dplane_show_provs_helper(struct vty *vty, bool detailed)
* Provider registration
*/
int dplane_provider_register(const char *name,
- enum dplane_provider_prio_e prio,
- dplane_provider_process_fp fp,
- dplane_provider_fini_fp fini_fp)
+ enum dplane_provider_prio prio,
+ int flags,
+ int (*fp)(struct zebra_dplane_provider *),
+ int (*fini_fp)(struct zebra_dplane_provider *,
+ bool early),
+ void *data,
+ struct zebra_dplane_provider **prov_p)
{
int ret = 0;
- struct zebra_dplane_provider *p, *last;
+ struct zebra_dplane_provider *p = NULL, *last;
/* Validate */
if (fp == NULL) {
@@ -949,68 +1481,447 @@ int dplane_provider_register(const char *name,
goto done;
}
- strncpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
- p->dp_name[DPLANE_PROVIDER_NAMELEN] = '\0'; /* Belt-and-suspenders */
+ pthread_mutex_init(&(p->dp_mutex), NULL);
+ TAILQ_INIT(&(p->dp_ctx_in_q));
+ TAILQ_INIT(&(p->dp_ctx_out_q));
p->dp_priority = prio;
p->dp_fp = fp;
p->dp_fini = fini_fp;
+ p->dp_data = data;
- /* Lock the lock - the dplane pthread may be running */
+ /* Lock - the dplane pthread may be running */
DPLANE_LOCK();
p->dp_id = ++zdplane_info.dg_provider_id;
+ if (name)
+ strlcpy(p->dp_name, name, DPLANE_PROVIDER_NAMELEN);
+ else
+ snprintf(p->dp_name, DPLANE_PROVIDER_NAMELEN,
+ "provider-%u", p->dp_id);
+
/* Insert into list ordered by priority */
- TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_q_providers) {
+ TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
if (last->dp_priority > p->dp_priority)
break;
}
if (last)
- TAILQ_INSERT_BEFORE(last, p, dp_q_providers);
+ TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
else
TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
- dp_q_providers);
+ dp_prov_link);
/* And unlock */
DPLANE_UNLOCK();
+ if (IS_ZEBRA_DEBUG_DPLANE)
+ zlog_debug("dplane: registered new provider '%s' (%u), prio %d",
+ p->dp_name, p->dp_id, p->dp_priority);
+
done:
+ if (prov_p)
+ *prov_p = p;
+
+ return ret;
+}
+
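An external plugin might register itself with this API roughly as follows; the provider name, callbacks, and use of the THREADED flag here are illustrative, not part of this change:

	static int my_prov_process(struct zebra_dplane_provider *prov);
	static int my_prov_fini(struct zebra_dplane_provider *prov, bool early);

	struct zebra_dplane_provider *my_prov;
	int ret;

	ret = dplane_provider_register("my-plugin",
				       DPLANE_PRIO_PRE_KERNEL,
				       DPLANE_PROV_FLAG_THREADED,
				       my_prov_process,
				       my_prov_fini,
				       NULL /* data */,
				       &my_prov);
	if (ret != AOK)
		zlog_err("Unable to register dplane provider: %d", ret);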
+/* Accessors for provider attributes */
+const char *dplane_provider_get_name(const struct zebra_dplane_provider *prov)
+{
+ return prov->dp_name;
+}
+
+uint32_t dplane_provider_get_id(const struct zebra_dplane_provider *prov)
+{
+ return prov->dp_id;
+}
+
+void *dplane_provider_get_data(const struct zebra_dplane_provider *prov)
+{
+ return prov->dp_data;
+}
+
+int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov)
+{
+ return zdplane_info.dg_updates_per_cycle;
+}
+
+/* Lock/unlock a provider's mutex - iff the provider was registered with
+ * the THREADED flag.
+ */
+void dplane_provider_lock(struct zebra_dplane_provider *prov)
+{
+ if (dplane_provider_is_threaded(prov))
+ DPLANE_PROV_LOCK(prov);
+}
+
+void dplane_provider_unlock(struct zebra_dplane_provider *prov)
+{
+ if (dplane_provider_is_threaded(prov))
+ DPLANE_PROV_UNLOCK(prov);
+}
+
+/*
+ * Dequeue and maintain associated counter
+ */
+struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
+ struct zebra_dplane_provider *prov)
+{
+ struct zebra_dplane_ctx *ctx = NULL;
+
+ dplane_provider_lock(prov);
+
+ ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
+ if (ctx) {
+ TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
+
+ atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
+ memory_order_relaxed);
+ }
+
+ dplane_provider_unlock(prov);
+
+ return ctx;
+}
+
+/*
+ * Dequeue work to a list, return count
+ */
+int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
+ struct dplane_ctx_q *listp)
+{
+ int limit, ret;
+ struct zebra_dplane_ctx *ctx;
+
+ limit = zdplane_info.dg_updates_per_cycle;
+
+ dplane_provider_lock(prov);
+
+ for (ret = 0; ret < limit; ret++) {
+ ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
+ if (ctx) {
+ TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
+
+ TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
+ } else {
+ break;
+ }
+ }
+
+ if (ret > 0)
+ atomic_fetch_sub_explicit(&prov->dp_in_queued, ret,
+ memory_order_relaxed);
+
+ dplane_provider_unlock(prov);
+
return ret;
}
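A provider callback can use the list form to pull a batch of contexts in a single lock cycle; a minimal sketch (the per-context processing is elided, and my_prov_process is a hypothetical callback name):

	static int my_prov_process(struct zebra_dplane_provider *prov)
	{
		struct dplane_ctx_q work;
		struct zebra_dplane_ctx *ctx;

		TAILQ_INIT(&work);

		/* Pull up to one work-limit's worth of contexts at once */
		dplane_provider_dequeue_in_list(prov, &work);

		while ((ctx = dplane_ctx_dequeue(&work)) != NULL) {
			/* ... provider-specific processing ... */
			dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
			dplane_provider_enqueue_out_ctx(prov, ctx);
		}

		return 0;
	}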
/*
- * Zebra registers a results callback with the dataplane system
+ * Enqueue and maintain associated counter
*/
-int dplane_results_register(dplane_results_fp fp)
+void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
+ struct zebra_dplane_ctx *ctx)
{
- zdplane_info.dg_results_cb = fp;
+ dplane_provider_lock(prov);
+
+ TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
+ zd_q_entries);
+
+ dplane_provider_unlock(prov);
+
+ atomic_fetch_add_explicit(&(prov->dp_out_counter), 1,
+ memory_order_relaxed);
+}
+
+/*
+ * Accessor for provider object
+ */
+bool dplane_provider_is_threaded(const struct zebra_dplane_provider *prov)
+{
+ return (prov->dp_flags & DPLANE_PROV_FLAG_THREADED);
+}
+
+/*
+ * Internal helper that copies information from a zebra ns object; this is
+ * called in the zebra main pthread context as part of dplane ctx init.
+ */
+static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
+ struct zebra_ns *zns)
+{
+ ns_info->ns_id = zns->ns_id;
+
+#if defined(HAVE_NETLINK)
+ ns_info->is_cmd = true;
+ ns_info->nls = zns->netlink_dplane;
+#endif /* NETLINK */
+}
+
+/*
+ * Provider api to signal that work/events are available
+ * for the dataplane pthread.
+ */
+int dplane_provider_work_ready(void)
+{
+ /* Note that during zebra startup, we may be offered work before
+ * the dataplane pthread (and thread-master) are ready. We want to
+ * enqueue the work, but the event-scheduling machinery may not be
+ * available.
+ */
+ if (zdplane_info.dg_run) {
+ thread_add_event(zdplane_info.dg_master,
+ dplane_thread_loop, NULL, 0,
+ &zdplane_info.dg_t_update);
+ }
+
return AOK;
}
/*
- * Initialize the dataplane module during startup, internal/private version
+ * Kernel dataplane provider
*/
-static void zebra_dplane_init_internal(struct zebra_t *zebra)
+
+/*
+ * Handler for kernel LSP updates
+ */
+static enum zebra_dplane_result
+kernel_dplane_lsp_update(struct zebra_dplane_ctx *ctx)
{
- memset(&zdplane_info, 0, sizeof(zdplane_info));
+ enum zebra_dplane_result res;
- pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
+ /* Call into the synchronous kernel-facing code here */
+ res = kernel_lsp_update(ctx);
- TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
- TAILQ_INIT(&zdplane_info.dg_providers_q);
+ if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
+ atomic_fetch_add_explicit(
+ &zdplane_info.dg_lsp_errors, 1,
+ memory_order_relaxed);
- zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
+ return res;
+}
- /* TODO -- register default kernel 'provider' during init */
+/*
+ * Handler for kernel pseudowire updates
+ */
+static enum zebra_dplane_result
+kernel_dplane_pw_update(struct zebra_dplane_ctx *ctx)
+{
+ enum zebra_dplane_result res;
- zdplane_info.dg_run = true;
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("Dplane pw %s: op %s af %d loc: %u rem: %u",
+ dplane_ctx_get_pw_ifname(ctx),
+ dplane_op2str(ctx->zd_op),
+ dplane_ctx_get_pw_af(ctx),
+ dplane_ctx_get_pw_local_label(ctx),
+ dplane_ctx_get_pw_remote_label(ctx));
+
+ res = kernel_pw_update(ctx);
+
+ if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
+ atomic_fetch_add_explicit(
+ &zdplane_info.dg_pw_errors, 1,
+ memory_order_relaxed);
+
+ return res;
+}
+
+/*
+ * Handler for kernel route updates
+ */
+static enum zebra_dplane_result
+kernel_dplane_route_update(struct zebra_dplane_ctx *ctx)
+{
+ enum zebra_dplane_result res;
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
+ char dest_str[PREFIX_STRLEN];
+
+ prefix2str(dplane_ctx_get_dest(ctx),
+ dest_str, sizeof(dest_str));
+
+ zlog_debug("%u:%s Dplane route update ctx %p op %s",
+ dplane_ctx_get_vrf(ctx), dest_str,
+ ctx, dplane_op2str(dplane_ctx_get_op(ctx)));
+ }
+
+ /* Call into the synchronous kernel-facing code here */
+ res = kernel_route_update(ctx);
+
+ if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
+ atomic_fetch_add_explicit(
+ &zdplane_info.dg_route_errors, 1,
+ memory_order_relaxed);
+
+ return res;
+}
+
+/*
+ * Kernel provider callback
+ */
+static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
+{
+ enum zebra_dplane_result res;
+ struct zebra_dplane_ctx *ctx;
+ int counter, limit;
+
+ limit = dplane_provider_get_work_limit(prov);
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("dplane provider '%s': processing",
+ dplane_provider_get_name(prov));
+
+ for (counter = 0; counter < limit; counter++) {
- /* TODO -- start dataplane pthread. We're using the zebra
- * core/main thread temporarily
+ ctx = dplane_provider_dequeue_in_ctx(prov);
+ if (ctx == NULL)
+ break;
+
+ /* Dispatch to appropriate kernel-facing apis */
+ switch (dplane_ctx_get_op(ctx)) {
+
+ case DPLANE_OP_ROUTE_INSTALL:
+ case DPLANE_OP_ROUTE_UPDATE:
+ case DPLANE_OP_ROUTE_DELETE:
+ res = kernel_dplane_route_update(ctx);
+ break;
+
+ case DPLANE_OP_LSP_INSTALL:
+ case DPLANE_OP_LSP_UPDATE:
+ case DPLANE_OP_LSP_DELETE:
+ res = kernel_dplane_lsp_update(ctx);
+ break;
+
+ case DPLANE_OP_PW_INSTALL:
+ case DPLANE_OP_PW_UNINSTALL:
+ res = kernel_dplane_pw_update(ctx);
+ break;
+
+ default:
+ atomic_fetch_add_explicit(
+ &zdplane_info.dg_other_errors, 1,
+ memory_order_relaxed);
+
+ res = ZEBRA_DPLANE_REQUEST_FAILURE;
+ break;
+ }
+
+ dplane_ctx_set_status(ctx, res);
+
+ dplane_provider_enqueue_out_ctx(prov, ctx);
+ }
+
+ /* Ensure that we'll run the work loop again if there's still
+ * more work to do.
*/
- zdplane_info.dg_master = zebra->master;
+ if (counter >= limit) {
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("dplane provider '%s' reached max updates %d",
+ dplane_provider_get_name(prov), counter);
+
+ atomic_fetch_add_explicit(&zdplane_info.dg_update_yields,
+ 1, memory_order_relaxed);
+
+ dplane_provider_work_ready();
+ }
+
+ return 0;
+}
+
+#if DPLANE_TEST_PROVIDER
+
+/*
+ * Test dataplane provider plugin
+ */
+
+/*
+ * Test provider process callback
+ */
+static int test_dplane_process_func(struct zebra_dplane_provider *prov)
+{
+ struct zebra_dplane_ctx *ctx;
+ int counter, limit;
+
+ /* Just moving from 'in' queue to 'out' queue */
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("dplane provider '%s': processing",
+ dplane_provider_get_name(prov));
+
+ limit = dplane_provider_get_work_limit(prov);
+
+ for (counter = 0; counter < limit; counter++) {
+
+ ctx = dplane_provider_dequeue_in_ctx(prov);
+ if (ctx == NULL)
+ break;
+
+ dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
+
+ dplane_provider_enqueue_out_ctx(prov, ctx);
+ }
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("dplane provider '%s': processed %d",
+ dplane_provider_get_name(prov), counter);
+
+ /* Ensure that we'll run the work loop again if there's still
+ * more work to do.
+ */
+ if (counter >= limit)
+ dplane_provider_work_ready();
+
+ return 0;
+}
+
+/*
+ * Test provider shutdown/fini callback
+ */
+static int test_dplane_shutdown_func(struct zebra_dplane_provider *prov,
+ bool early)
+{
+ if (IS_ZEBRA_DEBUG_DPLANE)
+ zlog_debug("dplane provider '%s': %sshutdown",
+ dplane_provider_get_name(prov),
+ early ? "early " : "");
+
+ return 0;
+}
+#endif /* DPLANE_TEST_PROVIDER */
+
+/*
+ * Register default kernel provider
+ */
+static void dplane_provider_init(void)
+{
+ int ret;
+
+ ret = dplane_provider_register("Kernel",
+ DPLANE_PRIO_KERNEL,
+ DPLANE_PROV_FLAGS_DEFAULT,
+ kernel_dplane_process_func,
+ NULL,
+ NULL, NULL);
+
+ if (ret != AOK)
+ zlog_err("Unable to register kernel dplane provider: %d",
+ ret);
+
+#if DPLANE_TEST_PROVIDER
+ /* Optional test provider ... */
+ ret = dplane_provider_register("Test",
+ DPLANE_PRIO_PRE_KERNEL,
+ DPLANE_PROV_FLAGS_DEFAULT,
+ test_dplane_process_func,
+ test_dplane_shutdown_func,
+ NULL /* data */, NULL);
+
+ if (ret != AOK)
+ zlog_err("Unable to register test dplane provider: %d",
+ ret);
+#endif /* DPLANE_TEST_PROVIDER */
}
/* Indicates zebra shutdown/exit is in progress. Some operations may be
@@ -1026,7 +1937,7 @@ bool dplane_is_in_shutdown(void)
* early during zebra shutdown, as a signal to stop new work and prepare
* for updates generated by shutdown/cleanup activity, as zebra tries to
* remove everything it's responsible for.
- * NB: This runs in the main zebra thread context.
+ * NB: This runs in the main zebra pthread context.
*/
void zebra_dplane_pre_finish(void)
{
@@ -1035,7 +1946,7 @@ void zebra_dplane_pre_finish(void)
zdplane_info.dg_is_shutdown = true;
- /* Notify provider(s) of pending shutdown */
+ /* TODO -- Notify provider(s) of pending shutdown */
}
/*
@@ -1044,16 +1955,48 @@ void zebra_dplane_pre_finish(void)
*/
static bool dplane_work_pending(void)
{
+ bool ret = false;
struct zebra_dplane_ctx *ctx;
+ struct zebra_dplane_provider *prov;
- /* TODO -- just checking incoming/pending work for now */
+ /* TODO -- just checking incoming/pending work for now, must check
+ * providers
+ */
DPLANE_LOCK();
{
ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
+ prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
}
DPLANE_UNLOCK();
- return (ctx != NULL);
+ if (ctx != NULL) {
+ ret = true;
+ goto done;
+ }
+
+ while (prov) {
+
+ dplane_provider_lock(prov);
+
+ ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
+ if (ctx == NULL)
+ ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
+
+ dplane_provider_unlock(prov);
+
+ if (ctx != NULL)
+ break;
+
+ DPLANE_LOCK();
+ prov = TAILQ_NEXT(prov, dp_prov_link);
+ DPLANE_UNLOCK();
+ }
+
+ if (ctx != NULL)
+ ret = true;
+
+done:
+ return ret;
}
/*
@@ -1081,7 +2024,7 @@ static int dplane_check_shutdown_status(struct thread *event)
/* We appear to be done - schedule a final callback event
* for the zebra main pthread.
*/
- thread_add_event(zebrad.master, zebra_finalize, NULL, 0, NULL);
+ thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
}
return 0;
@@ -1108,6 +2051,198 @@ void zebra_dplane_finish(void)
}
/*
+ * Main dataplane pthread event loop. The thread takes new incoming work
+ * and offers it to the first provider. It then iterates through the
+ * providers, taking complete work from each one and offering it
+ * to the next in order. At each step, a limited number of updates are
+ * processed during a cycle in order to provide some fairness.
+ *
+ * This loop through the providers is only run once, so that the dataplane
+ * pthread can look for other pending work - such as i/o work on behalf of
+ * providers.
+ */
+static int dplane_thread_loop(struct thread *event)
+{
+ struct dplane_ctx_q work_list;
+ struct dplane_ctx_q error_list;
+ struct zebra_dplane_provider *prov;
+ struct zebra_dplane_ctx *ctx, *tctx;
+ int limit, counter, error_counter;
+ uint64_t curr, high;
+
+ /* Capture work limit per cycle */
+ limit = zdplane_info.dg_updates_per_cycle;
+
+ /* Init temporary lists used to move contexts among providers */
+ TAILQ_INIT(&work_list);
+ TAILQ_INIT(&error_list);
+ error_counter = 0;
+
+ /* Check for zebra shutdown */
+ if (!zdplane_info.dg_run)
+ goto done;
+
+ /* Dequeue some incoming work from zebra (if any) onto the temporary
+ * working list.
+ */
+ DPLANE_LOCK();
+
+ /* Locate initial registered provider */
+ prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
+
+ /* Move new work from incoming list to temp list */
+ for (counter = 0; counter < limit; counter++) {
+ ctx = TAILQ_FIRST(&zdplane_info.dg_route_ctx_q);
+ if (ctx) {
+ TAILQ_REMOVE(&zdplane_info.dg_route_ctx_q, ctx,
+ zd_q_entries);
+
+ ctx->zd_provider = prov->dp_id;
+
+ TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
+ } else {
+ break;
+ }
+ }
+
+ DPLANE_UNLOCK();
+
+ atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter,
+ memory_order_relaxed);
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("dplane: incoming new work counter: %d", counter);
+
+ /* Iterate through the registered providers, offering new incoming
+ * work. If the provider has outgoing work in its queue, take that
+ * work for the next provider
+ */
+ while (prov) {
+
+ /* At each iteration, the temporary work list has 'counter'
+ * items.
+ */
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("dplane enqueues %d new work to provider '%s'",
+ counter, dplane_provider_get_name(prov));
+
+ /* Capture current provider id in each context; check for
+ * error status.
+ */
+ TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
+ if (dplane_ctx_get_status(ctx) ==
+ ZEBRA_DPLANE_REQUEST_SUCCESS) {
+ ctx->zd_provider = prov->dp_id;
+ } else {
+ /*
+ * TODO -- improve error-handling: recirc
+ * errors backwards so that providers can
+ * 'undo' their work (if they want to)
+ */
+
+ /* Move to error list; will be returned
+ * to zebra main.
+ */
+ TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
+ TAILQ_INSERT_TAIL(&error_list,
+ ctx, zd_q_entries);
+ error_counter++;
+ }
+ }
+
+ /* Enqueue new work to the provider */
+ dplane_provider_lock(prov);
+
+ if (TAILQ_FIRST(&work_list))
+ TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
+ zd_q_entries);
+
+ atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
+ memory_order_relaxed);
+ atomic_fetch_add_explicit(&prov->dp_in_queued, counter,
+ memory_order_relaxed);
+ curr = atomic_load_explicit(&prov->dp_in_queued,
+ memory_order_relaxed);
+ high = atomic_load_explicit(&prov->dp_in_max,
+ memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&prov->dp_in_max, curr,
+ memory_order_relaxed);
+
+ dplane_provider_unlock(prov);
+
+ /* Reset the temp list (though the 'concat' may have done this
+ * already), and the counter
+ */
+ TAILQ_INIT(&work_list);
+ counter = 0;
+
+ /* Call into the provider code. Note that this is
+ * unconditional: we offer to do work even if we don't enqueue
+ * any _new_ work.
+ */
+ (*prov->dp_fp)(prov);
+
+ /* Check for zebra shutdown */
+ if (!zdplane_info.dg_run)
+ break;
+
+ /* Dequeue completed work from the provider */
+ dplane_provider_lock(prov);
+
+ while (counter < limit) {
+ ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
+ if (ctx) {
+ TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
+ zd_q_entries);
+
+ TAILQ_INSERT_TAIL(&work_list,
+ ctx, zd_q_entries);
+ counter++;
+ } else
+ break;
+ }
+
+ dplane_provider_unlock(prov);
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("dplane dequeues %d completed work from provider %s",
+ counter, dplane_provider_get_name(prov));
+
+ /* Locate next provider */
+ DPLANE_LOCK();
+ prov = TAILQ_NEXT(prov, dp_prov_link);
+ DPLANE_UNLOCK();
+ }
+
+ /* After all providers have been serviced, enqueue any completed
+ * work and any errors back to zebra so it can process the results.
+ */
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("dplane has %d completed, %d errors, for zebra main",
+ counter, error_counter);
+
+ /*
+ * Hand lists through the api to zebra main,
+ * to reduce the number of lock/unlock cycles
+ */
+
+ /* Call through to zebra main */
+ (zdplane_info.dg_results_cb)(&error_list);
+
+ TAILQ_INIT(&error_list);
+
+
+ /* Call through to zebra main */
+ (zdplane_info.dg_results_cb)(&work_list);
+
+ TAILQ_INIT(&work_list);
+
+done:
+ return 0;
+}
+
+/*
* Final phase of shutdown, after all work enqueued to dplane has been
* processed. This is called from the zebra main pthread context.
*/
@@ -1122,20 +2257,72 @@ void zebra_dplane_shutdown(void)
THREAD_OFF(zdplane_info.dg_t_update);
- /* TODO */
- /* frr_pthread_stop(...) */
+ frr_pthread_stop(zdplane_info.dg_pthread, NULL);
+
+ /* Destroy pthread */
+ frr_pthread_destroy(zdplane_info.dg_pthread);
+ zdplane_info.dg_pthread = NULL;
+ zdplane_info.dg_master = NULL;
+
+ /* TODO -- Notify provider(s) of final shutdown */
+
+ /* TODO -- Clean-up provider objects */
+
+ /* TODO -- Clean queue(s), free memory */
+}
+
+/*
+ * Initialize the dataplane module during startup, internal/private version
+ */
+static void zebra_dplane_init_internal(void)
+{
+ memset(&zdplane_info, 0, sizeof(zdplane_info));
+
+ pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
+
+ TAILQ_INIT(&zdplane_info.dg_route_ctx_q);
+ TAILQ_INIT(&zdplane_info.dg_providers_q);
- /* Notify provider(s) of final shutdown */
+ zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
- /* Clean-up provider objects */
+ zdplane_info.dg_max_queued_updates = DPLANE_DEFAULT_MAX_QUEUED;
+
+ /* Register default kernel 'provider' during init */
+ dplane_provider_init();
+}
+
+/*
+ * Start the dataplane pthread. This step needs to be run later than the
+ * 'init' step, in case zebra has fork-ed.
+ */
+void zebra_dplane_start(void)
+{
+ /* Start dataplane pthread */
+
+ struct frr_pthread_attr pattr = {
+ .start = frr_pthread_attr_default.start,
+ .stop = frr_pthread_attr_default.stop
+ };
+
+ zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
+ "Zebra dplane");
+
+ zdplane_info.dg_master = zdplane_info.dg_pthread->master;
+
+ zdplane_info.dg_run = true;
+
+ /* Enqueue an initial event for the dataplane pthread */
+ thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
+ &zdplane_info.dg_t_update);
- /* Clean queue(s) */
+ frr_pthread_run(zdplane_info.dg_pthread, NULL);
}
/*
* Initialize the dataplane module at startup; called by zebra rib_init()
*/
-void zebra_dplane_init(void)
+void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
{
- zebra_dplane_init_internal(&zebrad);
+ zebra_dplane_init_internal();
+ zdplane_info.dg_results_cb = results_fp;
}