 isisd/isis_zebra.c    |   1
 yang/frr-pathd.yang   |  96
 zebra/dplane_fpm_nl.c |  29
 zebra/rib.h           |   7
 zebra/zebra_dplane.c  |   8
 zebra/zebra_dplane.h  |   3
 zebra/zebra_rib.c     | 134
 zebra/zebra_vty.c     |  15
 8 files changed, 253 insertions(+), 40 deletions(-)
diff --git a/isisd/isis_zebra.c b/isisd/isis_zebra.c
index 9d483c9368..15af9636ca 100644
--- a/isisd/isis_zebra.c
+++ b/isisd/isis_zebra.c
@@ -1549,6 +1549,7 @@ static int isis_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS)
 		isis_zebra_srv6_sid_uninstall(area, sid);
 		listnode_delete(area->srv6db.srv6_sids, sid);
+		isis_srv6_sid_free(sid);
 	}
 
 	/* Allocate new SRv6 End SID */
diff --git a/yang/frr-pathd.yang b/yang/frr-pathd.yang
index 5beda769c1..96eafda9d4 100644
--- a/yang/frr-pathd.yang
+++ b/yang/frr-pathd.yang
@@ -6,15 +6,9 @@ module frr-pathd {
   import ietf-inet-types {
     prefix inet;
   }
-  import ietf-yang-types {
-    prefix yang;
-  }
   import ietf-routing-types {
     prefix rt-types;
   }
-  import frr-interface {
-    prefix frr-interface;
-  }
 
   organization "Free Range Routing";
 
@@ -27,11 +21,10 @@ module frr-pathd {
   revision 2018-11-06 {
     description
       "Initial revision.";
+    reference "FRRouting";
   }
 
   typedef protocol-origin-type {
-    description
-      "Indication for the protocol origin of an object.";
     type enumeration {
       enum pcep {
         value 1;
@@ -46,6 +39,8 @@ module frr-pathd {
         description "The object was created through CLI, Yang model via Netconf, gRPC, etc";
       }
     }
+    description
+      "Indication for the protocol origin of an object.";
   }
 
   typedef originator-type {
@@ -57,7 +52,13 @@
   }
 
   container pathd {
+    description
+      "Path properties for Segment Routing TE";
+
     container srte {
+      description
+        "Segment Routing TE properties";
+
       list segment-list {
         key "name";
         description "Segment-list properties";
@@ -91,9 +92,10 @@
         }
         container nai {
           presence "The segment has a Node or Adjacency Identifier";
+          description
+            "Node or Adjacency Identifier for the segment";
+
           leaf type {
-            description "NAI type";
-            mandatory true;
             type enumeration {
               enum ipv4_node {
                 value 1;
@@ -132,42 +134,59 @@
                 description "IPv6 prefix with optional algorithm";
               }
             }
+            mandatory true;
+            description "NAI type";
           }
           leaf local-address {
             type inet:ip-address;
             mandatory true;
+            description
+              "Local address of the NAI";
           }
           leaf local-prefix-len {
+            when "../type = 'ipv4_local_iface' or ../type = 'ipv6_local_iface' or ../type = 'ipv4_algo' or ../type = 'ipv6_algo'";
             type uint8;
             mandatory true;
-            when "../type = 'ipv4_local_iface' or ../type = 'ipv6_local_iface' or ../type = 'ipv4_algo' or ../type = 'ipv6_algo'";
+            description
+              "Prefix length of the local address";
           }
           leaf local-interface {
+            when "../type = 'ipv4_local_iface' or ../type = 'ipv6_local_iface' or ../type = 'ipv4_unnumbered_adjacency'";
             type uint32;
             mandatory true;
-            when "../type = 'ipv4_local_iface' or ../type = 'ipv6_local_iface' or ../type = 'ipv4_unnumbered_adjacency'";
+            description
+              "Local interface ID for the NAI";
           }
           leaf remote-address {
+            when "../type = 'ipv4_adjacency' or ../type = 'ipv6_adjacency' or ../type = 'ipv4_unnumbered_adjacency'";
             type inet:ip-address;
-            mandatory true;
-            when "../type = 'ipv4_adjacency' or ../type = 'ipv6_adjacency' or ../type = 'ipv4_unnumbered_adjacency'";
-          }
-          leaf remote-interface {
-            type uint32;
-            mandatory true;
-            when "../type = 'ipv4_unnumbered_adjacency'";
-          }
+            mandatory true;
+            description
+              "Remote address of the NAI";
+          }
+          leaf remote-interface {
+            when "../type = 'ipv4_unnumbered_adjacency'";
+            type uint32;
+            mandatory true;
+            description
+              "Remote interface ID for the NAI";
+          }
           leaf algorithm {
+            when "../type = 'ipv4_algo' or ../type = 'ipv6_algo'";
             type uint8;
             mandatory true;
-            when "../type = 'ipv4_algo' or ../type = 'ipv6_algo'";
-          }
+            description
+              "Algorithm to use for the NAI";
           }
+        }
         }
       }
 
       list policy {
         key "color endpoint";
         unique "name";
+        description
+          "List of SR Policies.";
+
         leaf color {
           type uint32;
           description
@@ -197,10 +216,10 @@
             "True if a valid candidate path of this policy is operational in zebra, False otherwise";
         }
         list candidate-path {
+          key "preference";
           unique "name";
           description
             "List of Candidate Paths of the SR Policy.";
-          key "preference";
           leaf preference {
             type uint32;
             description
@@ -237,17 +256,21 @@
               "Candidate path distinguisher";
           }
           leaf type {
-            description
-              "Type of the Candidate Path.";
-            mandatory true;
             type enumeration {
               enum explicit {
                 value 1;
+                description
+                  "Explicit path defined by a segment list";
               }
               enum dynamic {
                 value 2;
+                description
+                  "Dynamic path computed by a routing protocol";
               }
             }
+            mandatory true;
+            description
+              "Type of the Candidate Path.";
           }
           leaf segment-list-name {
             type leafref {
@@ -271,10 +294,12 @@
               "If the bandwidth limitation is a requirement or only a suggestion";
           }
           leaf value {
-            mandatory true;
             type decimal64 {
               fraction-digits 6;
             }
+            mandatory true;
+            description
+              "The bandwidth value for the candidate path.";
           }
         }
         container affinity {
@@ -298,9 +323,10 @@
         }
         list metrics {
           key "type";
+          description
+            "This list contains the different metrics that can be used to describe a path.";
+
           leaf type {
-            description
-              "Type of the metric.";
             type enumeration {
               enum igp {
                 value 1;
@@ -387,6 +413,8 @@
                 description "Border Node Count metric";
               }
             }
+            description
+              "Type of the metric.";
           }
           leaf required {
             type boolean;
@@ -405,10 +433,12 @@
             "Defines if the value has been generated by the originator of the path.";
           }
           leaf value {
-            mandatory true;
             type decimal64 {
               fraction-digits 6;
             }
+            mandatory true;
+            description
+              "Value of the metric.";
           }
         }
         container objective-function {
@@ -422,9 +452,6 @@
             "If an objective function is a requirement, or if it is only a suggestion";
           }
           leaf type {
-            description
-              "Type of objective function.";
-            mandatory true;
             type enumeration {
               enum mcp {
                 value 1;
@@ -495,6 +522,9 @@
               description "Minimize the number of Shared Nodes";
             }
           }
+          mandatory true;
+          description
+            "Type of objective function.";
         }
       }
     }
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index 9f26852d1f..116a697de9 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -587,6 +587,10 @@ static void fpm_read(struct event *t)
     struct zebra_dplane_ctx *ctx;
     size_t available_bytes;
     size_t hdr_available_bytes;
+    struct dplane_ctx_list_head batch_list;
+
+    /* Initialize the batch list */
+    dplane_ctx_q_init(&batch_list);
 
     /* Let's ignore the input at the moment.
      */
     rv = stream_read_try(fnc->ibuf, fnc->socket,
@@ -627,7 +631,7 @@ static void fpm_read(struct event *t)
     while (available_bytes) {
         if (available_bytes < (ssize_t)FPM_MSG_HDR_LEN) {
             stream_pulldown(fnc->ibuf);
-            return;
+            goto send_batch;
         }
 
         fpm.version = stream_getc(fnc->ibuf);
@@ -642,7 +646,7 @@ static void fpm_read(struct event *t)
                  __func__, fpm.version, fpm.msg_type);
 
             FPM_RECONNECT(fnc);
-            return;
+            goto send_batch;
         }
 
         /*
@@ -654,7 +658,7 @@ static void fpm_read(struct event *t)
                 "%s: Received message length: %u that does not even fill the FPM header",
                 __func__, fpm.msg_len);
             FPM_RECONNECT(fnc);
-            return;
+            goto send_batch;
         }
 
         /*
@@ -665,7 +669,7 @@ static void fpm_read(struct event *t)
         if (fpm.msg_len > available_bytes) {
             stream_rewind_getp(fnc->ibuf, FPM_MSG_HDR_LEN);
             stream_pulldown(fnc->ibuf);
-            return;
+            goto send_batch;
         }
 
         available_bytes -= FPM_MSG_HDR_LEN;
@@ -715,8 +719,9 @@ static void fpm_read(struct event *t)
             break;
         }
 
-        /* Parse the route data into a dplane ctx, then
-         * enqueue it to zebra for processing.
+        /*
+         * Parse the route data into a dplane ctx, add to ctx list
+         * and enqueue the batch of ctx to zebra for processing
          */
         ctx = dplane_ctx_alloc();
         dplane_ctx_route_init(ctx, DPLANE_OP_ROUTE_NOTIFY, NULL,
@@ -735,7 +740,8 @@ static void fpm_read(struct event *t)
              * tableid to 0 in order for this to work.
              */
             dplane_ctx_set_vrf(ctx, VRF_UNKNOWN);
-            dplane_provider_enqueue_to_zebra(ctx);
+            /* Add to the list for batching */
+            dplane_ctx_enqueue_tail(&batch_list, ctx);
         } else {
             /*
              * Let's continue to read other messages
@@ -755,6 +761,15 @@ static void fpm_read(struct event *t)
     }
 
     stream_reset(fnc->ibuf);
+
+send_batch:
+    /* Send all contexts to zebra in a single batch if we have any */
+    if (dplane_ctx_queue_count(&batch_list) > 0) {
+        if (IS_ZEBRA_DEBUG_FPM)
+            zlog_debug("%s: Sending batch of %u contexts to zebra", __func__,
+                       dplane_ctx_queue_count(&batch_list));
+        dplane_provider_enqueue_ctx_list_to_zebra(&batch_list);
+    }
 }
 
 static void fpm_write(struct event *t)
diff --git a/zebra/rib.h b/zebra/rib.h
index 652f6208f4..fa6ce4447b 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -192,6 +192,12 @@ struct route_entry {
 struct meta_queue {
     struct list *subq[MQ_SIZE];
     uint32_t size; /* sum of lengths of all subqueues */
+    _Atomic uint32_t max_subq[MQ_SIZE];    /* Max size of individual sub queue */
+    _Atomic uint32_t max_metaq;            /* Max size of the MetaQ */
+    _Atomic uint32_t total_subq[MQ_SIZE];  /* Total subq events */
+    _Atomic uint32_t total_metaq;          /* Total MetaQ events */
+    _Atomic uint32_t re_subq[MQ_SIZE];     /* current RE count sub queue */
+    _Atomic uint32_t max_re_subq[MQ_SIZE]; /* Max RE in sub queue */
 };
 
 /*
@@ -474,6 +480,7 @@ extern void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq,
                                            bool rt_delete);
 
 extern void rib_update_handle_vrf_all(enum rib_update_event event, int rtype);
+int zebra_show_metaq_counter(struct vty *vty, bool uj);
 
 /*
  * rib_find_rn_from_ctx
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 4344a8d79a..a6d43daa93 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -6590,6 +6590,14 @@ int dplane_provider_work_ready(void)
 }
 
 /*
+ * Enqueue a context list to zebra main.
+ */
+void dplane_provider_enqueue_ctx_list_to_zebra(struct dplane_ctx_list_head *batch_list)
+{
+    (zdplane_info.dg_results_cb)(batch_list);
+}
+
+/*
  * Enqueue a context directly to zebra main.
  */
 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
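The fpm_read() change above stops handing each parsed FPM message to zebra one at a time: contexts are collected on a local dplane_ctx_list_head during the read loop, every early-exit path jumps to the send_batch label, and the whole list is passed once through the new dplane_provider_enqueue_ctx_list_to_zebra(), which simply invokes the registered results callback with the list. The following is a minimal, self-contained sketch of that collect-then-flush pattern; the names (struct ctx, struct ctx_list, flush_batch_to_consumer) are illustrative stand-ins for this note, not FRR's dplane API.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a dplane context and an intrusive context list. */
struct ctx {
    int route_id;
    struct ctx *next;
};

struct ctx_list {
    struct ctx *head, *tail;
    unsigned int count;
};

static void list_init(struct ctx_list *l)
{
    l->head = l->tail = NULL;
    l->count = 0;
}

static void list_enqueue_tail(struct ctx_list *l, struct ctx *c)
{
    c->next = NULL;
    if (l->tail)
        l->tail->next = c;
    else
        l->head = c;
    l->tail = c;
    l->count++;
}

/* Stand-in for the single hand-off to the consumer (zebra main):
 * one call per read cycle, no matter how many messages were parsed. */
static void flush_batch_to_consumer(struct ctx_list *l)
{
    struct ctx *c, *next;

    printf("flushing %u contexts in one batch\n", l->count);
    for (c = l->head; c; c = next) {
        next = c->next;
        printf("  processing route %d\n", c->route_id);
        free(c);
    }
    list_init(l);
}

int main(void)
{
    struct ctx_list batch;
    int msgs[] = { 10, 11, 12 };

    list_init(&batch);

    /* Read loop: each parsed message becomes a context that is only
     * queued locally; nothing is handed to the consumer yet. */
    for (size_t i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++) {
        struct ctx *c = calloc(1, sizeof(*c));

        if (!c)
            return 1;
        c->route_id = msgs[i];
        list_enqueue_tail(&batch, c);
    }

    /* Single flush on the way out, mirroring the send_batch label
     * that all exit paths of fpm_read() now go through. */
    if (batch.count > 0)
        flush_batch_to_consumer(&batch);

    return 0;
}

The apparent benefit of batching here is that the hand-off to zebra happens once per read cycle rather than once per FPM message, so its cost no longer grows with the number of messages received in a single buffer.
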
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index cabc70c232..1c03a29534 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -1236,6 +1236,9 @@ void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
 /* Enqueue a context directly to zebra main. */
 void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx);
 
+/* Enqueue a context list to zebra main. */
+void dplane_provider_enqueue_ctx_list_to_zebra(struct dplane_ctx_list_head *batch_list);
+
 /* Enable collection of extra info about interfaces in route updates;
  * this allows a provider/plugin to see some extra info in route update
  * context objects.
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index e32b004ae9..c7dc5e5d07 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -30,6 +30,7 @@
 #include "printfrr.h"
 #include "frrscript.h"
 #include "frrdistance.h"
+#include "lib/termtable.h"
 
 #include "zebra/zebra_router.h"
 #include "zebra/connected.h"
@@ -273,6 +274,63 @@ static const char *subqueue2str(enum meta_queue_indexes index)
     return "Unknown";
 }
 
+/* Handler for 'show zebra metaq' */
+int zebra_show_metaq_counter(struct vty *vty, bool uj)
+{
+    struct meta_queue *mq = zrouter.mq;
+    struct ttable *tt = NULL;
+    char *table = NULL;
+    json_object *json = NULL;
+    json_object *json_table = NULL;
+
+    if (!mq)
+        return CMD_WARNING;
+
+    /* Create a table for subqueue details */
+    tt = ttable_new(&ttable_styles[TTSTYLE_ASCII]);
+    ttable_add_row(tt, "SubQ|Current|Max Size|Total");
+
+    /* Add rows for each subqueue */
+    for (uint8_t i = 0; i < MQ_SIZE; i++) {
+        ttable_add_row(tt, "%s|%u|%u|%u", subqueue2str(i), mq->subq[i]->count,
+                       mq->max_subq[i], mq->total_subq[i]);
+    }
+
+    /* For a better formatting between the content and separator */
+    tt->style.cell.rpad = 2;
+    tt->style.cell.lpad = 1;
+    ttable_restyle(tt);
+
+    if (uj) {
+        json = json_object_new_object();
+        /* Add MetaQ summary to the JSON object */
+        json_object_int_add(json, "currentSize", mq->size);
+        json_object_int_add(json, "maxSize", mq->max_metaq);
+        json_object_int_add(json, "total", mq->total_metaq);
+
+        /* Convert the table to JSON and add it to the main JSON object */
+        /* n = name/string, u = unsigned int */
+        json_table = ttable_json(tt, "sddd");
+        json_object_object_add(json, "subqueues", json_table);
+        vty_json(vty, json);
+    } else {
+        vty_out(vty, "MetaQ Summary\n");
+        vty_out(vty, "Current Size\t: %u\n", mq->size);
+        vty_out(vty, "Max Size\t: %u\n", mq->max_metaq);
+        vty_out(vty, "Total\t\t: %u\n", mq->total_metaq);
+
+        /* Dump the table */
+        table = ttable_dump(tt, "\n");
+        vty_out(vty, "%s\n", table);
+        XFREE(MTYPE_TMP_TTABLE, table);
+    }
+
+    /* Clean up the table */
+    ttable_del(tt);
+
+    return CMD_SUCCESS;
+}
+
 printfrr_ext_autoreg_p("ZN", printfrr_zebra_node);
 static ssize_t printfrr_zebra_node(struct fbuf *buf, struct printfrr_eargs *ea,
                                    const void *ptr)
@@ -3257,6 +3315,7 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
     struct route_node *rn = NULL;
     struct route_entry *re = NULL, *curr_re = NULL;
     uint8_t qindex = MQ_SIZE, curr_qindex = MQ_SIZE;
+    uint64_t curr, high;
 
     rn = (struct route_node *)data;
 
@@ -3300,6 +3359,15 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
     listnode_add(mq->subq[qindex], rn);
     route_lock_node(rn);
     mq->size++;
+    atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+    atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+    curr = listcount(mq->subq[qindex]);
+    high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+    if (curr > high)
+        atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+    high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+    if (mq->size > high)
+        atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
     if (IS_ZEBRA_DEBUG_RIB_DETAILED)
         rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %s mq size %u", (void *)rn,
@@ -3310,8 +3378,21 @@ static int early_label_meta_queue_add(struct meta_queue *mq, void *data)
 {
+    uint64_t curr, high;
+
     listnode_add(mq->subq[META_QUEUE_EARLY_LABEL], data);
     mq->size++;
+    atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+    atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EARLY_LABEL], 1, memory_order_relaxed);
+    curr = listcount(mq->subq[META_QUEUE_EARLY_LABEL]);
+    high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EARLY_LABEL], memory_order_relaxed);
+    if (curr > high)
+        atomic_store_explicit(&mq->max_subq[META_QUEUE_EARLY_LABEL], curr,
+                              memory_order_relaxed);
+    high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+    if (mq->size > high)
+        atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
+
     return 0;
 }
 
@@ -3320,6 +3401,7 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
     struct nhg_ctx *ctx = NULL;
     uint8_t qindex = META_QUEUE_NHG;
     struct wq_nhg_wrapper *w;
+    uint64_t curr, high;
 
     ctx = (struct nhg_ctx *)data;
 
@@ -3333,6 +3415,15 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
     listnode_add(mq->subq[qindex], w);
     mq->size++;
+    atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+    atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+    curr = listcount(mq->subq[qindex]);
+    high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+    if (curr > high)
+        atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+    high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+    if (mq->size > high)
+        atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
     if (IS_ZEBRA_DEBUG_RIB_DETAILED)
         zlog_debug("NHG Context id=%u queued into sub-queue %s mq size %u", ctx->id,
@@ -3347,6 +3438,7 @@ static int rib_meta_queue_nhg_process(struct meta_queue *mq, void *data,
     struct nhg_hash_entry *nhe = NULL;
     uint8_t qindex = META_QUEUE_NHG;
     struct wq_nhg_wrapper *w;
+    uint64_t curr, high;
 
     nhe = (struct nhg_hash_entry *)data;
 
@@ -3361,6 +3453,15 @@ static int rib_meta_queue_nhg_process(struct meta_queue *mq, void *data,
     listnode_add(mq->subq[qindex], w);
     mq->size++;
+    atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+    atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+    curr = listcount(mq->subq[qindex]);
+    high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+    if (curr > high)
+        atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+    high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+    if (mq->size > high)
+        atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
     if (IS_ZEBRA_DEBUG_RIB_DETAILED)
         zlog_debug("NHG id=%u queued into sub-queue %s mq size %u", nhe->id,
@@ -3381,8 +3482,19 @@ static int rib_meta_queue_nhg_del(struct meta_queue *mq, void *data)
 
 static int rib_meta_queue_evpn_add(struct meta_queue *mq, void *data)
 {
+    uint64_t curr, high;
+
     listnode_add(mq->subq[META_QUEUE_EVPN], data);
     mq->size++;
+    atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+    atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EVPN], 1, memory_order_relaxed);
+    curr = listcount(mq->subq[META_QUEUE_EVPN]);
+    high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EVPN], memory_order_relaxed);
+    if (curr > high)
+        atomic_store_explicit(&mq->max_subq[META_QUEUE_EVPN], curr, memory_order_relaxed);
+    high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+    if (mq->size > high)
+        atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
     return 0;
 }
 
@@ -4227,8 +4339,19 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
 
 static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
 {
+    uint64_t curr, high;
+
     listnode_add(mq->subq[META_QUEUE_GR_RUN], data);
     mq->size++;
+    atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+    atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_GR_RUN], 1, memory_order_relaxed);
+    curr = listcount(mq->subq[META_QUEUE_GR_RUN]);
+    high = atomic_load_explicit(&mq->max_subq[META_QUEUE_GR_RUN], memory_order_relaxed);
+    if (curr > high)
+        atomic_store_explicit(&mq->max_subq[META_QUEUE_GR_RUN], curr, memory_order_relaxed);
+    high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+    if (mq->size > high)
+        atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
     if (IS_ZEBRA_DEBUG_RIB_DETAILED)
         zlog_debug("Graceful Run adding mq size %u", zrouter.mq->size);
@@ -4239,9 +4362,20 @@ static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
 static int rib_meta_queue_early_route_add(struct meta_queue *mq, void *data)
 {
     struct zebra_early_route *ere = data;
+    uint64_t curr, high;
 
     listnode_add(mq->subq[META_QUEUE_EARLY_ROUTE], data);
     mq->size++;
+    atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+    atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EARLY_ROUTE], 1, memory_order_relaxed);
+    curr = listcount(mq->subq[META_QUEUE_EARLY_ROUTE]);
+    high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EARLY_ROUTE], memory_order_relaxed);
+    if (curr > high)
+        atomic_store_explicit(&mq->max_subq[META_QUEUE_EARLY_ROUTE], curr,
+                              memory_order_relaxed);
+    high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+    if (mq->size > high)
+        atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
     if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
         struct vrf *vrf = vrf_lookup_by_id(ere->re->vrf_id);
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 15bc2c20d2..9e4db11989 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -4047,6 +4047,20 @@ DEFUN (zebra_show_routing_tables_summary,
     return CMD_SUCCESS;
 }
 
+/* Display Zebra MetaQ counters */
+DEFUN (show_zebra_metaq_counters,
+       show_zebra_metaq_counters_cmd,
+       "show zebra metaq [json]",
+       SHOW_STR
+       ZEBRA_STR
+       "Zebra MetaQ counters\n"
+       JSON_STR)
+{
+    bool uj = use_json(argc, argv);
+
+    return zebra_show_metaq_counter(vty, uj);
+}
+
 /* IPForwarding configuration write function. */
 static int config_write_forwarding(struct vty *vty)
 {
@@ -4336,6 +4350,7 @@ void zebra_vty_init(void)
     install_element(VIEW_NODE, &show_dataplane_providers_cmd);
     install_element(CONFIG_NODE, &zebra_dplane_queue_limit_cmd);
     install_element(CONFIG_NODE, &no_zebra_dplane_queue_limit_cmd);
+    install_element(VIEW_NODE, &show_zebra_metaq_counters_cmd);
 
 #ifdef HAVE_NETLINK
     install_element(CONFIG_NODE, &zebra_kernel_netlink_batch_tx_buf_cmd);
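Every rib_meta_queue_*_add() hunk above repeats the same bookkeeping: bump the total counters with atomic_fetch_add_explicit(), then raise the per-subqueue and MetaQ high-water marks whenever the current depth exceeds the recorded maximum, all with relaxed memory ordering; the new "show zebra metaq [json]" command reads those counters back out of struct meta_queue. Below is a minimal, self-contained C11 sketch of that counter pattern; the names (struct queue_stats, stats_on_enqueue) are illustrative, not FRR's fields.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative queue statistics, modeled on the new meta_queue counters. */
struct queue_stats {
    unsigned int depth;             /* current queue length (owner thread only) */
    _Atomic unsigned int total;     /* total items ever enqueued */
    _Atomic unsigned int max_depth; /* high-water mark of the depth */
};

/* Called by the thread that owns the queue whenever an item is enqueued. */
static void stats_on_enqueue(struct queue_stats *st)
{
    unsigned int high;

    st->depth++;
    atomic_fetch_add_explicit(&st->total, 1, memory_order_relaxed);

    /* Load/compare/store is fine as long as only the owner thread writes
     * max_depth; other threads (e.g. a show command) only read it. */
    high = atomic_load_explicit(&st->max_depth, memory_order_relaxed);
    if (st->depth > high)
        atomic_store_explicit(&st->max_depth, st->depth, memory_order_relaxed);
}

static void stats_on_dequeue(struct queue_stats *st)
{
    st->depth--;
}

int main(void)
{
    struct queue_stats st = { 0 };

    for (int i = 0; i < 5; i++)
        stats_on_enqueue(&st);
    for (int i = 0; i < 3; i++)
        stats_on_dequeue(&st);

    printf("current %u, max %u, total %u\n", st.depth,
           atomic_load_explicit(&st.max_depth, memory_order_relaxed),
           atomic_load_explicit(&st.total, memory_order_relaxed));
    return 0;
}

Note that the load/compare/store update of the maximum is not atomic as a whole; it stays correct only when a single thread performs the enqueues, which matches how zebra drives the MetaQ from its main thread. With multiple concurrent writers, a compare-exchange loop would be needed to avoid losing a larger value.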
