Diffstat (limited to 'lib')
-rw-r--r--  lib/bfd.c               |  19
-rw-r--r--  lib/bfd.h               |   4
-rw-r--r--  lib/command.h           |   1
-rw-r--r--  lib/darr.h              |  20
-rw-r--r--  lib/event.c             | 119
-rw-r--r--  lib/frr_pthread.c       |   8
-rw-r--r--  lib/frrcu.c             |  15
-rw-r--r--  lib/frrevent.h          |  27
-rw-r--r--  lib/frrscript.c         |   2
-rw-r--r--  lib/if.c                | 289
-rw-r--r--  lib/if.h                |  10
-rw-r--r--  lib/libfrr.c            |  14
-rw-r--r--  lib/mgmt_be_client.c    | 391
-rw-r--r--  lib/mgmt_be_client.h    |  18
-rw-r--r--  lib/mgmt_msg_native.c   |   3
-rw-r--r--  lib/mgmt_msg_native.h   |  14
-rw-r--r--  lib/nexthop.c           |  87
-rw-r--r--  lib/nexthop.h           | 104
-rw-r--r--  lib/northbound.c        |  39
-rw-r--r--  lib/northbound.h        | 164
-rw-r--r--  lib/northbound_notif.c  | 707
-rw-r--r--  lib/northbound_oper.c   | 293
-rw-r--r--  lib/plist.c             |   8
-rw-r--r--  lib/privs.c             |  11
-rwxr-xr-x  lib/route_types.pl      |  15
-rw-r--r--  lib/route_types.txt     |   2
-rw-r--r--  lib/routemap.h          |   1
-rw-r--r--  lib/routemap_cli.c      |   4
-rw-r--r--  lib/sigevent.h          |  27
-rw-r--r--  lib/srcdest_table.c     |  10
-rw-r--r--  lib/srcdest_table.h     |   2
-rw-r--r--  lib/srv6.h              |  38
-rw-r--r--  lib/subdir.am           |   2
-rw-r--r--  lib/vrf.c               |  64
-rw-r--r--  lib/vrf.h               |   4
-rw-r--r--  lib/yang.c              | 165
-rw-r--r--  lib/yang.h              |  79
-rw-r--r--  lib/zclient.c           | 199
-rw-r--r--  lib/zclient.h           |  27
-rw-r--r--  lib/zlog_5424.c         |  12
40 files changed, 2521 insertions(+), 497 deletions(-)
diff --git a/lib/bfd.c b/lib/bfd.c
index bc4b1c5b51..6300f6f5c0 100644
--- a/lib/bfd.c
+++ b/lib/bfd.c
@@ -18,6 +18,7 @@
#include "table.h"
#include "vty.h"
#include "bfd.h"
+#include "bfdd/bfd.h"
DEFINE_MTYPE_STATIC(LIB, BFD_INFO, "BFD info");
DEFINE_MTYPE_STATIC(LIB, BFD_SOURCE, "BFD source cache");
@@ -140,14 +141,15 @@ static void bfd_source_cache_put(struct bfd_session_params *session);
* bfd_get_peer_info - Extract the Peer information for which the BFD session
* went down from the message sent from Zebra to clients.
*/
-static struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp,
- struct prefix *sp, int *status,
- int *remote_cbit, vrf_id_t vrf_id)
+static struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp, struct prefix *sp,
+ int *status, int *remote_cbit, vrf_id_t vrf_id,
+ char *bfd_name)
{
unsigned int ifindex;
struct interface *ifp = NULL;
int plen;
int local_remote_cbit;
+ uint8_t bfd_name_len = 0;
/*
* If the ifindex lookup fails the
@@ -194,6 +196,13 @@ static struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp,
STREAM_GETC(s, local_remote_cbit);
if (remote_cbit)
*remote_cbit = local_remote_cbit;
+
+ STREAM_GETC(s, bfd_name_len);
+ if (bfd_name_len) {
+ STREAM_GET(bfd_name, s, bfd_name_len);
+ *(bfd_name + bfd_name_len) = 0;
+ }
+
return ifp;
stream_failure:
@@ -918,6 +927,7 @@ int zclient_bfd_session_update(ZAPI_CALLBACK_ARGS)
struct prefix dp;
struct prefix sp;
char ifstr[128], cbitstr[32];
+ char bfd_name[BFD_NAME_SIZE + 1] = { 0 };
if (!zclient->bfd_integration)
return 0;
@@ -926,8 +936,7 @@ int zclient_bfd_session_update(ZAPI_CALLBACK_ARGS)
if (bsglobal.shutting_down)
return 0;
- ifp = bfd_get_peer_info(zclient->ibuf, &dp, &sp, &state, &remote_cbit,
- vrf_id);
+ ifp = bfd_get_peer_info(zclient->ibuf, &dp, &sp, &state, &remote_cbit, vrf_id, bfd_name);
/*
* When interface lookup fails or an invalid stream is read, we must
* not proceed otherwise it will trigger an assertion while checking
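The new trailing field is length-prefixed: a one-byte name length followed by that many name bytes, which the receiver NUL-terminates into a caller-supplied buffer of at least BFD_NAME_SIZE + 1 bytes. A minimal sketch of the matching encode side, assuming the standard lib/stream.h API (the zebra-side encoder is not part of this diff and bfd_put_session_name is a hypothetical helper):

	static void bfd_put_session_name(struct stream *s, const char *bfd_name)
	{
		/* length-prefixed name, at most BFD_NAME_SIZE bytes, no NUL on the wire */
		uint8_t len = bfd_name ? strnlen(bfd_name, BFD_NAME_SIZE) : 0;

		stream_putc(s, len);
		if (len)
			stream_put(s, bfd_name, len);
	}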
diff --git a/lib/bfd.h b/lib/bfd.h
index 99790f96a5..07d4c9781d 100644
--- a/lib/bfd.h
+++ b/lib/bfd.h
@@ -26,6 +26,8 @@ extern "C" {
#define BFD_PROFILE_NAME_LEN 64
+#define BFD_NAME_SIZE 255
+
const char *bfd_get_status_str(int status);
extern void bfd_client_sendmsg(struct zclient *zclient, int command,
@@ -409,6 +411,8 @@ struct bfd_session_arg {
uint32_t min_tx;
/** Detection multiplier. */
uint32_t detection_multiplier;
+ /* BFD session name */
+ char bfd_name[BFD_NAME_SIZE + 1];
};
/**
diff --git a/lib/command.h b/lib/command.h
index c60751789f..dfd732893b 100644
--- a/lib/command.h
+++ b/lib/command.h
@@ -154,6 +154,7 @@ enum node_type {
PCEP_PCE_NODE, /* PCE configuration node */
PCEP_PCC_NODE, /* PCC configuration node */
SRV6_NODE, /* SRv6 node */
+ SRV6_SIDS_NODE, /* SRv6 SIDs node */
SRV6_LOCS_NODE, /* SRv6 locators node */
SRV6_LOC_NODE, /* SRv6 locator node */
SRV6_ENCAP_NODE, /* SRv6 encapsulation node */
diff --git a/lib/darr.h b/lib/darr.h
index 121e3dd14e..084c2a103a 100644
--- a/lib/darr.h
+++ b/lib/darr.h
@@ -571,16 +571,16 @@ void *__darr_resize(void *a, uint count, size_t esize, struct memtype *mt);
* Return:
* The dynamic_array D with the new string content.
*/
-#define darr_in_strcat(D, S) \
- ({ \
- uint __dlen = darr_strlen(D); \
- uint __slen = strlen(S); \
- darr_ensure_cap_mt(D, __dlen + __slen + 1, MTYPE_DARR_STR); \
- if (darr_len(D) == 0) \
- *darr_append(D) = 0; \
- memcpy(darr_last(D), (S), __slen + 1); \
- _darr_len(D) += __slen; \
- D; \
+#define darr_in_strcat(D, S) \
+ ({ \
+ uint __dlen = darr_strlen(D); \
+ uint __slen = strlen(S); \
+ darr_ensure_cap_mt(D, __dlen + __slen + 1, MTYPE_DARR_STR); \
+ if (darr_len(D) == 0) \
+ *darr_append(D) = 0; \
+ memcpy(&(D)[darr_strlen(D)] /* darr_last(D) clangSA :( */, (S), __slen + 1); \
+ _darr_len(D) += __slen; \
+ D; \
})
/**
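The only functional change in this hunk is the memcpy destination: indexing with darr_strlen(D) instead of darr_last(D) to quiet a clang static-analyzer false positive; the string semantics are unchanged. For reference, a small usage sketch of the macro (darr strings are dynamic arrays that start out as NULL):

	char *s = NULL;			/* empty darr string */

	darr_in_strcat(s, "hello");
	darr_in_strcat(s, ", world");
	/* s now holds the NUL-terminated string "hello, world" */
	darr_free(s);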
diff --git a/lib/event.c b/lib/event.c
index cfe8c3adc0..8c09debf91 100644
--- a/lib/event.c
+++ b/lib/event.c
@@ -111,6 +111,11 @@ static struct cpu_event_history *cpu_records_get(struct event_loop *loop,
return res;
}
+static void cpu_records_clear(struct cpu_event_history *p)
+{
+ memset(p->_clear_begin, 0, p->_clear_end - p->_clear_begin);
+}
+
static void cpu_records_free(struct cpu_event_history **p)
{
XFREE(MTYPE_EVENT_STATS, *p);
@@ -250,20 +255,15 @@ static void cpu_record_clear(uint8_t filter)
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
frr_with_mutex (&m->mtx) {
struct cpu_event_history *item;
- struct cpu_records_head old[1];
-
- cpu_records_init(old);
- cpu_records_swap_all(old, m->cpu_records);
- while ((item = cpu_records_pop(old))) {
+ /* it isn't possible to free the memory here
+ * because some of these will be in use (e.g.
+ * the one we're currently running in!)
+ */
+ frr_each (cpu_records, m->cpu_records, item) {
if (item->types & filter)
- cpu_records_free(&item);
- else
- cpu_records_add(m->cpu_records,
- item);
+ cpu_records_clear(item);
}
-
- cpu_records_fini(old);
}
}
}
@@ -429,9 +429,6 @@ DEFUN_NOSH (show_event_poll,
return CMD_SUCCESS;
}
-#if CONFDATE > 20241231
-CPP_NOTICE("Remove `clear thread cpu` command")
-#endif
DEFUN (clear_event_cpu,
clear_event_cpu_cmd,
"clear event cpu [FILTER]",
@@ -457,14 +454,6 @@ DEFUN (clear_event_cpu,
return CMD_SUCCESS;
}
-ALIAS (clear_event_cpu,
- clear_thread_cpu_cmd,
- "clear thread cpu [FILTER]",
- "Clear stored data in all pthreads\n"
- "Thread information\n"
- "Thread CPU usage\n"
- "Display filter (rwtexb)\n")
-
static void show_event_timers_helper(struct vty *vty, struct event_loop *m)
{
const char *name = m->name ? m->name : "main";
@@ -504,7 +493,6 @@ void event_cmd_init(void)
{
install_element(VIEW_NODE, &show_event_cpu_cmd);
install_element(VIEW_NODE, &show_event_poll_cmd);
- install_element(ENABLE_NODE, &clear_thread_cpu_cmd);
install_element(ENABLE_NODE, &clear_event_cpu_cmd);
install_element(CONFIG_NODE, &service_cputime_stats_cmd);
@@ -583,6 +571,11 @@ struct event_loop *event_master_create(const char *name)
rv->spin = true;
rv->handle_signals = true;
+ /* tardy event warnings */
+ monotime(&rv->last_tardy_warning);
+ rv->last_tardy_warning.tv_sec -= (TARDY_WARNING_INTERVAL + TIMER_SECOND_MICRO - 1) /
+ TIMER_SECOND_MICRO;
+
/* Set pthread owner, should be updated by actual owner */
rv->owner = pthread_self();
rv->cancel_req = list_new();
@@ -782,13 +775,13 @@ static struct event *thread_get(struct event_loop *m, uint8_t type,
thread->master = m;
thread->arg = arg;
thread->yield = EVENT_YIELD_TIME_SLOT; /* default */
+ thread->tardy_threshold = 0;
/* thread->ref is zeroed either by XCALLOC above or by memset before
* being put on the "unuse" list by thread_add_unuse().
* Setting it here again makes coverity complain about a missing
* lock :(
*/
/* thread->ref = NULL; */
- thread->ignore_timer_late = false;
/*
* So if the passed in funcname is not what we have
@@ -1035,6 +1028,8 @@ static void _event_add_timer_timeval(const struct xref_eventsched *xref,
return;
thread = thread_get(m, EVENT_TIMER, func, arg, xref);
+ /* default lateness warning: 4s */
+ thread->tardy_threshold = TARDY_DEFAULT_THRESHOLD;
frr_with_mutex (&thread->mtx) {
thread->u.sands = t;
@@ -1697,34 +1692,12 @@ static void thread_process_io(struct event_loop *m, unsigned int num)
static unsigned int thread_process_timers(struct event_loop *m,
struct timeval *timenow)
{
- struct timeval prev = *timenow;
- bool displayed = false;
struct event *thread;
unsigned int ready = 0;
while ((thread = event_timer_list_first(&m->timer))) {
if (timercmp(timenow, &thread->u.sands, <))
break;
- prev = thread->u.sands;
- prev.tv_sec += 4;
- /*
- * If the timer would have popped 4 seconds in the
- * past then we are in a situation where we are
- * really getting behind on handling of events.
- * Let's log it and do the right thing with it.
- */
- if (timercmp(timenow, &prev, >)) {
- atomic_fetch_add_explicit(
- &thread->hist->total_starv_warn, 1,
- memory_order_seq_cst);
- if (!displayed && !thread->ignore_timer_late) {
- flog_warn(
- EC_LIB_STARVE_THREAD,
- "Thread Starvation: %pTHD was scheduled to pop greater than 4s ago",
- thread);
- displayed = true;
- }
- }
event_timer_list_pop(&m->timer);
thread->type = EVENT_READY;
@@ -1958,6 +1931,29 @@ void event_getrusage(RUSAGE_T *r)
#endif
}
+static void event_tardy_warn(struct event *thread, unsigned long since_us)
+{
+ char buf[64];
+ struct fbuf fb = { .buf = buf, .pos = buf, .len = sizeof(buf) };
+ double loadavg[3];
+ int rv;
+
+ rv = getloadavg(loadavg, array_size(loadavg));
+ if (rv < 0)
+ bprintfrr(&fb, "not available");
+ else {
+ for (int i = 0; i < rv; i++) {
+ bprintfrr(&fb, "%.2f", loadavg[i]);
+ if (i < rv - 1)
+ bputs(&fb, ", ");
+ }
+ }
+
+ flog_warn(EC_LIB_STARVE_THREAD,
+ "CPU starvation: %pTHD getting executed %lums late, warning threshold %lums. System load: %pFB",
+ thread, (since_us + 999) / 1000, (thread->tardy_threshold + 999) / 1000, &fb);
+}
+
/*
* Call a thread.
*
@@ -1974,6 +1970,33 @@ void event_call(struct event *thread)
RUSAGE_T before, after;
bool suppress_warnings = EVENT_ARG(thread);
+ if (thread->tardy_threshold) {
+ int64_t timer_late_us = monotime_since(&thread->u.sands, NULL);
+
+ /* Timers have a tardiness warning defaulting to 4s.
+ * It can be customized with event_set_tardy_threshold()
+ * (bfdd does that since the protocol has really short timers)
+ *
+ * If we are more than that threshold late, print a warning
+ * since we're running behind in calling timers (probably due
+ * to high system load.)
+ */
+ if (timer_late_us > (int64_t)thread->tardy_threshold) {
+ int64_t since_last_warning;
+ struct timeval *tw;
+
+ atomic_fetch_add_explicit(&thread->hist->total_starv_warn, 1,
+ memory_order_seq_cst);
+
+ tw = &thread->master->last_tardy_warning;
+ since_last_warning = monotime_since(tw, NULL);
+ if (since_last_warning > TARDY_WARNING_INTERVAL) {
+ event_tardy_warn(thread, timer_late_us);
+ monotime(tw);
+ }
+ }
+ }
+
/* if the thread being called is the CLI, it may change cputime_enabled
* ("service cputime-stats" command), which can result in nonsensical
* and very confusing warnings
@@ -2157,9 +2180,9 @@ static ssize_t printfrr_thread_dbg(struct fbuf *buf, struct printfrr_eargs *ea,
char info[16] = "";
if (!thread)
- return bputs(buf, "{(thread *)NULL}");
+ return bputs(buf, "{(event *)NULL}");
- rv += bprintfrr(buf, "{(thread *)%p arg=%p", thread, thread->arg);
+ rv += bprintfrr(buf, "{(event *)%p arg=%p", thread, thread->arg);
if (thread->type < array_size(types) && types[thread->type])
rv += bprintfrr(buf, " %-6s", types[thread->type]);
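The per-timer tardy threshold replaces the old hard-coded 4-second starvation check, and the warning itself is now rate-limited per event loop via last_tardy_warning. A sketch of how a daemon with short timers might lower the threshold right after scheduling, assuming the usual event_add_timer_msec()/ref pattern (the callback and argument names are placeholders):

	struct event *t_echo = NULL;

	event_add_timer_msec(master, echo_timeout_cb, session, 50, &t_echo);
	/* warn if this timer fires more than 20 ms late, instead of the 4 s default */
	if (t_echo)
		event_set_tardy_threshold(t_echo, 20 * 1000);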
diff --git a/lib/frr_pthread.c b/lib/frr_pthread.c
index 3a4bc712fc..0b4d7c77ae 100644
--- a/lib/frr_pthread.c
+++ b/lib/frr_pthread.c
@@ -20,6 +20,7 @@
#include "zlog.h"
#include "libfrr.h"
#include "libfrr_trace.h"
+#include "sigevent.h"
DEFINE_MTYPE_STATIC(LIB, FRR_PTHREAD, "FRR POSIX Thread");
DEFINE_MTYPE_STATIC(LIB, PTHREAD_PRIM, "POSIX sync primitives");
@@ -185,10 +186,9 @@ int frr_pthread_run(struct frr_pthread *fpt, const pthread_attr_t *attr)
assert(frr_is_after_fork || !"trying to start thread before fork()");
- /* Ensure we never handle signals on a background thread by blocking
- * everything here (new thread inherits signal mask)
- */
- sigfillset(&blocksigs);
+ sigemptyset(&blocksigs);
+ frr_sigset_add_mainonly(&blocksigs);
+ /* new thread inherits mask */
pthread_sigmask(SIG_BLOCK, &blocksigs, &oldsigs);
frrtrace(1, frr_libfrr, frr_pthread_run, fpt->name);
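Here and in frrcu.c below the mechanism is unchanged: block signals in the creating thread so the new pthread inherits the mask, then restore; what changes is that only the "main-only" signals are blocked, so crash-handling signals can still reach background threads. The underlying POSIX pattern, shown independently of FRR's frr_sigset_add_mainonly() helper (thread_fn, arg and tid are placeholders):

	sigset_t blocksigs, oldsigs;

	sigemptyset(&blocksigs);
	sigaddset(&blocksigs, SIGTERM);	/* example: signals only the main thread should run */
	sigaddset(&blocksigs, SIGHUP);

	pthread_sigmask(SIG_BLOCK, &blocksigs, &oldsigs);	/* new thread inherits this mask */
	pthread_create(&tid, NULL, thread_fn, arg);
	pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);		/* restore in the creator */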
diff --git a/lib/frrcu.c b/lib/frrcu.c
index b85c525c58..1e7ed99eff 100644
--- a/lib/frrcu.c
+++ b/lib/frrcu.c
@@ -42,6 +42,7 @@
#include "frrcu.h"
#include "seqlock.h"
#include "atomlist.h"
+#include "sigevent.h"
DEFINE_MTYPE_STATIC(LIB, RCU_THREAD, "RCU thread");
DEFINE_MTYPE_STATIC(LIB, RCU_NEXT, "RCU sequence barrier");
@@ -346,7 +347,19 @@ static void rcu_start(void)
*/
sigset_t oldsigs, blocksigs;
- sigfillset(&blocksigs);
+ /* technically, the RCU thread is very poorly suited to run even just a
+ * crashlog handler, since zlog_sigsafe() could deadlock on transiently
+ * invalid (due to RCU) logging data structures
+ *
+ * but given that when we try to write a crashlog, we're already in
+ * b0rked territory anyway - give the crashlog handler a chance.
+ *
+ * (also cf. the SIGALRM usage in writing crashlogs to avoid hung
+ * processes on any kind of deadlock in crash handlers)
+ */
+ sigemptyset(&blocksigs);
+ frr_sigset_add_mainonly(&blocksigs);
+ /* new thread inherits mask */
pthread_sigmask(SIG_BLOCK, &blocksigs, &oldsigs);
rcu_active = true;
diff --git a/lib/frrevent.h b/lib/frrevent.h
index 44776b29a7..61baf7919c 100644
--- a/lib/frrevent.h
+++ b/lib/frrevent.h
@@ -95,6 +95,7 @@ struct event_loop {
bool ready_run_loop;
RUSAGE_T last_getrusage;
+ struct timeval last_tardy_warning;
};
/* Event types. */
@@ -126,11 +127,17 @@ struct event {
struct timeval real;
struct cpu_event_history *hist; /* cache pointer to cpu_history */
unsigned long yield; /* yield time in microseconds */
+ /* lateness warning threshold, usec. 0 if it's not a timer. */
+ unsigned long tardy_threshold;
const struct xref_eventsched *xref; /* origin location */
pthread_mutex_t mtx; /* mutex for thread.c functions */
- bool ignore_timer_late;
};
+/* rate limit late timer warnings */
+#define TARDY_WARNING_INTERVAL (10 * TIMER_SECOND_MICRO)
+/* default threshold for late timer warning */
+#define TARDY_DEFAULT_THRESHOLD (4 * TIMER_SECOND_MICRO)
+
#ifdef _FRR_ATTRIBUTE_PRINTFRR
#pragma FRR printfrr_ext "%pTH"(struct event *)
#endif
@@ -139,6 +146,10 @@ struct cpu_event_history {
struct cpu_records_item item;
void (*func)(struct event *e);
+
+ /* fields between these two markers are zeroed on "clear event cpu" */
+ char _clear_begin[0];
+
atomic_size_t total_cpu_warn;
atomic_size_t total_wall_warn;
atomic_size_t total_starv_warn;
@@ -149,6 +160,10 @@ struct cpu_event_history {
} real;
struct time_stats cpu;
atomic_uint_fast32_t types;
+
+ /* end of cleared region */
+ char _clear_end[0];
+
const char *funcname;
};
@@ -297,9 +312,17 @@ static inline bool event_is_scheduled(struct event *thread)
/* Debug signal mask */
void debug_signals(const sigset_t *sigs);
+/* Getting called more than the given number of microseconds late prints a
+ * warning. Default if not set: 4s. Don't call this on non-timers.
+ */
+static inline void event_set_tardy_threshold(struct event *event, unsigned long thres)
+{
+ event->tardy_threshold = thres;
+}
+
static inline void event_ignore_late_timer(struct event *event)
{
- event->ignore_timer_late = true;
+ event->tardy_threshold = 0;
}
#ifdef __cplusplus
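The zero-length _clear_begin/_clear_end markers bound the set of counters that cpu_records_clear() wipes while the history entry itself stays live (func, funcname and the list item are preserved). An equivalent formulation of the same memset, shown only to make the technique explicit (cpu_records_clear_alt is a hypothetical helper, not in the patch):

	static void cpu_records_clear_alt(struct cpu_event_history *p)
	{
		size_t begin = offsetof(struct cpu_event_history, _clear_begin);
		size_t end = offsetof(struct cpu_event_history, _clear_end);

		/* zeroes everything from total_cpu_warn through types */
		memset((char *)p + begin, 0, end - begin);
	}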
diff --git a/lib/frrscript.c b/lib/frrscript.c
index 06460b014d..8b068ba61b 100644
--- a/lib/frrscript.c
+++ b/lib/frrscript.c
@@ -248,10 +248,12 @@ int _frrscript_call_lua(struct lua_function_state *lfs, int nargs)
zlog_err("Lua hook call '%s' : error handler error: %s",
lfs->name, lua_tostring(lfs->L, -1));
break;
+#if defined(LUA_VERSION_NUM) && LUA_VERSION_NUM <= 503
case LUA_ERRGCMM:
zlog_err("Lua hook call '%s' : garbage collector error: %s",
lfs->name, lua_tostring(lfs->L, -1));
break;
+#endif
default:
zlog_err("Lua hook call '%s' : unknown error: %s", lfs->name,
lua_tostring(lfs->L, -1));
diff --git a/lib/if.c b/lib/if.c
index 378ca16e49..68724a65e9 100644
--- a/lib/if.c
+++ b/lib/if.c
@@ -29,6 +29,10 @@
#include "admin_group.h"
#include "lib/if_clippy.c"
+
+/* Set by the owner (zebra). */
+bool if_notify_oper_changes;
+
DEFINE_MTYPE_STATIC(LIB, IF, "Interface");
DEFINE_MTYPE_STATIC(LIB, IFDESC, "Intf Desc");
DEFINE_MTYPE_STATIC(LIB, CONNECTED, "Connected");
@@ -208,6 +212,104 @@ void if_down_via_zapi(struct interface *ifp)
hook_call(if_down, ifp);
}
+void if_update_state_metric(struct interface *ifp, uint32_t metric)
+{
+ if (ifp->metric == metric)
+ return;
+ ifp->metric = metric;
+ if (ifp->state && if_notify_oper_changes)
+ nb_op_updatef(ifp->state, "metric", "%u", ifp->metric);
+}
+
+void if_update_state_mtu(struct interface *ifp, uint mtu)
+{
+ if (ifp->mtu == mtu)
+ return;
+ ifp->mtu = mtu;
+ if (ifp->state && if_notify_oper_changes)
+ nb_op_updatef(ifp->state, "mtu", "%u", ifp->mtu);
+}
+
+void if_update_state_mtu6(struct interface *ifp, uint mtu)
+{
+ if (ifp->mtu6 == mtu)
+ return;
+ ifp->mtu6 = mtu;
+ if (ifp->state && if_notify_oper_changes)
+ nb_op_updatef(ifp->state, "mtu6", "%u", ifp->mtu);
+}
+
+void if_update_state_hw_addr(struct interface *ifp, const uint8_t *hw_addr, uint len)
+{
+ if (len == (uint)ifp->hw_addr_len && (len == 0 || !memcmp(hw_addr, ifp->hw_addr, len)))
+ return;
+ memcpy(ifp->hw_addr, hw_addr, len);
+ ifp->hw_addr_len = len;
+ if (ifp->state && if_notify_oper_changes)
+ nb_op_updatef(ifp->state, "phy-address", "%pEA", (struct ethaddr *)ifp->hw_addr);
+}
+
+void if_update_state_speed(struct interface *ifp, uint32_t speed)
+{
+ if (ifp->speed == speed)
+ return;
+ ifp->speed = speed;
+ if (ifp->state && if_notify_oper_changes)
+ nb_op_updatef(ifp->state, "speed", "%u", ifp->speed);
+}
+
+void if_update_state(struct interface *ifp)
+{
+ struct lyd_node *state = ifp->state;
+
+ if (!state || !if_notify_oper_changes)
+ return;
+
+ /*
+ * Remove top level container update when we have patch support, for now
+ * this keeps us from generating 6 separate REPLACE messages though.
+ */
+ // nb_op_update(state, ".", NULL);
+ nb_op_updatef(state, "if-index", "%d", ifp->ifindex);
+ nb_op_updatef(state, "mtu", "%u", ifp->mtu);
+ nb_op_updatef(state, "mtu6", "%u", ifp->mtu);
+ nb_op_updatef(state, "speed", "%u", ifp->speed);
+ nb_op_updatef(state, "metric", "%u", ifp->metric);
+ nb_op_updatef(state, "phy-address", "%pEA", (struct ethaddr *)ifp->hw_addr);
+}
+
+static void if_update_state_remove(struct interface *ifp)
+{
+ if (!if_notify_oper_changes || ifp->name[0] == 0)
+ return;
+
+ if (vrf_is_backend_netns())
+ nb_op_update_delete_pathf(NULL, "/frr-interface:lib/interface[name=\"%s:%s\"]/state",
+ ifp->vrf->name, ifp->name);
+ else
+ nb_op_update_delete_pathf(NULL, "/frr-interface:lib/interface[name=\"%s\"]/state",
+ ifp->name);
+ if (ifp->state) {
+ lyd_free_all(ifp->state);
+ ifp->state = NULL;
+ }
+}
+
+static void if_update_state_add(struct interface *ifp)
+{
+ if (!if_notify_oper_changes || ifp->name[0] == 0)
+ return;
+
+ if (vrf_is_backend_netns())
+ ifp->state = nb_op_update_pathf(NULL,
+ "/frr-interface:lib/interface[name=\"%s:%s\"]/state",
+ NULL, ifp->vrf->name, ifp->name);
+ else
+ ifp->state = nb_op_update_pathf(NULL,
+ "/frr-interface:lib/interface[name=\"%s\"]/state",
+ NULL, ifp->name);
+}
+
static struct interface *if_create_name(const char *name, struct vrf *vrf)
{
struct interface *ifp;
@@ -216,7 +318,11 @@ static struct interface *if_create_name(const char *name, struct vrf *vrf)
if_set_name(ifp, name);
+ if (if_notify_oper_changes && ifp->state)
+ if_update_state(ifp);
+
hook_call(if_add, ifp);
+
return ifp;
}
@@ -228,8 +334,10 @@ void if_update_to_new_vrf(struct interface *ifp, vrf_id_t vrf_id)
/* remove interface from old master vrf list */
old_vrf = ifp->vrf;
- if (ifp->name[0] != '\0')
+ if (ifp->name[0] != '\0') {
IFNAME_RB_REMOVE(old_vrf, ifp);
+ if_update_state_remove(ifp);
+ }
if (ifp->ifindex != IFINDEX_INTERNAL)
IFINDEX_RB_REMOVE(old_vrf, ifp);
@@ -237,8 +345,11 @@ void if_update_to_new_vrf(struct interface *ifp, vrf_id_t vrf_id)
vrf = vrf_get(vrf_id, NULL);
ifp->vrf = vrf;
- if (ifp->name[0] != '\0')
+ if (ifp->name[0] != '\0') {
IFNAME_RB_INSERT(vrf, ifp);
+ if_update_state_add(ifp);
+ if_update_state(ifp);
+ }
if (ifp->ifindex != IFINDEX_INTERNAL)
IFINDEX_RB_INSERT(vrf, ifp);
@@ -280,6 +391,8 @@ void if_delete(struct interface **ifp)
XFREE(MTYPE_IFDESC, ptr->desc);
+ if_update_state_remove(ptr);
+
XFREE(MTYPE_IF, ptr);
*ifp = NULL;
}
@@ -303,7 +416,6 @@ static struct interface *if_lookup_by_ifindex(ifindex_t ifindex,
struct interface *if_lookup_by_index(ifindex_t ifindex, vrf_id_t vrf_id)
{
switch (vrf_get_backend()) {
- case VRF_BACKEND_UNKNOWN:
case VRF_BACKEND_NETNS:
return(if_lookup_by_ifindex(ifindex, vrf_id));
case VRF_BACKEND_VRF_LITE:
@@ -573,7 +685,6 @@ struct interface *if_get_by_name(const char *name, vrf_id_t vrf_id,
struct vrf *vrf;
switch (vrf_get_backend()) {
- case VRF_BACKEND_UNKNOWN:
case VRF_BACKEND_NETNS:
vrf = vrf_get(vrf_id, vrf_name);
assert(vrf);
@@ -630,6 +741,9 @@ int if_set_index(struct interface *ifp, ifindex_t ifindex)
ifp->ifindex = ifindex;
+ if (if_notify_oper_changes)
+ nb_op_updatef(ifp->state, "if-index", "%d", ifp->ifindex);
+
if (ifp->ifindex != IFINDEX_INTERNAL) {
/*
* This should never happen, since we checked if there was
@@ -648,13 +762,17 @@ static void if_set_name(struct interface *ifp, const char *name)
if (if_cmp_name_func(ifp->name, name) == 0)
return;
- if (ifp->name[0] != '\0')
+ if (ifp->name[0] != '\0') {
IFNAME_RB_REMOVE(ifp->vrf, ifp);
+ if_update_state_remove(ifp);
+ }
strlcpy(ifp->name, name, sizeof(ifp->name));
- if (ifp->name[0] != '\0')
+ if (ifp->name[0] != '\0') {
IFNAME_RB_INSERT(ifp->vrf, ifp);
+ if_update_state_add(ifp);
+ }
}
/* Does interface up ? */
@@ -858,47 +976,6 @@ struct nbr_connected *nbr_connected_check(struct interface *ifp,
return NULL;
}
-/* Print if_addr structure. */
-static void __attribute__((unused))
-connected_log(struct connected *connected, char *str)
-{
- struct prefix *p;
- struct interface *ifp;
- char logbuf[BUFSIZ];
- char buf[BUFSIZ];
-
- ifp = connected->ifp;
- p = connected->address;
-
- snprintf(logbuf, sizeof(logbuf), "%s interface %s vrf %s(%u) %s %pFX ",
- str, ifp->name, ifp->vrf->name, ifp->vrf->vrf_id,
- prefix_family_str(p), p);
-
- p = connected->destination;
- if (p) {
- strlcat(logbuf, inet_ntop(p->family, &p->u.prefix, buf, BUFSIZ),
- BUFSIZ);
- }
- zlog_info("%s", logbuf);
-}
-
-/* Print if_addr structure. */
-static void __attribute__((unused))
-nbr_connected_log(struct nbr_connected *connected, char *str)
-{
- struct prefix *p;
- struct interface *ifp;
- char logbuf[BUFSIZ];
-
- ifp = connected->ifp;
- p = connected->address;
-
- snprintf(logbuf, sizeof(logbuf), "%s interface %s %s %pFX ", str,
- ifp->name, prefix_family_str(p), p);
-
- zlog_info("%s", logbuf);
-}
-
/* count the number of connected addresses that are in the given family */
unsigned int connected_count_by_family(struct interface *ifp, int family)
{
@@ -1649,90 +1726,90 @@ static int lib_interface_description_destroy(struct nb_cb_destroy_args *args)
return NB_OK;
}
-/*
- * XPath: /frr-interface:lib/interface/vrf
- */
-static struct yang_data *
-lib_interface_vrf_get_elem(struct nb_cb_get_elem_args *args)
+static enum nb_error __return_ok(const struct nb_node *nb_node, const void *list_entry,
+ struct lyd_node *parent)
{
- const struct interface *ifp = args->list_entry;
-
- return yang_data_new_string(args->xpath, ifp->vrf->name);
+ return NB_OK;
}
/*
- * XPath: /frr-interface:lib/interface/state/if-index
+ * XPath: /frr-interface:lib/interface/vrf
*/
-static struct yang_data *
-lib_interface_state_if_index_get_elem(struct nb_cb_get_elem_args *args)
+static enum nb_error lib_interface_vrf_get(const struct nb_node *nb_node, const void *list_entry,
+ struct lyd_node *parent)
{
- const struct interface *ifp = args->list_entry;
+ const struct lysc_node *snode = nb_node->snode;
+ const struct interface *ifp = list_entry;
- return yang_data_new_int32(args->xpath, ifp->ifindex);
+ if (lyd_new_term(parent, snode->module, snode->name, ifp->vrf->name, LYD_NEW_PATH_UPDATE,
+ NULL))
+ return NB_ERR_RESOURCE;
+ return NB_OK;
}
/*
- * XPath: /frr-interface:lib/interface/state/mtu
+ * XPath: /frr-interface:lib/interface/state/if-index
*/
-static struct yang_data *
-lib_interface_state_mtu_get_elem(struct nb_cb_get_elem_args *args)
+static enum nb_error lib_interface_state_if_index_get(const struct nb_node *nb_node,
+ const void *list_entry,
+ struct lyd_node *parent)
{
- const struct interface *ifp = args->list_entry;
+ const struct lysc_node *snode = nb_node->snode;
+ const struct interface *ifp = list_entry;
+ int32_t value = ifp->ifindex;
- return yang_data_new_uint32(args->xpath, ifp->mtu);
+ if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value),
+ LYD_NEW_PATH_UPDATE, NULL))
+ return NB_ERR_RESOURCE;
+ return NB_OK;
}
/*
- * XPath: /frr-interface:lib/interface/state/mtu6
+ * XPath: /frr-interface:lib/interface/state/mtu[6]
*/
-static struct yang_data *
-lib_interface_state_mtu6_get_elem(struct nb_cb_get_elem_args *args)
+static enum nb_error lib_interface_state_mtu_get(const struct nb_node *nb_node,
+ const void *list_entry, struct lyd_node *parent)
{
- const struct interface *ifp = args->list_entry;
+ const struct lysc_node *snode = nb_node->snode;
+ const struct interface *ifp = list_entry;
+ uint32_t value = ifp->mtu;
- return yang_data_new_uint32(args->xpath, ifp->mtu6);
+ if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value),
+ LYD_NEW_PATH_UPDATE, NULL))
+ return NB_ERR_RESOURCE;
+ return NB_OK;
}
/*
* XPath: /frr-interface:lib/interface/state/speed
*/
-static struct yang_data *
-lib_interface_state_speed_get_elem(struct nb_cb_get_elem_args *args)
+static enum nb_error lib_interface_state_speed_get(const struct nb_node *nb_node,
+ const void *list_entry, struct lyd_node *parent)
{
- const struct interface *ifp = args->list_entry;
+ const struct lysc_node *snode = nb_node->snode;
+ const struct interface *ifp = list_entry;
+ uint32_t value = ifp->speed;
- return yang_data_new_uint32(args->xpath, ifp->speed);
+ if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value),
+ LYD_NEW_PATH_UPDATE, NULL))
+ return NB_ERR_RESOURCE;
+ return NB_OK;
}
/*
* XPath: /frr-interface:lib/interface/state/metric
*/
-static struct yang_data *
-lib_interface_state_metric_get_elem(struct nb_cb_get_elem_args *args)
+static enum nb_error lib_interface_state_metric_get(const struct nb_node *nb_node,
+ const void *list_entry, struct lyd_node *parent)
{
- const struct interface *ifp = args->list_entry;
-
- return yang_data_new_uint32(args->xpath, ifp->metric);
-}
+ const struct lysc_node *snode = nb_node->snode;
+ const struct interface *ifp = list_entry;
+ uint32_t value = ifp->metric;
-/*
- * XPath: /frr-interface:lib/interface/state/flags
- */
-static struct yang_data *
-lib_interface_state_flags_get_elem(struct nb_cb_get_elem_args *args)
-{
- /* TODO: implement me. */
- return NULL;
-}
-
-/*
- * XPath: /frr-interface:lib/interface/state/type
- */
-static struct yang_data *
-lib_interface_state_type_get_elem(struct nb_cb_get_elem_args *args)
-{
- /* TODO: implement me. */
- return NULL;
+ if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value),
+ LYD_NEW_PATH_UPDATE, NULL))
+ return NB_ERR_RESOURCE;
+ return NB_OK;
}
/*
@@ -1779,49 +1856,49 @@ const struct frr_yang_module_info frr_interface_info = {
{
.xpath = "/frr-interface:lib/interface/vrf",
.cbs = {
- .get_elem = lib_interface_vrf_get_elem,
+ .get = lib_interface_vrf_get,
}
},
{
.xpath = "/frr-interface:lib/interface/state/if-index",
.cbs = {
- .get_elem = lib_interface_state_if_index_get_elem,
+ .get = lib_interface_state_if_index_get,
}
},
{
.xpath = "/frr-interface:lib/interface/state/mtu",
.cbs = {
- .get_elem = lib_interface_state_mtu_get_elem,
+ .get = lib_interface_state_mtu_get,
}
},
{
.xpath = "/frr-interface:lib/interface/state/mtu6",
.cbs = {
- .get_elem = lib_interface_state_mtu6_get_elem,
+ .get = lib_interface_state_mtu_get,
}
},
{
.xpath = "/frr-interface:lib/interface/state/speed",
.cbs = {
- .get_elem = lib_interface_state_speed_get_elem,
+ .get = lib_interface_state_speed_get,
}
},
{
.xpath = "/frr-interface:lib/interface/state/metric",
.cbs = {
- .get_elem = lib_interface_state_metric_get_elem,
+ .get = lib_interface_state_metric_get,
}
},
{
.xpath = "/frr-interface:lib/interface/state/flags",
.cbs = {
- .get_elem = lib_interface_state_flags_get_elem,
+ .get = __return_ok,
}
},
{
.xpath = "/frr-interface:lib/interface/state/type",
.cbs = {
- .get_elem = lib_interface_state_type_get_elem,
+ .get = __return_ok,
}
},
{
diff --git a/lib/if.h b/lib/if.h
index c2ec73378d..1e52020b64 100644
--- a/lib/if.h
+++ b/lib/if.h
@@ -297,6 +297,8 @@ struct interface {
struct vrf *vrf;
+ struct lyd_node *state;
+
/*
* Has the end users entered `interface XXXX` from the cli in some
* fashion?
@@ -633,6 +635,14 @@ extern void if_up_via_zapi(struct interface *ifp);
extern void if_down_via_zapi(struct interface *ifp);
extern void if_destroy_via_zapi(struct interface *ifp);
+extern void if_update_state(struct interface *ifp);
+extern void if_update_state_metric(struct interface *ifp, uint32_t metric);
+extern void if_update_state_mtu(struct interface *ifp, uint mtu);
+extern void if_update_state_mtu6(struct interface *ifp, uint mtu);
+extern void if_update_state_hw_addr(struct interface *ifp, const uint8_t *hw_addr, uint len);
+extern void if_update_state_speed(struct interface *ifp, uint32_t speed);
+
+extern bool if_notify_oper_changes;
extern const struct frr_yang_module_info frr_interface_info;
extern const struct frr_yang_module_info frr_interface_cli_info;
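These helpers only push operational-state updates when the owning daemon (zebra) has set if_notify_oper_changes and the interface has a state tree attached; otherwise they just update the struct interface field. A hedged sketch of the intended call pattern on a kernel interface event (the values are illustrative):

	/* once, in zebra's initialization */
	if_notify_oper_changes = true;

	/* when the dataplane reports changed attributes for ifp */
	if_update_state_mtu(ifp, 9000);
	if_update_state_metric(ifp, 100);
	if_update_state_speed(ifp, 10000);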
diff --git a/lib/libfrr.c b/lib/libfrr.c
index d1a9f0b1cb..261d3aa87e 100644
--- a/lib/libfrr.c
+++ b/lib/libfrr.c
@@ -108,6 +108,9 @@ static const struct option lo_always[] = {
{ "module", no_argument, NULL, 'M' },
{ "profile", required_argument, NULL, 'F' },
{ "pathspace", required_argument, NULL, 'N' },
+#ifdef HAVE_NETLINK
+ { "vrfwnetns", no_argument, NULL, 'w' },
+#endif
{ "vrfdefaultname", required_argument, NULL, 'o' },
{ "graceful_restart", optional_argument, NULL, 'K' },
{ "vty_socket", required_argument, NULL, OPTION_VTYSOCK },
@@ -120,6 +123,9 @@ static const struct option lo_always[] = {
{ NULL }
};
static const struct optspec os_always = {
+#ifdef HAVE_NETLINK
+ "w"
+#endif
"hvdM:F:N:o:K::",
" -h, --help Display this help and exit\n"
" -v, --version Print program version\n"
@@ -127,6 +133,9 @@ static const struct optspec os_always = {
" -M, --module Load specified module\n"
" -F, --profile Use specified configuration profile\n"
" -N, --pathspace Insert prefix into config & socket paths\n"
+#ifdef HAVE_NETLINK
+ " -w, --vrfwnetns Use network namespaces for VRFs\n"
+#endif
" -o, --vrfdefaultname Set default VRF name.\n"
" -K, --graceful_restart FRR starting in Graceful Restart mode, with optional route-cleanup timer\n"
" --vty_socket Override vty socket path\n"
@@ -516,6 +525,11 @@ static int frr_opt(int opt)
snprintf(frr_zclientpath, sizeof(frr_zclientpath),
ZAPI_SOCK_NAME);
break;
+#ifdef HAVE_NETLINK
+ case 'w':
+ vrf_configure_backend(VRF_BACKEND_NETNS);
+ break;
+#endif
case 'o':
vrf_set_default_name(optarg);
break;
diff --git a/lib/mgmt_be_client.c b/lib/mgmt_be_client.c
index f03006ad0e..806242ed53 100644
--- a/lib/mgmt_be_client.c
+++ b/lib/mgmt_be_client.c
@@ -99,12 +99,12 @@ struct mgmt_be_client {
struct nb_config *candidate_config;
struct nb_config *running_config;
- unsigned long num_edit_nb_cfg;
- unsigned long avg_edit_nb_cfg_tm;
- unsigned long num_prep_nb_cfg;
- unsigned long avg_prep_nb_cfg_tm;
- unsigned long num_apply_nb_cfg;
- unsigned long avg_apply_nb_cfg_tm;
+ uint64_t num_edit_nb_cfg;
+ uint64_t avg_edit_nb_cfg_tm;
+ uint64_t num_prep_nb_cfg;
+ uint64_t avg_prep_nb_cfg_tm;
+ uint64_t num_apply_nb_cfg;
+ uint64_t avg_apply_nb_cfg_tm;
struct mgmt_be_txns_head txn_head;
@@ -117,7 +117,7 @@ struct mgmt_be_client {
struct debug mgmt_dbg_be_client = {
.conf = "debug mgmt client backend",
- .desc = "Management backend client operations"
+ .desc = "Management backend client operations",
};
/* NOTE: only one client per proc for now. */
@@ -312,49 +312,102 @@ static int be_client_send_error(struct mgmt_be_client *client, uint64_t txn_id,
return ret;
}
-static int mgmt_be_send_notification(void *__be_client, const char *xpath,
- const struct lyd_node *tree)
+static int __send_notification(struct mgmt_be_client *client, const char *xpath,
+ const struct lyd_node *tree, uint8_t op)
{
- struct mgmt_be_client *client = __be_client;
struct mgmt_msg_notify_data *msg = NULL;
+ // LYD_FORMAT format = LYD_LYB;
LYD_FORMAT format = LYD_JSON;
uint8_t **darrp;
LY_ERR err;
int ret = 0;
- assert(tree);
-
- debug_be_client("%s: sending YANG notification: %s", __func__,
- tree->schema->name);
+ assert(op != NOTIFY_OP_NOTIFICATION || xpath || tree);
+ debug_be_client("%s: sending %sYANG %snotification: %s", __func__,
+ op == NOTIFY_OP_DS_DELETE ? "delete "
+ : op == NOTIFY_OP_DS_REPLACE ? "replace "
+ : op == NOTIFY_OP_DS_PATCH ? "patch "
+ : "",
+ op == NOTIFY_OP_NOTIFICATION ? "" : "DS ", xpath ?: tree->schema->name);
/*
* Allocate a message and append the data to it using `format`
*/
- msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_notify_data, 0,
- MTYPE_MSG_NATIVE_NOTIFY);
+ msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_notify_data, 0, MTYPE_MSG_NATIVE_NOTIFY);
msg->code = MGMT_MSG_CODE_NOTIFY;
msg->result_type = format;
+ msg->op = op;
mgmt_msg_native_xpath_encode(msg, xpath);
- darrp = mgmt_msg_native_get_darrp(msg);
- err = yang_print_tree_append(darrp, tree, format,
- (LYD_PRINT_SHRINK | LYD_PRINT_WD_EXPLICIT |
- LYD_PRINT_WITHSIBLINGS));
- if (err) {
- flog_err(EC_LIB_LIBYANG,
- "%s: error creating notification data: %s", __func__,
- ly_strerrcode(err));
- ret = 1;
- goto done;
+ if (tree) {
+ darrp = mgmt_msg_native_get_darrp(msg);
+ err = yang_print_tree_append(darrp, tree, format,
+ (LYD_PRINT_SHRINK | LYD_PRINT_WD_EXPLICIT |
+ LYD_PRINT_WITHSIBLINGS));
+ if (err) {
+ flog_err(EC_LIB_LIBYANG, "%s: error creating notification data: %s",
+ __func__, ly_strerrcode(err));
+ ret = 1;
+ goto done;
+ }
}
- (void)be_client_send_native_msg(client, msg,
- mgmt_msg_native_get_msg_len(msg), false);
+ ret = be_client_send_native_msg(client, msg, mgmt_msg_native_get_msg_len(msg), false);
done:
mgmt_msg_native_free_msg(msg);
return ret;
}
+/**
+ * mgmt_be_send_ds_delete_notification() - Send DS notification to mgmtd
+ */
+int mgmt_be_send_ds_delete_notification(const char *path)
+{
+ if (!__be_client) {
+ debug_be_client("%s: No mgmtd connection for DS delete notification: %s", __func__,
+ path);
+ return 1;
+ }
+ return __send_notification(__be_client, path, NULL, NOTIFY_OP_DS_DELETE);
+}
+
+/**
+ * mgmt_be_send_ds_patch_notification() - Send a YANG patch DS notification to mgmtd
+ */
+int mgmt_be_send_ds_patch_notification(const char *path, const struct lyd_node *patch)
+{
+ if (!__be_client) {
+ debug_be_client("%s: No mgmtd connection for DS delete notification: %s", __func__,
+ path);
+ return 1;
+ }
+ return __send_notification(__be_client, path, patch, NOTIFY_OP_DS_PATCH);
+}
+
+/**
+ * mgmt_be_send_ds_replace_notification() - Send a replace DS notification to mgmtd
+ */
+int mgmt_be_send_ds_replace_notification(const char *path, const struct lyd_node *tree)
+{
+ if (!__be_client) {
+ debug_be_client("%s: No mgmtd connection for DS delete notification: %s", __func__,
+ path);
+ return 1;
+ }
+ return __send_notification(__be_client, path, tree, NOTIFY_OP_DS_REPLACE);
+}
+
+/**
+ * mgmt_be_send_notification() - Send notification to mgmtd
+ *
+ * This function is attached to the northbound notification hook.
+ */
+static int mgmt_be_send_notification(void *__client, const char *path, const struct lyd_node *tree)
+{
+ __send_notification(__client, path, tree, NOTIFY_OP_NOTIFICATION);
+ return 0;
+}
+
static int mgmt_be_send_txn_reply(struct mgmt_be_client *client_ctx,
uint64_t txn_id, bool create)
{
@@ -568,7 +621,7 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
mgmt_be_send_cfgdata_create_reply(client_ctx, txn->txn_id,
error ? false : true, error ? err_buf : NULL);
- debug_be_client("Avg-nb-edit-duration %lu uSec, nb-prep-duration %lu (avg: %lu) uSec, batch size %u",
+ debug_be_client("Avg-nb-edit-duration %Lu uSec, nb-prep-duration %lu (avg: %Lu) uSec, batch size %u",
client_ctx->avg_edit_nb_cfg_tm, prep_nb_cfg_tm,
client_ctx->avg_prep_nb_cfg_tm, (uint32_t)num_processed);
@@ -717,10 +770,9 @@ static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn)
gettimeofday(&apply_nb_cfg_end, NULL);
apply_nb_cfg_tm = timeval_elapsed(apply_nb_cfg_end, apply_nb_cfg_start);
- client_ctx->avg_apply_nb_cfg_tm = ((client_ctx->avg_apply_nb_cfg_tm *
- client_ctx->num_apply_nb_cfg) +
- apply_nb_cfg_tm) /
- (client_ctx->num_apply_nb_cfg + 1);
+ client_ctx->avg_apply_nb_cfg_tm =
+ ((client_ctx->avg_apply_nb_cfg_tm * client_ctx->num_apply_nb_cfg) + apply_nb_cfg_tm) /
+ (client_ctx->num_apply_nb_cfg + 1);
client_ctx->num_apply_nb_cfg++;
txn->nb_txn = NULL;
@@ -736,8 +788,8 @@ static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn)
mgmt_be_send_apply_reply(client_ctx, txn->txn_id, true, NULL);
- debug_be_client("Nb-apply-duration %lu (avg: %lu) uSec",
- apply_nb_cfg_tm, client_ctx->avg_apply_nb_cfg_tm);
+ debug_be_client("Nb-apply-duration %lu (avg: %Lu) uSec", apply_nb_cfg_tm,
+ client_ctx->avg_apply_nb_cfg_tm);
return 0;
}
@@ -854,8 +906,15 @@ static enum nb_error be_client_send_tree_data_batch(const struct lyd_node *tree,
more = true;
ret = NB_OK;
}
- if (ret != NB_OK)
+ if (ret != NB_OK) {
+ if (be_client_send_error(client, args->txn_id, args->req_id, false, -EINVAL,
+ "BE client %s txn-id %Lu error fetching oper state %d",
+ client->name, args->txn_id, ret))
+ ret = NB_ERR;
+ else
+ ret = NB_OK;
goto done;
+ }
tree_msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_tree_data, 0,
MTYPE_MSG_NATIVE_TREE_DATA);
@@ -870,20 +929,15 @@ static enum nb_error be_client_send_tree_data_batch(const struct lyd_node *tree,
(LYD_PRINT_SHRINK | LYD_PRINT_WD_EXPLICIT |
LYD_PRINT_WITHSIBLINGS));
if (err) {
- ret = NB_ERR;
- goto done;
+ mgmt_msg_native_free_msg(tree_msg);
+ /* We will be called again to send the error */
+ return NB_ERR;
}
(void)be_client_send_native_msg(client, tree_msg,
mgmt_msg_native_get_msg_len(tree_msg),
false);
-done:
mgmt_msg_native_free_msg(tree_msg);
- if (ret)
- be_client_send_error(client, args->txn_id, args->req_id, false,
- -EINVAL,
- "BE client %s txn-id %" PRIu64
- " error fetching oper state %d",
- client->name, args->txn_id, ret);
+done:
if (ret != NB_OK || !more)
XFREE(MTYPE_MGMTD_BE_GT_CB_ARGS, args);
return ret;
@@ -1060,19 +1114,24 @@ static void be_client_handle_notify(struct mgmt_be_client *client, void *msgbuf,
size_t msg_len)
{
struct mgmt_msg_notify_data *notif_msg = msgbuf;
- struct nb_node *nb_node;
- struct lyd_node *dnode;
+ struct nb_node *nb_node, *nb_parent;
+ struct lyd_node *dnode = NULL;
const char *data = NULL;
const char *notif;
- LY_ERR err;
+ bool is_yang_notify;
+ LY_ERR err = LY_SUCCESS;
debug_be_client("Received notification for client %s", client->name);
notif = mgmt_msg_native_xpath_data_decode(notif_msg, msg_len, data);
- if (!notif || !data) {
+ if (!notif) {
log_err_be_client("Corrupt notify msg");
return;
}
+ if (!data && (notif_msg->op == NOTIFY_OP_DS_REPLACE || notif_msg->op == NOTIFY_OP_DS_PATCH)) {
+ log_err_be_client("Corrupt replace/patch notify msg: missing data");
+ return;
+ }
nb_node = nb_node_find(notif);
if (!nb_node) {
@@ -1080,25 +1139,62 @@ static void be_client_handle_notify(struct mgmt_be_client *client, void *msgbuf,
return;
}
- if (!nb_node->cbs.notify) {
+ is_yang_notify = !!CHECK_FLAG(nb_node->snode->nodetype, LYS_NOTIF);
+
+ if (is_yang_notify && !nb_node->cbs.notify) {
debug_be_client("No notification callback for: %s", notif);
return;
}
- err = yang_parse_notification(notif, notif_msg->result_type, data,
+ if (!nb_node->cbs.notify) {
+ /*
+ * See if a parent has a callback, this is so backend's can
+ * listen for changes on an entire datastore sub-tree.
+ */
+ for (nb_parent = nb_node->parent; nb_parent; nb_parent = nb_parent->parent)
+ if (nb_parent->cbs.notify)
+ break;
+ if (!nb_parent) {
+ debug_be_client("Including parents, no DS notification callback for: %s",
+ notif);
+ return;
+ }
+ nb_node = nb_parent;
+ }
+
+ if (data && is_yang_notify) {
+ err = yang_parse_notification(notif, notif_msg->result_type, data, &dnode);
+ } else if (data) {
+ err = yang_parse_data(notif, notif_msg->result_type, false, true, false, data,
&dnode);
+ }
if (err) {
- log_err_be_client("Can't parse notification data for: %s",
- notif);
+ log_err_be_client("Can't parse notification data for: %s", notif);
return;
}
- nb_callback_notify(nb_node, notif, dnode);
+ nb_callback_notify(nb_node, notif_msg->op, notif, dnode);
lyd_free_all(dnode);
}
/*
+ * Process a notify select msg
+ */
+static void be_client_handle_notify_select(struct mgmt_be_client *client, void *msgbuf,
+ size_t msg_len)
+{
+ struct mgmt_msg_notify_select *msg = msgbuf;
+ const char **selectors = NULL;
+
+ debug_be_client("Received notify-select for client %s", client->name);
+
+ if (msg_len >= sizeof(*msg))
+ selectors = mgmt_msg_native_strings_decode(msg, msg_len, msg->selectors);
+ nb_notif_set_filters(selectors, msg->replace);
+}
+
+/*
* Handle a native encoded message
*
* We don't create transactions with native messaging.
@@ -1119,6 +1215,9 @@ static void be_client_handle_native_msg(struct mgmt_be_client *client,
case MGMT_MSG_CODE_NOTIFY:
be_client_handle_notify(client, msg, msg_len);
break;
+ case MGMT_MSG_CODE_NOTIFY_SELECT:
+ be_client_handle_notify_select(client, msg, msg_len);
+ break;
default:
log_err_be_client("unknown native message txn-id %" PRIu64
" req-id %" PRIu64 " code %u to client %s",
@@ -1259,6 +1358,190 @@ DEFPY(debug_mgmt_client_be, debug_mgmt_client_be_cmd,
return CMD_SUCCESS;
}
+/*
+ * XPath: /frr-backend:clients/client
+ *
+ * We only implement a list of one entry (for the this backend client) the
+ * results will be merged inside mgmtd.
+ */
+static const void *clients_client_get_next(struct nb_cb_get_next_args *args)
+{
+ if (args->list_entry == NULL)
+ return __be_client;
+ return NULL;
+}
+
+static int clients_client_get_keys(struct nb_cb_get_keys_args *args)
+{
+ args->keys->num = 1;
+ strlcpy(args->keys->key[0], __be_client->name, sizeof(args->keys->key[0]));
+
+ return NB_OK;
+}
+
+static const void *clients_client_lookup_entry(struct nb_cb_lookup_entry_args *args)
+{
+ const char *name = args->keys->key[0];
+
+ if (!strcmp(name, __be_client->name))
+ return __be_client;
+
+ return NULL;
+}
+
+/*
+ * XPath: /frr-backend:clients/client/name
+ */
+static enum nb_error clients_client_name_get(const struct nb_node *nb_node,
+ const void *parent_list_entry, struct lyd_node *parent)
+{
+ const struct lysc_node *snode = nb_node->snode;
+ LY_ERR err;
+
+ err = lyd_new_term(parent, snode->module, snode->name, __be_client->name, false, NULL);
+ if (err != LY_SUCCESS)
+ return NB_ERR_RESOURCE;
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-backend:clients/client/state/candidate-config-version
+ */
+static enum nb_error clients_client_state_candidate_config_version_get(
+ const struct nb_node *nb_node, const void *parent_list_entry, struct lyd_node *parent)
+{
+ const struct lysc_node *snode = nb_node->snode;
+ uint64_t value = __be_client->candidate_config->version;
+
+ if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value),
+ LYD_NEW_PATH_UPDATE, NULL))
+ return NB_ERR_RESOURCE;
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-backend:clients/client/state/running-config-version
+ */
+static enum nb_error clients_client_state_running_config_version_get(const struct nb_node *nb_node,
+ const void *parent_list_entry,
+ struct lyd_node *parent)
+{
+ const struct lysc_node *snode = nb_node->snode;
+ uint64_t value = __be_client->running_config->version;
+
+ if (lyd_new_term_bin(parent, snode->module, snode->name, &value, sizeof(value),
+ LYD_NEW_PATH_UPDATE, NULL))
+ return NB_ERR_RESOURCE;
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-backend:clients/client/state/notify-selectors
+ *
+ * Is this better in northbound_notif.c? Let's decide when we add more to this module.
+ */
+
+static enum nb_error clients_client_state_notify_selectors_get(const struct nb_node *nb_node,
+ const void *parent_list_entry,
+ struct lyd_node *parent)
+{
+ const struct lysc_node *snode = nb_node->snode;
+ const char **p;
+ LY_ERR err;
+
+ darr_foreach_p (nb_notif_filters, p) {
+ err = lyd_new_term(parent, snode->module, snode->name, *p, false, NULL);
+ if (err != LY_SUCCESS)
+ return NB_ERR_RESOURCE;
+ }
+
+ return NB_OK;
+}
+
+/* clang-format off */
+const struct frr_yang_module_info frr_backend_info = {
+ .name = "frr-backend",
+ .nodes = {
+ {
+ .xpath = "/frr-backend:clients/client",
+ .cbs = {
+ .get_next = clients_client_get_next,
+ .get_keys = clients_client_get_keys,
+ .lookup_entry = clients_client_lookup_entry,
+ }
+ },
+ {
+ .xpath = "/frr-backend:clients/client/name",
+ .cbs.get = clients_client_name_get,
+ },
+ {
+ .xpath = "/frr-backend:clients/client/state/candidate-config-version",
+ .cbs = {
+ .get = clients_client_state_candidate_config_version_get,
+ }
+ },
+ {
+ .xpath = "/frr-backend:clients/client/state/running-config-version",
+ .cbs = {
+ .get = clients_client_state_running_config_version_get,
+ }
+ },
+ {
+ .xpath = "/frr-backend:clients/client/state/edit-count",
+ .cbs = {
+ .get = nb_oper_uint64_get,
+ .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, num_edit_nb_cfg),
+ }
+ },
+ {
+ .xpath = "/frr-backend:clients/client/state/avg-edit-time",
+ .cbs = {
+ .get = nb_oper_uint64_get,
+ .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, avg_edit_nb_cfg_tm),
+ }
+ },
+ {
+ .xpath = "/frr-backend:clients/client/state/prep-count",
+ .cbs = {
+ .get = nb_oper_uint64_get,
+ .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, num_prep_nb_cfg),
+ }
+ },
+ {
+ .xpath = "/frr-backend:clients/client/state/avg-prep-time",
+ .cbs = {
+ .get = nb_oper_uint64_get,
+ .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, avg_prep_nb_cfg_tm),
+ }
+ },
+ {
+ .xpath = "/frr-backend:clients/client/state/apply-count",
+ .cbs = {
+ .get = nb_oper_uint64_get,
+ .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, num_apply_nb_cfg),
+ }
+ },
+ {
+ .xpath = "/frr-backend:clients/client/state/avg-apply-time",
+ .cbs = {
+ .get = nb_oper_uint64_get,
+ .get_elem = (void *)(intptr_t)offsetof(struct mgmt_be_client, avg_apply_nb_cfg_tm),
+ }
+ },
+ {
+ .xpath = "/frr-backend:clients/client/state/notify-selectors",
+ .cbs.get = clients_client_state_notify_selectors_get,
+ },
+ {
+ .xpath = NULL,
+ },
+ }
+};
+/* clang-format on */
+
struct mgmt_be_client *mgmt_be_client_create(const char *client_name,
struct mgmt_be_client_cbs *cbs,
uintptr_t user_data,
diff --git a/lib/mgmt_be_client.h b/lib/mgmt_be_client.h
index 6ed8c2a39f..5e78f0f433 100644
--- a/lib/mgmt_be_client.h
+++ b/lib/mgmt_be_client.h
@@ -85,6 +85,8 @@ struct mgmt_be_client_cbs {
extern struct debug mgmt_dbg_be_client;
+extern const struct frr_yang_module_info frr_backend_info;
+
/***************************************************************
* API prototypes
***************************************************************/
@@ -112,6 +114,22 @@ extern struct mgmt_be_client *
mgmt_be_client_create(const char *name, struct mgmt_be_client_cbs *cbs,
uintptr_t user_data, struct event_loop *event_loop);
+
+/**
+ * mgmt_be_send_ds_delete_notification() - Send a datastore delete notification.
+ */
+extern int mgmt_be_send_ds_delete_notification(const char *path);
+
+/**
+ * mgmt_be_send_ds_patch_notification() - Send a datastore YANG patch notification.
+ */
+extern int mgmt_be_send_ds_patch_notification(const char *path, const struct lyd_node *tree);
+
+/**
+ * mgmt_be_send_ds_replace_notification() - Send a datastore replace notification.
+ */
+extern int mgmt_be_send_ds_replace_notification(const char *path, const struct lyd_node *tree);
+
/*
* Initialize library vty (adds debug support).
*
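These three entry points are what a backend daemon calls after modifying its operational datastore; when no mgmtd session exists the client just debug-logs and drops the notification. A sketch of announcing a replaced subtree, assuming the caller already holds a libyang tree for the state node (announce_if_state and its arguments are placeholders):

	static void announce_if_state(const char *ifname, const struct lyd_node *state_tree)
	{
		char path[XPATH_MAXLEN];

		snprintf(path, sizeof(path),
			 "/frr-interface:lib/interface[name=\"%s\"]/state", ifname);

		if (mgmt_be_send_ds_replace_notification(path, state_tree))
			zlog_debug("replace notification for %s not sent", path);
	}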
diff --git a/lib/mgmt_msg_native.c b/lib/mgmt_msg_native.c
index b85c7d1b61..46dfe7f2e1 100644
--- a/lib/mgmt_msg_native.c
+++ b/lib/mgmt_msg_native.c
@@ -14,7 +14,8 @@ DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_ERROR, "native error msg");
DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_GET_TREE, "native get tree msg");
DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_TREE_DATA, "native tree data msg");
DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_GET_DATA, "native get data msg");
-DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_NOTIFY, "native get data msg");
+DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_NOTIFY, "native notify msg");
+DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_NOTIFY_SELECT, "native notify select msg");
DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_EDIT, "native edit msg");
DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_EDIT_REPLY, "native edit reply msg");
DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_RPC, "native RPC msg");
diff --git a/lib/mgmt_msg_native.h b/lib/mgmt_msg_native.h
index 587a002801..73303846e7 100644
--- a/lib/mgmt_msg_native.h
+++ b/lib/mgmt_msg_native.h
@@ -159,6 +159,7 @@ DECLARE_MTYPE(MSG_NATIVE_GET_TREE);
DECLARE_MTYPE(MSG_NATIVE_TREE_DATA);
DECLARE_MTYPE(MSG_NATIVE_GET_DATA);
DECLARE_MTYPE(MSG_NATIVE_NOTIFY);
+DECLARE_MTYPE(MSG_NATIVE_NOTIFY_SELECT);
DECLARE_MTYPE(MSG_NATIVE_EDIT);
DECLARE_MTYPE(MSG_NATIVE_EDIT_REPLY);
DECLARE_MTYPE(MSG_NATIVE_RPC);
@@ -323,22 +324,29 @@ _Static_assert(sizeof(struct mgmt_msg_get_data) ==
offsetof(struct mgmt_msg_get_data, xpath),
"Size mismatch");
+
+#define NOTIFY_OP_NOTIFICATION 0
+#define NOTIFY_OP_DS_REPLACE 1
+#define NOTIFY_OP_DS_DELETE 2
+#define NOTIFY_OP_DS_PATCH 3
+
/**
* struct mgmt_msg_notify_data - Message carrying notification data.
*
* @result_type: ``LYD_FORMAT`` for format of the @result value.
* @data: The xpath string of the notification followed by the tree data in
* @result_type format.
+ * @op: notify operation type.
*/
struct mgmt_msg_notify_data {
struct mgmt_msg_header;
uint8_t result_type;
- uint8_t resv2[7];
+ uint8_t op;
+ uint8_t resv2[6];
alignas(8) char data[];
};
-_Static_assert(sizeof(struct mgmt_msg_notify_data) ==
- offsetof(struct mgmt_msg_notify_data, data),
+_Static_assert(sizeof(struct mgmt_msg_notify_data) == offsetof(struct mgmt_msg_notify_data, data),
"Size mismatch");
#define EDIT_FLAG_IMPLICIT_LOCK 0x01
diff --git a/lib/nexthop.c b/lib/nexthop.c
index 332581fbd8..ee6c2b7ec0 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -772,68 +772,30 @@ unsigned int nexthop_level(const struct nexthop *nexthop)
return rv;
}
-/* Only hash word-sized things, let cmp do the rest. */
-uint32_t nexthop_hash_quick(const struct nexthop *nexthop)
+uint32_t nexthop_hash(const struct nexthop *nexthop)
{
uint32_t key = 0x45afe398;
- int i;
- key = jhash_3words(nexthop->type, nexthop->vrf_id,
- nexthop->nh_label_type, key);
-
- if (nexthop->nh_label) {
- int labels = nexthop->nh_label->num_labels;
+ /* type, vrf, ifindex, ip addresses - see nexthop.h */
+ key = _nexthop_hash_bytes(nexthop, key);
- i = 0;
+ key = jhash_1word(nexthop->flags & NEXTHOP_FLAGS_HASHED, key);
- while (labels >= 3) {
- key = jhash_3words(nexthop->nh_label->label[i],
- nexthop->nh_label->label[i + 1],
- nexthop->nh_label->label[i + 2],
- key);
- labels -= 3;
- i += 3;
- }
-
- if (labels >= 2) {
- key = jhash_2words(nexthop->nh_label->label[i],
- nexthop->nh_label->label[i + 1],
- key);
- labels -= 2;
- i += 2;
- }
+ if (nexthop->nh_label) {
+ const struct mpls_label_stack *ls = nexthop->nh_label;
- if (labels >= 1)
- key = jhash_1word(nexthop->nh_label->label[i], key);
+ /* num_labels itself isn't useful to hash, if the number of
+ * labels is different, the hash value will change just due to
+ * that already.
+ */
+ key = jhash(ls->label, sizeof(ls->label[0]) * ls->num_labels, key);
}
- key = jhash_2words(nexthop->ifindex,
- CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK),
- key);
-
/* Include backup nexthops, if present */
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
int backups = nexthop->backup_num;
- i = 0;
-
- while (backups >= 3) {
- key = jhash_3words(nexthop->backup_idx[i],
- nexthop->backup_idx[i + 1],
- nexthop->backup_idx[i + 2], key);
- backups -= 3;
- i += 3;
- }
-
- while (backups >= 2) {
- key = jhash_2words(nexthop->backup_idx[i],
- nexthop->backup_idx[i + 1], key);
- backups -= 2;
- i += 2;
- }
-
- if (backups >= 1)
- key = jhash_1word(nexthop->backup_idx[i], key);
+ key = jhash(nexthop->backup_idx, sizeof(nexthop->backup_idx[0]) * backups, key);
}
if (nexthop->nh_srv6) {
@@ -868,31 +830,6 @@ uint32_t nexthop_hash_quick(const struct nexthop *nexthop)
return key;
}
-
-#define GATE_SIZE 4 /* Number of uint32_t words in struct g_addr */
-
-/* For a more granular hash */
-uint32_t nexthop_hash(const struct nexthop *nexthop)
-{
- uint32_t gate_src_rmap_raw[GATE_SIZE * 3] = {};
- /* Get all the quick stuff */
- uint32_t key = nexthop_hash_quick(nexthop);
-
- assert(((sizeof(nexthop->gate) + sizeof(nexthop->src)
- + sizeof(nexthop->rmap_src))
- / 3)
- == (GATE_SIZE * sizeof(uint32_t)));
-
- memcpy(gate_src_rmap_raw, &nexthop->gate, GATE_SIZE);
- memcpy(gate_src_rmap_raw + GATE_SIZE, &nexthop->src, GATE_SIZE);
- memcpy(gate_src_rmap_raw + (2 * GATE_SIZE), &nexthop->rmap_src,
- GATE_SIZE);
-
- key = jhash2(gate_src_rmap_raw, (GATE_SIZE * 3), key);
-
- return key;
-}
-
void nexthop_copy_no_recurse(struct nexthop *copy,
const struct nexthop *nexthop,
struct nexthop *rparent)
diff --git a/lib/nexthop.h b/lib/nexthop.h
index 5dfb58d846..cea7c77e3e 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -8,6 +8,7 @@
#ifndef _LIB_NEXTHOP_H
#define _LIB_NEXTHOP_H
+#include "jhash.h"
#include "prefix.h"
#include "mpls.h"
#include "vxlan.h"
@@ -56,15 +57,48 @@ struct nexthop {
struct nexthop *next;
struct nexthop *prev;
- /*
- * What vrf is this nexthop associated with?
+
+ /* begin of hashed data - all fields from here onwards are given to
+ * jhash() as one consecutive chunk. DO NOT create "padding holes".
+ * DO NOT insert pointers that need to be deep-hashed.
+ *
+ * static_assert() below needs to be updated when fields are added
*/
+ char _hash_begin[0];
+
+ /* see above */
+ enum nexthop_types_t type;
+
+ /* What vrf is this nexthop associated with? */
vrf_id_t vrf_id;
/* Interface index. */
ifindex_t ifindex;
- enum nexthop_types_t type;
+ /* Type of label(s), if any */
+ enum lsp_types_t nh_label_type;
+
+ /* padding: keep 16 byte alignment here */
+
+ /* Nexthop address
+ * make sure all 16 bytes for IPv6 are zeroed when putting in an IPv4
+ * address since the entire thing is hashed as-is
+ */
+ union {
+ union g_addr gate;
+ enum blackhole_type bh_type;
+ };
+ union g_addr src;
+ union g_addr rmap_src; /* Src is set via routemap */
+
+ /* end of hashed data - remaining fields in this struct are not
+ * directly fed into jhash(). Most of them are actually part of the
+ * hash but have special rules or handling attached.
+ */
+ char _hash_end[0];
+
+ /* Weight of the nexthop ( for unequal cost ECMP ) */
+ uint8_t weight;
uint16_t flags;
#define NEXTHOP_FLAG_ACTIVE (1 << 0) /* This nexthop is alive. */
@@ -82,18 +116,15 @@ struct nexthop {
#define NEXTHOP_FLAG_EVPN (1 << 8) /* nexthop is EVPN */
#define NEXTHOP_FLAG_LINKDOWN (1 << 9) /* is not removed on link down */
+ /* which flags are part of nexthop_hash(). Should probably be split
+ * off into a separate field...
+ */
+#define NEXTHOP_FLAGS_HASHED NEXTHOP_FLAG_ONLINK
+
#define NEXTHOP_IS_ACTIVE(flags) \
(CHECK_FLAG(flags, NEXTHOP_FLAG_ACTIVE) \
&& !CHECK_FLAG(flags, NEXTHOP_FLAG_DUPLICATE))
- /* Nexthop address */
- union {
- union g_addr gate;
- enum blackhole_type bh_type;
- };
- union g_addr src;
- union g_addr rmap_src; /* Src is set via routemap */
-
/* Nexthops obtained by recursive resolution.
*
* If the nexthop struct needs to be resolved recursively,
@@ -104,15 +135,9 @@ struct nexthop {
/* Recursive parent */
struct nexthop *rparent;
- /* Type of label(s), if any */
- enum lsp_types_t nh_label_type;
-
/* Label(s) associated with this nexthop. */
struct mpls_label_stack *nh_label;
- /* Weight of the nexthop ( for unequal cost ECMP ) */
- uint8_t weight;
-
/* Count and index of corresponding backup nexthop(s) in a backup list;
* only meaningful if the HAS_BACKUP flag is set.
*/
@@ -138,6 +163,29 @@ struct nexthop {
struct nexthop_srv6 *nh_srv6;
};
+/* all hashed fields (including any padding that has to be added) need to
+ * be listed in the static_assert below
+ */
+
+#define S(field) sizeof(((struct nexthop *)NULL)->field)
+
+static_assert(
+ offsetof(struct nexthop, _hash_end) - offsetof(struct nexthop, _hash_begin) ==
+ S(type) + S(vrf_id) + S(ifindex) + S(nh_label_type) + S(gate) + S(src) + S(rmap_src),
+ "struct nexthop contains padding, this can break things. insert _pad fields at appropriate places");
+
+#undef S
+
+/* this is here to show exactly what is meant by the comments above about
+ * the hashing
+ */
+static inline uint32_t _nexthop_hash_bytes(const struct nexthop *nh, uint32_t seed)
+{
+ return jhash(&nh->_hash_begin,
+ offsetof(struct nexthop, _hash_end) - offsetof(struct nexthop, _hash_begin),
+ seed);
+}
+
/* Utility to append one nexthop to another. */
#define NEXTHOP_APPEND(to, new) \
do { \
@@ -183,27 +231,11 @@ struct nexthop *nexthop_from_blackhole(enum blackhole_type bh_type,
/*
* Hash a nexthop. Suitable for use with hash tables.
*
- * This function uses the following values when computing the hash:
- * - vrf_id
- * - ifindex
- * - type
- * - gate
- *
- * nexthop
- * The nexthop to hash
- *
- * Returns:
- * 32-bit hash of nexthop
+ * Please double-check the code to see what is included in the hash; there was
+ * documentation here, but it went out of date, and the only thing worse than
+ * no doc is incorrect doc.
*/
uint32_t nexthop_hash(const struct nexthop *nexthop);
-/*
- * Hash a nexthop only on word-sized attributes:
- * - vrf_id
- * - ifindex
- * - type
- * - (some) flags
- */
-uint32_t nexthop_hash_quick(const struct nexthop *nexthop);
extern bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2);
extern bool nexthop_same_no_labels(const struct nexthop *nh1,
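A minimal standalone sketch of the marker-field hashing pattern used by struct nexthop above may help: two zero-size arrays delimit a contiguous, padding-free region that is fed to jhash() in one call, with a compile-time check against padding holes. The struct and its fields below are made up for illustration; only jhash() is assumed to be the usual Jenkins hash from lib/jhash.h.

#include <stddef.h>
#include <stdint.h>
#include <assert.h>

extern uint32_t jhash(const void *key, uint32_t length, uint32_t initval);

struct demo_obj {
	char _hash_begin[0];	/* zero-size marker: start of hashed region */
	uint32_t kind;
	uint32_t unit;
	uint64_t addr;		/* natural alignment, no padding holes */
	char _hash_end[0];	/* zero-size marker: end of hashed region */
	uint32_t refcount;	/* not part of the hash */
};

/* any padding inside the hashed region would make the hash depend on
 * uninitialized bytes, hence the compile-time check
 */
static_assert(offsetof(struct demo_obj, _hash_end) - offsetof(struct demo_obj, _hash_begin) ==
		      sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint64_t),
	      "padding hole in hashed region");

static inline uint32_t demo_obj_hash(const struct demo_obj *o, uint32_t seed)
{
	/* hash the region between the two markers as one consecutive chunk */
	return jhash(&o->_hash_begin,
		     offsetof(struct demo_obj, _hash_end) - offsetof(struct demo_obj, _hash_begin),
		     seed);
}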
diff --git a/lib/northbound.c b/lib/northbound.c
index a385cc9ece..60794b8728 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -273,9 +273,11 @@ static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
error += nb_node_validate_cb(nb_node, NB_CB_APPLY_FINISH,
!!nb_node->cbs.apply_finish, true);
error += nb_node_validate_cb(nb_node, NB_CB_GET_ELEM,
- !!nb_node->cbs.get_elem, false);
+ (nb_node->cbs.get_elem || nb_node->cbs.get), false);
error += nb_node_validate_cb(nb_node, NB_CB_GET_NEXT,
- !!nb_node->cbs.get_next, false);
+ (nb_node->cbs.get_next ||
+ (nb_node->snode->nodetype == LYS_LEAFLIST && nb_node->cbs.get)),
+ false);
error += nb_node_validate_cb(nb_node, NB_CB_GET_KEYS,
!!nb_node->cbs.get_keys, false);
error += nb_node_validate_cb(nb_node, NB_CB_LOOKUP_ENTRY,
@@ -683,19 +685,30 @@ void nb_config_diff(const struct nb_config *config1,
lyd_free_all(diff);
}
-static int dnode_create(struct nb_config *candidate, const char *xpath,
- const char *value, uint32_t options,
- struct lyd_node **new_dnode)
+/**
+ * dnode_create() - create a new node in the tree
+ * @candidate: config tree to create node in.
+ * @xpath: target node to create.
+ * @value: value for the new node, if required.
+ * @options: lyd_new_path options
+ * @new_dnode: the newly created node. If options includes LYD_NEW_PATH_UPDATE,
+ *             and the node already exists (i.e., it is updated rather than
+ *             created), then @new_dnode is set to NULL (not the existing node).
+ *
+ * Return: LY_SUCCESS on success, otherwise a libyang LY_ERR error code.
+ */
+static LY_ERR dnode_create(struct nb_config *candidate, const char *xpath, const char *value,
+ uint32_t options, struct lyd_node **new_dnode)
{
struct lyd_node *dnode;
LY_ERR err;
- err = lyd_new_path(candidate->dnode, ly_native_ctx, xpath, value,
- options, &dnode);
+ err = lyd_new_path2(candidate->dnode, ly_native_ctx, xpath, value, 0, 0, options, NULL,
+ &dnode);
if (err) {
flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path(%s) failed: %d",
__func__, xpath, err);
- return NB_ERR;
+ return err;
} else if (dnode) {
err = lyd_new_implicit_tree(dnode, LYD_IMPLICIT_NO_STATE, NULL);
if (err) {
@@ -706,7 +719,7 @@ static int dnode_create(struct nb_config *candidate, const char *xpath,
}
if (new_dnode)
*new_dnode = dnode;
- return NB_OK;
+ return LY_SUCCESS;
}
int nb_candidate_edit(struct nb_config *candidate, const struct nb_node *nb_node,
@@ -1855,7 +1868,7 @@ int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
return nb_node->cbs.rpc(&args);
}
-void nb_callback_notify(const struct nb_node *nb_node, const char *xpath,
+void nb_callback_notify(const struct nb_node *nb_node, uint8_t op, const char *xpath,
struct lyd_node *dnode)
{
struct nb_cb_notify_args args = {};
@@ -1863,6 +1876,7 @@ void nb_callback_notify(const struct nb_node *nb_node, const char *xpath,
DEBUGD(&nb_dbg_cbs_notify, "northbound notify: %s", xpath);
args.xpath = xpath;
+ args.op = op;
args.dnode = dnode;
nb_node->cbs.notify(&args);
}
@@ -2752,10 +2766,15 @@ void nb_init(struct event_loop *tm,
/* Initialize oper-state */
nb_oper_init(tm);
+
+ /* Initialize notification-state */
+ nb_notif_init(tm);
}
void nb_terminate(void)
{
+ nb_notif_terminate();
+
nb_oper_terminate();
/* Terminate the northbound CLI. */
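A short, hedged sketch of how a caller inside northbound.c might consume the new LY_ERR-returning dnode_create() and its LYD_NEW_PATH_UPDATE semantics described above. The xpath, value and the example_set_leaf() wrapper are purely illustrative; only dnode_create(), the LY_* codes and the NB_* codes come from the surrounding code.

static int example_set_leaf(struct nb_config *candidate)
{
	struct lyd_node *dnode = NULL;
	LY_ERR err;

	err = dnode_create(candidate, "/frr-example:cfg/mtu", "1500",
			   LYD_NEW_PATH_UPDATE, &dnode);
	if (err != LY_SUCCESS)
		return NB_ERR;

	/*
	 * With LYD_NEW_PATH_UPDATE, dnode is NULL when the leaf already
	 * existed and was merely updated, so only newly created nodes are
	 * handled here.
	 */
	if (dnode) {
		/* ... act on the newly created node ... */
	}

	return NB_OK;
}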
diff --git a/lib/northbound.h b/lib/northbound.h
index 97a1d31e57..c31f007e70 100644
--- a/lib/northbound.h
+++ b/lib/northbound.h
@@ -21,6 +21,7 @@ extern "C" {
/* Forward declaration(s). */
struct vty;
struct debug;
+struct nb_node;
struct nb_yang_xpath_tag {
uint32_t ns;
@@ -102,6 +103,20 @@ enum nb_cb_operation {
NB_CB_NOTIFY,
};
+/* Northbound error codes. */
+enum nb_error {
+ NB_OK = 0,
+ NB_ERR,
+ NB_ERR_NO_CHANGES,
+ NB_ERR_NOT_FOUND,
+ NB_ERR_EXISTS,
+ NB_ERR_LOCKED,
+ NB_ERR_VALIDATION,
+ NB_ERR_RESOURCE,
+ NB_ERR_INCONSISTENCY,
+ NB_YIELD,
+};
+
union nb_resource {
int fd;
void *ptr;
@@ -290,6 +305,7 @@ struct nb_cb_rpc_args {
struct nb_cb_notify_args {
/* XPath of the notification. */
const char *xpath;
+ uint8_t op;
/*
* libyang data node representing the notification. If the notification
@@ -426,6 +442,25 @@ struct nb_callbacks {
void (*apply_finish)(struct nb_cb_apply_finish_args *args);
/*
+ * Operational data callback (new direct tree add method).
+ *
+	 * The callback function should create a new lyd_node (leaf) or
+	 * lyd_nodes (leaf-list) for the value(s) and attach them to the parent.
+ *
+ * nb_node
+ * The node representing the leaf or leaf list
+ * list_entry
+ * List entry from get_next (or NULL).
+ * parent
+ * The parent lyd_node to attach the leaf data to.
+ *
+ * Returns:
+ * Returns an nb_error if the data could not be added to the tree.
+ */
+ enum nb_error (*get)(const struct nb_node *nb_node, const void *list_entry,
+ struct lyd_node *parent);
+
+ /*
* Operational data callback.
*
* The callback function should return the value of a specific leaf,
@@ -672,20 +707,6 @@ struct frr_yang_module_info {
#endif
};
-/* Northbound error codes. */
-enum nb_error {
- NB_OK = 0,
- NB_ERR,
- NB_ERR_NO_CHANGES,
- NB_ERR_NOT_FOUND,
- NB_ERR_EXISTS,
- NB_ERR_LOCKED,
- NB_ERR_VALIDATION,
- NB_ERR_RESOURCE,
- NB_ERR_INCONSISTENCY,
- NB_YIELD,
-};
-
/* Default priority. */
#define NB_DFLT_PRIORITY (UINT32_MAX / 2)
@@ -777,16 +798,19 @@ typedef int (*nb_oper_data_cb)(const struct lysc_node *snode,
* error.
*
* If nb_op_iterate_yielding() was passed with @should_batch set then this
- * callback will be invoked during each portion (batch) of the walk.
+ * callback will be invoked during each portion (batch) of the walk with @ret
+ * set to NB_YIELD.
*
* The @tree is read-only and should not be modified or freed.
*
- * If this function returns anything but NB_OK then the walk will be terminated.
- * and this function will not be called again regardless of if @ret was
- * `NB_YIELD` or not.
+ * When @ret is NB_YIELD and this function returns anything but NB_OK, the walk
+ * will be terminated and this function *will* be called again with @ret set to
+ * the non-NB_OK value it just returned. This allows the callback to have a
+ * single piece of code that sends an error message and does any cleanup for any
+ * type of failure, whether that failure came from itself or from the infra code.
*
- * Return: NB_OK to continue or complete the walk normally, otherwise an error
- * to immediately terminate the walk.
+ * Return: NB_OK, or an error while handling @ret == NB_YIELD; otherwise the
+ * return value is ignored.
*/
/* Callback function used by nb_oper_data_iter_yielding(). */
typedef enum nb_error (*nb_oper_data_finish_cb)(const struct lyd_node *tree,
@@ -813,9 +837,13 @@ extern struct debug nb_dbg_libyang;
/* Global running configuration. */
extern struct nb_config *running_config;
+/* Global notification filters */
+extern const char **nb_notif_filters;
+
/* Wrappers for the northbound callbacks. */
-extern struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
- const char *xpath,
+extern struct yang_data *nb_callback_has_new_get_elem(const struct nb_node *nb_node);
+
+extern struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node, const char *xpath,
const void *list_entry);
extern const void *nb_callback_get_next(const struct nb_node *nb_node,
const void *parent_list_entry,
@@ -834,7 +862,7 @@ extern const void *nb_callback_lookup_next(const struct nb_node *nb_node,
extern int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
const struct lyd_node *input, struct lyd_node *output,
char *errmsg, size_t errmsg_len);
-extern void nb_callback_notify(const struct nb_node *nb_node, const char *xpath,
+extern void nb_callback_notify(const struct nb_node *nb_node, uint8_t op, const char *xpath,
struct lyd_node *dnode);
/*
@@ -1488,6 +1516,22 @@ extern void nb_oper_cancel_walk(void *walk);
*/
extern void nb_oper_cancel_all_walks(void);
+/**
+ * nb_oper_walk_finish_arg() - return the finish arg for this walk
+ */
+extern void *nb_oper_walk_finish_arg(void *walk);
+/**
+ * nb_oper_walk_cb_arg() - return the callback arg for this walk
+ */
+extern void *nb_oper_walk_cb_arg(void *walk);
+
+/* Generic getter functions */
+extern enum nb_error nb_oper_uint32_get(const struct nb_node *nb_node,
+ const void *parent_list_entry, struct lyd_node *parent);
+
+extern enum nb_error nb_oper_uint64_get(const struct nb_node *nb_node,
+ const void *parent_list_entry, struct lyd_node *parent);
+
/*
* Validate if the northbound callback operation is valid for the given node.
*
@@ -1720,6 +1764,80 @@ extern void nb_oper_init(struct event_loop *loop);
extern void nb_oper_terminate(void);
extern bool nb_oper_is_yang_lib_query(const char *xpath);
+
+/**
+ * nb_op_update() - Create new state data.
+ * @tree: subtree @path is relative to or NULL in which case @path must be
+ * absolute.
+ * @path: The path of the state node to create.
+ * @value: The canonical value of the state.
+ *
+ * Return: The new libyang node.
+ */
+extern struct lyd_node *nb_op_update(struct lyd_node *tree, const char *path, const char *value);
+
+/**
+ * nb_op_update_delete() - Delete state data.
+ * @tree: subtree @path is relative to or NULL in which case @path must be
+ * absolute.
+ * @path: The path of the state node to delete, or NULL if @tree should just be
+ * deleted.
+ */
+extern void nb_op_update_delete(struct lyd_node *tree, const char *path);
+
+/**
+ * nb_op_update_pathf() - Create new state data.
+ * @tree: subtree @path_fmt is relative to or NULL in which case @path_fmt must
+ * be absolute.
+ * @path_fmt: The path format string of the state node to create.
+ * @value: The canonical value of the state.
+ * @...: The values to substitute into @path_fmt.
+ *
+ * Return: The new libyang node.
+ */
+extern struct lyd_node *nb_op_update_pathf(struct lyd_node *tree, const char *path_fmt,
+ const char *value, ...) PRINTFRR(2, 4);
+extern struct lyd_node *nb_op_update_vpathf(struct lyd_node *tree, const char *path_fmt,
+ const char *value, va_list ap);
+/**
+ * nb_op_update_delete_pathf() - Delete state data.
+ * @tree: subtree @path_fmt is relative to or NULL in which case @path_fmt must
+ * be absolute.
+ * @path_fmt: The path format string of the state node to delete.
+ * @...: The values to substitute into @path_fmt.
+ */
+extern void nb_op_update_delete_pathf(struct lyd_node *tree, const char *path_fmt, ...)
+ PRINTFRR(2, 3);
+extern void nb_op_update_delete_vpathf(struct lyd_node *tree, const char *path_fmt, va_list ap);
+
+/**
+ * nb_op_updatef() - Create new state data.
+ * @tree: subtree @path is relative to or NULL in which case @path must be
+ * absolute.
+ * @path: The path of the state node to create.
+ * @val_fmt: The value format string to set the canonical value of the state.
+ * @...: The values to substitute into @val_fmt.
+ *
+ * Return: The new libyang node.
+ */
+extern struct lyd_node *nb_op_updatef(struct lyd_node *tree, const char *path, const char *val_fmt,
+ ...) PRINTFRR(3, 4);
+
+extern struct lyd_node *nb_op_vupdatef(struct lyd_node *tree, const char *path, const char *val_fmt,
+ va_list ap);
+
+/**
+ * nb_notif_set_filters() - add or replace notification filters
+ * @selectors: darr array of selector (filter) xpath strings, can be NULL if
+ * @replace is true. nb_notif_set_filters takes ownership of this
+ * array and the contained darr strings.
+ * @replace: true to replace the existing set, otherwise append.
+ */
+extern void nb_notif_set_filters(const char **selectors, bool replace);
+
+extern void nb_notif_init(struct event_loop *loop);
+extern void nb_notif_terminate(void);
+
#ifdef __cplusplus
}
#endif
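A hedged usage sketch of the state-update helpers declared above, showing the pattern of keeping a per-object oper-state subtree and updating leaves relative to it. The YANG paths, the my_intf structure and both helper functions are hypothetical; only the nb_op_*() calls and lyd_free_all() are taken from this header and libyang.

struct my_intf {
	char name[64];
	struct lyd_node *state;	/* root of this object's oper-state subtree */
	uint32_t rx_pkts;
};

static void my_intf_state_update(struct my_intf *ifp)
{
	/* create the subtree root on first use (value is NULL for a container) */
	if (!ifp->state)
		ifp->state = nb_op_update_pathf(NULL,
						"/frr-example:interfaces/interface[name=\"%s\"]/state",
						NULL, ifp->name);

	/* leaf updates are relative to the subtree root */
	nb_op_updatef(ifp->state, "rx-packets", "%u", ifp->rx_pkts);
}

static void my_intf_state_delete(struct my_intf *ifp)
{
	/* report the deletion with an absolute path, then free the local tree */
	nb_op_update_delete_pathf(NULL, "/frr-example:interfaces/interface[name=\"%s\"]",
				  ifp->name);
	lyd_free_all(ifp->state);
	ifp->state = NULL;
}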
diff --git a/lib/northbound_notif.c b/lib/northbound_notif.c
new file mode 100644
index 0000000000..9caca9f6d7
--- /dev/null
+++ b/lib/northbound_notif.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * December 1 2024, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2024, LabN Consulting, L.L.C.
+ *
+ */
+#include <zebra.h>
+#include "debug.h"
+#include "lib_errors.h"
+#include "typesafe.h"
+#include "northbound.h"
+#include "mgmt_be_client.h"
+
+#define __dbg(fmt, ...) DEBUGD(&nb_dbg_notif, "NB_OP_CHANGE: %s: " fmt, __func__, ##__VA_ARGS__)
+#define __log_err(fmt, ...) zlog_err("NB_OP_CHANGE: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+
+#define NB_NOTIF_TIMER_MSEC (10) /* 10msec */
+
+/*
+ * ADDS:
+ * - Less specific:
+ * - Any new add will cause more specific pending adds to be dropped and equal
+ * or more specific deletes to be dropped.
+ * - More specific:
+ * - Ignore any new add that is the same or more specific than an existing add.
+ * - A new add that is more specific than a delete should change the delete
+ * into an add query (since adds are reported as a replace).
+ *
+ * DELETES:
+ * - Less specific:
+ * - Any new delete will cause more specific pending deletes to be dropped and
+ * equal or more specific adds to be dropped.
+ * - More specific:
+ * - Ignore new deletes that are the same or more specific than existing
+ * deletes.
+ * - A new delete that is more specific than an add can be dropped since we
+ * use replacement methodology for the add.
+ *
+ * One thing we have to pay close attention to is that the state is going to be
+ * queried when the notification is sent, not when we are told of the change.
+ */
+
+DEFINE_MTYPE_STATIC(LIB, OP_CHANGE, "NB Oper Change");
+DEFINE_MTYPE_STATIC(LIB, OP_CHANGES_GROUP, "NB Oper Changes Group");
+DEFINE_MTYPE_STATIC(LIB, NB_NOTIF_WALK_ARGS, "NB Notify Oper Walk");
+
+struct op_change {
+ RB_ENTRY(op_change) link;
+ char path[];
+};
+
+/*
+ * RB tree for op_change
+ */
+static int op_change_cmp(const struct op_change *e1, const struct op_change *e2);
+RB_HEAD(op_changes, op_change);
+RB_PROTOTYPE(op_changes, op_change, link, op_change_cmp)
+RB_GENERATE(op_changes, op_change, link, op_change_cmp)
+
+struct op_changes nb_notif_adds = RB_INITIALIZER(&nb_notif_adds);
+struct op_changes nb_notif_dels = RB_INITIALIZER(&nb_notif_dels);
+struct event_loop *nb_notif_master;
+struct event *nb_notif_timer;
+void *nb_notif_walk;
+
+const char **nb_notif_filters;
+
+/*
+ * We maintain a queue of change lists, with one entry per query-and-notify
+ * action
+ */
+PREDECL_LIST(op_changes_queue);
+struct op_changes_group {
+ struct op_changes_queue_item item;
+ struct op_changes adds;
+ struct op_changes dels;
+ struct op_changes *cur_changes; /* used when walking */
+ struct op_change *cur_change; /* " " " */
+};
+
+DECLARE_LIST(op_changes_queue, struct op_changes_group, item);
+static struct op_changes_queue_head op_changes_queue;
+
+struct nb_notif_walk_args {
+ struct op_changes_group *group;
+ struct lyd_node *tree;
+};
+
+static void nb_notif_set_walk_timer(void);
+
+
+static int pathncmp(const char *s1, const char *s2, size_t n)
+{
+ size_t i = 0;
+
+ while (i < n && *s1 && *s2) {
+ char c1 = *s1;
+ char c2 = *s2;
+
+ if ((c1 == '\'' && c2 == '\"') || (c1 == '\"' && c2 == '\'')) {
+ s1++;
+ s2++;
+ i++;
+ continue;
+ }
+ if (c1 != c2)
+ return (unsigned char)c1 - (unsigned char)c2;
+ s1++;
+ s2++;
+ i++;
+ }
+ if (i < n)
+ return (unsigned char)*s1 - (unsigned char)*s2;
+ return 0;
+}
+
+static int pathcmp(const char *s1, const char *s2)
+{
+ while (*s1 && *s2) {
+ char c1 = *s1;
+ char c2 = *s2;
+
+ if ((c1 == '\'' && c2 == '\"') || (c1 == '\"' && c2 == '\'')) {
+ s1++;
+ s2++;
+ continue;
+ }
+ if (c1 != c2)
+ return (unsigned char)c1 - (unsigned char)c2;
+ s1++;
+ s2++;
+ }
+ return (unsigned char)*s1 - (unsigned char)*s2;
+}
+
+
+static int op_change_cmp(const struct op_change *e1, const struct op_change *e2)
+{
+ return pathcmp(e1->path, e2->path);
+}
+
+static struct op_change *op_change_alloc(const char *path)
+{
+ struct op_change *note;
+ size_t ssize = strlen(path) + 1;
+
+ note = XMALLOC(MTYPE_OP_CHANGE, sizeof(*note) + ssize);
+ memset(note, 0, sizeof(*note));
+ strlcpy(note->path, path, ssize);
+
+ return note;
+}
+
+static void op_change_free(struct op_change *note)
+{
+ XFREE(MTYPE_OP_CHANGE, note);
+}
+
+/**
+ * op_changes_group_push() - Save the current set of changes on the queue.
+ *
+ * This function will save the current set of changes on the queue and
+ * initialize a new set of changes.
+ */
+static void op_changes_group_push(void)
+{
+ struct op_changes_group *changes;
+
+ if (RB_EMPTY(op_changes, &nb_notif_adds) && RB_EMPTY(op_changes, &nb_notif_dels))
+ return;
+
+ __dbg("pushing current oper changes onto queue");
+
+ changes = XCALLOC(MTYPE_OP_CHANGES_GROUP, sizeof(*changes));
+ changes->adds = nb_notif_adds;
+ changes->dels = nb_notif_dels;
+ op_changes_queue_add_tail(&op_changes_queue, changes);
+
+ RB_INIT(op_changes, &nb_notif_adds);
+ RB_INIT(op_changes, &nb_notif_dels);
+}
+
+static void op_changes_group_free(struct op_changes_group *group)
+{
+ struct op_change *e, *next;
+
+ RB_FOREACH_SAFE (e, op_changes, &group->adds, next) {
+ RB_REMOVE(op_changes, &group->adds, e);
+ op_change_free(e);
+ }
+ RB_FOREACH_SAFE (e, op_changes, &group->dels, next) {
+ RB_REMOVE(op_changes, &group->dels, e);
+ op_change_free(e);
+ }
+ XFREE(MTYPE_OP_CHANGES_GROUP, group);
+}
+
+static struct op_change *__find_less_specific(struct op_changes *head, struct op_change *note)
+{
+ struct op_change *e;
+ size_t plen;
+
+ /*
+	 * RB_NFIND finds an entry equal to or greater than (more specific
+	 * than) the key, so the previous node is either a less-specific match
+	 * or an unrelated entry that sorts earlier. We want to detect when the
+	 * new path is a more-specific match of an existing entry.
+ */
+ e = RB_NFIND(op_changes, head, note);
+ if (e)
+ e = RB_PREV(op_changes, e);
+ else
+ e = RB_MAX(op_changes, head);
+ if (!e)
+ return NULL;
+ plen = strlen(e->path);
+ if (pathncmp(e->path, note->path, plen))
+ return NULL;
+	/* an equal entry would have been returned by RB_NFIND() before we went RB_PREV */
+ assert(strlen(note->path) != plen);
+ return e;
+}
+
+static void __drop_eq_or_more_specific(struct op_changes *head, const char *path, int plen,
+ struct op_change *next)
+{
+ struct op_change *e;
+
+ for (e = next; e != NULL; e = next) {
+ /* if the prefix no longer matches we are done */
+ if (pathncmp(path, e->path, plen))
+ break;
+ __dbg("dropping more specific %s: %s", head == &nb_notif_adds ? "add" : "delete",
+ e->path);
+ next = RB_NEXT(op_changes, e);
+ RB_REMOVE(op_changes, head, e);
+ op_change_free(e);
+ }
+}
+
+static void __op_change_add_del(const char *path, struct op_changes *this_head,
+ struct op_changes *other_head)
+{
+ /* find out if this has been subsumed or will subsume */
+
+ const char *op = this_head == &nb_notif_adds ? "add" : "delete";
+ struct op_change *note = op_change_alloc(path);
+ struct op_change *next, *e;
+ int plen;
+
+ __dbg("processing oper %s change path: %s", op, path);
+
+ /*
+ * See if we are already covered by a more general `op`.
+ */
+ e = __find_less_specific(this_head, note);
+ if (e) {
+ __dbg("%s path already covered by: %s", op, e->path);
+ op_change_free(note);
+ return;
+ }
+
+ /*
+ * Handle having a less-specific `other op`.
+ */
+ e = __find_less_specific(other_head, note);
+ if (e) {
+ if (this_head == &nb_notif_dels) {
+ /*
+			 * If we have a less-specific add, drop this
+			 * more-specific delete: the add is sent as a replace,
+			 * so the now-missing state is removed anyway.
+ */
+ __dbg("delete path already covered add-replace: %s", e->path);
+ } else {
+ /*
+			 * If we have a less-specific delete, convert the delete
+			 * to an add, and drop this more-specific add. The new
+			 * less-specific add will pick up the more-specific add
+			 * during the walk, and since adds are processed as
+			 * replaces, any other existing state that was to be
+			 * deleted is still deleted (unless it is also returned).
+ */
+ __dbg("add covered, converting covering delete to add-replace: %s", e->path);
+ RB_REMOVE(op_changes, other_head, e);
+ __op_change_add_del(e->path, &nb_notif_adds, &nb_notif_dels);
+ op_change_free(e);
+ }
+ op_change_free(note);
+ return;
+ }
+
+ e = RB_INSERT(op_changes, this_head, note);
+ if (e) {
+ __dbg("path already in %s tree: %s", op, path);
+ op_change_free(note);
+ return;
+ }
+
+ __dbg("scanning for subsumed or subsuming: %s", path);
+
+ plen = strlen(path);
+
+ next = RB_NEXT(op_changes, note);
+ __drop_eq_or_more_specific(this_head, path, plen, next);
+
+ /* Drop exact match or more specific `other op` */
+ next = RB_NFIND(op_changes, other_head, note);
+ __drop_eq_or_more_specific(other_head, path, plen, next);
+
+ nb_notif_set_walk_timer();
+}
+
+static void nb_notif_add(const char *path)
+{
+ __op_change_add_del(path, &nb_notif_adds, &nb_notif_dels);
+}
+
+
+static void nb_notif_delete(const char *path)
+{
+ __op_change_add_del(path, &nb_notif_dels, &nb_notif_adds);
+}
+
+struct lyd_node *nb_op_update(struct lyd_node *tree, const char *path, const char *value)
+{
+ struct lyd_node *dnode;
+ const char *abs_path = NULL;
+
+ __dbg("updating path: %s with value: %s", path, value);
+
+ dnode = yang_state_new(tree, path, value);
+
+ if (path[0] == '/')
+ abs_path = path;
+ else
+ abs_path = lyd_path(dnode, LYD_PATH_STD, NULL, 0);
+
+ nb_notif_add(abs_path);
+
+ if (abs_path != path)
+ free((char *)abs_path);
+
+ return dnode;
+}
+
+void nb_op_update_delete(struct lyd_node *tree, const char *path)
+{
+ char *abs_path = NULL;
+
+ __dbg("deleting path: %s", path);
+
+ if (path && path[0] == '/')
+ abs_path = (char *)path;
+ else {
+ assert(tree);
+ abs_path = lyd_path(tree, LYD_PATH_STD, NULL, 0);
+ assert(abs_path);
+ if (path) {
+ char *tmp = darr_strdup(abs_path);
+
+ free(abs_path);
+ abs_path = tmp;
+ if (*darr_last(abs_path) != '/')
+ darr_in_strcat(abs_path, "/");
+ assert(abs_path); /* silence bad CLANG NULL warning */
+ darr_in_strcat(abs_path, path);
+ }
+ }
+
+ yang_state_delete(tree, path);
+
+ nb_notif_delete(abs_path);
+
+ if (abs_path != path) {
+ if (path)
+ darr_free(abs_path);
+ else
+ free(abs_path);
+ }
+}
+
+PRINTFRR(2, 0)
+struct lyd_node *nb_op_update_vpathf(struct lyd_node *tree, const char *path_fmt, const char *value,
+ va_list ap)
+{
+ struct lyd_node *dnode;
+ char *path;
+
+ path = darr_vsprintf(path_fmt, ap);
+ dnode = nb_op_update(tree, path, value);
+ darr_free(path);
+
+ return dnode;
+}
+
+struct lyd_node *nb_op_update_pathf(struct lyd_node *tree, const char *path_fmt, const char *value,
+ ...)
+{
+ struct lyd_node *dnode;
+ va_list ap;
+
+ va_start(ap, value);
+ dnode = nb_op_update_vpathf(tree, path_fmt, value, ap);
+ va_end(ap);
+
+ return dnode;
+}
+
+PRINTFRR(2, 0)
+void nb_op_update_delete_vpathf(struct lyd_node *tree, const char *path_fmt, va_list ap)
+{
+ char *path;
+
+ path = darr_vsprintf(path_fmt, ap);
+ nb_op_update_delete(tree, path);
+ darr_free(path);
+}
+
+void nb_op_update_delete_pathf(struct lyd_node *tree, const char *path_fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, path_fmt);
+ nb_op_update_delete_vpathf(tree, path_fmt, ap);
+ va_end(ap);
+}
+
+
+PRINTFRR(3, 0)
+struct lyd_node *nb_op_vupdatef(struct lyd_node *tree, const char *path, const char *val_fmt,
+ va_list ap)
+{
+ struct lyd_node *dnode;
+ char *value;
+
+ value = darr_vsprintf(val_fmt, ap);
+ dnode = nb_op_update(tree, path, value);
+ darr_free(value);
+
+ return dnode;
+}
+
+
+struct lyd_node *nb_op_updatef(struct lyd_node *tree, const char *path, const char *val_fmt, ...)
+{
+ struct lyd_node *dnode;
+ va_list ap;
+
+ va_start(ap, val_fmt);
+ dnode = nb_op_vupdatef(tree, path, val_fmt, ap);
+ va_end(ap);
+
+ return dnode;
+}
+
+static struct op_changes_group *op_changes_group_next(void)
+{
+ struct op_changes_group *group;
+
+ group = op_changes_queue_pop(&op_changes_queue);
+ if (!group) {
+ op_changes_group_push();
+ group = op_changes_queue_pop(&op_changes_queue);
+ }
+ if (!group)
+ return NULL;
+ group->cur_changes = &group->dels;
+ group->cur_change = RB_MIN(op_changes, group->cur_changes);
+ if (!group->cur_change) {
+ group->cur_changes = &group->adds;
+ group->cur_change = RB_MIN(op_changes, group->cur_changes);
+ assert(group->cur_change);
+ }
+ return group;
+}
+
+/* ---------------------------- */
+/* Query for changes and notify */
+/* ---------------------------- */
+
+static void timer_walk_abort(struct nb_notif_walk_args *args);
+static void timer_walk_continue(struct event *event);
+static void timer_walk_done(struct nb_notif_walk_args *args);
+
+static struct op_change *__next_change(struct op_changes_group *group)
+{
+ struct op_change *next = RB_NEXT(op_changes, group->cur_change);
+
+ /* Remove and free current so retry works */
+ RB_REMOVE(op_changes, group->cur_changes, group->cur_change);
+ op_change_free(group->cur_change);
+ return next;
+}
+
+static struct op_changes_group *__next_group(struct op_changes_group *group)
+{
+ __dbg("done with oper-path collection for group");
+ op_changes_group_free(group);
+ return op_changes_group_next();
+}
+
+static enum nb_error oper_walk_done(const struct lyd_node *tree, void *arg, enum nb_error ret)
+{
+ struct nb_notif_walk_args *args = arg;
+ struct op_changes_group *group = args->group;
+ const char *path = group->cur_change->path;
+
+	/* we don't send batches when yielding, as we need the completed edit in any patch */
+ assert(ret != NB_YIELD);
+
+ if (ret == NB_ERR_NOT_FOUND) {
+ __dbg("Path not found while walking oper tree: %s", path);
+ ret = NB_OK;
+ } else if (ret != NB_OK) {
+error:
+ __log_err("Error notifying for datastore path: %s: %s", path, nb_err_name(ret));
+
+ timer_walk_abort(args);
+ goto done;
+ } else {
+ __dbg("Done with oper-path collection for path: %s", path);
+
+ /* Do we need this? */
+ while (tree->parent)
+ tree = lyd_parent(tree);
+
+ /* Send the add (replace) notification */
+ if (mgmt_be_send_ds_replace_notification(path, tree)) {
+ __log_err("Error sending notification message for path: %s", path);
+ ret = NB_ERR;
+ goto error;
+ }
+ }
+
+ /*
+ * Advance to next change.
+ */
+
+ group->cur_change = __next_change(group);
+ if (!group->cur_change) {
+ args->group = __next_group(group);
+ if (!args->group) {
+ timer_walk_done(args);
+ goto done;
+ }
+ }
+
+ /* Run next walk after giving other events a shot to run */
+ event_add_timer_msec(nb_notif_master, timer_walk_continue, args, 0, &nb_notif_timer);
+done:
+	/* Done with the current walk; the next one was scheduled above if there is more */
+ nb_notif_walk = NULL;
+
+ return ret;
+}
+
+static int nb_notify_delete_changes(struct nb_notif_walk_args *args)
+{
+ struct op_changes_group *group = args->group;
+
+ group->cur_change = RB_MIN(op_changes, group->cur_changes);
+ while (group->cur_change) {
+ if (mgmt_be_send_ds_delete_notification(group->cur_change->path)) {
+ __log_err("Error sending delete notification message for path: %s",
+ group->cur_change->path);
+ return 1;
+ }
+ group->cur_change = __next_change(group);
+ }
+ return 0;
+}
+
+static void timer_walk_continue(struct event *event)
+{
+ struct nb_notif_walk_args *args = EVENT_ARG(event);
+ struct op_changes_group *group = args->group;
+ const char *path;
+ int ret;
+
+ /*
+ * Notify about deletes until we have add changes to collect.
+ */
+ while (group->cur_changes == &group->dels) {
+ ret = nb_notify_delete_changes(args);
+ if (ret) {
+ timer_walk_abort(args);
+ return;
+ }
+
+ /* after deletes advance to adds */
+ group->cur_changes = &group->adds;
+ group->cur_change = RB_MIN(op_changes, group->cur_changes);
+ if (group->cur_change)
+ break;
+
+ args->group = __next_group(group);
+ if (!args->group) {
+ timer_walk_done(args);
+ return;
+ }
+ group = args->group;
+ }
+
+ path = group->cur_change->path;
+ __dbg("starting next oper-path replace walk for path: %s", path);
+ nb_notif_walk = nb_oper_walk(path, NULL, 0, false, NULL, NULL, oper_walk_done, args);
+}
+
+static void timer_walk_start(struct event *event)
+{
+ struct op_changes_group *group;
+ struct nb_notif_walk_args *args;
+
+ __dbg("oper-state change notification timer fires");
+
+ group = op_changes_group_next();
+ if (!group) {
+ __dbg("no oper changes to notify");
+ return;
+ }
+
+ args = XCALLOC(MTYPE_NB_NOTIF_WALK_ARGS, sizeof(*args));
+ args->group = group;
+
+ EVENT_ARG(event) = args;
+ timer_walk_continue(event);
+}
+
+static void timer_walk_abort(struct nb_notif_walk_args *args)
+{
+ __dbg("Failed notifying datastore changes, will retry");
+
+ __dbg("oper-state notify setting retry timer to fire in: %d msec ", NB_NOTIF_TIMER_MSEC);
+ event_add_timer_msec(nb_notif_master, timer_walk_continue, args, NB_NOTIF_TIMER_MSEC,
+ &nb_notif_timer);
+}
+
+static void timer_walk_done(struct nb_notif_walk_args *args)
+{
+ __dbg("Finished notifying for all datastore changes");
+ assert(!args->group);
+ XFREE(MTYPE_NB_NOTIF_WALK_ARGS, args);
+}
+
+static void nb_notif_set_walk_timer(void)
+{
+ if (nb_notif_walk) {
+ __dbg("oper-state walk already in progress.");
+ return;
+ }
+ if (event_is_scheduled(nb_notif_timer)) {
+ __dbg("oper-state notification timer already set.");
+ return;
+ }
+
+ __dbg("oper-state notification setting timer to fire in: %d msec ", NB_NOTIF_TIMER_MSEC);
+ event_add_timer_msec(nb_notif_master, timer_walk_start, NULL, NB_NOTIF_TIMER_MSEC,
+ &nb_notif_timer);
+}
+
+void nb_notif_set_filters(const char **selectors, bool replace)
+{
+ const char **csp;
+
+ if (replace) {
+ darr_free_free(nb_notif_filters);
+ nb_notif_filters = selectors;
+ return;
+ }
+ darr_foreach_p (selectors, csp)
+ *darr_append(nb_notif_filters) = *csp;
+ darr_free(selectors);
+}
+
+void nb_notif_init(struct event_loop *tm)
+{
+ nb_notif_master = tm;
+ op_changes_queue_init(&op_changes_queue);
+}
+
+void nb_notif_terminate(void)
+{
+ struct nb_notif_walk_args *args = nb_notif_timer ? EVENT_ARG(nb_notif_timer) : NULL;
+ struct op_changes_group *group;
+
+ __dbg("terminating: timer: %p timer arg: %p walk %p", nb_notif_timer, args, nb_notif_walk);
+
+ EVENT_OFF(nb_notif_timer);
+
+ if (nb_notif_walk) {
+ /* Grab walk args from walk if active. */
+ args = nb_oper_walk_finish_arg(nb_notif_walk);
+ nb_oper_cancel_walk(nb_notif_walk);
+ nb_notif_walk = NULL;
+ }
+ if (args) {
+ op_changes_group_free(args->group);
+ XFREE(MTYPE_NB_NOTIF_WALK_ARGS, args);
+ }
+
+ while ((group = op_changes_group_next()))
+ op_changes_group_free(group);
+
+ darr_free_free(nb_notif_filters);
+}
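A hedged illustration of the add/delete subsumption rules documented at the top of this new file, expressed through the public wrappers. The module name and xpaths are hypothetical, and the returned nodes (which a real caller would keep for later relative updates) are ignored here for brevity.

void example_changes(void)
{
	/* queue an add for one neighbor counter */
	nb_op_update(NULL, "/frr-example:router/neighbor[addr='10.0.0.1']/stats/in-msgs", "42");

	/* a later, less-specific delete drops the pending more-specific add
	 * and is queued on its own: the whole neighbor is reported deleted
	 */
	nb_op_update_delete(NULL, "/frr-example:router/neighbor[addr='10.0.0.1']");

	/* a newer add that is more specific than the pending delete converts
	 * that delete into an add-replace of the neighbor subtree, which both
	 * removes stale state and reports the new leaf when the walk runs
	 */
	nb_op_update(NULL, "/frr-example:router/neighbor[addr='10.0.0.1']/stats/out-msgs", "1");
}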
diff --git a/lib/northbound_oper.c b/lib/northbound_oper.c
index a3ff360780..6336db502a 100644
--- a/lib/northbound_oper.c
+++ b/lib/northbound_oper.c
@@ -35,6 +35,7 @@
* We must also process containers with lookup-next descendants last.
*/
+DEFINE_MTYPE_STATIC(LIB, NB_STATE, "Northbound State");
DEFINE_MTYPE_STATIC(LIB, NB_YIELD_STATE, "NB Yield State");
DEFINE_MTYPE_STATIC(LIB, NB_NODE_INFOS, "NB Node Infos");
@@ -54,6 +55,7 @@ struct nb_op_node_info {
struct lyd_node *inner;
const struct lysc_node *schema; /* inner schema in case we rm inner */
struct yang_list_keys keys; /* if list, keys to locate element */
+ uint position; /* if keyless list, list position */
const void *list_entry; /* opaque entry from user or NULL */
uint xpath_len; /* length of the xpath string for this node */
uint niters; /* # list elems create this iteration */
@@ -232,6 +234,22 @@ static void nb_op_get_keys(struct lyd_node_inner *list_node,
keys->num = n;
}
+static uint nb_op_get_position_predicate(struct nb_op_yield_state *ys, struct nb_op_node_info *ni)
+{
+ const char *cursor = ys->xpath + ni->xpath_len - 1;
+
+ if (cursor[0] != ']')
+ return 0;
+
+ while (--cursor > ys->xpath && isdigit(cursor[0]))
+ ;
+
+ if (cursor[0] != '[')
+ return 0;
+
+ return atoi(&cursor[1]);
+}
+
/**
* __move_back_to_next() - move back to the next lookup-next schema
*/
@@ -344,7 +362,8 @@ static void nb_op_resume_data_tree(struct nb_op_yield_state *ys)
/**
* nb_op_xpath_to_trunk() - generate a lyd_node tree (trunk) using an xpath.
* @xpath_in: xpath query string to build trunk from.
- * @dnode: resulting tree (trunk)
+ * @xpath_out: resulting xpath for the trunk.
+ * @trunk: resulting tree (trunk)
*
* Use the longest prefix of @xpath_in as possible to resolve to a tree (trunk).
* This is logically as if we walked along the xpath string resolving each
@@ -352,7 +371,7 @@ static void nb_op_resume_data_tree(struct nb_op_yield_state *ys)
*
* Return: error if any, if no error then @dnode contains the tree (trunk).
*/
-static enum nb_error nb_op_xpath_to_trunk(const char *xpath_in,
+static enum nb_error nb_op_xpath_to_trunk(const char *xpath_in, char **xpath_out,
struct lyd_node **trunk)
{
char *xpath = NULL;
@@ -370,7 +389,10 @@ static enum nb_error nb_op_xpath_to_trunk(const char *xpath_in,
if (ret != NB_OK)
break;
}
- darr_free(xpath);
+ if (ret == NB_OK)
+ *xpath_out = xpath;
+ else
+ darr_free(xpath);
return ret;
}
@@ -410,28 +432,57 @@ static enum nb_error nb_op_ys_finalize_node_info(struct nb_op_yield_state *ys,
ni->lookup_next_ok = yield_ok && ni->has_lookup_next &&
(index == 0 || ni[-1].lookup_next_ok);
- nb_op_get_keys((struct lyd_node_inner *)inner, &ni->keys);
+ if (CHECK_FLAG(nn->flags, F_NB_NODE_KEYLESS_LIST)) {
+ uint i;
- /* A list entry cannot be present in a tree w/o it's keys */
- assert(ni->keys.num == yang_snode_num_keys(inner->schema));
+ ni->position = nb_op_get_position_predicate(ys, ni);
+ if (!ni->position) {
+ flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
+ "%s: can't decode keyless list positional predicate in %s",
+ __func__, ys->xpath);
+ return NB_ERR_NOT_FOUND;
+ }
- /*
- * Get this nodes opaque list_entry object
- */
+ /*
+ * Get the entry at the position given by the predicate
+ */
- if (!nn->cbs.lookup_entry) {
- flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
- "%s: data path doesn't support iteration over operational data: %s",
- __func__, ys->xpath);
- return NB_ERR_NOT_FOUND;
- }
+ /* ni->list_entry starts as the parent entry of this node */
+ ni->list_entry = nb_callback_get_next(nn, ni->list_entry, NULL);
+ for (i = 1; i < ni->position && ni->list_entry; i++)
+ ni->list_entry = nb_callback_get_next(nn, ni->list_entry, ni->list_entry);
- /* ni->list_entry starts as the parent entry of this node */
- ni->list_entry = nb_callback_lookup_entry(nn, ni->list_entry, &ni->keys);
- if (ni->list_entry == NULL) {
- flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
- "%s: list entry lookup failed", __func__);
- return NB_ERR_NOT_FOUND;
+ if (i != ni->position || !ni->list_entry) {
+ flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
+ "%s: entry at position %d doesn't exist in: %s", __func__,
+ ni->position, ys->xpath);
+ return NB_ERR_NOT_FOUND;
+ }
+
+ } else {
+ nb_op_get_keys((struct lyd_node_inner *)inner, &ni->keys);
+		/* A list entry cannot be present in a tree w/o its keys */
+ assert(ni->keys.num == yang_snode_num_keys(inner->schema));
+
+ /*
+		 * Get this node's opaque list_entry object
+ */
+
+ /* We need a lookup entry unless this is a keyless list */
+ if (!nn->cbs.lookup_entry && ni->keys.num) {
+ flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
+ "%s: data path doesn't support iteration over operational data: %s",
+ __func__, ys->xpath);
+ return NB_ERR_NOT_FOUND;
+ }
+
+ /* ni->list_entry starts as the parent entry of this node */
+ ni->list_entry = nb_callback_lookup_entry(nn, ni->list_entry, &ni->keys);
+ if (ni->list_entry == NULL) {
+ flog_warn(EC_LIB_NB_OPERATIONAL_DATA, "%s: list entry lookup failed",
+ __func__);
+ return NB_ERR_NOT_FOUND;
+ }
}
/*
@@ -460,8 +511,9 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys)
struct lyd_node *inner;
struct lyd_node *node = NULL;
enum nb_error ret;
- uint i, len;
- char *tmp;
+ const char *cur;
+ char *xpath = NULL;
+ uint i, len, prevlen, xplen;
/*
* Obtain the trunk of the data node tree of the query.
@@ -471,8 +523,8 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys)
* node could be identified (e.g., a list-node name with no keys).
*/
- ret = nb_op_xpath_to_trunk(ys->xpath, &node);
- if (ret || !node) {
+ ret = nb_op_xpath_to_trunk(ys->xpath, &xpath, &node);
+ if (ret != NB_OK || !node) {
flog_warn(EC_LIB_LIBYANG,
"%s: can't instantiate concrete path using xpath: %s",
__func__, ys->xpath);
@@ -482,12 +534,18 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys)
}
/* Move up to the container if on a leaf currently. */
- if (node &&
- !CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST)) {
+ if (!CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST)) {
struct lyd_node *leaf = node;
node = &node->parent->node;
+ /* Have to trim the leaf from the xpath now */
+ ret = yang_xpath_pop_node(xpath);
+ if (ret != NB_OK) {
+ darr_free(xpath);
+ return ret;
+ }
+
/*
* If the leaf is not a key, delete it, because it has a wrong
* empty value.
@@ -495,10 +553,7 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys)
if (!lysc_is_key(leaf->schema))
lyd_free_tree(leaf);
}
- assert(!node ||
- CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST));
- if (!node)
- return NB_ERR_NOT_FOUND;
+ assert(CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST));
inner = node;
for (len = 1; inner->parent; len++)
@@ -511,26 +566,42 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys)
* -- save the prefix length.
*/
inner = node;
+ prevlen = 0;
+ xplen = strlen(xpath);
+ darr_free(xpath);
for (i = len; i > 0; i--, inner = &inner->parent->node) {
ni = &ys->node_infos[i - 1];
ni->inner = inner;
ni->schema = inner->schema;
+
+ if (i == len) {
+ prevlen = xplen;
+ ni->xpath_len = prevlen;
+ continue;
+ }
+
/*
- * NOTE: we could build this by hand with a litte more effort,
- * but this simple implementation works and won't be expensive
- * since the number of nodes is small and only done once per
- * query.
+ * The only predicates we should have are concrete ones at this
+ * point b/c of nb_op_xpath_to_trunk() above, so we aren't in
+ * danger of finding a division symbol in the path, only '/'s
+ * inside strings which frrstr_back_to_char skips over.
*/
- tmp = yang_dnode_get_path(inner, NULL, 0);
- ni->xpath_len = strlen(tmp);
- /* Replace users supplied xpath with the libyang returned value */
- if (i == len)
- darr_in_strdup(ys->xpath, tmp);
+ assert(prevlen == xplen || ys->xpath[prevlen] == '/');
+ if (prevlen != xplen)
+ ys->xpath[prevlen] = 0;
+ cur = frrstr_back_to_char(ys->xpath, '/');
+ if (prevlen != xplen)
+ ys->xpath[prevlen] = '/';
+
+ if (!cur || cur == ys->xpath) {
+ flog_warn(EC_LIB_LIBYANG, "%s: error tokenizing query xpath: %s", __func__,
+ ys->xpath);
+ return NB_ERR_VALIDATION;
+ }
- /* The prefix must match the prefix of the stored xpath */
- assert(!strncmp(tmp, ys->xpath, ni->xpath_len));
- free(tmp);
+ prevlen = cur - ys->xpath;
+ ni->xpath_len = prevlen;
}
/*
@@ -584,6 +655,10 @@ static enum nb_error nb_op_iter_leaf(struct nb_op_yield_state *ys,
if (lysc_is_key(snode))
return NB_OK;
+ /* Check for new simple get */
+ if (nb_node->cbs.get)
+ return nb_node->cbs.get(nb_node, ni->list_entry, ni->inner);
+
data = nb_callback_get_elem(nb_node, xpath, ni->list_entry);
if (data == NULL)
return NB_OK;
@@ -617,6 +692,10 @@ static enum nb_error nb_op_iter_leaflist(struct nb_op_yield_state *ys,
if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
return NB_OK;
+ /* Check for new simple get */
+ if (nb_node->cbs.get)
+ return nb_node->cbs.get(nb_node, ni->list_entry, ni->inner);
+
do {
struct yang_data *data;
@@ -1483,17 +1562,13 @@ static void nb_op_walk_continue(struct event *thread)
ret = __walk(ys, true);
if (ret == NB_YIELD) {
- if (nb_op_yield(ys) != NB_OK) {
- if (ys->should_batch)
- goto stopped;
- else
- goto finish;
- }
- return;
+ ret = nb_op_yield(ys);
+ if (ret == NB_OK)
+ return;
}
finish:
+ assert(ret != NB_YIELD);
(*ys->finish)(ys_root_node(ys), ys->finish_arg, ret);
-stopped:
nb_op_free_yield_state(ys, false);
}
@@ -1552,6 +1627,13 @@ static void nb_op_trim_yield_state(struct nb_op_yield_state *ys)
(int)darr_lasti(ys->node_infos));
}
+/**
+ * nb_op_yield() - Yield during the walk.
+ * @ys: the yield state tracking the walk.
+ *
+ * Return: Any error from the `ys->finish` callback, which should terminate the
+ * walk. Otherwise (i.e., when `ys->should_batch` is false) always returns NB_OK.
+ */
static enum nb_error nb_op_yield(struct nb_op_yield_state *ys)
{
enum nb_error ret;
@@ -1752,6 +1834,20 @@ bool nb_oper_is_yang_lib_query(const char *xpath)
return strlen(xpath) > liblen;
}
+void *nb_oper_walk_finish_arg(void *walk)
+{
+ struct nb_op_yield_state *ys = walk;
+
+ return ys->finish_arg;
+}
+
+void *nb_oper_walk_cb_arg(void *walk)
+{
+ struct nb_op_yield_state *ys = walk;
+
+ return ys->cb_arg;
+}
+
void *nb_oper_walk(const char *xpath, struct yang_translator *translator,
uint32_t flags, bool should_batch, nb_oper_data_cb cb,
void *cb_arg, nb_oper_data_finish_cb finish, void *finish_arg)
@@ -1764,17 +1860,13 @@ void *nb_oper_walk(const char *xpath, struct yang_translator *translator,
ret = nb_op_walk_start(ys);
if (ret == NB_YIELD) {
- if (nb_op_yield(ys) != NB_OK) {
- if (ys->should_batch)
- goto stopped;
- else
- goto finish;
- }
- return ys;
+ ret = nb_op_yield(ys);
+ if (ret == NB_OK)
+ return ys;
}
-finish:
+
+ assert(ret != NB_YIELD);
(void)(*ys->finish)(ys_root_node(ys), ys->finish_arg, ret);
-stopped:
nb_op_free_yield_state(ys, false);
return NULL;
}
@@ -1826,6 +1918,87 @@ enum nb_error nb_oper_iterate_legacy(const char *xpath,
return ret;
}
+static const char *__adjust_ptr(struct lysc_node_leaf *lsnode, const char *valuep, size_t *size)
+{
+ switch (lsnode->type->basetype) {
+ case LY_TYPE_INT8:
+ case LY_TYPE_UINT8:
+#ifdef BIG_ENDIAN
+ valuep += 7;
+#endif
+ *size = 1;
+ break;
+ case LY_TYPE_INT16:
+ case LY_TYPE_UINT16:
+#ifdef BIG_ENDIAN
+ valuep += 6;
+#endif
+ *size = 2;
+ break;
+ case LY_TYPE_INT32:
+ case LY_TYPE_UINT32:
+#ifdef BIG_ENDIAN
+ valuep += 4;
+#endif
+ *size = 4;
+ break;
+ case LY_TYPE_INT64:
+ case LY_TYPE_UINT64:
+ *size = 8;
+ break;
+ case LY_TYPE_UNKNOWN:
+ case LY_TYPE_BINARY:
+ case LY_TYPE_STRING:
+ case LY_TYPE_BITS:
+ case LY_TYPE_BOOL:
+ case LY_TYPE_DEC64:
+ case LY_TYPE_EMPTY:
+ case LY_TYPE_ENUM:
+ case LY_TYPE_IDENT:
+ case LY_TYPE_INST:
+ case LY_TYPE_LEAFREF:
+ case LY_TYPE_UNION:
+ default:
+ assert(0);
+ }
+ return valuep;
+}
+
+enum nb_error nb_oper_uint64_get(const struct nb_node *nb_node, const void *parent_list_entry,
+ struct lyd_node *parent)
+{
+ struct lysc_node_leaf *lsnode = (struct lysc_node_leaf *)nb_node->snode;
+ struct lysc_node *snode = &lsnode->node;
+ ssize_t offset = (ssize_t)nb_node->cbs.get_elem;
+ uint64_t ubigval = *(uint64_t *)((char *)parent_list_entry + offset);
+ const char *valuep;
+ size_t size;
+
+ valuep = __adjust_ptr(lsnode, (const char *)&ubigval, &size);
+ if (lyd_new_term_bin(parent, snode->module, snode->name, valuep, size, LYD_NEW_PATH_UPDATE,
+ NULL))
+ return NB_ERR_RESOURCE;
+ return NB_OK;
+}
+
+
+enum nb_error nb_oper_uint32_get(const struct nb_node *nb_node, const void *parent_list_entry,
+ struct lyd_node *parent)
+{
+ struct lysc_node_leaf *lsnode = (struct lysc_node_leaf *)nb_node->snode;
+ struct lysc_node *snode = &lsnode->node;
+ ssize_t offset = (ssize_t)nb_node->cbs.get_elem;
+ uint64_t ubigval = *(uint64_t *)((char *)parent_list_entry + offset);
+ const char *valuep;
+ size_t size;
+
+ valuep = __adjust_ptr(lsnode, (const char *)&ubigval, &size);
+ if (lyd_new_term_bin(parent, snode->module, snode->name, valuep, size, LYD_NEW_PATH_UPDATE,
+ NULL))
+ return NB_ERR_RESOURCE;
+ return NB_OK;
+}
+
void nb_oper_init(struct event_loop *loop)
{
event_loop = loop;
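A hedged sketch of the idea behind the generic getters above: the get_elem callback slot is (re)used to carry a byte offset into the daemon's list-entry structure, and nb_oper_uint32_get()/nb_oper_uint64_get() read the leaf value directly from that offset, with __adjust_ptr() selecting the right bytes for the leaf's YANG base type. The structure and the registration snippet below are assumptions for illustration only, not code from this patch.

struct my_neighbor {
	uint32_t ifindex;
	uint64_t rx_count;
};

/*
 * Hypothetical entry in a daemon's frr_yang_module_info table; the offset is
 * smuggled through the get_elem pointer exactly as the getters above expect:
 *
 *	{
 *		.xpath = "/frr-example:neighbors/neighbor/rx-count",
 *		.cbs = {
 *			.get = nb_oper_uint64_get,
 *			.get_elem = (void *)offsetof(struct my_neighbor, rx_count),
 *		},
 *	},
 */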
diff --git a/lib/plist.c b/lib/plist.c
index 6950ab5761..713eee25ed 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -1536,7 +1536,6 @@ int prefix_bgp_show_prefix_list(struct vty *vty, afi_t afi, char *name,
if (use_json) {
json = json_object_new_object();
json_prefix = json_object_new_object();
- json_list = json_object_new_object();
json_object_int_add(json_prefix, "prefixListCounter",
plist->count);
@@ -1544,10 +1543,7 @@ int prefix_bgp_show_prefix_list(struct vty *vty, afi_t afi, char *name,
plist->name);
for (pentry = plist->head; pentry; pentry = pentry->next) {
- struct prefix *p = &pentry->prefix;
- char buf_a[BUFSIZ];
-
- snprintf(buf_a, sizeof(buf_a), "%pFX", p);
+ json_list = json_object_new_object();
json_object_int_add(json_list, "seq", pentry->seq);
json_object_string_add(json_list, "seqPrefixListType",
@@ -1560,7 +1556,7 @@ int prefix_bgp_show_prefix_list(struct vty *vty, afi_t afi, char *name,
json_object_int_add(json_list, "le",
pentry->le);
- json_object_object_add(json_prefix, buf_a, json_list);
+ json_object_object_addf(json_prefix, json_list, "%pFX", &pentry->prefix);
}
if (afi == AFI_IP)
json_object_object_add(json, "ipPrefixList",
diff --git a/lib/privs.c b/lib/privs.c
index 717a2e48d6..e7df383e5d 100644
--- a/lib/privs.c
+++ b/lib/privs.c
@@ -179,7 +179,7 @@ static pset_t *zcaps2sys(zebra_capabilities_t *zcaps, int num)
for (i = 0; i < num; i++)
count += cap_map[zcaps[i]].num;
- if ((syscaps = XCALLOC(MTYPE_PRIVS, (sizeof(pset_t) * num))) == NULL) {
+ if ((syscaps = XCALLOC(MTYPE_PRIVS, sizeof(pset_t))) == NULL) {
fprintf(stderr, "%s: could not allocate syscaps!", __func__);
return NULL;
}
@@ -210,10 +210,11 @@ int zprivs_change_caps(zebra_privs_ops_t op)
{
cap_flag_value_t cflag;
- /* should be no possibility of being called without valid caps */
- assert(zprivs_state.syscaps_p && zprivs_state.caps);
- if (!(zprivs_state.syscaps_p && zprivs_state.caps))
- exit(1);
+ /* Called without valid caps - just return. Not every daemon needs
+ * privs.
+ */
+ if (zprivs_state.syscaps_p == NULL || zprivs_state.caps == NULL)
+ return 0;
if (op == ZPRIVS_RAISE)
cflag = CAP_SET;
diff --git a/lib/route_types.pl b/lib/route_types.pl
index c75a866964..834cb822d2 100755
--- a/lib/route_types.pl
+++ b/lib/route_types.pl
@@ -127,9 +127,12 @@ printf "#define SHOW_ROUTE_V6_HEADER \\\n%s\n", codelist(@protosv6);
print "\n";
sub collect {
- my ($daemon, $ipv4, $ipv6, $any) = @_;
+ my ($daemon, $ipv4, $ipv6, $any, $ip_prot) = @_;
my (@names, @help) = ((), ());
for my $p (@protos) {
+ next if ($ip_prot == 1 && $daemon eq "zebra" && $protodetail{$p}->{"cname"} eq "kernel");
+ next if ($ip_prot == 1 && $daemon eq "zebra" && $protodetail{$p}->{"cname"} eq "connected");
+ next if ($ip_prot == 1 && $daemon eq "zebra" && $protodetail{$p}->{"cname"} eq "local");
next if ($protodetail{$p}->{"daemon"} eq $daemon && $daemon ne "zebra");
next if ($protodetail{$p}->{"restrict2"} ne "" &&
$protodetail{$p}->{"restrict2"} ne $daemon);
@@ -151,24 +154,24 @@ for my $daemon (sort keys %daemons) {
next unless ($daemons{$daemon}->{"ipv4"} || $daemons{$daemon}->{"ipv6"});
printf "/* %s */\n", $daemon;
if ($daemons{$daemon}->{"ipv4"} && $daemons{$daemon}->{"ipv6"}) {
- my ($names, $help) = collect($daemon, 1, 1, 0);
+ my ($names, $help) = collect($daemon, 1, 1, 0, 0);
printf "#define FRR_REDIST_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_REDIST_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
- ($names, $help) = collect($daemon, 1, 0, 0);
+ ($names, $help) = collect($daemon, 1, 0, 0, 0);
printf "#define FRR_IP_REDIST_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP_REDIST_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
- ($names, $help) = collect($daemon, 0, 1, 0);
+ ($names, $help) = collect($daemon, 0, 1, 0, 0);
printf "#define FRR_IP6_REDIST_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP6_REDIST_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
if ($daemon eq "zebra") {
- ($names, $help) = collect($daemon, 1, 0, 1);
+ ($names, $help) = collect($daemon, 1, 0, 1, 1);
printf "#define FRR_IP_PROTOCOL_MAP_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP_PROTOCOL_MAP_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
- ($names, $help) = collect($daemon, 0, 1, 1);
+ ($names, $help) = collect($daemon, 0, 1, 1, 1);
printf "#define FRR_IP6_PROTOCOL_MAP_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP6_PROTOCOL_MAP_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
}
diff --git a/lib/route_types.txt b/lib/route_types.txt
index 93cbc36e97..b5f8b6fdf3 100644
--- a/lib/route_types.txt
+++ b/lib/route_types.txt
@@ -88,7 +88,7 @@ ZEBRA_ROUTE_VRRP, vrrp, vrrpd, '-', 0, 0, 0, "VRRP", vr
ZEBRA_ROUTE_NHG, zebra, none, '-', 0, 0, 0, "Nexthop Group", none
ZEBRA_ROUTE_SRTE, srte, none, '-', 0, 0, 0, "SR-TE", none
ZEBRA_ROUTE_TABLE_DIRECT, table-direct, zebra, 't', 1, 1, 1, "Table-Direct", zebra
-ZEBRA_ROUTE_ALL, wildcard, none, '-', 0, 0, 0, "-", none
+ZEBRA_ROUTE_ALL, any, none, '-', 0, 0, 0, "-", none
## help strings
diff --git a/lib/routemap.h b/lib/routemap.h
index 8dcc17ecc3..1c02348313 100644
--- a/lib/routemap.h
+++ b/lib/routemap.h
@@ -310,6 +310,7 @@ DECLARE_QOBJ_TYPE(route_map);
(strmatch(C, "frr-bgp-route-map:ip-route-source"))
#define IS_MATCH_ROUTE_SRC_PL(C) \
(strmatch(C, "frr-bgp-route-map:ip-route-source-prefix-list"))
+#define IS_MATCH_COMMUNITY_LIMIT(C) (strmatch(C, "frr-bgp-route-map:match-community-limit"))
#define IS_MATCH_COMMUNITY(C) \
(strmatch(C, "frr-bgp-route-map:match-community"))
#define IS_MATCH_LCOMMUNITY(C) \
diff --git a/lib/routemap_cli.c b/lib/routemap_cli.c
index 69b942064b..eb01709707 100644
--- a/lib/routemap_cli.c
+++ b/lib/routemap_cli.c
@@ -810,6 +810,10 @@ void route_map_condition_show(struct vty *vty, const struct lyd_node *dnode,
yang_dnode_get_string(
dnode,
"./rmap-match-condition/frr-bgp-route-map:list-name"));
+ } else if (IS_MATCH_COMMUNITY_LIMIT(condition)) {
+ vty_out(vty, " match community-limit %s\n",
+ yang_dnode_get_string(dnode,
+ "./rmap-match-condition/frr-bgp-route-map:community-limit"));
} else if (IS_MATCH_COMMUNITY(condition)) {
vty_out(vty, " match community %s",
yang_dnode_get_string(
diff --git a/lib/sigevent.h b/lib/sigevent.h
index 0b07f594c1..2c51ba3767 100644
--- a/lib/sigevent.h
+++ b/lib/sigevent.h
@@ -45,6 +45,33 @@ bool frr_sigevent_check(sigset_t *setp);
/* check whether there are signals to handle, process any found */
extern int frr_sigevent_process(void);
+/* Ensure we don't handle "application-type" signals on a secondary thread by
+ * blocking these signals when creating threads.
+ *
+ * NB: SIGSEGV, SIGABRT, etc. must be allowed on all threads or we get no
+ * crashlogs. Since signals vary a little bit between platforms, below is a
+ * list of signals known to belong on the main thread. Any unknown signals
+ * should stay thread-local.
+ */
+static inline void frr_sigset_add_mainonly(sigset_t *blocksigs)
+{
+ /* signals we actively handle */
+ sigaddset(blocksigs, SIGHUP);
+ sigaddset(blocksigs, SIGINT);
+ sigaddset(blocksigs, SIGTERM);
+ sigaddset(blocksigs, SIGUSR1);
+
+ /* signals we don't actively use but that semantically belong */
+ sigaddset(blocksigs, SIGUSR2);
+ sigaddset(blocksigs, SIGQUIT);
+ sigaddset(blocksigs, SIGCHLD);
+ sigaddset(blocksigs, SIGPIPE);
+ sigaddset(blocksigs, SIGTSTP);
+ sigaddset(blocksigs, SIGTTIN);
+ sigaddset(blocksigs, SIGTTOU);
+ sigaddset(blocksigs, SIGWINCH);
+}
+
#ifdef __cplusplus
}
#endif
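A hedged usage sketch for frr_sigset_add_mainonly(): block the "application" signals around pthread_create() so the new thread inherits a mask that leaves them to the main thread, then restore the caller's mask. The spawn_worker() wrapper is illustrative; only the POSIX calls and the helper above are assumed.

#include <pthread.h>
#include <signal.h>

int spawn_worker(pthread_t *thread, void *(*fn)(void *), void *arg)
{
	sigset_t block, oldmask;
	int ret;

	sigemptyset(&block);
	frr_sigset_add_mainonly(&block);

	/* new threads inherit the signal mask of their creator */
	pthread_sigmask(SIG_BLOCK, &block, &oldmask);
	ret = pthread_create(thread, NULL, fn, arg);
	pthread_sigmask(SIG_SETMASK, &oldmask, NULL);

	return ret;
}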
diff --git a/lib/srcdest_table.c b/lib/srcdest_table.c
index 3247a0372c..7203c8ac8e 100644
--- a/lib/srcdest_table.c
+++ b/lib/srcdest_table.c
@@ -309,13 +309,3 @@ static ssize_t printfrr_rn(struct fbuf *buf, struct printfrr_eargs *ea,
cbuf, sizeof(cbuf));
return bputs(buf, cbuf);
}
-
-struct route_table *srcdest_srcnode_table(struct route_node *rn)
-{
- if (rnode_is_dstnode(rn)) {
- struct srcdest_rnode *srn = srcdest_rnode_from_rnode(rn);
-
- return srn->src_table;
- }
- return NULL;
-}
diff --git a/lib/srcdest_table.h b/lib/srcdest_table.h
index ff97f9b735..a699d4a11b 100644
--- a/lib/srcdest_table.h
+++ b/lib/srcdest_table.h
@@ -87,8 +87,6 @@ static inline void *srcdest_rnode_table_info(struct route_node *rn)
return route_table_get_info(srcdest_rnode_table(rn));
}
-extern struct route_table *srcdest_srcnode_table(struct route_node *rn);
-
#ifdef __cplusplus
}
#endif
diff --git a/lib/srv6.h b/lib/srv6.h
index 9a041e3d85..7e4fb97ad1 100644
--- a/lib/srv6.h
+++ b/lib/srv6.h
@@ -22,6 +22,8 @@
#define SRV6_SID_FORMAT_NAME_SIZE 512
+#define DEFAULT_SRV6_IFNAME "sr0"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -186,6 +188,42 @@ enum srv6_endpoint_behavior_codepoint {
SRV6_ENDPOINT_BEHAVIOR_OPAQUE = 0xFFFF,
};
+/*
+ * Convert SRv6 endpoint behavior codepoints to human-friendly string.
+ */
+static inline const char *
+srv6_endpoint_behavior_codepoint2str(enum srv6_endpoint_behavior_codepoint behavior)
+{
+ switch (behavior) {
+ case SRV6_ENDPOINT_BEHAVIOR_RESERVED:
+ return "Reserved";
+ case SRV6_ENDPOINT_BEHAVIOR_END:
+ return "End";
+ case SRV6_ENDPOINT_BEHAVIOR_END_X:
+ return "End.X";
+ case SRV6_ENDPOINT_BEHAVIOR_END_DT6:
+ return "End.DT6";
+ case SRV6_ENDPOINT_BEHAVIOR_END_DT4:
+ return "End.DT4";
+ case SRV6_ENDPOINT_BEHAVIOR_END_DT46:
+ return "End.DT46";
+ case SRV6_ENDPOINT_BEHAVIOR_END_NEXT_CSID:
+ return "uN";
+ case SRV6_ENDPOINT_BEHAVIOR_END_X_NEXT_CSID:
+ return "uA";
+ case SRV6_ENDPOINT_BEHAVIOR_END_DT6_USID:
+ return "uDT6";
+ case SRV6_ENDPOINT_BEHAVIOR_END_DT4_USID:
+ return "uDT4";
+ case SRV6_ENDPOINT_BEHAVIOR_END_DT46_USID:
+ return "uDT46";
+ case SRV6_ENDPOINT_BEHAVIOR_OPAQUE:
+ return "Opaque";
+ }
+
+ return "Unspec";
+}
+
struct nexthop_srv6 {
/* SRv6 localsid info for Endpoint-behaviour */
enum seg6local_action_t seg6local_action;
diff --git a/lib/subdir.am b/lib/subdir.am
index 4bcce9a2b0..a975eb2fc4 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -84,6 +84,7 @@ lib_libfrr_la_SOURCES = \
lib/northbound.c \
lib/northbound_cli.c \
lib/northbound_db.c \
+ lib/northbound_notif.c \
lib/northbound_oper.c \
lib/ntop.c \
lib/openbsd-tree.c \
@@ -144,6 +145,7 @@ lib_libfrr_la_SOURCES = \
nodist_lib_libfrr_la_SOURCES = \
yang/frr-affinity-map.yang.c \
+ yang/frr-backend.yang.c \
yang/frr-filter.yang.c \
yang/frr-if-rmap.yang.c \
yang/frr-interface.yang.c \
diff --git a/lib/vrf.c b/lib/vrf.c
index 31632a80d5..0b39d93602 100644
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -22,6 +22,9 @@
#include "northbound.h"
#include "northbound_cli.h"
+/* Set by the owner (zebra). */
+bool vrf_notify_oper_changes;
+
/* default VRF name value used when VRF backend is not NETNS */
#define VRF_DEFAULT_NAME_INTERNAL "default"
@@ -39,8 +42,7 @@ RB_GENERATE(vrf_name_head, vrf, name_entry, vrf_name_compare);
struct vrf_id_head vrfs_by_id = RB_INITIALIZER(&vrfs_by_id);
struct vrf_name_head vrfs_by_name = RB_INITIALIZER(&vrfs_by_name);
-static int vrf_backend;
-static int vrf_backend_configured;
+static int vrf_backend = VRF_BACKEND_VRF_LITE;
static char vrf_default_name[VRF_NAMSIZ] = VRF_DEFAULT_NAME_INTERNAL;
/*
@@ -105,6 +107,19 @@ int vrf_switchback_to_initial(void)
return ret;
}
+static void vrf_update_state(struct vrf *vrf)
+{
+ if (!vrf->state || !vrf_notify_oper_changes)
+ return;
+
+ /*
+	 * Remove the top-level container update once we have patch support;
+	 * for now, though, it keeps us from generating 2 separate REPLACE messages.
+ */
+ nb_op_updatef(vrf->state, "id", "%u", vrf->vrf_id);
+ nb_op_update(vrf->state, "active", CHECK_FLAG(vrf->status, VRF_ACTIVE) ? "true" : "false");
+}
+
/* Get a VRF. If not found, create one.
* Arg:
* name - The name of the vrf. May be NULL if unknown.
@@ -155,16 +170,32 @@ struct vrf *vrf_get(vrf_id_t vrf_id, const char *name)
/* Set name */
if (name && vrf->name[0] != '\0' && strcmp(name, vrf->name)) {
- /* update the vrf name */
+ /* vrf name has changed */
+ if (vrf_notify_oper_changes) {
+ nb_op_update_delete_pathf(NULL, "/frr-vrf:lib/vrf[name=\"%s\"]", vrf->name);
+ lyd_free_all(vrf->state);
+ }
RB_REMOVE(vrf_name_head, &vrfs_by_name, vrf);
- strlcpy(vrf->data.l.netns_name,
- name, NS_NAMSIZ);
+ strlcpy(vrf->data.l.netns_name, name, NS_NAMSIZ);
strlcpy(vrf->name, name, sizeof(vrf->name));
RB_INSERT(vrf_name_head, &vrfs_by_name, vrf);
+ /* New state with new name */
+ if (vrf_notify_oper_changes)
+ vrf->state = nb_op_update_pathf(NULL, "/frr-vrf:lib/vrf[name=\"%s\"]/state",
+ NULL, vrf->name);
} else if (name && vrf->name[0] == '\0') {
strlcpy(vrf->name, name, sizeof(vrf->name));
RB_INSERT(vrf_name_head, &vrfs_by_name, vrf);
+
+ /* We have a name now so we can have state */
+ if (vrf_notify_oper_changes)
+ vrf->state = nb_op_update_pathf(NULL, "/frr-vrf:lib/vrf[name=\"%s\"]/state",
+ NULL, vrf->name);
}
+ /* Update state before hook call */
+ if (vrf->state)
+ vrf_update_state(vrf);
+
if (new &&vrf_master.vrf_new_hook)
(*vrf_master.vrf_new_hook)(vrf);
@@ -208,6 +239,7 @@ struct vrf *vrf_update(vrf_id_t new_vrf_id, const char *name)
vrf->vrf_id = new_vrf_id;
RB_INSERT(vrf_id_head, &vrfs_by_id, vrf);
+ vrf_update_state(vrf);
} else {
/*
@@ -254,6 +286,11 @@ void vrf_delete(struct vrf *vrf)
if (vrf->name[0] != '\0')
RB_REMOVE(vrf_name_head, &vrfs_by_name, vrf);
+ if (vrf_notify_oper_changes) {
+ nb_op_update_delete_pathf(NULL, "/frr-vrf:lib/vrf[name=\"%s\"]", vrf->name);
+ lyd_free_all(vrf->state);
+ }
+
XFREE(MTYPE_VRF, vrf);
}
@@ -282,6 +319,8 @@ int vrf_enable(struct vrf *vrf)
SET_FLAG(vrf->status, VRF_ACTIVE);
+ vrf_update_state(vrf);
+
if (vrf_master.vrf_enable_hook)
(*vrf_master.vrf_enable_hook)(vrf);
@@ -307,6 +346,8 @@ void vrf_disable(struct vrf *vrf)
UNSET_FLAG(vrf->status, VRF_ACTIVE);
+ vrf_update_state(vrf);
+
if (debug_vrf)
zlog_debug("VRF %s(%u) is to be disabled.", vrf->name,
vrf->vrf_id);
@@ -540,15 +581,6 @@ void vrf_init(int (*create)(struct vrf *), int (*enable)(struct vrf *),
"vrf_init: failed to create the default VRF!");
exit(1);
}
- if (vrf_is_backend_netns()) {
- struct ns *ns;
-
- strlcpy(default_vrf->data.l.netns_name,
- VRF_DEFAULT_NAME, NS_NAMSIZ);
- ns = ns_lookup(NS_DEFAULT);
- ns->vrf_ctxt = default_vrf;
- default_vrf->ns_ctxt = ns;
- }
/* Enable the default VRF. */
if (!vrf_enable(default_vrf)) {
@@ -612,8 +644,6 @@ int vrf_is_backend_netns(void)
int vrf_get_backend(void)
{
- if (!vrf_backend_configured)
- return VRF_BACKEND_UNKNOWN;
return vrf_backend;
}
@@ -621,7 +651,6 @@ int vrf_configure_backend(enum vrf_backend_type backend)
{
/* Work around issue in old gcc */
switch (backend) {
- case VRF_BACKEND_UNKNOWN:
case VRF_BACKEND_NETNS:
case VRF_BACKEND_VRF_LITE:
break;
@@ -630,7 +659,6 @@ int vrf_configure_backend(enum vrf_backend_type backend)
}
vrf_backend = backend;
- vrf_backend_configured = 1;
return 0;
}
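
Usage note (not part of the diff): the vrf.c hunks above establish a pattern where the model owner (zebra, which sets vrf_notify_oper_changes) keeps a per-VRF `state` subtree and pushes leaf values into it. A hedged sketch of that pattern, using only the nb_op_update*() calls that appear in the hunks; the wrapper function itself is illustrative.

/* Illustrative only: mirrors the vrf_get()/vrf_update_state() pattern above. */
static void example_vrf_state_refresh(struct vrf *vrf)
{
	if (!vrf_notify_oper_changes)
		return;

	/* Create the per-VRF state subtree once the VRF has a name. */
	if (!vrf->state)
		vrf->state = nb_op_update_pathf(NULL,
						"/frr-vrf:lib/vrf[name=\"%s\"]/state",
						NULL, vrf->name);

	/* Push current values; listeners receive them as oper-state updates. */
	nb_op_updatef(vrf->state, "id", "%u", vrf->vrf_id);
	nb_op_update(vrf->state, "active",
		     CHECK_FLAG(vrf->status, VRF_ACTIVE) ? "true" : "false");
}
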
diff --git a/lib/vrf.h b/lib/vrf.h
index 3ebb6ddf53..46d72910c2 100644
--- a/lib/vrf.h
+++ b/lib/vrf.h
@@ -80,6 +80,8 @@ struct vrf {
/* Back pointer to namespace context */
void *ns_ctxt;
+ struct lyd_node *state;
+
QOBJ_FIELDS;
};
RB_HEAD(vrf_id_head, vrf);
@@ -92,7 +94,6 @@ DECLARE_QOBJ_TYPE(vrf);
enum vrf_backend_type {
VRF_BACKEND_VRF_LITE,
VRF_BACKEND_NETNS,
- VRF_BACKEND_UNKNOWN,
VRF_BACKEND_MAX,
};
@@ -299,6 +300,7 @@ extern void vrf_disable(struct vrf *vrf);
extern int vrf_enable(struct vrf *vrf);
extern void vrf_delete(struct vrf *vrf);
+extern bool vrf_notify_oper_changes;
extern const struct frr_yang_module_info frr_vrf_info;
extern const struct frr_yang_module_info frr_vrf_cli_info;
diff --git a/lib/yang.c b/lib/yang.c
index b847b8b77b..dd48d8861b 100644
--- a/lib/yang.c
+++ b/lib/yang.c
@@ -14,6 +14,7 @@
#include <libyang/version.h>
#include "northbound.h"
#include "frrstr.h"
+#include "darr.h"
#include "lib/config_paths.h"
@@ -680,6 +681,116 @@ void yang_dnode_rpc_output_add(struct lyd_node *output, const char *xpath,
assert(err == LY_SUCCESS);
}
+struct lyd_node *yang_state_new(struct lyd_node *tree, const char *path, const char *value)
+{
+ struct lyd_node *dnode, *parent;
+ LY_ERR err;
+
+ err = lyd_new_path2(tree, ly_native_ctx, path, value, 0, 0, LYD_NEW_PATH_UPDATE, &parent,
+ &dnode);
+ assert(err == LY_SUCCESS);
+
+ /*
+	 * If the node already exists and isn't updated, the returned dnode will
+	 * be NULL, so we need to look it up. Even when a dnode is returned, it
+	 * may be the first newly created node (e.g., a container along the path)
+	 * rather than the node for the path itself, so we always do the lookup.
+ */
+ err = lyd_find_path(tree ?: parent, path, false, &dnode);
+ assert(err == LY_SUCCESS);
+
+ return dnode;
+}
+
+void yang_state_delete(struct lyd_node *tree, const char *path)
+{
+ LY_ERR err;
+
+ if (!tree)
+ return;
+
+ if (path) {
+ err = lyd_find_path(tree, path, false, &tree);
+ if (err != LY_SUCCESS) {
+ zlog_info("State %s has already been deleted", path);
+ return;
+ }
+ }
+ lyd_free_tree(tree);
+}
+
+PRINTFRR(2, 0)
+struct lyd_node *yang_state_new_vpathf(struct lyd_node *tree, const char *path_fmt,
+ const char *value, va_list ap)
+{
+ struct lyd_node *dnode;
+ char *path;
+
+ path = darr_vsprintf(path_fmt, ap);
+ dnode = yang_state_new(tree, path, value);
+ darr_free(path);
+
+ return dnode;
+}
+
+struct lyd_node *yang_state_new_pathf(struct lyd_node *tree, const char *path_fmt,
+ const char *value, ...)
+{
+ struct lyd_node *dnode;
+ va_list ap;
+
+ va_start(ap, value);
+ dnode = yang_state_new_vpathf(tree, path_fmt, value, ap);
+ va_end(ap);
+
+ return dnode;
+}
+
+PRINTFRR(2, 0)
+void yang_state_delete_vpathf(struct lyd_node *tree, const char *path_fmt, va_list ap)
+{
+ char *path;
+
+ path = darr_vsprintf(path_fmt, ap);
+ yang_state_delete(tree, path);
+ darr_free(path);
+}
+
+void yang_state_delete_pathf(struct lyd_node *tree, const char *path_fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, path_fmt);
+ yang_state_delete_vpathf(tree, path_fmt, ap);
+ va_end(ap);
+}
+
+PRINTFRR(3, 0)
+struct lyd_node *yang_state_vnewf(struct lyd_node *tree, const char *path, const char *val_fmt,
+ va_list ap)
+{
+ struct lyd_node *dnode;
+ char *value;
+
+ value = darr_vsprintf(val_fmt, ap);
+ dnode = yang_state_new(tree, path, value);
+ darr_free(value);
+
+ return dnode;
+}
+
+struct lyd_node *yang_state_newf(struct lyd_node *tree, const char *path, const char *val_fmt, ...)
+{
+ struct lyd_node *dnode;
+ va_list ap;
+
+ va_start(ap, val_fmt);
+ dnode = yang_state_vnewf(tree, path, val_fmt, ap);
+ va_end(ap);
+
+ return dnode;
+}
+
struct yang_data *yang_data_new(const char *xpath, const char *value)
{
struct yang_data *data;
@@ -763,6 +874,60 @@ static void ly_zlog_cb(LY_LOG_LEVEL level, const char *msg, const char *data_pat
zlog(priority, "libyang: %s", msg);
}
+LY_ERR yang_parse_data(const char *xpath, LYD_FORMAT format, bool as_subtree, bool is_oper,
+ bool validate, const char *data, struct lyd_node **tree)
+{
+ struct ly_in *in = NULL;
+ struct lyd_node *subtree = NULL;
+ uint32_t parse_options = LYD_PARSE_STRICT | LYD_PARSE_ONLY;
+ uint32_t validate_options = LYD_VALIDATE_PRESENT;
+ LY_ERR err;
+
+ err = ly_in_new_memory(data, &in);
+ if (err != LY_SUCCESS)
+ return err;
+
+ if (as_subtree) {
+ struct lyd_node *parent;
+
+ /*
+ * Create the subtree branch from root using the xpath. This
+ * will be used below to parse the data rooted at the subtree --
+ * a common YANG JSON technique (vs XML which starts all
+ * data trees from the root).
+ */
+ err = lyd_new_path2(NULL, ly_native_ctx, xpath, NULL, 0, 0, 0, &parent, &subtree);
+ if (err != LY_SUCCESS)
+ goto done;
+ err = lyd_find_path(parent, xpath, false, &subtree);
+ if (err != LY_SUCCESS)
+ goto done;
+ }
+
+ if (is_oper)
+ validate_options |= LYD_VALIDATE_OPERATIONAL;
+
+#ifdef LYD_VALIDATE_NOT_FINAL
+ if (!validate)
+ validate_options |= LYD_VALIDATE_NOT_FINAL;
+#endif
+
+ err = lyd_parse_data(ly_native_ctx, subtree, in, format, parse_options, validate_options,
+ tree);
+ if (err == LY_SUCCESS && subtree)
+ *tree = subtree;
+done:
+ ly_in_free(in, 0);
+ if (err != LY_SUCCESS) {
+ if (*tree)
+ lyd_free_all(*tree);
+ else if (subtree)
+ lyd_free_all(subtree);
+ *tree = NULL;
+ }
+ return err;
+}
+
LY_ERR yang_parse_notification(const char *xpath, LYD_FORMAT format,
const char *data, struct lyd_node **notif)
{
diff --git a/lib/yang.h b/lib/yang.h
index 52857ecf00..748f089037 100644
--- a/lib/yang.h
+++ b/lib/yang.h
@@ -535,6 +535,66 @@ extern struct lyd_node *yang_dnode_dup(const struct lyd_node *dnode);
*/
extern void yang_dnode_free(struct lyd_node *dnode);
+/**
+ * yang_state_new() - Create new state data.
+ * @tree: subtree @path is relative to or NULL in which case @path must be
+ * absolute.
+ * @path: The path of the state node to create.
+ * @value: The canonical value of the state.
+ *
+ * Return: The new libyang node.
+ */
+extern struct lyd_node *yang_state_new(struct lyd_node *tree, const char *path, const char *value);
+
+/**
+ * yang_state_delete() - Delete state data.
+ * @tree: subtree @path is relative to or NULL in which case @path must be
+ * absolute.
+ * @path: The path of the state node to delete, or NULL if @tree should just be
+ * deleted.
+ */
+extern void yang_state_delete(struct lyd_node *tree, const char *path);
+
+/**
+ * yang_state_new_pathf() - Create new state data.
+ * @tree: subtree @path_fmt is relative to or NULL in which case @path_fmt must
+ * be absolute.
+ * @path_fmt: The path format string of the state node to create.
+ * @value: The canonical value of the state.
+ * @...: The values to substitute into @path_fmt.
+ *
+ * Return: The new libyang node.
+ */
+extern struct lyd_node *yang_state_new_pathf(struct lyd_node *tree, const char *path_fmt,
+ const char *value, ...) PRINTFRR(2, 4);
+extern struct lyd_node *yang_state_new_vpathf(struct lyd_node *tree, const char *path_fmt,
+ const char *value, va_list ap);
+/**
+ * yang_state_delete_pathf() - Delete state data.
+ * @tree: subtree @path_fmt is relative to or NULL in which case @path_fmt must
+ * be absolute.
+ * @path: The path of the state node to delete.
+ * @...: The values to substitute into @path_fmt.
+ */
+extern void yang_state_delete_pathf(struct lyd_node *tree, const char *path_fmt, ...) PRINTFRR(2, 3);
+extern void yang_state_delete_vpathf(struct lyd_node *tree, const char *path_fmt, va_list ap);
+
+/**
+ * yang_state_newf() - Create new state data.
+ * @tree: subtree @path is relative to or NULL in which case @path must be
+ * absolute.
+ * @path: The path of the state node to create.
+ * @val_fmt: The value format string to set the canonical value of the state.
+ * @...: The values to substitute into @val_fmt.
+ *
+ * Return: The new libyang node.
+ */
+extern struct lyd_node *yang_state_newf(struct lyd_node *tree, const char *path,
+ const char *val_fmt, ...) PRINTFRR(3, 4);
+
+extern struct lyd_node *yang_state_vnewf(struct lyd_node *tree, const char *path,
+ const char *val_fmt, va_list ap);
+
/*
* Add a libyang data node to an RPC/action output container.
*
@@ -621,6 +681,25 @@ extern struct ly_ctx *yang_ctx_new_setup(bool embedded_modules, bool explicit_co
*/
extern void yang_debugging_set(bool enable);
+
+/*
+ * Parse YANG data.
+ *
+ * Args:
+ * xpath: xpath of the data.
+ * format: LYD_FORMAT of input data.
+ * as_subtree: parse the data as starting at the subtree identified by xpath.
+ * is_oper: parse as operational state; invalid data is allowed (a warning is logged).
+ * validate: validate the data (otherwise treat as non-final).
+ * data: input data.
+ * tree: pointer to the libyang data tree to store the parsed data.
+ *       If as_subtree is set, the pointer returned is to the subtree node
+ *       identified by xpath, but it is part of the full data tree with all
+ *       its parents.
+ */
+LY_ERR yang_parse_data(const char *xpath, LYD_FORMAT format, bool as_subtree, bool is_oper,
+ bool validate, const char *data, struct lyd_node **tree);
+
/*
* Parse a YANG notification.
*
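
Usage note (not part of the diff): a hedged sketch of calling the new yang_parse_data() to parse a JSON blob rooted at a subtree, roughly as the flag comments in yang.c describe. The wrapper function and its arguments are illustrative.

/* Illustrative only: parse oper-state JSON rooted at @xpath. */
static int example_parse_oper_json(const char *xpath, const char *json)
{
	struct lyd_node *tree = NULL;
	LY_ERR err;

	/* as_subtree: the JSON is rooted at @xpath, not at the model root.
	 * is_oper:    operational data; invalid nodes only log a warning.
	 * validate:   false, so the tree is treated as non-final.
	 */
	err = yang_parse_data(xpath, LYD_JSON, true, true, false, json, &tree);
	if (err != LY_SUCCESS)
		return -1;

	/* ... use the tree ... */
	lyd_free_all(tree);
	return 0;
}
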
diff --git a/lib/zclient.c b/lib/zclient.c
index 063944fd3b..5deea8f0cf 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -31,6 +31,7 @@
DEFINE_MTYPE_STATIC(LIB, ZCLIENT, "Zclient");
DEFINE_MTYPE_STATIC(LIB, REDIST_INST, "Redistribution instance IDs");
+DEFINE_MTYPE_STATIC(LIB, REDIST_TABLE_DIRECT, "Redistribution table direct");
/* Zebra client events. */
enum zclient_event { ZCLIENT_SCHEDULE, ZCLIENT_READ, ZCLIENT_CONNECT };
@@ -104,6 +105,11 @@ void zclient_free(struct zclient *zclient)
XFREE(MTYPE_ZCLIENT, zclient);
}
+static void redist_free_instance(void *data)
+{
+ XFREE(MTYPE_REDIST_INST, data);
+}
+
unsigned short *redist_check_instance(struct redist_proto *red,
unsigned short instance)
{
@@ -126,8 +132,10 @@ void redist_add_instance(struct redist_proto *red, unsigned short instance)
red->enabled = 1;
- if (!red->instances)
+ if (!red->instances) {
red->instances = list_new();
+ red->instances->del = redist_free_instance;
+ }
in = XMALLOC(MTYPE_REDIST_INST, sizeof(unsigned short));
*in = instance;
@@ -143,23 +151,100 @@ void redist_del_instance(struct redist_proto *red, unsigned short instance)
return;
listnode_delete(red->instances, id);
- XFREE(MTYPE_REDIST_INST, id);
+ red->instances->del(id);
if (!red->instances->count) {
red->enabled = 0;
list_delete(&red->instances);
}
}
-void redist_del_all_instances(struct redist_proto *red)
+static void redist_free_table_direct(void *data)
{
- struct listnode *ln, *nn;
- unsigned short *id;
+ XFREE(MTYPE_REDIST_TABLE_DIRECT, data);
+}
+
+struct redist_table_direct *redist_lookup_table_direct(const struct redist_proto *red,
+ const struct redist_table_direct *table)
+{
+ struct redist_table_direct *ntable;
+ struct listnode *node;
+
+ if (red->instances == NULL)
+ return NULL;
+
+ for (ALL_LIST_ELEMENTS_RO(red->instances, node, ntable)) {
+ if (table->vrf_id != ntable->vrf_id)
+ continue;
+ if (table->table_id != ntable->table_id)
+ continue;
+
+ return ntable;
+ }
+
+ return NULL;
+}
+
+bool redist_table_direct_has_id(const struct redist_proto *red, int table_id)
+{
+ struct redist_table_direct *table;
+ struct listnode *node;
+
+ if (red->instances == NULL)
+ return false;
+ for (ALL_LIST_ELEMENTS_RO(red->instances, node, table)) {
+ if (table->table_id != table_id)
+ continue;
+
+ return true;
+ }
+
+ return false;
+}
+
+void redist_add_table_direct(struct redist_proto *red, const struct redist_table_direct *table)
+{
+ struct redist_table_direct *ntable;
+
+ ntable = redist_lookup_table_direct(red, table);
+ if (ntable != NULL)
+ return;
+
+ if (red->instances == NULL) {
+ red->instances = list_new();
+ red->instances->del = redist_free_table_direct;
+ }
+
+ red->enabled = 1;
+
+ ntable = XCALLOC(MTYPE_REDIST_TABLE_DIRECT, sizeof(*ntable));
+ ntable->vrf_id = table->vrf_id;
+ ntable->table_id = table->table_id;
+ listnode_add(red->instances, ntable);
+}
+
+void redist_del_table_direct(struct redist_proto *red, const struct redist_table_direct *table)
+{
+ struct redist_table_direct *ntable;
+
+ ntable = redist_lookup_table_direct(red, table);
+ if (ntable == NULL)
+ return;
+
+ listnode_delete(red->instances, ntable);
+ red->instances->del(ntable);
+ if (red->instances->count == 0) {
+ red->enabled = 0;
+ list_delete(&red->instances);
+ }
+}
+
+void redist_del_all_instances(struct redist_proto *red)
+{
if (!red->instances)
return;
- for (ALL_LIST_ELEMENTS(red->instances, ln, nn, id))
- redist_del_instance(red, *id);
+ list_delete(&red->instances);
}
/* Stop zebra client services. */
@@ -480,6 +565,17 @@ enum zclient_send_status zclient_send_localsid(struct zclient *zclient,
return zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
}
+static void zclient_send_table_direct(struct zclient *zclient, afi_t afi, int type)
+{
+ struct redist_table_direct *table;
+ struct redist_proto *red = &zclient->mi_redist[afi][ZEBRA_ROUTE_TABLE_DIRECT];
+ struct listnode *node;
+
+ for (ALL_LIST_ELEMENTS_RO(red->instances, node, table))
+ zebra_redistribute_send(type, zclient, afi, ZEBRA_ROUTE_TABLE_DIRECT,
+ table->table_id, table->vrf_id);
+}
+
/* Send register requests to zebra daemon for the information in a VRF. */
void zclient_send_reg_requests(struct zclient *zclient, vrf_id_t vrf_id)
{
@@ -513,6 +609,12 @@ void zclient_send_reg_requests(struct zclient *zclient, vrf_id_t vrf_id)
if (!zclient->mi_redist[afi][i].enabled)
continue;
+ if (i == ZEBRA_ROUTE_TABLE_DIRECT) {
+ zclient_send_table_direct(zclient, afi,
+ ZEBRA_REDISTRIBUTE_ADD);
+ continue;
+ }
+
struct listnode *node;
unsigned short *id;
@@ -580,6 +682,12 @@ void zclient_send_dereg_requests(struct zclient *zclient, vrf_id_t vrf_id)
if (!zclient->mi_redist[afi][i].enabled)
continue;
+ if (i == ZEBRA_ROUTE_TABLE_DIRECT) {
+ zclient_send_table_direct(zclient, afi,
+ ZEBRA_REDISTRIBUTE_DELETE);
+ continue;
+ }
+
struct listnode *node;
unsigned short *id;
@@ -2016,6 +2124,15 @@ bool zapi_route_notify_decode(struct stream *s, struct prefix *p,
enum zapi_route_notify_owner *note,
afi_t *afi, safi_t *safi)
{
+ struct prefix dummy;
+
+ return zapi_route_notify_decode_srcdest(s, p, &dummy, tableid, note, afi, safi);
+}
+
+bool zapi_route_notify_decode_srcdest(struct stream *s, struct prefix *p, struct prefix *src_p,
+ uint32_t *tableid, enum zapi_route_notify_owner *note,
+ afi_t *afi, safi_t *safi)
+{
uint32_t t;
afi_t afi_val;
safi_t safi_val;
@@ -2025,6 +2142,9 @@ bool zapi_route_notify_decode(struct stream *s, struct prefix *p,
STREAM_GETC(s, p->family);
STREAM_GETC(s, p->prefixlen);
STREAM_GET(&p->u.prefix, s, prefix_blen(p));
+ src_p->family = p->family;
+ STREAM_GETC(s, src_p->prefixlen);
+ STREAM_GET(&src_p->u.prefix, s, prefix_blen(src_p));
STREAM_GETL(s, t);
STREAM_GETC(s, afi_val);
STREAM_GETC(s, safi_val);
@@ -2180,7 +2300,27 @@ struct nexthop *nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh)
n->type = znh->type;
n->vrf_id = znh->vrf_id;
n->ifindex = znh->ifindex;
- n->gate = znh->gate;
+
+ /* only copy values that have meaning - make sure "spare bytes" are
+ * left zeroed for hashing (look at _nexthop_hash_bytes)
+ */
+ switch (znh->type) {
+ case NEXTHOP_TYPE_BLACKHOLE:
+ n->bh_type = znh->bh_type;
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ n->gate.ipv4 = znh->gate.ipv4;
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ n->gate.ipv6 = znh->gate.ipv6;
+ break;
+ case NEXTHOP_TYPE_IFINDEX:
+ /* nothing, ifindex is always copied */
+ break;
+ }
+
n->srte_color = znh->srte_color;
n->weight = znh->weight;
@@ -4634,9 +4774,52 @@ static void zclient_read(struct event *thread)
zclient_event(ZCLIENT_READ, zclient);
}
+static void zclient_redistribute_table_direct(struct zclient *zclient, vrf_id_t vrf_id, afi_t afi,
+ int instance, int command)
+{
+ struct redist_proto *red = &zclient->mi_redist[afi][ZEBRA_ROUTE_TABLE_DIRECT];
+ bool has_table;
+ struct redist_table_direct table = {
+ .vrf_id = vrf_id,
+ .table_id = instance,
+ };
+
+ has_table = redist_lookup_table_direct(red, &table);
+
+ if (command == ZEBRA_REDISTRIBUTE_ADD) {
+ if (has_table)
+ return;
+
+ redist_add_table_direct(red, &table);
+ } else {
+ if (!has_table)
+ return;
+
+ redist_del_table_direct(red, &table);
+ }
+
+ if (zclient->sock > 0)
+ zebra_redistribute_send(command, zclient, afi, ZEBRA_ROUTE_TABLE_DIRECT, instance,
+ vrf_id);
+}
+
void zclient_redistribute(int command, struct zclient *zclient, afi_t afi,
int type, unsigned short instance, vrf_id_t vrf_id)
{
+ /*
+	 * When asking for table-direct redistribution the parameter
+	 * `instance` has a different meaning: it carries the table ID.
+	 *
+	 * The table ID is stored in `zclient->mi_redist` together with the
+	 * VRF ID as a pair (instead of the usual single protocol instance
+	 * value).
+ */
+ if (type == ZEBRA_ROUTE_TABLE_DIRECT) {
+ zclient_redistribute_table_direct(zclient, vrf_id, afi, instance, command);
+ return;
+ }
if (instance) {
if (command == ZEBRA_REDISTRIBUTE_ADD) {
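
Usage note (not part of the diff): with the hunks above, a daemon subscribes to table-direct routes by passing the table ID in the `instance` argument of zclient_redistribute(); the (vrf_id, table_id) pair is then tracked through redist_add_table_direct(). A hedged sketch; table 100 and the default VRF are arbitrary example values.

/* Illustrative only: (un)subscribe to routes from kernel table 100. */
static void example_table_direct_subscribe(struct zclient *zclient, bool on)
{
	zclient_redistribute(on ? ZEBRA_REDISTRIBUTE_ADD : ZEBRA_REDISTRIBUTE_DELETE,
			     zclient, AFI_IP, ZEBRA_ROUTE_TABLE_DIRECT,
			     100, VRF_DEFAULT);
}
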
diff --git a/lib/zclient.h b/lib/zclient.h
index 2385a8a219..afd84acce2 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -268,6 +268,21 @@ struct redist_proto {
struct list *instances;
};
+/**
+ * Redistribute table direct instance data structure: keeps the VRF
+ * that subscribed to the table ID.
+ *
+ * **NOTE**
+ * `table_id` is an integer because that is what the netlink interface
+ * uses for the route attribute RTA_TABLE (a 32-bit int); however, the
+ * rest of the zclient API (and the CLI commands) uses `unsigned short`,
+ * so it is limited to the range 1 to 65535.
+ */
+struct redist_table_direct {
+ vrf_id_t vrf_id;
+ int table_id;
+};
+
struct zclient_capabilities {
uint32_t ecmp;
bool mpls_enabled;
@@ -924,6 +939,15 @@ extern void redist_add_instance(struct redist_proto *, unsigned short);
extern void redist_del_instance(struct redist_proto *, unsigned short);
extern void redist_del_all_instances(struct redist_proto *red);
+extern struct redist_table_direct *
+redist_lookup_table_direct(const struct redist_proto *red, const struct redist_table_direct *table);
+extern bool redist_table_direct_has_id(const struct redist_proto *red, int table_id);
+extern void redist_add_table_direct(struct redist_proto *red,
+ const struct redist_table_direct *table);
+extern void redist_del_table_direct(struct redist_proto *red,
+ const struct redist_table_direct *table);
+
+
/*
* Send to zebra that the specified vrf is using label to resolve
* itself for L3VPN's. Repeated calls of this function with
@@ -1144,6 +1168,9 @@ bool zapi_route_notify_decode(struct stream *s, struct prefix *p,
uint32_t *tableid,
enum zapi_route_notify_owner *note,
afi_t *afi, safi_t *safi);
+bool zapi_route_notify_decode_srcdest(struct stream *s, struct prefix *p, struct prefix *src_p,
+ uint32_t *tableid, enum zapi_route_notify_owner *note,
+ afi_t *afi, safi_t *safi);
bool zapi_rule_notify_decode(struct stream *s, uint32_t *seqno,
uint32_t *priority, uint32_t *unique, char *ifname,
enum zapi_rule_notify_owner *note);
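
Usage note (not part of the diff): a hedged sketch of a route-notify handler using the new zapi_route_notify_decode_srcdest() so the source prefix of an IPv6 source-specific route is available too. The handler body and log message are illustrative; the decoder signature is the one declared above.

/* Illustrative only: decode a notification including the source prefix. */
static int example_route_notify_owner(ZAPI_CALLBACK_ARGS)
{
	struct prefix p, src_p;
	enum zapi_route_notify_owner note;
	uint32_t table_id;
	afi_t afi;
	safi_t safi;

	if (!zapi_route_notify_decode_srcdest(zclient->ibuf, &p, &src_p,
					      &table_id, &note, &afi, &safi))
		return -1;

	zlog_debug("route notify: %pFX (src %pFX) table %u note %d",
		   &p, &src_p, table_id, (int)note);
	return 0;
}
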
diff --git a/lib/zlog_5424.c b/lib/zlog_5424.c
index 4c60d4b405..6265ce3b1f 100644
--- a/lib/zlog_5424.c
+++ b/lib/zlog_5424.c
@@ -782,7 +782,7 @@ static void zlog_5424_cycle(struct zlog_cfg_5424 *zcf, int fd)
}
old = zcf->active ? &zcf->active->zt : NULL;
- old = zlog_target_replace(old, &zlt->zt);
+ old = zlog_target_replace(old, zlt ? &zlt->zt : NULL);
zcf->active = zlt;
/* oldt->fd == fd happens for zlog_5424_apply_meta() */
@@ -1076,9 +1076,17 @@ bool zlog_5424_apply_dst(struct zlog_cfg_5424 *zcf)
bool zlog_5424_apply_meta(struct zlog_cfg_5424 *zcf)
{
+ int fd;
+
frr_with_mutex (&zcf->cfg_mtx) {
if (zcf->active)
- zlog_5424_cycle(zcf, zcf->active->fd);
+ fd = zcf->active->fd;
+ else if (zcf->prio_min != ZLOG_DISABLED)
+ fd = zlog_5424_open(zcf, -1);
+ else
+ fd = -1;
+ if (fd >= 0)
+ zlog_5424_cycle(zcf, fd);
}
return true;