summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.clang-format2
-rw-r--r--configure.ac1
-rw-r--r--gdb/lib.txt49
-rw-r--r--lib/darr.c62
-rw-r--r--lib/darr.h457
-rw-r--r--lib/frrstr.c21
-rw-r--r--lib/frrstr.h8
-rw-r--r--lib/mgmt.proto16
-rw-r--r--lib/mgmt_be_client.c203
-rw-r--r--lib/mgmt_fe_client.c113
-rw-r--r--lib/mgmt_fe_client.h50
-rw-r--r--lib/mgmt_msg.c79
-rw-r--r--lib/mgmt_msg_native.c47
-rw-r--r--lib/mgmt_msg_native.h380
-rw-r--r--lib/northbound.c370
-rw-r--r--lib/northbound.h100
-rw-r--r--lib/northbound_cli.c16
-rw-r--r--lib/northbound_grpc.cpp20
-rw-r--r--lib/northbound_oper.c1769
-rw-r--r--lib/northbound_sysrepo.c37
-rw-r--r--lib/subdir.am3
-rw-r--r--lib/vrf.c14
-rw-r--r--lib/vty.c334
-rw-r--r--lib/vty.h5
-rw-r--r--lib/yang.c156
-rw-r--r--lib/yang.h71
-rw-r--r--mgmtd/mgmt_be_adapter.c102
-rw-r--r--mgmtd/mgmt_be_adapter.h17
-rw-r--r--mgmtd/mgmt_fe_adapter.c283
-rw-r--r--mgmtd/mgmt_fe_adapter.h46
-rw-r--r--mgmtd/mgmt_history.c4
-rw-r--r--mgmtd/mgmt_main.c41
-rw-r--r--mgmtd/mgmt_memory.c1
-rw-r--r--mgmtd/mgmt_memory.h1
-rw-r--r--mgmtd/mgmt_txn.c433
-rw-r--r--mgmtd/mgmt_txn.h59
-rw-r--r--mgmtd/mgmt_vty.c36
-rw-r--r--python/xref2vtysh.py2
-rw-r--r--tests/lib/subdir.am1
-rw-r--r--tests/lib/test_darr.c145
-rw-r--r--tests/lib/test_darr.py8
-rw-r--r--tests/topotests/mgmt_fe_client/fe_client.py103
-rw-r--r--tests/topotests/mgmt_fe_client/mgmt_pb2.py1990
l---------tests/topotests/mgmt_fe_client/oper.py1
-rw-r--r--tests/topotests/mgmt_fe_client/r1/frr.conf23
-rw-r--r--tests/topotests/mgmt_fe_client/test_client.py49
-rw-r--r--tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json576
-rw-r--r--tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json1145
-rw-r--r--tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json576
-rw-r--r--tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json572
-rw-r--r--tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json572
-rw-r--r--tests/topotests/mgmt_oper/oper-results/result-lib.json1145
-rw-r--r--tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json225
-rw-r--r--tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json572
-rw-r--r--tests/topotests/mgmt_oper/oper.py258
-rw-r--r--tests/topotests/mgmt_oper/r1/frr-scale.conf25
-rw-r--r--tests/topotests/mgmt_oper/r1/frr-simple.conf23
-rw-r--r--tests/topotests/mgmt_oper/r1/frr.conf41
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-empty.json2
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-intf-state-mtu.json12
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-intf-state.json17
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json193
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json350
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json164
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json189
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json189
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-lib.json350
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-ribs-rib-ipv4-unicast.json110
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json189
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-ribs-rib-route-nokey.json110
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-ribs-rib-route-prefix.json50
-rw-r--r--tests/topotests/mgmt_oper/simple-results/result-singleton-metric.json30
-rw-r--r--tests/topotests/mgmt_oper/test_oper.py123
-rw-r--r--tests/topotests/mgmt_oper/test_querying.py103
-rw-r--r--tests/topotests/mgmt_oper/test_scale.py67
-rw-r--r--tests/topotests/mgmt_oper/test_simple.py140
-rw-r--r--tests/topotests/static_simple/r1/mgmtd.conf10
-rw-r--r--tests/topotests/static_simple/r1/zebra.conf10
-rw-r--r--tests/topotests/static_simple/test_static_simple.py11
-rw-r--r--zebra/debug.c4
-rw-r--r--zebra/main.c9
-rw-r--r--zebra/zebra_nb.c2
-rw-r--r--zebra/zebra_nb.h4
-rw-r--r--zebra/zebra_nb_state.c48
-rw-r--r--zebra/zebra_router.c20
-rw-r--r--zebra/zebra_router.h3
86 files changed, 15493 insertions, 474 deletions
diff --git a/.clang-format b/.clang-format
index 3446db48de..d16263da2e 100644
--- a/.clang-format
+++ b/.clang-format
@@ -80,6 +80,8 @@ ForEachMacros:
# libyang outliers:
- 'LY_FOR_KEYS'
- 'LY_LIST_FOR'
+ - 'LYD_LIST_FOR_INST'
+ - 'LYD_LIST_FOR_INST_SAFE'
- 'LY_TREE_FOR'
- 'LY_TREE_DFS_BEGIN'
- 'LYD_TREE_DFS_BEGIN'
diff --git a/configure.ac b/configure.ac
index 12cb4b9bb3..fc3775857f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -379,6 +379,7 @@ else
fi
AC_C_FLAG([-Wno-unused-parameter])
AC_C_FLAG([-Wno-missing-field-initializers])
+AC_C_FLAG([-Wno-microsoft-anon-tag])
AC_C_FLAG([-Wc++-compat], [], [CXX_COMPAT_CFLAGS="-Wc++-compat"])
AC_SUBST([CXX_COMPAT_CFLAGS])
diff --git a/gdb/lib.txt b/gdb/lib.txt
index 5d22321b62..435ec7eda7 100644
--- a/gdb/lib.txt
+++ b/gdb/lib.txt
@@ -306,8 +306,9 @@ define mq_walk
end
set $mg = $mg->next
end
+end
-document mg_walk
+document mq_walk
Walk the memory data structures to show what is holding memory.
Arguments:
@@ -315,3 +316,49 @@ Arguments:
sure where to start pass it mg_first, which is a global DS for
all memory allocated in FRR
end
+
+define __darr_meta
+ set $_ = ((struct darr_metadata *)$arg0) - 1
+end
+document __darr_meta
+Store a pointer to the struct darr_metadata in $_ for the given dynamic array.
+
+Argument: a pointer to a darr dynamic array.
+Returns: pointer to the struct darr_metadata in $_.
+end
+
+define darr_meta
+ __darr_meta $arg0
+ p *$_
+end
+document darr_meta
+Print the struct darr_metadata for the given dynamic array. Store the value
+in $_ as well.
+
+Argument: a pointer to a darr dynamic array.
+Returns: pointer to the struct darr_metadata in $_.
+end
+
+define darr_len
+ __darr_meta $arg0
+ set $_ = $_->len
+ p $_
+end
+document darr_len
+Print the length of the given dynamic array, and store in $_.
+
+Argument: a pointer to a darr dynamic array.
+Returns: length of the array.
+end
+
+define darr_cap
+ __darr_meta $arg0
+ set $_ = $_->cap
+ p $_
+end
+document darr_cap
+Print the capacity of the given dynamic array, and store in $_.
+
+Argument: a pointer to a darr dynamic array.
+Returns: capacity of the array.
+end
diff --git a/lib/darr.c b/lib/darr.c
index 282e0dc5dc..f7a64fc394 100644
--- a/lib/darr.c
+++ b/lib/darr.c
@@ -10,6 +10,7 @@
#include "memory.h"
DEFINE_MTYPE(LIB, DARR, "Dynamic Array");
+DEFINE_MTYPE(LIB, DARR_STR, "Dynamic Array String");
static uint _msb(uint count)
{
@@ -52,29 +53,72 @@ static size_t darr_size(uint count, size_t esize)
return count * esize + sizeof(struct darr_metadata);
}
-void *__darr_resize(void *a, uint count, size_t esize)
+char *__darr_in_vsprintf(char **sp, bool concat, const char *fmt, va_list ap)
+{
+ size_t inlen = concat ? darr_strlen(*sp) : 0;
+ size_t capcount = strlen(fmt) + MIN(inlen + 64, 128);
+ ssize_t len;
+
+ darr_ensure_cap(*sp, capcount);
+
+ if (!concat)
+ darr_reset(*sp);
+
+ /* code below counts on having a NUL terminated string */
+ if (darr_len(*sp) == 0)
+ *darr_append(*sp) = 0;
+again:
+ len = vsnprintf(darr_last(*sp), darr_avail(*sp), fmt, ap);
+ if (len < 0)
+ darr_in_strcat(*sp, fmt);
+ else if ((size_t)len < darr_avail(*sp))
+ _darr_len(*sp) += len;
+ else {
+ darr_ensure_cap(*sp, darr_len(*sp) + (size_t)len);
+ goto again;
+ }
+ return *sp;
+}
+
+char *__darr_in_sprintf(char **sp, bool concat, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ (void)__darr_in_vsprintf(sp, concat, fmt, ap);
+ va_end(ap);
+ return *sp;
+}
+
+
+void *__darr_resize(void *a, uint count, size_t esize, struct memtype *mtype)
{
uint ncount = darr_next_count(count, esize);
size_t osz = (a == NULL) ? 0 : darr_size(darr_cap(a), esize);
size_t sz = darr_size(ncount, esize);
- struct darr_metadata *dm = XREALLOC(MTYPE_DARR,
- a ? _darr_meta(a) : NULL, sz);
+ struct darr_metadata *dm;
- if (sz > osz)
- memset((char *)dm + osz, 0, sz - osz);
+ if (a) {
+ dm = XREALLOC(_darr_meta(a)->mtype, _darr_meta(a), sz);
+ if (sz > osz)
+ memset((char *)dm + osz, 0, sz - osz);
+ } else {
+ dm = XCALLOC(mtype, sz);
+ dm->mtype = mtype;
+ }
dm->cap = ncount;
return (void *)(dm + 1);
}
-void *__darr_insert_n(void *a, uint at, uint count, size_t esize, bool zero)
+void *__darr_insert_n(void *a, uint at, uint count, size_t esize, bool zero,
+ struct memtype *mtype)
{
-
struct darr_metadata *dm;
uint olen, nlen;
if (!a)
- a = __darr_resize(NULL, at + count, esize);
+ a = __darr_resize(NULL, at + count, esize, mtype);
dm = (struct darr_metadata *)a - 1;
olen = dm->len;
@@ -89,7 +133,7 @@ void *__darr_insert_n(void *a, uint at, uint count, size_t esize, bool zero)
nlen = olen + count;
if (nlen > dm->cap) {
- a = __darr_resize(a, nlen, esize);
+ a = __darr_resize(a, nlen, esize, mtype);
dm = (struct darr_metadata *)a - 1;
}
diff --git a/lib/darr.h b/lib/darr.h
index d78d97d5f3..2b6f0db0b9 100644
--- a/lib/darr.h
+++ b/lib/darr.h
@@ -3,22 +3,37 @@
* June 23 2023, Christian Hopps <chopps@labn.net>
*
* Copyright (c) 2023, LabN Consulting, L.L.C.
- *
+ */
+#ifndef _FRR_DARR_H_
+#define _FRR_DARR_H_
+
+/*
* API functions:
* ==============
* - darr_append
+ * - darr_append_mt
* - darr_append_n
+ * - darr_append_n_mt
* - darr_append_nz
+ * - darr_append_nz_mt
* - darr_cap
+ * - darr_ensure_avail
+ * - darr_ensure_avail_mt
* - darr_ensure_cap
+ * - darr_ensure_cap_mt
* - darr_ensure_i
- * - darr_foreach_i
- * - darr_foreach_p
+ * - darr_ensure_i_mt
* - darr_free
* - darr_insert
+ * - darr_insert_mt
* - darr_insertz
+ * - darr_insertz_mt
* - darr_insert_n
+ * - darr_insert_n_mt
* - darr_insert_nz
+ * - darr_insert_nz_mt
+ * - darr_last
+ * - darr_lasti
* - darr_len
* - darr_maxi
* - darr_pop
@@ -28,41 +43,80 @@
* - darr_remove_n
* - darr_reset
* - darr_setlen
+ *
+ * Iteration
+ * ---------
+ * - darr_foreach_i
+ * - darr_foreach_p
+ *
+ * String Utilities
+ * ----------------
+ * - darr_in_strcat_tail
+ * - darr_in_strcatf, darr_in_vstrcatf
+ * - darr_in_strdup
+ * - darr_in_strdup_cap
+ * - darr_in_sprintf, darr_in_vsprintf
+ * - darr_set_strlen
+ * - darr_strdup
+ * - darr_strdup_cap
+ * - darr_strlen
+ * - darr_strnul
+ * - darr_sprintf, darr_vsprintf
*/
/*
* A few assured items
*
* - DAs will never have capacity 0 unless they are NULL pointers.
*/
+
+/*
+ * NOTE: valgrind by default enables a "length64" heuristic (among others) which
+ * identifies "interior-pointer" 8 bytes forward of a "start-pointer" as a
+ * "start-pointer". This should cause what normally would be "possibly-lost"
+ * errors to instead be definite for dynamic arrays. This is b/c the header is 8 bytes
+ */
+
#include <zebra.h>
#include "memory.h"
DECLARE_MTYPE(DARR);
+DECLARE_MTYPE(DARR_STR);
struct darr_metadata {
- uint len;
- uint cap;
+ uint32_t len;
+ uint32_t cap;
+ struct memtype *mtype;
};
-void *__darr_insert_n(void *a, uint at, uint count, size_t esize, bool zero);
-void *__darr_resize(void *a, uint count, size_t esize);
-#define _darr_esize(A) sizeof((A)[0])
-#define darr_esize(A) sizeof((A)[0])
-#define _darr_len(A) _darr_meta(A)->len
-#define _darr_meta(A) (((struct darr_metadata *)(A)) - 1)
-#define _darr_resize(A, C) ({ (A) = __darr_resize((A), C, _darr_esize(A)); })
+void *__darr_insert_n(void *a, uint at, uint count, size_t esize, bool zero,
+ struct memtype *mt);
+char *__darr_in_sprintf(char **sp, bool concat, const char *fmt, ...)
+ PRINTFRR(3, 4);
+char *__darr_in_vsprintf(char **sp, bool concat, const char *fmt, va_list ap)
+ PRINTFRR(3, 0);
+void *__darr_resize(void *a, uint count, size_t esize, struct memtype *mt);
+
+
+#define _darr_esize(A) sizeof((A)[0])
+#define darr_esize(A) sizeof((A)[0])
+#define _darr_len(A) _darr_meta(A)->len
+#define _darr_meta(A) (((struct darr_metadata *)(A)) - 1)
+#define _darr_resize_mt(A, C, MT) \
+ ({ (A) = __darr_resize(A, C, _darr_esize(A), MT); })
+#define _darr_resize(A, C) _darr_resize_mt(A, C, MTYPE_DARR)
/* Get the current capacity of the array */
#define darr_cap(A) (((A) == NULL) ? 0 : _darr_meta(A)->cap)
+/* Get the current available expansion space */
+#define darr_avail(A) (((A) == NULL) ? 0 : (darr_cap(A) - darr_len(A)))
+
/* Get the largest possible index one can `darr_ensure_i` w/o resizing */
#define darr_maxi(A) ((int)darr_cap(A) - 1)
/**
- * Get the current length of the array.
- *
- * As long as `A` is non-NULL, this macro may be used as an L-value to modify
- * the length of the array.
+ * darr_len() - Get the current length of the array as a unsigned int.
+ * darr_ilen() - Get the current length of the array as an int.
*
* Args:
* A: The dynamic array, can be NULL.
@@ -70,7 +124,19 @@ void *__darr_resize(void *a, uint count, size_t esize);
* Return:
* The current length of the array.
*/
-#define darr_len(A) (((A) == NULL) ? 0 : _darr_meta(A)->len)
+#define darr_len(A) (((A) == NULL) ? 0 : _darr_meta(A)->len)
+#define darr_ilen(A) (((A) == NULL) ? 0 : (ssize_t)_darr_meta(A)->len)
+
+/**
+ * darr_lasti() - Get the last element's index.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ *
+ * Return:
+ * The current last element index, or -1 for none.
+ */
+#define darr_lasti(A) (darr_ilen(A) - 1)
/**
* Set the current length of the array `A` to 0.
@@ -99,12 +165,42 @@ void *__darr_resize(void *a, uint count, size_t esize);
assert((A) || !(L)); \
if ((A)) { \
/* have to cast to avoid compiler warning for "0" */ \
- assert((long long)darr_cap(A) >= (L)); \
+ assert((long long)darr_cap(A) >= (long long)(L)); \
_darr_len(A) = (L); \
} \
} while (0)
/**
+ * Set the string length of the array `S` to `L`, and NUL
+ * terminate the string at L. The dynamic array length will be `L` + 1.
+ *
+ * Thus after calling:
+ *
+ * darr_len(S) == L + 1
+ * darr_strlen(S) == L
+ * S[L] == 0
+ *
+ * This function does *not* guarantee the `L` + 1 memory is allocated to
+ * the array, use `darr_ensure` or `*_cap` functions for that.
+ *
+ * Args:
+ * S: The dynamic array, cannot be NULL.
+ * L: The new str length of the array, will set
+ *
+ * Return:
+ * A pointer to the end of S (i.e., pointing to the NUL byte).
+ */
+#define darr_set_strlen(S, L) \
+ ({ \
+ assert((S)); \
+ /* have to cast to avoid compiler warning for "0" */ \
+		assert((long long)darr_cap(S) >= (long long)(L) + 1);         \
+ _darr_len(S) = (L) + 1; \
+ *darr_last(S) = 0; \
+ darr_last(S); \
+ })
+
+/**
* Free memory allocated for the dynamic array `A`
*
* Args:
@@ -114,13 +210,39 @@ void *__darr_resize(void *a, uint count, size_t esize);
#define darr_free(A) \
do { \
if ((A)) { \
- void *__ptr = _darr_meta(A); \
- XFREE(MTYPE_DARR, __ptr); \
+ struct darr_metadata *__meta = _darr_meta(A); \
+ XFREE(__meta->mtype, __meta); \
(A) = NULL; \
} \
} while (0)
/**
+ * Make sure that there is room in the dynamic array `A` to add `C` elements.
+ *
+ * Available space is `darr_cap(a) - darr_len(a)`.
+ *
+ * The value `A` may be changed as a result of this call in which case any
+ * pointers into the previous memory block are no longer valid. The `A` value
+ * is guaranteed not to change if there is sufficient capacity in the array.
+ *
+ * Args:
+ * A: (IN/OUT) the dynamic array, can be NULL.
+ * S: Amount of free space to guarantee.
+ *
+ * Return:
+ * A pointer to the (possibly moved) array.
+ */
+#define darr_ensure_avail_mt(A, S, MT) \
+ ({ \
+ ssize_t need = (ssize_t)(S) - \
+ (ssize_t)(darr_cap(A) - darr_len(A)); \
+ if (need > 0) \
+ _darr_resize_mt((A), darr_cap(A) + need, MT); \
+ (A); \
+ })
+#define darr_ensure_avail(A, S) darr_ensure_avail_mt(A, S, MTYPE_DARR)
+
+/**
* Make sure that there is room in the dynamic array `A` for `C` elements.
*
* The value `A` may be changed as a result of this call in which case any
@@ -129,17 +251,19 @@ void *__darr_resize(void *a, uint count, size_t esize);
*
* Args:
* A: (IN/OUT) the dynamic array, can be NULL.
- * I: the index to guarantee memory exists for
+ * C: Total capacity to guarantee.
*
* Return:
* A pointer to the (possibly moved) array.
*/
-#define darr_ensure_cap(A, C) \
+#define darr_ensure_cap_mt(A, C, MT) \
({ \
- if (darr_cap(A) < (C)) \
- _darr_resize((A), (C)); \
+ /* Cast to avoid warning when C == 0 */ \
+ if ((ssize_t)darr_cap(A) < (ssize_t)(C)) \
+ _darr_resize_mt((A), (C), MT); \
(A); \
})
+#define darr_ensure_cap(A, C) darr_ensure_cap_mt(A, C, MTYPE_DARR)
/**
* Return a pointer to the (I)th element of array `A`, making sure there is
@@ -159,18 +283,19 @@ void *__darr_resize(void *a, uint count, size_t esize);
* Return:
* A pointer to the (I)th element in `A`
*/
-#define darr_ensure_i(A, I) \
+#define darr_ensure_i_mt(A, I, MT) \
({ \
if ((int)(I) > darr_maxi(A)) \
- _darr_resize((A), (I) + 1); \
+ _darr_resize_mt((A), (I) + 1, MT); \
if ((I) + 1 > _darr_len(A)) \
_darr_len(A) = (I) + 1; \
&(A)[I]; \
})
+#define darr_ensure_i(A, I) darr_ensure_i_mt(A, I, MTYPE_DARR)
-#define _darr_insert_n(A, I, N, Z) \
+#define _darr_insert_n(A, I, N, Z, MT) \
({ \
- (A) = __darr_insert_n(A, I, N, _darr_esize(A), Z); \
+ (A) = __darr_insert_n(A, I, N, _darr_esize(A), Z, MT); \
&(A)[I]; \
})
/**
@@ -191,8 +316,10 @@ void *__darr_resize(void *a, uint count, size_t esize);
* Return:
* A pointer to the first inserted element in the array.
*/
-#define darr_insert_n(A, I, N) _darr_insert_n(A, I, N, false)
-#define darr_insert_nz(A, I, N) _darr_insert_n(A, I, N, true)
+#define darr_insert_n(A, I, N) _darr_insert_n(A, I, N, false, MTYPE_DARR)
+#define darr_insert_n_mt(A, I, N, MT)	_darr_insert_n(A, I, N, false, MT)
+#define darr_insert_nz(A, I, N) _darr_insert_n(A, I, N, true, MTYPE_DARR)
+#define darr_insert_nz_mt(A, I, N, MT)	_darr_insert_n(A, I, N, true, MT)
/**
* Insert an uninitialized element in the array at index `I`.
@@ -212,8 +339,10 @@ void *__darr_resize(void *a, uint count, size_t esize);
* Return:
* A pointer to the element in the array.
*/
-#define darr_insert(A, I) _darr_insert_n(A, I, 1, false)
-#define darr_insertz(A, I) _darr_insert_n(A, I, 1, true)
+#define darr_insert(A, I) _darr_insert_n(A, I, 1, false, MTYPE_DARR)
+#define darr_insert_mt(A, I, MT)	_darr_insert_n(A, I, 1, false, MT)
+#define darr_insertz(A, I) _darr_insert_n(A, I, 1, true, MTYPE_DARR)
+#define darr_insertz_mt(A, I, MT)	_darr_insert_n(A, I, 1, true, MT)
/**
* Remove `N` elements from the array starting at index `I`.
@@ -251,10 +380,10 @@ void *__darr_resize(void *a, uint count, size_t esize);
#define darr_remove(A, I) darr_remove_n(A, I, 1)
-#define _darr_append_n(A, N, Z) \
+#define _darr_append_n(A, N, Z, MT) \
({ \
uint __len = darr_len(A); \
- darr_ensure_cap(A, __len + (N)); \
+ darr_ensure_cap_mt(A, __len + (N), MT); \
_darr_len(A) = __len + (N); \
if (Z) \
memset(&(A)[__len], 0, (N)*_darr_esize(A)); \
@@ -271,8 +400,10 @@ void *__darr_resize(void *a, uint count, size_t esize);
* Return:
* A pointer to the first of the added elements at the end of the array.
*/
-#define darr_append_n(A, N) _darr_append_n(A, N, false)
-#define darr_append_nz(A, N) _darr_append_n(A, N, true)
+#define darr_append_n(A, N) _darr_append_n(A, N, false, MTYPE_DARR)
+#define darr_append_n_mt(A, N, MT) _darr_append_n(A, N, false, MT)
+#define darr_append_nz(A, N) _darr_append_n(A, N, true, MTYPE_DARR)
+#define darr_append_nz_mt(A, N, MT) _darr_append_n(A, N, true, MT)
/**
* Extending the array's length by 1.
@@ -285,8 +416,10 @@ void *__darr_resize(void *a, uint count, size_t esize);
* Return:
* A pointer to the new element at the end of the array.
*/
-#define darr_append(A) _darr_append_n(A, 1, false)
-#define darr_appendz(A) _darr_append_n(A, 1, true)
+#define darr_append(A) _darr_append_n(A, 1, false, MTYPE_DARR)
+#define darr_append_mt(A, MT) _darr_append_n(A, 1, false, MT)
+#define darr_appendz(A) _darr_append_n(A, 1, true, MTYPE_DARR)
+#define darr_appendz_mt(A, MT) _darr_append_n(A, 1, true, MT)
/**
* Append an element `E` onto the array `A`, extending it's length by 1.
@@ -299,8 +432,10 @@ void *__darr_resize(void *a, uint count, size_t esize);
* Return:
* A pointer to the element in the array.
*/
-#define darr_push(A, E) (*darr_append(A) = (E))
-#define darr_pushz(A) (darr_appendz(A))
+#define darr_push(A, E) (*darr_append(A) = (E))
+#define darr_push_mt(A, E, MT) (*darr_append_mt(A, MT) = (E))
+#define darr_pushz(A) (darr_appendz(A))
+#define darr_pushz_mt(A, MT) (darr_appendz_mt(A, MT))
/**
@@ -349,6 +484,246 @@ void *__darr_resize(void *a, uint count, size_t esize);
#define darr_end(A) ((A) + darr_len(A))
/**
+ * darr_last() - Get a pointer to the last element of the array.
+ * darr_strnul() - Get a pointer to the NUL byte of the darr string or NULL.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ *
+ * Return:
+ * A pointer to the last element of the array or NULL if the array is
+ * empty.
+ */
+#define darr_last(A) \
+ ({ \
+ uint __len = darr_len(A); \
+ ((__len > 0) ? &(A)[__len - 1] : NULL); \
+ })
+#define darr_strnul(S) darr_last(S)
+
+/**
+ * darr_in_sprintf() - sprintf into D.
+ *
+ * Args:
+ * D: The destination darr, D's value may be NULL.
+ * F: The format string
+ * ...: variable arguments for format string.
+ *
+ * Return:
+ * The dynamic_array D with the new string content.
+ */
+#define darr_in_sprintf(D, F, ...) __darr_in_sprintf(&(D), 0, F, __VA_ARGS__)
+
+
+/**
+ * darr_in_strcat() - concat a string into a darr string.
+ *
+ * Args:
+ * D: The destination darr, D's value may be NULL.
+ * S: The string to concat onto D.
+ *
+ * Return:
+ * The dynamic_array D with the new string content.
+ */
+#define darr_in_strcat(D, S) \
+ ({ \
+ uint __dlen = darr_strlen(D); \
+ uint __slen = strlen(S); \
+ darr_ensure_cap_mt(D, __dlen + __slen + 1, MTYPE_DARR_STR); \
+ if (darr_len(D) == 0) \
+ *darr_append(D) = 0; \
+ memcpy(darr_last(D), (S), __slen + 1); \
+ _darr_len(D) += __slen; \
+ D; \
+ })
+
+/**
+ * darr_in_strcatf() - concat a formatted string into a darr string.
+ *
+ * Args:
+ * D: The destination darr, D's value may be NULL.
+ * F: The format string to concat onto D after adding arguments.
+ * ...: The arguments for the format string.
+ * Return:
+ * The dynamic_array D with the new string content.
+ */
+#define darr_in_strcatf(D, F, ...) \
+ __darr_in_sprintf(&(D), true, (F), __VA_ARGS__)
+
+/**
+ * darr_in_strcat_tail() - copies end of one darr str to another.
+ *
+ * This is a rather specialized function, it takes 2 darr's, a destination and a
+ * source. If the source is not longer than the destination nothing is done.
+ * Otherwise the characters in the source that lie beyond the length of the dest
+ * are added to the dest. No checking is done to make sure the common prefix
+ * matches. For example:
+ *
+ * D: "/foo"
+ * S: "/foo/bar"
+ * -> D: "/foo/bar"
+ *
+ * perhaps surprising results:
+ * D: "/foo"
+ * S: "/zoo/bar"
+ * -> D: "/foo/bar"
+ *
+ * Args:
+ * D: The destination darr, D's value may be NULL.
+ * S: The string to copy the tail from.
+ *
+ * Return:
+ * The dynamic_array D with the extended string content.
+ */
+#define darr_in_strcat_tail(D, S) \
+ ({ \
+ int __dsize, __ssize, __extra; \
+ \
+ if (darr_len(D) == 0) \
+ *darr_append(D) = 0; \
+ __dsize = darr_ilen(D); \
+ __ssize = darr_ilen(S); \
+ __extra = __ssize - __dsize; \
+ if (__extra > 0) { \
+ darr_ensure_cap_mt(D, (uint)__ssize, MTYPE_DARR_STR); \
+ memcpy(darr_last(D), (S) + __dsize - 1, __extra + 1); \
+ _darr_len(D) += __extra; \
+ } \
+ D; \
+ })
+
+/**
+ * darr_in_strdup_cap() - duplicate the string into a darr reserving capacity.
+ * darr_in_strdup() - duplicate the string into a darr.
+ *
+ * Args:
+ * D: The destination darr, D's value may be NULL.
+ * S: The string to duplicate.
+ * C: The capacity to reserve.
+ *
+ * Return:
+ * The dynamic_array D with the duplicated string.
+ */
+#define darr_in_strdup_cap(D, S, C) \
+ ({ \
+ size_t __size = strlen(S) + 1; \
+ darr_reset(D); \
+ darr_ensure_cap_mt(D, \
+ ((size_t)(C) > __size) ? (size_t)(C) \
+ : __size, \
+ MTYPE_DARR_STR); \
+ strlcpy(D, (S), darr_cap(D)); \
+ darr_setlen((D), (size_t)__size); \
+ D; \
+ })
+#define darr_in_strdup(D, S) darr_in_strdup_cap(D, S, 1)
+
+/**
+ * darr_in_vsprintf() - vsprintf into D.
+ *
+ * Args:
+ * D: The destination darr, D's value may be NULL.
+ * F: The format string
+ * A: Varargs
+ *
+ * Return:
+ * The dynamic_array D with the new string content.
+ */
+#define darr_in_vsprintf(D, F, A) __darr_in_vsprintf(&(D), 0, F, A)
+
+/**
+ * darr_in_vstrcatf() - concat a formatted string into a darr string.
+ *
+ * Args:
+ * D: The destination darr, D's value may be NULL.
+ * F: The format string to concat onto D after adding arguments.
+ * A: Varargs
+ *
+ * Return:
+ * The dynamic_array D with the new string content.
+ */
+#define darr_in_vstrcatf(D, F, A) __darr_in_vsprintf(&(D), true, (F), (A))
+
+/**
+ * darr_sprintf() - sprintf into a new dynamic array.
+ *
+ * Args:
+ * F: The format string
+ * ...: variable arguments for format string.
+ *
+ * Return:
+ * A char * dynamic_array with the new string content.
+ */
+#define darr_sprintf(F, ...) \
+ ({ \
+ char *d = NULL; \
+ __darr_in_sprintf(&d, false, F, __VA_ARGS__); \
+ d; \
+ })
+
+/**
+ * darr_strdup_cap() - duplicate the string reserving capacity.
+ * darr_strdup() - duplicate the string into a dynamic array.
+ *
+ * Args:
+ * S: The string to duplicate.
+ * C: The capacity to reserve.
+ *
+ * Return:
+ * The dynamic_array with the duplicated string.
+ */
+#define darr_strdup_cap(S, C) \
+ ({ \
+ size_t __size = strlen(S) + 1; \
+ char *__s = NULL; \
+ /* Cast to ssize_t to avoid warning when C == 0 */ \
+ darr_ensure_cap_mt(__s, \
+ ((ssize_t)(C) > (ssize_t)__size) \
+ ? (size_t)(C) \
+ : __size, \
+ MTYPE_DARR_STR); \
+ strlcpy(__s, (S), darr_cap(__s)); \
+ darr_setlen(__s, (size_t)__size); \
+ __s; \
+ })
+#define darr_strdup(S) darr_strdup_cap(S, 0)
+
+/**
+ * darr_strlen() - get the length of the NUL terminated string in a darr.
+ *
+ * Args:
+ * S: The string to measure, value may be NULL.
+ *
+ * Return:
+ * The length of the NUL terminated string in @S
+ */
+#define darr_strlen(S) \
+ ({ \
+ uint __size = darr_len(S); \
+ if (__size) \
+ __size -= 1; \
+ assert(!(S) || ((char *)(S))[__size] == 0); \
+ __size; \
+ })
+
+/**
+ * darr_vsprintf() - vsprintf into a new dynamic array.
+ *
+ * Args:
+ * F: The format string
+ * A: Varargs
+ *
+ * Return:
+ * The dynamic_array D with the new string content.
+ */
+#define darr_vsprintf(F, A) \
+ ({ \
+ char *d = NULL; \
+ darr_in_vsprintf(d, F, A); \
+ d; \
+ })
+
+/**
* Iterate over array `A` using a pointer to each element in `P`.
*
* Args:
@@ -365,3 +740,5 @@ void *__darr_resize(void *a, uint count, size_t esize);
* I: A uint variable to store the current element index in.
*/
#define darr_foreach_i(A, I) for ((I) = 0; (I) < darr_len(A); (I)++)
+
+#endif /* _FRR_DARR_H_ */
diff --git a/lib/frrstr.c b/lib/frrstr.c
index bb112afef7..1e743d4b0c 100644
--- a/lib/frrstr.c
+++ b/lib/frrstr.c
@@ -249,3 +249,24 @@ const char *frrstr_skip_over_char(const char *s, int skipc)
}
return NULL;
}
+
+/*
+ * Advance backward in string until reaching the char `toc`
+ * if beginning of string is reached w/o finding char return NULL
+ *
+ * /foo/bar'baz/booz'/foo
+ */
+const char *frrstr_back_to_char(const char *s, int toc)
+{
+ const char *next = s;
+ const char *prev = NULL;
+
+ if (s[0] == 0)
+ return NULL;
+ if (!strpbrk(s, "'\"\\"))
+ return strrchr(s, toc);
+ while ((next = frrstr_skip_over_char(next, toc)))
+ prev = next - 1;
+ return prev;
+}
+
diff --git a/lib/frrstr.h b/lib/frrstr.h
index 9a4fe257a2..33a4992001 100644
--- a/lib/frrstr.h
+++ b/lib/frrstr.h
@@ -167,13 +167,19 @@ int all_digit(const char *str);
*/
char *frrstr_hex(char *buff, size_t bufsiz, const uint8_t *str, size_t num);
-
/*
* Advance past a given char `skipc` in a string, while honoring quoting and
* backslash escapes (i.e., ignore `skipc` which occur in quoted sections).
*/
const char *frrstr_skip_over_char(const char *s, int skipc);
+/*
+ * Advance back from end to a given char `toc` in a string, while honoring
+ * quoting and backslash escapes. `toc` chars inside quote or escaped are
+ * ignored.
+ */
+const char *frrstr_back_to_char(const char *s, int toc);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/mgmt.proto b/lib/mgmt.proto
index 86b118d356..087d96a6ee 100644
--- a/lib/mgmt.proto
+++ b/lib/mgmt.proto
@@ -114,25 +114,11 @@ message BeCfgDataApplyReply {
optional string error_if_any = 3;
}
-message BeOperDataGetReq {
- required uint64 txn_id = 1;
- required uint64 batch_id = 2;
- repeated YangGetDataReq data = 3;
-}
-
message YangDataReply {
repeated YangData data = 1;
required int64 next_indx = 2;
}
-message BeOperDataGetReply {
- required uint64 txn_id = 1;
- required uint64 batch_id = 2;
- required bool success = 3;
- optional string error = 4;
- optional YangDataReply data = 5;
-}
-
//
// Any message on the MGMTD Backend Interface.
//
@@ -146,8 +132,6 @@ message BeMessage {
BeCfgDataCreateReply cfg_data_reply = 7;
BeCfgDataApplyReq cfg_apply_req = 8;
BeCfgDataApplyReply cfg_apply_reply = 9;
- BeOperDataGetReq get_req = 10;
- BeOperDataGetReply get_reply = 11;
}
}
diff --git a/lib/mgmt_be_client.c b/lib/mgmt_be_client.c
index 058ff038d4..2ffcd8f9fc 100644
--- a/lib/mgmt_be_client.c
+++ b/lib/mgmt_be_client.c
@@ -8,9 +8,12 @@
#include <zebra.h>
#include "debug.h"
#include "compiler.h"
+#include "darr.h"
#include "libfrr.h"
+#include "lib_errors.h"
#include "mgmt_be_client.h"
#include "mgmt_msg.h"
+#include "mgmt_msg_native.h"
#include "mgmt_pb.h"
#include "network.h"
#include "northbound.h"
@@ -23,11 +26,11 @@ DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_CLIENT, "backend client");
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_CLIENT_NAME, "backend client name");
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_BATCH, "backend transaction batch data");
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_TXN, "backend transaction data");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_GT_CB_ARGS, "backend get-tree cb args");
enum mgmt_be_txn_event {
MGMTD_BE_TXN_PROC_SETCFG = 1,
MGMTD_BE_TXN_PROC_GETCFG,
- MGMTD_BE_TXN_PROC_GETDATA
};
struct mgmt_be_set_cfg_req {
@@ -35,19 +38,18 @@ struct mgmt_be_set_cfg_req {
uint16_t num_cfg_changes;
};
-struct mgmt_be_get_data_req {
- char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH];
- uint16_t num_xpaths;
-};
-
struct mgmt_be_txn_req {
enum mgmt_be_txn_event event;
union {
struct mgmt_be_set_cfg_req set_cfg;
- struct mgmt_be_get_data_req get_data;
} req;
};
+struct be_oper_iter_arg {
+ struct lyd_node *root; /* the tree we are building */
+ struct lyd_node *hint; /* last node added */
+};
+
PREDECL_LIST(mgmt_be_batches);
struct mgmt_be_batch_ctx {
struct mgmt_be_txn_req txn_req;
@@ -119,6 +121,15 @@ struct debug mgmt_dbg_be_client = {
/* NOTE: only one client per proc for now. */
static struct mgmt_be_client *__be_client;
+static int be_client_send_native_msg(struct mgmt_be_client *client_ctx,
+ void *msg, size_t len,
+ bool short_circuit_ok)
+{
+ return msg_conn_send_msg(&client_ctx->client.conn,
+ MGMT_MSG_VERSION_NATIVE, msg, len, NULL,
+ short_circuit_ok);
+}
+
static int mgmt_be_client_send_msg(struct mgmt_be_client *client_ctx,
Mgmtd__BeMessage *be_msg)
{
@@ -190,7 +201,8 @@ mgmt_be_find_txn_by_id(struct mgmt_be_client *client_ctx, uint64_t txn_id,
if (txn->txn_id == txn_id)
return txn;
if (warn)
- MGMTD_BE_CLIENT_ERR("Unknown txn-id: %" PRIu64, txn_id);
+		MGMTD_BE_CLIENT_ERR("client %s unknown txn-id: %" PRIu64,
+ client_ctx->name, txn_id);
return NULL;
}
@@ -263,6 +275,41 @@ static void mgmt_be_cleanup_all_txns(struct mgmt_be_client *client_ctx)
}
}
+
+/**
+ * Send an error back to MGMTD using native messaging.
+ *
+ * Args:
+ * client: the BE client.
+ * txn_id: the txn_id this error pertains to.
+ * short_circuit_ok: True if OK to short-circuit the call.
+ * error: An integer error value.
+ * errfmt: An error format string (i.e., printfrr)
+ * ...: args for use by the `errfmt` format string.
+ *
+ * Return:
+ * the return value from the underlying send message function.
+ */
+static int be_client_send_error(struct mgmt_be_client *client, uint64_t txn_id,
+ uint64_t req_id, bool short_circuit_ok,
+ int16_t error, const char *errfmt, ...)
+ PRINTFRR(6, 7);
+
+static int be_client_send_error(struct mgmt_be_client *client, uint64_t txn_id,
+ uint64_t req_id, bool short_circuit_ok,
+ int16_t error, const char *errfmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, errfmt);
+ ret = vmgmt_msg_native_send_error(&client->client.conn, txn_id, req_id,
+ short_circuit_ok, error, errfmt, ap);
+ va_end(ap);
+
+ return ret;
+}
+
static int mgmt_be_send_txn_reply(struct mgmt_be_client *client_ctx,
uint64_t txn_id, bool create)
{
@@ -702,19 +749,11 @@ static int mgmt_be_client_handle_msg(struct mgmt_be_client *client_ctx,
mgmt_be_process_cfg_apply(
client_ctx, (uint64_t)be_msg->cfg_apply_req->txn_id);
break;
- case MGMTD__BE_MESSAGE__MESSAGE_GET_REQ:
- MGMTD_BE_CLIENT_ERR("Got unhandled message type %u",
- be_msg->message_case);
- /*
- * TODO: Add handling code in future.
- */
- break;
/*
* NOTE: The following messages are always sent from Backend
* clients to MGMTd only and/or need not be handled here.
*/
case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
- case MGMTD__BE_MESSAGE__MESSAGE_GET_REPLY:
case MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY:
case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY:
case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
@@ -732,6 +771,119 @@ static int mgmt_be_client_handle_msg(struct mgmt_be_client *client_ctx,
return 0;
}
+struct be_client_tree_data_batch_args {
+ struct mgmt_be_client *client;
+ uint64_t txn_id;
+ uint64_t req_id;
+ LYD_FORMAT result_type;
+};
+
+/*
+ * Send a batch of tree data (a result of a get-tree request) back to MGMTD
+ */
+static enum nb_error be_client_send_tree_data_batch(const struct lyd_node *tree,
+ void *arg, enum nb_error ret)
+{
+ struct be_client_tree_data_batch_args *args = arg;
+ struct mgmt_be_client *client = args->client;
+ struct mgmt_msg_tree_data *tree_msg = NULL;
+ bool more = false;
+ uint8_t **darrp;
+ LY_ERR err;
+
+ if (ret == NB_YIELD) {
+ more = true;
+ ret = NB_OK;
+ }
+ if (ret != NB_OK)
+ goto done;
+
+ tree_msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_tree_data, 0,
+ MTYPE_MSG_NATIVE_TREE_DATA);
+ tree_msg->refer_id = args->txn_id;
+ tree_msg->req_id = args->req_id;
+ tree_msg->code = MGMT_MSG_CODE_TREE_DATA;
+ tree_msg->result_type = args->result_type;
+ tree_msg->more = more;
+
+ darrp = mgmt_msg_native_get_darrp(tree_msg);
+ err = yang_print_tree_append(darrp, tree, args->result_type,
+ (LYD_PRINT_WD_EXPLICIT |
+ LYD_PRINT_WITHSIBLINGS));
+ if (err) {
+ ret = NB_ERR;
+ goto done;
+ }
+ (void)be_client_send_native_msg(client, tree_msg,
+ mgmt_msg_native_get_msg_len(tree_msg),
+ false);
+done:
+ mgmt_msg_native_free_msg(tree_msg);
+ if (ret)
+ be_client_send_error(client, args->txn_id, args->req_id, false,
+ -EINVAL,
+ "FE cilent %s txn-id %" PRIu64
+ " error fetching oper state %d",
+ client->name, args->txn_id, ret);
+ if (ret != NB_OK || !more)
+ XFREE(MTYPE_MGMTD_BE_GT_CB_ARGS, args);
+ return ret;
+}
+
+/*
+ * Process the get-tree request on our local oper state
+ */
+static void be_client_handle_get_tree(struct mgmt_be_client *client,
+ uint64_t txn_id, void *msgbuf,
+ size_t msg_len)
+{
+ struct mgmt_msg_get_tree *get_tree_msg = msgbuf;
+ struct be_client_tree_data_batch_args *args;
+
+ MGMTD_BE_CLIENT_DBG("Received get-tree request for client %s txn-id %" PRIu64
+ " req-id %" PRIu64,
+ client->name, txn_id, get_tree_msg->req_id);
+
+ /* NOTE: removed the translator, if put back merge with northbound_cli
+ * code
+ */
+
+ args = XMALLOC(MTYPE_MGMTD_BE_GT_CB_ARGS, sizeof(*args));
+ args->client = client;
+ args->txn_id = get_tree_msg->refer_id;
+ args->req_id = get_tree_msg->req_id;
+ args->result_type = get_tree_msg->result_type;
+ nb_oper_walk(get_tree_msg->xpath, NULL, 0, true, NULL, NULL,
+ be_client_send_tree_data_batch, args);
+}
+
+/*
+ * Handle a native encoded message
+ *
+ * We don't create transactions with native messaging.
+ */
+static void be_client_handle_native_msg(struct mgmt_be_client *client,
+ struct mgmt_msg_header *msg,
+ size_t msg_len)
+{
+ uint64_t txn_id = msg->refer_id;
+
+ switch (msg->code) {
+ case MGMT_MSG_CODE_GET_TREE:
+ be_client_handle_get_tree(client, txn_id, msg, msg_len);
+ break;
+ default:
+ MGMTD_BE_CLIENT_ERR("unknown native message txn-id %" PRIu64
+ " req-id %" PRIu64 " code %u to client %s",
+ txn_id, msg->req_id, msg->code,
+ client->name);
+ be_client_send_error(client, msg->refer_id, msg->req_id, false, -1,
+ "BE cilent %s recv msg unknown txn-id %" PRIu64,
+ client->name, txn_id);
+ break;
+ }
+}
+
static void mgmt_be_client_process_msg(uint8_t version, uint8_t *data,
size_t len, struct msg_conn *conn)
{
@@ -742,6 +894,17 @@ static void mgmt_be_client_process_msg(uint8_t version, uint8_t *data,
client = container_of(conn, struct msg_client, conn);
client_ctx = container_of(client, struct mgmt_be_client, client);
+ if (version == MGMT_MSG_VERSION_NATIVE) {
+ struct mgmt_msg_header *msg = (typeof(msg))data;
+
+ if (len >= sizeof(*msg))
+ be_client_handle_native_msg(client_ctx, msg, len);
+ else
+ MGMTD_BE_CLIENT_ERR("native message to client %s too short %zu",
+ client_ctx->name, len);
+ return;
+ }
+
be_msg = mgmtd__be_message__unpack(NULL, len, data);
if (!be_msg) {
MGMTD_BE_CLIENT_DBG("Failed to decode %zu bytes from server",
@@ -775,10 +938,9 @@ int mgmt_be_send_subscr_req(struct mgmt_be_client *client_ctx,
be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ;
be_msg.subscr_req = &subscr_req;
- MGMTD_FE_CLIENT_DBG(
- "Sending SUBSCR_REQ name: %s subscr_xpaths: %u num_xpaths: %zu",
- subscr_req.client_name, subscr_req.subscribe_xpaths,
- subscr_req.n_xpath_reg);
+ MGMTD_BE_CLIENT_DBG("Sending SUBSCR_REQ name: %s subscr_xpaths: %u num_xpaths: %zu",
+ subscr_req.client_name, subscr_req.subscribe_xpaths,
+ subscr_req.n_xpath_reg);
return mgmt_be_client_send_msg(client_ctx, &be_msg);
}
@@ -922,6 +1084,7 @@ void mgmt_be_client_destroy(struct mgmt_be_client *client)
MGMTD_BE_CLIENT_DBG("Destroying MGMTD Backend Client '%s'",
client->name);
+ nb_oper_cancel_all_walks();
msg_client_cleanup(&client->client);
mgmt_be_cleanup_all_txns(client);
mgmt_be_txns_fini(&client->txn_head);
diff --git a/lib/mgmt_fe_client.c b/lib/mgmt_fe_client.c
index 4c6f86b194..c30a0339eb 100644
--- a/lib/mgmt_fe_client.c
+++ b/lib/mgmt_fe_client.c
@@ -12,6 +12,7 @@
#include "libfrr.h"
#include "mgmt_fe_client.h"
#include "mgmt_msg.h"
+#include "mgmt_msg_native.h"
#include "mgmt_pb.h"
#include "network.h"
#include "stream.h"
@@ -304,6 +305,35 @@ int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client,
return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
+/*
+ * Send get-tree request.
+ */
+int mgmt_fe_send_get_tree_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ LYD_FORMAT result_type, const char *xpath)
+{
+ struct mgmt_msg_get_tree *msg;
+ size_t xplen = strlen(xpath);
+ int ret;
+
+ msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_get_tree, xplen + 1,
+ MTYPE_MSG_NATIVE_GET_TREE);
+ msg->refer_id = session_id;
+ msg->req_id = req_id;
+ msg->code = MGMT_MSG_CODE_GET_TREE;
+ msg->result_type = result_type;
+ strlcpy(msg->xpath, xpath, xplen + 1);
+
+ MGMTD_FE_CLIENT_DBG("Sending GET_TREE_REQ session-id %" PRIu64
+ " req-id %" PRIu64 " xpath: %s",
+ session_id, req_id, xpath);
+
+ ret = mgmt_msg_native_send_msg(&client->client.conn, msg, false);
+ mgmt_msg_native_free_msg(msg);
+ return ret;
+}
+
+
static int mgmt_fe_client_handle_msg(struct mgmt_fe_client *client,
Mgmtd__FeMessage *fe_msg)
{
@@ -469,6 +499,73 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client *client,
return 0;
}
+/*
+ * Handle a native encoded message
+ */
+static void fe_client_handle_native_msg(struct mgmt_fe_client *client,
+ struct mgmt_msg_header *msg,
+ size_t msg_len)
+{
+ struct mgmt_fe_client_session *session;
+ struct mgmt_msg_tree_data *tree_msg;
+ struct mgmt_msg_error *err_msg;
+
+ MGMTD_FE_CLIENT_DBG("Got GET_TREE reply for session-id %" PRIu64,
+ msg->refer_id);
+
+ session = mgmt_fe_find_session_by_session_id(client, msg->refer_id);
+
+ if (!session || !session->client) {
+ MGMTD_FE_CLIENT_ERR("No session for received native msg session-id %" PRIu64,
+ msg->refer_id);
+ return;
+ }
+
+ switch (msg->code) {
+ case MGMT_MSG_CODE_ERROR:
+ if (!session->client->cbs.error_notify)
+ return;
+
+ err_msg = (typeof(err_msg))msg;
+ if (!MGMT_MSG_VALIDATE_NUL_TERM(err_msg, msg_len)) {
+ MGMTD_FE_CLIENT_ERR("Corrupt error msg recv");
+ return;
+ }
+ session->client->cbs.error_notify(client, client->user_data,
+ session->client_id,
+ msg->refer_id,
+ session->user_ctx,
+ msg->req_id, err_msg->error,
+ err_msg->errstr);
+ break;
+ case MGMT_MSG_CODE_TREE_DATA:
+ if (!session->client->cbs.get_tree_notify)
+ return;
+
+ tree_msg = (typeof(tree_msg))msg;
+ if (msg_len < sizeof(*tree_msg)) {
+ MGMTD_FE_CLIENT_ERR("Corrupt tree-data msg recv");
+ return;
+ }
+ session->client->cbs.get_tree_notify(client, client->user_data,
+ session->client_id,
+ msg->refer_id,
+ session->user_ctx,
+ msg->req_id,
+ MGMTD_DS_OPERATIONAL,
+ tree_msg->result_type,
+ tree_msg->result,
+ msg_len - sizeof(*tree_msg),
+ tree_msg->partial_error);
+ break;
+ default:
+ MGMTD_FE_CLIENT_ERR("unknown native message session-id %" PRIu64
+ " req-id %" PRIu64 " code %u",
+ msg->refer_id, msg->req_id, msg->code);
+ break;
+ }
+}
+
static void mgmt_fe_client_process_msg(uint8_t version, uint8_t *data,
size_t len, struct msg_conn *conn)
{
@@ -479,6 +576,17 @@ static void mgmt_fe_client_process_msg(uint8_t version, uint8_t *data,
msg_client = container_of(conn, struct msg_client, conn);
client = container_of(msg_client, struct mgmt_fe_client, client);
+ if (version == MGMT_MSG_VERSION_NATIVE) {
+ struct mgmt_msg_header *msg = (typeof(msg))data;
+
+ if (len >= sizeof(*msg))
+ fe_client_handle_native_msg(client, msg, len);
+ else
+ MGMTD_FE_CLIENT_ERR("native message to FE client %s too short %zu",
+ client->name, len);
+ return;
+ }
+
fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
if (!fe_msg) {
MGMTD_FE_CLIENT_DBG("Failed to decode %zu bytes from server.",
@@ -647,6 +755,11 @@ bool mgmt_fe_client_current_msg_short_circuit(struct mgmt_fe_client *client)
return client->client.conn.is_short_circuit;
}
+const char *mgmt_fe_client_name(struct mgmt_fe_client *client)
+{
+ return client->name;
+}
+
/*
* Create a new Session for a Frontend Client connection.
*/
diff --git a/lib/mgmt_fe_client.h b/lib/mgmt_fe_client.h
index d770748f23..3a1a1e5705 100644
--- a/lib/mgmt_fe_client.h
+++ b/lib/mgmt_fe_client.h
@@ -115,6 +115,20 @@ struct mgmt_fe_client_cbs {
uintptr_t user_data, uint64_t req_id,
Mgmtd__DatastoreId ds_id,
Mgmtd__YangData **yang_data, size_t num_data);
+
+ /* Called when get-tree result is returned */
+ int (*get_tree_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
+ uint64_t session_id, uintptr_t session_ctx,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ LYD_FORMAT result_type, void *result, size_t len,
+ int partial_error);
+
+ /* Called when new native error is returned */
+ int (*error_notify)(struct mgmt_fe_client *client, uintptr_t user_data,
+ uint64_t client_id, uint64_t session_id,
+ uintptr_t session_ctx, uint64_t req_id, int error,
+ const char *errstr);
};
extern struct debug mgmt_dbg_fe_client;
@@ -364,6 +378,31 @@ extern int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client,
int num_reqs);
/*
+ * Send GET-TREE to MGMTD daemon.
+ *
+ * client
+ * Client object.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * result_type
+ * The LYD_FORMAT of the result.
+ *
+ * xpath
+ * the xpath to get.
+ *
+ * Returns:
+ * 0 on success, otherwise msg_conn_send_msg() return values.
+ */
+extern int mgmt_fe_send_get_tree_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ LYD_FORMAT result_type, const char *xpath);
+
+/*
* Destroy library and cleanup everything.
*/
extern void mgmt_fe_client_destroy(struct mgmt_fe_client *client);
@@ -379,6 +418,17 @@ extern uint mgmt_fe_client_session_count(struct mgmt_fe_client *client);
extern bool
mgmt_fe_client_current_msg_short_circuit(struct mgmt_fe_client *client);
+/**
+ * Get the name of the client
+ *
+ * Args:
+ * The client object.
+ *
+ * Return:
+ * The name of the client.
+ */
+extern const char *mgmt_fe_client_name(struct mgmt_fe_client *client);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/mgmt_msg.c b/lib/mgmt_msg.c
index 12432a06e2..b03dbe8cc3 100644
--- a/lib/mgmt_msg.c
+++ b/lib/mgmt_msg.c
@@ -13,6 +13,7 @@
#include "stream.h"
#include "frrevent.h"
#include "mgmt_msg.h"
+#include "mgmt_msg_native.h"
#define MGMT_MSG_DBG(dbgtag, fmt, ...) \
@@ -84,7 +85,7 @@ enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
*/
assert(stream_get_getp(ms->ins) == 0);
left = stream_get_endp(ms->ins);
- while (left > (long)sizeof(struct mgmt_msg_hdr)) {
+ while (left > (ssize_t)sizeof(struct mgmt_msg_hdr)) {
mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(ms->ins) + total);
if (!MGMT_MSG_IS_MARKER(mhdr->marker)) {
MGMT_MSG_DBG(dbgtag, "recv corrupt buffer, disconnect");
@@ -99,8 +100,25 @@ enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
mcount++;
}
- if (!mcount)
+ if (!mcount) {
+ /* Didn't manage to read a full message */
+ if (mhdr && avail == 0) {
+ struct stream *news;
+ /*
+ * Message was longer than what was left and we have no
+ * available space to read more in. B/c mcount == 0 the
+ * message starts at the beginning of the stream so
+ * therefore the stream is too small to fit the message.
+ * Resize the stream to fit.
+ */
+ news = stream_new(mhdr->len);
+ stream_put(news, mhdr, left);
+ stream_set_endp(news, left);
+ stream_free(ms->ins);
+ ms->ins = news;
+ }
return MSR_SCHED_STREAM;
+ }
/*
* We have read at least one message into the stream, queue it up.
@@ -108,7 +126,11 @@ enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(ms->ins) + total);
stream_set_endp(ms->ins, total);
stream_fifo_push(&ms->inq, ms->ins);
- ms->ins = stream_new(ms->max_msg_sz);
+ if (left < (ssize_t)sizeof(struct mgmt_msg_hdr))
+ ms->ins = stream_new(ms->max_msg_sz);
+ else
+ /* handle case where message is greater than max */
+ ms->ins = stream_new(MAX(ms->max_msg_sz, mhdr->len));
if (left) {
stream_put(ms->ins, mhdr, left);
stream_set_endp(ms->ins, left);
@@ -292,23 +314,26 @@ int mgmt_msg_send_msg(struct mgmt_msg_state *ms, uint8_t version, void *msg,
size_t endp, n;
size_t mlen = len + sizeof(*mhdr);
- if (mlen > ms->max_msg_sz) {
- MGMT_MSG_ERR(ms, "Message %zu > max size %zu, dropping", mlen,
- ms->max_msg_sz);
- return -1;
- }
+ if (mlen > ms->max_msg_sz)
+ MGMT_MSG_DBG(dbgtag, "Sending large msg size %zu > max size %zu",
+ mlen, ms->max_msg_sz);
if (!ms->outs) {
- MGMT_MSG_DBG(dbgtag, "creating new stream for msg len %zu",
- len);
- ms->outs = stream_new(ms->max_msg_sz);
+ MGMT_MSG_DBG(dbgtag, "creating new stream for msg len %zu", mlen);
+ ms->outs = stream_new(MAX(ms->max_msg_sz, mlen));
+ } else if (mlen > ms->max_msg_sz && ms->outs->endp == 0) {
+ /* msg is larger than stream max size get a fit-to-size stream */
+ MGMT_MSG_DBG(dbgtag,
+ "replacing old stream with fit-to-size for msg len %zu",
+ mlen);
+ stream_free(ms->outs);
+ ms->outs = stream_new(mlen);
} else if (STREAM_WRITEABLE(ms->outs) < mlen) {
- MGMT_MSG_DBG(
- dbgtag,
- "enq existing stream len %zu and creating new stream for msg len %zu",
- STREAM_WRITEABLE(ms->outs), mlen);
+ MGMT_MSG_DBG(dbgtag,
+ "enq existing stream len %zu and creating new stream for msg len %zu",
+ STREAM_WRITEABLE(ms->outs), mlen);
stream_fifo_push(&ms->outq, ms->outs);
- ms->outs = stream_new(ms->max_msg_sz);
+ ms->outs = stream_new(MAX(ms->max_msg_sz, mlen));
} else {
MGMT_MSG_DBG(
dbgtag,
@@ -317,6 +342,16 @@ int mgmt_msg_send_msg(struct mgmt_msg_state *ms, uint8_t version, void *msg,
}
s = ms->outs;
+ if (dbgtag && version == MGMT_MSG_VERSION_NATIVE) {
+ struct mgmt_msg_header *native_msg = msg;
+
+ MGMT_MSG_DBG(
+ dbgtag,
+ "Sending native msg sess/txn-id %"PRIu64" req-id %"PRIu64" code %u",
+ native_msg->refer_id, native_msg->req_id, native_msg->code);
+
+ }
+
/* We have a stream with space, pack the message into it. */
mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(s) + s->endp);
mhdr->marker = MGMT_MSG_MARKER(version);
@@ -672,6 +707,9 @@ static int msg_client_connect_short_circuit(struct msg_client *client)
/* server side */
memset(&su, 0, sizeof(union sockunion));
server_conn = server->create(sockets[1], &su);
+ server_conn->debug = DEBUG_MODE_CHECK(server->debug, DEBUG_MODE_ALL)
+ ? true
+ : false;
client->conn.remote_conn = server_conn;
server_conn->remote_conn = &client->conn;
@@ -765,8 +803,9 @@ void msg_client_cleanup(struct msg_client *client)
static void msg_server_accept(struct event *event)
{
struct msg_server *server = EVENT_ARG(event);
- int fd;
+ struct msg_conn *conn;
union sockunion su;
+ int fd;
if (server->fd < 0)
return;
@@ -789,7 +828,11 @@ static void msg_server_accept(struct event *event)
DEBUGD(server->debug, "Accepted new %s connection", server->idtag);
- server->create(fd, &su);
+ conn = server->create(fd, &su);
+ if (conn)
+ conn->debug = DEBUG_MODE_CHECK(server->debug, DEBUG_MODE_ALL)
+ ? true
+ : false;
}
int msg_server_init(struct msg_server *server, const char *sopath,
diff --git a/lib/mgmt_msg_native.c b/lib/mgmt_msg_native.c
new file mode 100644
index 0000000000..b6dc126d49
--- /dev/null
+++ b/lib/mgmt_msg_native.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * June 29 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ *
+ */
+#include <zebra.h>
+#include "mgmt_msg_native.h"
+
+DEFINE_MGROUP(MSG_NATIVE, "Native message allocations");
+DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_MSG, "native mgmt msg");
+DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_ERROR, "native error msg");
+DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_GET_TREE, "native get tree msg");
+DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_TREE_DATA, "native tree data msg");
+
+int vmgmt_msg_native_send_error(struct msg_conn *conn, uint64_t sess_or_txn_id,
+ uint64_t req_id, bool short_circuit_ok,
+ int16_t error, const char *errfmt, va_list ap)
+{
+ struct mgmt_msg_error *msg;
+ char *errstr;
+ ssize_t slen;
+ int ret;
+
+ errstr = darr_vsprintf(errfmt, ap);
+ slen = strlen(errstr);
+
+ msg = mgmt_msg_native_alloc_msg(typeof(*msg), slen + 1,
+ MTYPE_MSG_NATIVE_ERROR);
+ msg->refer_id = sess_or_txn_id;
+ msg->req_id = req_id;
+ msg->code = MGMT_MSG_CODE_ERROR;
+ msg->error = error;
+ strlcpy(msg->errstr, errstr, slen + 1);
+ darr_free(errstr);
+
+ if (conn->debug)
+ zlog_debug("Sending error %d session-id %" PRIu64
+ " req-id %" PRIu64 " scok %d errstr: %s",
+ error, sess_or_txn_id, req_id, short_circuit_ok,
+ msg->errstr);
+
+ ret = mgmt_msg_native_send_msg(conn, msg, short_circuit_ok);
+ mgmt_msg_native_free_msg(msg);
+ return ret;
+}
diff --git a/lib/mgmt_msg_native.h b/lib/mgmt_msg_native.h
new file mode 100644
index 0000000000..3f6283025c
--- /dev/null
+++ b/lib/mgmt_msg_native.h
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * June 29 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ *
+ */
+
+#ifndef _FRR_MGMT_MSG_NATIVE_H_
+#define _FRR_MGMT_MSG_NATIVE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#elif 0
+}
+#endif
+
+#include <zebra.h>
+#include "compiler.h"
+#include "darr.h"
+#include "memory.h"
+#include "mgmt_msg.h"
+#include "mgmt_defines.h"
+
+#include <stdalign.h>
+
+/*
+ * ==================
+ * Native Message API
+ * ==================
+ *
+ * -----------------------
+ * Defining A New Message:
+ * -----------------------
+ *
+ * 1) Start with `struct mgmt_msg_header` as the first (unnamed) field.
+ *
+ * 2) Add fixed-width fields. Add on natural aligned boundaries (*)
+ *
+ * 3) [Optional] Add a zero-length variable field. Add aligned on a 64-bit
+ * boundary, this is done so that: `value = (HDR + 1)` works.
+ *
+ * 4) Define a new MTYPE for the new message type (see DECLARE_MTYPE below
+ * as well as the paired DEFINE_MTYPE in mgmt_msg_native.c)
+ *
+ * These rules are so the messages may be read from and written directly to
+ * "the wire", easily, using common programming languages (e.g., C, rust, go,
+ * python, ...)
+ *
+ * (*) Natural aligned boundaries, i.e., uint16_t on 2-byte boundary, uint64_t
+ * on 8-byte boundaries, ...)
+ *
+ * ------------------------------
+ * Allocating New Native Messages
+ * ------------------------------
+ *
+ * For fixed-length and variable length messages one should allocate new
+ * messages with the mgmt_msg_native_alloc_msg() passing in the newly defined
+ * MTYPE. Likewise, to free the message one should use
+ * mgmt_msg_native_free_msg().
+ *
+ * Unknown Variable Length Messages:
+ * ---------------------------------
+ *
+ * If using a zero-length variable length field and the length is not known at
+ * message creation time, you can use the `native` API function
+ * mgmt_msg_native_append() to add data to the end of the message, or if a more
+ * full set of operations are required, the darr_xxxx() API is also available as
+ * in the Advanced section below.
+ *
+ * Notable API Functions:
+ * ---------------------------------
+ *
+ * mgmt_msg_native_alloc_msg() - Allocate a native msg.
+ * mgmt_msg_native_free_msg() - Free a native msg.
+ * mgmt_msg_native_append() - Append data to the end of the msg.
+ * mgmt_msg_native_get_msg_len() - Get the total length of the msg.
+ * mgmt_msg_native_send_msg() - Send the message.
+ *
+ *
+ * -------------------------------------
+ * [Advanced Use] Dynamic Array Messages
+ * -------------------------------------
+ *
+ * NOTE: Most users can simply use mgmt_msg_native_append() and skip this
+ * section.
+ *
+ * This section is only important to understand if you wish to utilize the fact
+ * that native messages allocated with mgmt_msg_native_alloc_msg are
+ * actually allocated as uint8_t dynamic arrays (`darr`).
+ *
+ * You can utilize all the darr_xxxx() API to manipulate the variable length
+ * message data in a native message. To do so you simply need to understand that
+ * the native message is actually a `uint8_t *` darr. So, for example, to append
+ * data to the end of a message one could do the following:
+ *
+ * void append_metric_path(struct mgmt_msg_my_msg *msg)
+ * {
+ * msg = (struct mgmt_msg_my_msg *)
+ * darr_strcat((uint8_t *)msg, "/metric");
+ *
+ * // ...
+ * }
+ *
+ * NOTE: If reallocs happen the original passed in pointer will be updated;
+ * however, any other pointers into the message will become invalid, and so they
+ * should always be discarded or reinitialized after using any reallocating
+ * darr_xxx() API functions.
+ *
+ * void append_metric_path(struct mgmt_msg_my_msg *msg)
+ * {
+ * char *xpath = msg->xpath; // pointer into message
+ *
+ * darr_in_strcat((uint8_t *)msg, "/metric");
+ * // msg may have been updated to point at new memory
+ *
+ * xpath = NULL; // now invalid
+ * xpath = msg->xpath; // reinitialize
+ * // ...
+ * }
+ *
+ * Rather than worry about this, it's typical when using dynamic arrays to always
+ * work from the main pointer to the dynamic array, rather than caching multiple
+ * pointers into the data. Modern compilers will optimize the code so that it
+ * adds no extra execution cost.
+ *
+ * void append_metric_path(struct mgmt_msg_my_msg *msg)
+ * {
+ * darr_in_strcat((uint8_t *)msg, "/metric");
+ *
+ * // Use `msg->xpath` directly rather than creating and using an
+ * // `xpath = msg->xpath` local variable.
+ *
+ * if (strcmp(msg->xpath, "foobar/metric")) {
+ * // ...
+ * }
+ * }
+ *
+ */
+
+DECLARE_MTYPE(MSG_NATIVE_MSG);
+DECLARE_MTYPE(MSG_NATIVE_ERROR);
+DECLARE_MTYPE(MSG_NATIVE_GET_TREE);
+DECLARE_MTYPE(MSG_NATIVE_TREE_DATA);
+
+/*
+ * Native message codes
+ */
+#define MGMT_MSG_CODE_ERROR 0
+#define MGMT_MSG_CODE_GET_TREE 1
+#define MGMT_MSG_CODE_TREE_DATA 2
+
+/**
+ * struct mgmt_msg_header - Header common to all native messages.
+ *
+ * @code: the actual type of the message.
+ * @resv: Set to zero, ignore on receive.
+ * @vsplit: If a variable section is split in 2, the length of first part.
+ * @refer_id: the session, txn, conn, etc, this message is associated with.
+ * @req_id: the request this message is for.
+ */
+struct mgmt_msg_header {
+ uint16_t code;
+ uint16_t resv;
+ uint32_t vsplit;
+ uint64_t refer_id;
+ uint64_t req_id;
+};
+_Static_assert(sizeof(struct mgmt_msg_header) == 3 * 8, "Bad padding");
+_Static_assert(sizeof(struct mgmt_msg_header) ==
+ offsetof(struct mgmt_msg_header, req_id) +
+ sizeof(((struct mgmt_msg_header *)0)->req_id),
+ "Size mismatch");
+
+/**
+ * struct mgmt_msg_error - Common error message.
+ *
+ * @error: An error value.
+ * @errstr: Description of the error; can be 0 length.
+ *
+ * This common error message can be used for replies for many msg requests
+ * (req_id).
+ */
+struct mgmt_msg_error {
+ struct mgmt_msg_header;
+ int16_t error;
+ uint8_t resv2[6];
+
+ alignas(8) char errstr[];
+};
+_Static_assert(sizeof(struct mgmt_msg_error) ==
+ offsetof(struct mgmt_msg_error, errstr),
+ "Size mismatch");
+
+/**
+ * struct mgmt_msg_get_tree - Message carrying xpath query request.
+ *
+ * @result_type: ``LYD_FORMAT`` for the returned result.
+ * @xpath: the query for the data to return.
+ */
+struct mgmt_msg_get_tree {
+ struct mgmt_msg_header;
+ uint8_t result_type;
+ uint8_t resv2[7];
+
+ alignas(8) char xpath[];
+};
+_Static_assert(sizeof(struct mgmt_msg_get_tree) ==
+ offsetof(struct mgmt_msg_get_tree, xpath),
+ "Size mismatch");
+
+/**
+ * struct mgmt_msg_tree_data - Message carrying tree data.
+ *
+ * @partial_error: If the full result could not be returned due to this error.
+ * @result_type: ``LYD_FORMAT`` for format of the @result value.
+ * @more: if this is a partial return and there will be more coming.
+ * @result: The tree data in @result_type format.
+ *
+ */
+struct mgmt_msg_tree_data {
+ struct mgmt_msg_header;
+ int8_t partial_error;
+ uint8_t result_type;
+ uint8_t more;
+ uint8_t resv2[5];
+
+ alignas(8) uint8_t result[];
+};
+_Static_assert(sizeof(struct mgmt_msg_tree_data) ==
+ offsetof(struct mgmt_msg_tree_data, result),
+ "Size mismatch");
+
+#define MGMT_MSG_VALIDATE_NUL_TERM(msgp, len) \
+ ((len) >= sizeof(*msg) + 1 && ((char *)msgp)[(len)-1] == 0)
+
+
+/**
+ * Send a native message error to the other end of the connection.
+ *
+ * This function is normally used by the server-side to indicate a failure to
+ * process a client request. For this server side handling of client messages
+ * which expect a reply, either that reply or this error should be returned, as
+ * closing the connection is not allowed during message handling.
+ *
+ * Args:
+ * conn: the connection.
+ * sess_or_txn_id: Session ID (to FE client) or Txn ID (from BE client)
+ * req_id: which req_id this error is associated with.
+ * short_circuit_ok: if short circuit sending is OK.
+ * error: the error value
+ * errfmt: vprintfrr style format string
+ * ap: the variable args for errfmt.
+ *
+ * Return:
+ * The return value of ``msg_conn_send_msg``.
+ */
+extern int vmgmt_msg_native_send_error(struct msg_conn *conn,
+ uint64_t sess_or_txn_id, uint64_t req_id,
+ bool short_circuit_ok, int16_t error,
+ const char *errfmt, va_list ap)
+ PRINTFRR(6, 0);
+
+/**
+ * mgmt_msg_native_alloc_msg() - Create a native appendable msg.
+ * @msg_type: The message structure type.
+ * @var_len: The initial additional length to add to the message.
+ * @mem_type: The memory type (MTYPE) to allocate the message with.
+ *
+ * This function takes a C type (e.g., `struct mgmt_msg_get_tree`) as an
+ * argument and returns a new native message. The newly allocated message
+ * can be used with the other `native` functions.
+ *
+ * Importantly the mgmt_msg_native_append() function can be used to add data
+ * to the end of the message, and mgmt_msg_native_get_msg_len() can be used
+ * to obtain the total length of the message (i.e., the fixed sized header plus
+ * the variable length data that has been appended).
+ *
+ * Additionally, a dynamic array (darr) pointer can be obtained using
+ * mgmt_msg_native_get_darrp() which allows adding and manipulating the
+ * variable data that follows the fixed sized header.
+ *
+ * Return: A `msg_type` object created using a dynamic_array.
+ */
+#define mgmt_msg_native_alloc_msg(msg_type, var_len, mem_type) \
+ ({ \
+ uint8_t *buf = NULL; \
+ (msg_type *)darr_append_nz_mt(buf, \
+ sizeof(msg_type) + (var_len), \
+ mem_type); \
+ })
+
+/**
+ * mgmt_msg_native_free_msg() - Free a native msg.
+ * @msg - pointer to message allocated by mgmt_msg_native_alloc_msg().
+ */
+#define mgmt_msg_native_free_msg(msg) darr_free(msg)
+
+/**
+ * mgmt_msg_native_get_msg_len() - Get the total length of the msg.
+ * @msg: the native message.
+ *
+ * Return: the total length of the message, fixed + variable length.
+ */
+#define mgmt_msg_native_get_msg_len(msg) (darr_len((uint8_t *)(msg)))
+
+/**
+ * mgmt_msg_native_append() - Append data to the end of the msg.
+ * @msg: (IN/OUT) Pointer to the native message, variable may be updated.
+ * @data: data to append.
+ * @len: length of data to append.
+ *
+ * Append @data of length @len to the native message @msg.
+ *
+ * NOTE: Be aware @msg pointer may change as a result of reallocating the
+ * message to fit the new data. Any other pointers into the old message should
+ * be discarded.
+ *
+ * Return: a pointer to the newly appended data.
+ */
+#define mgmt_msg_native_append(msg, data, len) \
+ memcpy(darr_append(*mgmt_msg_native_get_darrp(msg), len), data, len)
+
+/**
+ * mgmt_msg_native_send_msg(msg, short_circuit_ok) - Send a native msg.
+ * @conn: the mgmt_msg connection.
+ * @msg: the native message.
+ * @short_circuit_ok: True if short-circuit sending is OK.
+ *
+ * Return: The error return value of msg_conn_send_msg().
+ */
+#define mgmt_msg_native_send_msg(conn, msg, short_circuit_ok) \
+ msg_conn_send_msg(conn, MGMT_MSG_VERSION_NATIVE, msg, \
+ mgmt_msg_native_get_msg_len(msg), NULL, \
+ short_circuit_ok)
+
+/**
+ * mgmt_msg_native_get_darrp() - Return a ptr to the dynamic array ptr.
+ * @msg: Pointer to the native message.
+ *
+ * NOTE: Most users can simply use mgmt_msg_native_append() instead of this.
+ *
+ * This function obtains a pointer to the dynamic byte array for this message,
+ * this array actually includes the message header if one is going to look at
+ * the length value. With that in mind any of the `darr_*()` functions/API may
+ * be used to manipulate the variable data at the end of the message.
+ *
+ * NOTE: The pointer returned is actually a pointer to the message pointer
+ * passed in to this function. This pointer to pointer is required so that
+ * realloc can be done inside the darr API.
+ *
+ * NOTE: If reallocs happen the original passed in pointer will be updated;
+ * however, any other pointers into the message will become invalid and so they
+ * should always be discarded after using the returned value.
+ *
+ * Example:
+ *
+ * void append_metric_path(struct mgmt_msg_my_msg *msg)
+ * {
+ * char *xpath = msg->xpath; // pointer into message
+ * uint8_t **darrp;
+ *
+ * darrp = mgmt_msg_native_get_darrp(msg);
+ * darr_in_strcat(*darrp, "/metric");
+ *
+ * xpath = NULL; // now invalid
+ * xpath = msg->xpath;
+ * }
+ *
+ *
+ * Return: A pointer to the first argument -- which is a pointer to a pointer to
+ * a dynamic array.
+ */
+#define mgmt_msg_native_get_darrp(msg) ((uint8_t **)&(msg))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FRR_MGMT_MSG_NATIVE_H_ */
diff --git a/lib/northbound.c b/lib/northbound.c
index 32988dfc15..3b02c08bbf 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -73,9 +73,9 @@ static void nb_transaction_apply_finish(struct nb_transaction *transaction,
static int nb_oper_data_iter_node(const struct lysc_node *snode,
const char *xpath, const void *list_entry,
const struct yang_list_keys *list_keys,
- struct yang_translator *translator,
- bool first, uint32_t flags,
- nb_oper_data_cb cb, void *arg);
+ struct yang_translator *translator, bool first,
+ uint32_t flags, nb_oper_data_cb cb, void *arg,
+ struct lyd_node *pdnode);
static int nb_node_check_config_only(const struct lysc_node *snode, void *arg)
{
@@ -1465,6 +1465,50 @@ const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
return nb_node->cbs.lookup_entry(&args);
}
+const void *nb_callback_lookup_node_entry(struct lyd_node *node,
+ const void *parent_list_entry)
+{
+ struct yang_list_keys keys;
+ struct nb_cb_lookup_entry_args args = {};
+ const struct nb_node *nb_node = node->schema->priv;
+
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return NULL;
+
+ if (yang_get_node_keys(node, &keys)) {
+ flog_warn(EC_LIB_LIBYANG,
+ "%s: can't get keys for lookup from existing data node %s",
+ __func__, node->schema->name);
+ return NULL;
+ }
+
+ DEBUGD(&nb_dbg_cbs_state,
+ "northbound callback (lookup_node_entry): node [%s] parent_list_entry [%p]",
+ nb_node->xpath, parent_list_entry);
+
+ args.parent_list_entry = parent_list_entry;
+ args.keys = &keys;
+ return nb_node->cbs.lookup_entry(&args);
+}
+
+const void *nb_callback_lookup_next(const struct nb_node *nb_node,
+ const void *parent_list_entry,
+ const struct yang_list_keys *keys)
+{
+ struct nb_cb_lookup_entry_args args = {};
+
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return NULL;
+
+ DEBUGD(&nb_dbg_cbs_state,
+ "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
+ nb_node->xpath, parent_list_entry);
+
+ args.parent_list_entry = parent_list_entry;
+ args.keys = keys;
+ return nb_node->cbs.lookup_next(&args);
+}
+
int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
const struct list *input, struct list *output, char *errmsg,
size_t errmsg_len)
@@ -1772,7 +1816,8 @@ static int nb_oper_data_iter_children(const struct lysc_node *snode,
const struct yang_list_keys *list_keys,
struct yang_translator *translator,
bool first, uint32_t flags,
- nb_oper_data_cb cb, void *arg)
+ nb_oper_data_cb cb, void *arg,
+ struct lyd_node *pdnode)
{
const struct lysc_node *child;
@@ -1781,7 +1826,7 @@ static int nb_oper_data_iter_children(const struct lysc_node *snode,
ret = nb_oper_data_iter_node(child, xpath, list_entry,
list_keys, translator, false,
- flags, cb, arg);
+ flags, cb, arg, pdnode);
if (ret != NB_OK)
return ret;
}
@@ -1793,15 +1838,19 @@ static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
const char *xpath, const void *list_entry,
const struct yang_list_keys *list_keys,
struct yang_translator *translator,
- uint32_t flags, nb_oper_data_cb cb, void *arg)
+ uint32_t flags, nb_oper_data_cb cb, void *arg,
+ struct lyd_node *pdnode)
{
+ const struct lysc_node *snode = nb_node->snode;
struct yang_data *data;
+ LY_ERR err = LY_SUCCESS;
+
- if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
+ if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
return NB_OK;
/* Ignore list keys. */
- if (lysc_is_key(nb_node->snode))
+ if (lysc_is_key(snode))
return NB_OK;
data = nb_callback_get_elem(nb_node, xpath, list_entry);
@@ -1809,50 +1858,89 @@ static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
/* Leaf of type "empty" is not present. */
return NB_OK;
- return (*cb)(nb_node->snode, translator, data, arg);
+ /*
+ * Add a dnode to our tree
+ */
+ err = lyd_new_term(pdnode, snode->module, snode->name, data->value,
+ false, NULL);
+ if (err)
+ return NB_ERR_RESOURCE;
+
+ if (cb)
+ return (*cb)(nb_node->snode, translator, data, arg);
+ return NB_OK;
}
static int nb_oper_data_iter_container(const struct nb_node *nb_node,
- const char *xpath,
+ const char *xpath, bool first,
const void *list_entry,
const struct yang_list_keys *list_keys,
struct yang_translator *translator,
uint32_t flags, nb_oper_data_cb cb,
- void *arg)
+ void *arg, struct lyd_node *pdnode)
{
const struct lysc_node *snode = nb_node->snode;
+ struct lyd_node *cnode = NULL;
+ bool presence = false;
+ LY_ERR err;
+ int ret;
if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
return NB_OK;
+ if (pdnode->schema == snode)
+ assert(first);
+ else
+ assert(!first);
+
/* Read-only presence containers. */
if (nb_node->cbs.get_elem) {
struct yang_data *data;
int ret;
+ presence = true;
data = nb_callback_get_elem(nb_node, xpath, list_entry);
if (data == NULL)
/* Presence container is not present. */
return NB_OK;
- ret = (*cb)(snode, translator, data, arg);
- if (ret != NB_OK)
- return ret;
- }
+ if (!first) {
+ err = lyd_new_inner(pdnode, snode->module, snode->name,
+ false, &cnode);
+ if (err)
+ return NB_ERR_RESOURCE;
+ }
- /* Read-write presence containers. */
- if (CHECK_FLAG(snode->flags, LYS_CONFIG_W)) {
- struct lysc_node_container *scontainer;
+ if (cb) {
+ ret = (*cb)(snode, translator, data, arg);
+ if (ret != NB_OK)
+ return ret;
+ }
+ }
- scontainer = (struct lysc_node_container *)snode;
- if (CHECK_FLAG(scontainer->flags, LYS_PRESENCE)
- && !yang_dnode_get(running_config->dnode, xpath))
- return NB_OK;
+ if (first)
+ cnode = pdnode;
+ else if (!cnode) {
+ /* Add a node in for this container in-case we have children. */
+ err = lyd_new_inner(pdnode, snode->module, snode->name, false,
+ &cnode);
+ if (err)
+ return NB_ERR_RESOURCE;
}
/* Iterate over the child nodes. */
- return nb_oper_data_iter_children(snode, xpath, list_entry, list_keys,
- translator, false, flags, cb, arg);
+ ret = nb_oper_data_iter_children(snode, xpath, list_entry, list_keys,
+ translator, false, flags, cb, arg,
+ cnode);
+
+ /* TODO: here we are freeing only if we created; however, we may want to
+ * also free if pdnode was cnode on entry to cleanup the data tree
+ */
+ /* If we aren't presence container and we gained no children remove */
+ if (!presence && !first && !lyd_child(cnode))
+ lyd_free_tree(cnode);
+
+ return ret;
}
static int
@@ -1860,11 +1948,14 @@ nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
const void *parent_list_entry,
const struct yang_list_keys *parent_list_keys,
struct yang_translator *translator, uint32_t flags,
- nb_oper_data_cb cb, void *arg)
+ nb_oper_data_cb cb, void *arg,
+ struct lyd_node *pdnode)
{
+ const struct lysc_node *snode = nb_node->snode;
const void *list_entry = NULL;
+ LY_ERR err;
- if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
+ if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
return NB_OK;
do {
@@ -1881,9 +1972,19 @@ nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
if (data == NULL)
continue;
- ret = (*cb)(nb_node->snode, translator, data, arg);
- if (ret != NB_OK)
- return ret;
+ /*
+ * Add a dnode to our tree
+ */
+ err = lyd_new_term(pdnode, snode->module, snode->name,
+ data->value, false, NULL);
+ if (err)
+ return NB_ERR_RESOURCE;
+
+ if (cb) {
+ ret = (*cb)(nb_node->snode, translator, data, arg);
+ if (ret != NB_OK)
+ return ret;
+ }
} while (list_entry);
return NB_OK;
@@ -1894,21 +1995,24 @@ static int nb_oper_data_iter_list(const struct nb_node *nb_node,
const void *parent_list_entry,
const struct yang_list_keys *parent_list_keys,
struct yang_translator *translator,
- uint32_t flags, nb_oper_data_cb cb, void *arg)
+ uint32_t flags, nb_oper_data_cb cb, void *arg,
+ struct lyd_node *pdnode)
{
+ char xpath[XPATH_MAXLEN * 2];
const struct lysc_node *snode = nb_node->snode;
const void *list_entry = NULL;
+ struct lyd_node *list_node = NULL;
+ const char *key_preds = NULL;
uint32_t position = 1;
+ LY_ERR err;
if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
return NB_OK;
/* Iterate over all list entries. */
do {
- const struct lysc_node_leaf *skey;
struct yang_list_keys list_keys = {};
- char xpath[XPATH_MAXLEN * 2];
- int ret;
+ int len, ret;
/* Obtain list entry. */
list_entry = nb_callback_get_next(nb_node, parent_list_entry,
@@ -1930,16 +2034,14 @@ static int nb_oper_data_iter_list(const struct nb_node *nb_node,
/* Build XPath of the list entry. */
strlcpy(xpath, xpath_list, sizeof(xpath));
- unsigned int i = 0;
- LY_FOR_KEYS (snode, skey) {
- assert(i < list_keys.num);
- snprintf(xpath + strlen(xpath),
- sizeof(xpath) - strlen(xpath),
- "[%s='%s']", skey->name,
- list_keys.key[i]);
- i++;
- }
- assert(i == list_keys.num);
+ len = strlen(xpath);
+ key_preds = &xpath[len];
+
+ uint n = yang_get_key_preds(xpath + len, snode,
+ &list_keys,
+ sizeof(xpath) - len);
+ assert(n == list_keys.num);
+
} else {
/*
* Keyless list - build XPath using a positional index.
@@ -1949,10 +2051,20 @@ static int nb_oper_data_iter_list(const struct nb_node *nb_node,
position++;
}
+ /*
+ * `pdnode` needs to point at lib - and it does for
+ * "/frr-vrf:lib/vrf" need to test "/frr-vrf:lib" too though
+ */
+ err = lyd_new_list2(pdnode, snode->module, snode->name,
+ key_preds, false, &list_node);
+ if (err)
+ return NB_ERR_RESOURCE;
+
/* Iterate over the child nodes. */
- ret = nb_oper_data_iter_children(
- nb_node->snode, xpath, list_entry, &list_keys,
- translator, false, flags, cb, arg);
+ ret = nb_oper_data_iter_children(nb_node->snode, xpath,
+ list_entry, &list_keys,
+ translator, false, flags, cb,
+ arg, list_node);
if (ret != NB_OK)
return ret;
} while (list_entry);
@@ -1960,13 +2072,12 @@ static int nb_oper_data_iter_list(const struct nb_node *nb_node,
return NB_OK;
}
-static int nb_oper_data_iter_node(const struct lysc_node *snode,
- const char *xpath_parent,
- const void *list_entry,
- const struct yang_list_keys *list_keys,
- struct yang_translator *translator,
- bool first, uint32_t flags,
- nb_oper_data_cb cb, void *arg)
+int nb_oper_data_iter_node(const struct lysc_node *snode,
+ const char *xpath_parent, const void *list_entry,
+ const struct yang_list_keys *list_keys,
+ struct yang_translator *translator, bool first,
+ uint32_t flags, nb_oper_data_cb cb, void *arg,
+ struct lyd_node *pdnode)
{
struct nb_node *nb_node;
char xpath[XPATH_MAXLEN];
@@ -1976,6 +2087,10 @@ static int nb_oper_data_iter_node(const struct lysc_node *snode,
&& CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
return NB_OK;
+ /*
+ * would be nice to just be building a libyang data tree here as well
+ */
+
/* Update XPath. */
strlcpy(xpath, xpath_parent, sizeof(xpath));
if (!first && snode->nodetype != LYS_USES) {
@@ -2001,29 +2116,36 @@ static int nb_oper_data_iter_node(const struct lysc_node *snode,
nb_node = snode->priv;
switch (snode->nodetype) {
case LYS_CONTAINER:
- ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
- list_keys, translator, flags,
- cb, arg);
+ /* does something, then walks children */
+ ret = nb_oper_data_iter_container(nb_node, xpath, first,
+ list_entry, list_keys,
+ translator, flags, cb, arg,
+ pdnode);
+
break;
case LYS_LEAF:
+ /* does something then returns */
ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
list_keys, translator, flags, cb,
- arg);
+ arg, pdnode);
break;
case LYS_LEAFLIST:
+ /* walks leaf list doing things and returns */
ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
list_keys, translator, flags,
- cb, arg);
+ cb, arg, pdnode);
break;
case LYS_LIST:
+ /* walks children */
ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
list_keys, translator, flags, cb,
- arg);
+ arg, pdnode);
break;
case LYS_USES:
+ /* walks children */
ret = nb_oper_data_iter_children(snode, xpath, list_entry,
list_keys, translator, false,
- flags, cb, arg);
+ flags, cb, arg, pdnode);
break;
default:
break;
@@ -2032,8 +2154,64 @@ static int nb_oper_data_iter_node(const struct lysc_node *snode,
return ret;
}
+static int nb_xpath_dirname(char *xpath)
+{
+ int len = strlen(xpath);
+ bool abs = xpath[0] == '/';
+ char *slash;
+
+ /* "//" or "/" => NULL */
+ if (abs && (len == 1 || (len == 2 && xpath[1] == '/')))
+ return NB_ERR_NOT_FOUND;
+ slash = (char *)frrstr_back_to_char(xpath, '/');
+ /* "/foo/bar/" or "/foo/bar//" => "/foo " */
+ if (slash && slash == &xpath[len - 1]) {
+ xpath[--len] = 0;
+ slash = (char *)frrstr_back_to_char(xpath, '/');
+ if (slash && slash == &xpath[len - 1]) {
+ xpath[--len] = 0;
+ slash = (char *)frrstr_back_to_char(xpath, '/');
+ }
+ }
+ if (!slash)
+ return NB_ERR_NOT_FOUND;
+ *slash = 0;
+ return NB_OK;
+}
+
+static int nb_oper_data_xpath_to_tree(const char *xpath_in,
+ struct lyd_node **dnode,
+ bool is_top_node_list)
+{
+ /* Eventually this function will loop until it finds a concrete path */
+ char *xpath;
+ LY_ERR err;
+ int ret;
+
+ err = lyd_new_path2(NULL, ly_native_ctx, xpath_in, NULL, 0, 0,
+ LYD_NEW_PATH_UPDATE, NULL, dnode);
+ if (err == LY_SUCCESS)
+ return NB_OK;
+ if (!is_top_node_list)
+ return NB_ERR_NOT_FOUND;
+
+ xpath = XSTRDUP(MTYPE_TMP, xpath_in);
+ ret = nb_xpath_dirname(xpath);
+ if (ret != NB_OK)
+ goto done;
+
+ err = lyd_new_path2(NULL, ly_native_ctx, xpath, NULL, 0, 0,
+ LYD_NEW_PATH_UPDATE, NULL, dnode);
+ if (err != LY_SUCCESS)
+ ret = NB_ERR_NOT_FOUND;
+done:
+ XFREE(MTYPE_TMP, xpath);
+ return ret;
+}
+
int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
- uint32_t flags, nb_oper_data_cb cb, void *arg)
+ uint32_t flags, nb_oper_data_cb cb, void *arg,
+ struct lyd_node **tree)
{
struct nb_node *nb_node;
const void *list_entry = NULL;
@@ -2064,25 +2242,24 @@ int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
* all YANG lists (if any).
*/
- LY_ERR err = lyd_new_path2(NULL, ly_native_ctx, xpath, NULL, 0, 0,
- LYD_NEW_PATH_UPDATE, NULL, &dnode);
- if (err || !dnode) {
- const char *errmsg =
- err ? ly_errmsg(ly_native_ctx) : "node not found";
- flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed %s",
- __func__, errmsg);
- return NB_ERR;
+ ret = nb_oper_data_xpath_to_tree(xpath, &dnode,
+ nb_node->snode->nodetype == LYS_LIST);
+ if (ret) {
+ flog_warn(EC_LIB_LIBYANG,
+ "%s: can't instantiate concrete path using xpath: %s",
+ __func__, xpath);
+ return ret;
}
+
/*
* Create a linked list to sort the data nodes starting from the root.
*/
list_dnodes = list_new();
- for (dn = dnode; dn; dn = lyd_parent(dn)) {
- if (dn->schema->nodetype != LYS_LIST || !lyd_child(dn))
- continue;
- listnode_add_head(list_dnodes, dn);
- }
+ for (dn = dnode; dn; dn = lyd_parent(dn))
+ if (dn->schema->nodetype == LYS_LIST)
+ listnode_add_head(list_dnodes, dn);
+
/*
* Use the northbound callbacks to find list entry pointer corresponding
* to the given XPath.
@@ -2104,6 +2281,10 @@ int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
}
list_keys.num = n;
if (list_keys.num != yang_snode_num_keys(dn->schema)) {
+ flog_warn(
+ EC_LIB_NB_OPERATIONAL_DATA,
+ "%s: internal list entry '%s' missing required key values predicates in xpath: %s",
+ __func__, dn->schema->name, xpath);
list_delete(&list_dnodes);
yang_dnode_free(dnode);
return NB_ERR_NOT_FOUND;
@@ -2121,6 +2302,11 @@ int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
return NB_ERR;
}
+ /* NOTE: To add support for multiple levels of unspecified keys
+ * we need to loop here using the list entry's get_next to work
+ * with each "existing in the data" list entry. It will be a bit
+ * tricky b/c we are inside a loop here.
+ */
list_entry =
nb_callback_lookup_entry(nn, list_entry, &list_keys);
if (list_entry == NULL) {
@@ -2130,18 +2316,33 @@ int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
}
}
- /* If a list entry was given, iterate over that list entry only. */
- if (dnode->schema->nodetype == LYS_LIST && lyd_child(dnode))
- ret = nb_oper_data_iter_children(
- nb_node->snode, xpath, list_entry, &list_keys,
- translator, true, flags, cb, arg);
+ /* If a list entry was given with keys as the last node in the path,
+ * iterate over that list entry only.
+ */
+ if (dnode->schema->nodetype == LYS_LIST && lyd_child(dnode)
+ && dnode->schema == nb_node->snode)
+ ret = nb_oper_data_iter_children(nb_node->snode, xpath,
+ list_entry, &list_keys,
+ translator, true, flags, cb,
+ arg, dnode);
else
ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
&list_keys, translator, true,
- flags, cb, arg);
+ flags, cb, arg, dnode);
list_delete(&list_dnodes);
- yang_dnode_free(dnode);
+ if (dnode) {
+ while (lyd_parent(dnode))
+ dnode = lyd_parent(dnode);
+
+ if (tree && ret == NB_OK)
+ *tree = dnode;
+ else {
+ lyd_free_all(dnode);
+ if (tree)
+ *tree = NULL;
+ }
+ }
return ret;
}
@@ -2544,6 +2745,8 @@ const char *nb_err_name(enum nb_error error)
return "failed to allocate resource";
case NB_ERR_INCONSISTENCY:
return "internal inconsistency";
+ case NB_YIELD:
+ return "should yield";
}
assert(!"Reached end of function we should never hit");
@@ -2665,10 +2868,15 @@ void nb_init(struct event_loop *tm,
/* Initialize the northbound CLI. */
nb_cli_init(tm);
+
+ /* Initialize oper-state */
+ nb_oper_init(tm);
}
void nb_terminate(void)
{
+ nb_oper_terminate();
+
/* Terminate the northbound CLI. */
nb_cli_terminate();
diff --git a/lib/northbound.h b/lib/northbound.h
index 9c0b4d16c3..850397d221 100644
--- a/lib/northbound.h
+++ b/lib/northbound.h
@@ -485,6 +485,22 @@ struct nb_callbacks {
const void *(*lookup_entry)(struct nb_cb_lookup_entry_args *args);
/*
+ * Operational data callback for YANG lists.
+ *
+ * The callback function should return the next list entry that would
+ * follow a list entry with the keys given as a parameter. Keyless
+ * lists don't need to implement this callback.
+ *
+ * args
+ * Refer to the documentation comments of nb_cb_lookup_entry_args for
+ * details.
+ *
+ * Returns:
+ * Pointer to the list entry if found, or NULL if not found.
+ */
+ const void *(*lookup_next)(struct nb_cb_lookup_entry_args *args);
+
+ /*
* RPC and action callback.
*
* Both 'input' and 'output' are lists of 'yang_data' structures. The
@@ -644,6 +660,7 @@ enum nb_error {
NB_ERR_VALIDATION,
NB_ERR_RESOURCE,
NB_ERR_INCONSISTENCY,
+ NB_YIELD,
};
/* Default priority. */
@@ -710,6 +727,29 @@ typedef int (*nb_oper_data_cb)(const struct lysc_node *snode,
struct yang_translator *translator,
struct yang_data *data, void *arg);
+/**
+ * nb_oper_data_finish_cb() - finish a portion or all of an oper data walk.
+ * @tree - r/o copy of the tree created during this portion of the walk.
+ * @arg - finish arg passed to nb_op_iterate_yielding.
+ * @ret - NB_OK if done with walk, NB_YIELD if done with portion, otherwise an
+ * error.
+ *
+ * If nb_oper_walk() was passed with @should_batch set then this
+ * callback will be invoked during each portion (batch) of the walk.
+ *
+ * The @tree is read-only and should not be modified or freed.
+ *
+ * If this function returns anything but NB_OK then the walk will be terminated,
+ * and this function will not be called again regardless of whether @ret was
+ * `NB_YIELD` or not.
+ *
+ * Return: NB_OK to continue or complete the walk normally, otherwise an error
+ * to immediately terminate the walk.
+ */
+/* Callback function used by nb_oper_walk(). */
+typedef enum nb_error (*nb_oper_data_finish_cb)(const struct lyd_node *tree,
+ void *arg, enum nb_error ret);
+
/* Iterate over direct child nodes only. */
#define NB_OPER_DATA_ITER_NORECURSE 0x0001
@@ -743,6 +783,11 @@ extern int nb_callback_get_keys(const struct nb_node *nb_node,
extern const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
const void *parent_list_entry,
const struct yang_list_keys *keys);
+extern const void *nb_callback_lookup_node_entry(struct lyd_node *node,
+ const void *parent_list_entry);
+extern const void *nb_callback_lookup_next(const struct nb_node *nb_node,
+ const void *parent_list_entry,
+ const struct yang_list_keys *keys);
extern int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
const struct list *input, struct list *output,
char *errmsg, size_t errmsg_len);
@@ -1250,8 +1295,13 @@ extern int nb_running_unlock(enum nb_client client, const void *user);
*/
extern int nb_running_lock_check(enum nb_client client, const void *user);
+extern int nb_oper_data_iterate(const char *xpath,
+ struct yang_translator *translator,
+ uint32_t flags, nb_oper_data_cb cb, void *arg,
+ struct lyd_node **tree);
+
/*
- * Iterate over operational data.
+ * Iterate over operational data -- deprecated.
*
* xpath
* Data path of the YANG data we want to iterate over.
@@ -1262,18 +1312,57 @@ extern int nb_running_lock_check(enum nb_client client, const void *user);
* flags
* NB_OPER_DATA_ITER_ flags to control how the iteration is performed.
*
+ * should_batch
+ * Should call finish cb with partial results (i.e., creating batches)
+ *
* cb
* Function to call with each data node.
*
* arg
* Arbitrary argument passed as the fourth parameter in each call to 'cb'.
*
+ * tree
+ * If non-NULL will contain the data tree built from the walk.
+ *
* Returns:
* NB_OK on success, NB_ERR otherwise.
*/
-extern int nb_oper_data_iterate(const char *xpath,
- struct yang_translator *translator,
- uint32_t flags, nb_oper_data_cb cb, void *arg);
+extern enum nb_error nb_oper_iterate_legacy(const char *xpath,
+ struct yang_translator *translator,
+ uint32_t flags, nb_oper_data_cb cb,
+ void *arg, struct lyd_node **tree);
+
+/**
+ * nb_oper_walk() - walk the schema building operational state.
+ * @xpath -
+ * @translator -
+ * @flags -
+ * @should_batch - should allow yielding and processing portions of the tree.
+ * @cb - callback invoked for each non-list, non-container node.
+ * @arg - arg to pass to @cb.
+ * @finish - function to call when done with portion or all of walk.
+ * @finish_arg - arg to pass to @finish.
+ *
+ * Return: walk - a cookie that can be used to cancel the walk.
+ */
+extern void *nb_oper_walk(const char *xpath, struct yang_translator *translator,
+ uint32_t flags, bool should_batch, nb_oper_data_cb cb,
+ void *arg, nb_oper_data_finish_cb finish,
+ void *finish_arg);
+
+/**
+ * nb_oper_cancel_walk() - cancel the in progress walk.
+ * @walk - value returned from nb_oper_walk()
+ *
+ * Should only be called on an in-progress walk. It is invalid to cancel an
+ * already finished walk. The walk's `finish` callback will not be called.
+ */
+extern void nb_oper_cancel_walk(void *walk);
+
+/**
+ * nb_oper_cancel_all_walks() - cancel all in progress walks.
+ */
+extern void nb_oper_cancel_all_walks(void);
/*
* Validate if the northbound operation is valid for the given node.
@@ -1481,6 +1570,9 @@ extern void nb_init(struct event_loop *tm,
*/
extern void nb_terminate(void);
+extern void nb_oper_init(struct event_loop *loop);
+extern void nb_oper_terminate(void);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index f2415d3383..20f030e280 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -1437,11 +1437,9 @@ static int nb_cli_oper_data_cb(const struct lysc_node *snode,
}
exit:
- yang_data_free(data);
return NB_OK;
error:
- yang_data_free(data);
return NB_ERR;
}
@@ -1490,9 +1488,14 @@ DEFPY (show_yang_operational_data,
ly_ctx = ly_native_ctx;
/* Obtain data. */
- dnode = yang_dnode_new(ly_ctx, false);
- ret = nb_oper_data_iterate(xpath, translator, 0, nb_cli_oper_data_cb,
- dnode);
+ if (translator) {
+ dnode = yang_dnode_new(ly_ctx, false);
+ ret = nb_oper_iterate_legacy(xpath, translator, 0,
+ nb_cli_oper_data_cb, dnode, NULL);
+ } else {
+ dnode = NULL;
+ ret = nb_oper_iterate_legacy(xpath, NULL, 0, NULL, NULL, &dnode);
+ }
if (ret != NB_OK) {
if (format == LYD_JSON)
vty_out(vty, "{}\n");
@@ -1500,7 +1503,8 @@ DEFPY (show_yang_operational_data,
/* embed ly_last_errmsg() when we get newer libyang */
vty_out(vty, "<!-- Not found -->\n");
}
- yang_dnode_free(dnode);
+ if (dnode)
+ yang_dnode_free(dnode);
return CMD_WARNING;
}
diff --git a/lib/northbound_grpc.cpp b/lib/northbound_grpc.cpp
index 6c33351cef..7957752589 100644
--- a/lib/northbound_grpc.cpp
+++ b/lib/northbound_grpc.cpp
@@ -427,25 +427,11 @@ static struct lyd_node *get_dnode_config(const std::string &path)
return dnode;
}
-static int get_oper_data_cb(const struct lysc_node *snode,
- struct yang_translator *translator,
- struct yang_data *data, void *arg)
-{
- struct lyd_node *dnode = static_cast<struct lyd_node *>(arg);
- int ret = yang_dnode_edit(dnode, data->xpath, data->value);
- yang_data_free(data);
-
- return (ret == 0) ? NB_OK : NB_ERR;
-}
-
static struct lyd_node *get_dnode_state(const std::string &path)
{
- struct lyd_node *dnode = yang_dnode_new(ly_native_ctx, false);
- if (nb_oper_data_iterate(path.c_str(), NULL, 0, get_oper_data_cb, dnode)
- != NB_OK) {
- yang_dnode_free(dnode);
- return NULL;
- }
+ struct lyd_node *dnode = NULL;
+
+ (void)nb_oper_iterate_legacy(path.c_str(), NULL, 0, NULL, NULL, &dnode);
return dnode;
}
diff --git a/lib/northbound_oper.c b/lib/northbound_oper.c
new file mode 100644
index 0000000000..4a7a0bb559
--- /dev/null
+++ b/lib/northbound_oper.c
@@ -0,0 +1,1769 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * October 14 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (C) 2018 NetDEF, Inc.
+ * Renato Westphal
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ *
+ */
+
+#include <zebra.h>
+#include "darr.h"
+#include "debug.h"
+#include "frrevent.h"
+#include "frrstr.h"
+#include "lib_errors.h"
+#include "monotime.h"
+#include "northbound.h"
+
+/*
+ * YANG model yielding design restrictions:
+ *
+ * In order to be able to yield and guarantee we have a valid data tree at the
+ * point of yielding we must know that each parent has all it's siblings
+ * collected to represent a complete element.
+ *
+ * Basically, there should be only a single branch in the schema tree that
+ * supports yielding. In practice this means:
+ *
+ * list node schema with lookup next:
+ * - must not have any lookup-next list node sibling schema
+ * - must not have any list or container node siblings with lookup-next descendants.
+ * - any parent list nodes must also be lookup-next list nodes
+ *
+ * We must also process containers with lookup-next descendants last.
+ */
+
+DEFINE_MTYPE_STATIC(LIB, NB_YIELD_STATE, "NB Yield State");
+DEFINE_MTYPE_STATIC(LIB, NB_NODE_INFOS, "NB Node Infos");
+
+/* Amount of time allowed to spend constructing oper-state prior to yielding */
+#define NB_OP_WALK_INTERVAL_MS 50
+#define NB_OP_WALK_INTERVAL_US (NB_OP_WALK_INTERVAL_MS * 1000)
+
+/* ---------- */
+/* Data Types */
+/* ---------- */
+PREDECL_LIST(nb_op_walks);
+
+/*
+ * This is our information about a node on the branch we are looking at
+ */
+struct nb_op_node_info {
+ struct lyd_node_inner *inner;
+ const struct lysc_node *schema; /* inner schema in case we rm inner */
+ struct yang_list_keys keys; /* if list, keys to locate element */
+ const void *list_entry; /* opaque entry from user or NULL */
+ uint xpath_len; /* length of the xpath string for this node */
+ uint niters; /* # list elems created this iteration */
+ uint nents; /* # list elems created so far */
+ bool query_specific_entry : 1; /* this info is for a query-specified entry */
+ bool has_lookup_next : 1; /* if this node support lookup next */
+ bool lookup_next_ok : 1; /* if this and all previous support */
+};
+
+/**
+ * struct nb_op_yield_state - tracking required state for yielding.
+ *
+ * @xpath: current xpath representing the node_info stack.
+ * @xpath_orig: the original query string from the user
+ * @node_infos: the container stack for the walk from root to current
+ * @schema_path: the schema nodes for each node in the query string.
+ * @query_tokstr: the query string tokenized with NUL bytes.
+ * @query_tokens: the string pointers to each query token (node).
+ * @walk_root_level: The topmost specific node, +1 is where we start walking.
+ * @walk_start_level: @walk_root_level + 1.
+ * @query_base_level: the level the query string stops at and full walks
+ * commence below that.
+ */
+struct nb_op_yield_state {
+ /* Walking state */
+ char *xpath;
+ char *xpath_orig;
+ struct nb_op_node_info *node_infos;
+ const struct lysc_node **schema_path;
+ char *query_tokstr;
+ char **query_tokens;
+ int walk_root_level;
+ int walk_start_level;
+ int query_base_level;
+ bool query_list_entry; /* XXX query was for a specific list entry */
+
+ /* Yielding state */
+ bool query_did_entry; /* currently processing the entry */
+ bool should_batch;
+ struct timeval start_time;
+ struct yang_translator *translator;
+ uint32_t flags;
+ nb_oper_data_cb cb;
+ void *cb_arg;
+ nb_oper_data_finish_cb finish;
+ void *finish_arg;
+ struct event *walk_ev;
+ struct nb_op_walks_item link;
+};
+
+DECLARE_LIST(nb_op_walks, struct nb_op_yield_state, link);
+
+/* ---------------- */
+/* Global Variables */
+/* ---------------- */
+
+static struct event_loop *event_loop;
+static struct nb_op_walks_head nb_op_walks;
+
+/* --------------------- */
+/* Function Declarations */
+/* --------------------- */
+
+static enum nb_error nb_op_yield(struct nb_op_yield_state *ys);
+static struct lyd_node *ys_root_node(struct nb_op_yield_state *ys);
+
+/* -------------------- */
+/* Function Definitions */
+/* -------------------- */
+
+static inline struct nb_op_yield_state *
+nb_op_create_yield_state(const char *xpath, struct yang_translator *translator,
+ uint32_t flags, bool should_batch, nb_oper_data_cb cb,
+ void *cb_arg, nb_oper_data_finish_cb finish,
+ void *finish_arg)
+{
+ struct nb_op_yield_state *ys;
+
+ ys = XCALLOC(MTYPE_NB_YIELD_STATE, sizeof(*ys));
+ ys->xpath = darr_strdup_cap(xpath, (size_t)XPATH_MAXLEN);
+ ys->xpath_orig = darr_strdup(xpath);
+ ys->translator = translator;
+ ys->flags = flags;
+ ys->should_batch = should_batch;
+ ys->cb = cb;
+ ys->cb_arg = cb_arg;
+ ys->finish = finish;
+ ys->finish_arg = finish_arg;
+
+ nb_op_walks_add_tail(&nb_op_walks, ys);
+
+ return ys;
+}
+
+static inline void nb_op_free_yield_state(struct nb_op_yield_state *ys,
+ bool nofree_tree)
+{
+ if (ys) {
+ EVENT_OFF(ys->walk_ev);
+ nb_op_walks_del(&nb_op_walks, ys);
+ /* if we have a branch then free up its libyang tree */
+ if (!nofree_tree && ys_root_node(ys))
+ lyd_free_all(ys_root_node(ys));
+ darr_free(ys->query_tokens);
+ darr_free(ys->query_tokstr);
+ darr_free(ys->schema_path);
+ darr_free(ys->node_infos);
+ darr_free(ys->xpath_orig);
+ darr_free(ys->xpath);
+ XFREE(MTYPE_NB_YIELD_STATE, ys);
+ }
+}
+
+static const struct lysc_node *ys_get_walk_stem_tip(struct nb_op_yield_state *ys)
+{
+ if (ys->walk_start_level <= 0)
+ return NULL;
+ return ys->node_infos[ys->walk_start_level - 1].schema;
+}
+
+static struct lyd_node *ys_root_node(struct nb_op_yield_state *ys)
+{
+ if (!darr_len(ys->node_infos))
+ return NULL;
+ return &ys->node_infos[0].inner->node;
+}
+
+static void ys_trim_xpath(struct nb_op_yield_state *ys)
+{
+ uint len = darr_len(ys->node_infos);
+
+ if (len == 0)
+ darr_setlen(ys->xpath, 1);
+ else
+ darr_setlen(ys->xpath, darr_last(ys->node_infos)->xpath_len + 1);
+ ys->xpath[darr_len(ys->xpath) - 1] = 0;
+}
+
+static void ys_pop_inner(struct nb_op_yield_state *ys)
+{
+ uint len = darr_len(ys->node_infos);
+
+ assert(len);
+ darr_setlen(ys->node_infos, len - 1);
+ ys_trim_xpath(ys);
+}
+
+static void nb_op_get_keys(struct lyd_node_inner *list_node,
+ struct yang_list_keys *keys)
+{
+ struct lyd_node *child;
+ uint n = 0;
+
+ keys->num = 0;
+ LY_LIST_FOR (list_node->child, child) {
+ if (!lysc_is_key(child->schema))
+ break;
+ strlcpy(keys->key[n], yang_dnode_get_string(child, NULL),
+ sizeof(keys->key[n]));
+ n++;
+ }
+
+ keys->num = n;
+}
+
+/**
+ * __move_back_to_next() - move back to the next lookup-next schema
+ */
+static bool __move_back_to_next(struct nb_op_yield_state *ys, int i)
+{
+ struct nb_op_node_info *ni;
+ int j;
+
+ /*
+ * We will free the subtree we are trimming back to, or we will be done
+ * with the walk and will free the root on cleanup.
+ */
+
+ /* pop any node_info we dropped below on entry */
+ for (j = darr_ilen(ys->node_infos) - 1; j > i; j--)
+ ys_pop_inner(ys);
+
+ for (; i >= ys->walk_root_level; i--) {
+ if (ys->node_infos[i].has_lookup_next)
+ break;
+ ys_pop_inner(ys);
+ }
+
+ if (i < ys->walk_root_level)
+ return false;
+
+ ni = &ys->node_infos[i];
+
+ /*
+ * The i'th node has been lost after a yield so trim it from the tree
+ * now.
+ */
+ lyd_free_tree(&ni->inner->node);
+ ni->inner = NULL;
+ ni->list_entry = NULL;
+
+ /*
+ * Leave the empty-of-data node_info on top, __walk will deal with
+ * this, by doing a lookup-next with the keys which we still have.
+ */
+
+ return true;
+}
+
/**
 * nb_op_resume_data_tree() - re-validate the node stack after a yield.
 * @ys: the yield state for this tree walk.
 *
 * While yielded the backend state may have changed; re-verify each list
 * entry on the node_info stack via lookup_entry and prune back to a
 * lookup-next capable node if an entry has vanished.
 */
static void nb_op_resume_data_tree(struct nb_op_yield_state *ys)
{
	struct nb_op_node_info *ni;
	struct nb_node *nn;
	const void *parent_entry;
	const void *list_entry;
	uint i;

	/*
	 * IMPORTANT: On yielding: we always yield during list iteration and
	 * after the initial list element has been created and handled, so the
	 * top of the yield stack will always point at a list node.
	 *
	 * Additionally, that list node has been processed and was in the
	 * process of being "get_next"d when we yielded. We process the
	 * lookup-next list node last so all the rest of the data (to the left)
	 * has been gotten. NOTE: To keep this simple we will require only a
	 * single lookup-next sibling in any parents list of children.
	 *
	 * Walk the rightmost branch (the node info stack) from base to tip
	 * verifying all list nodes are still present. If not we backup to the
	 * node which has a lookup next, and we prune the branch to this node.
	 * If the list node that went away is the topmost we will be using
	 * lookup_next, but if it's a parent then the list_entry will have been
	 * restored.
	 */
	darr_foreach_i (ys->node_infos, i) {
		ni = &ys->node_infos[i];
		nn = ni->schema->priv;

		/* Containers have no backend list entry to re-verify. */
		if (CHECK_FLAG(ni->schema->nodetype, LYS_CONTAINER))
			continue;

		/* Only the topmost entry may legitimately be NULL (mid get_next). */
		assert(ni->list_entry != NULL ||
		       ni == darr_last(ys->node_infos));

		/* Verify the entry is still present */
		parent_entry = (i == 0 ? NULL : ni[-1].list_entry);
		list_entry = nb_callback_lookup_entry(nn, parent_entry,
						      &ni->keys);
		if (!list_entry || list_entry != ni->list_entry) {
			/* May be NULL or a different pointer
			 * move back to first of
			 * container with last lookup_next list node
			 * (which may be this one) and get next.
			 */
			if (!__move_back_to_next(ys, i))
				DEBUGD(&nb_dbg_events,
				       "%s: Nothing to resume after delete during walk (yield)",
				       __func__);
			return;
		}
	}
}
+
+/*
+ * Can only yield if all list nodes to root have lookup_next() callbacks
+ *
+ * In order to support lookup_next() the list_node get_next() callback
+ * needs to return ordered (i.e., sorted) results.
+ */
+
+/* ======================= */
+/* Start of walk init code */
+/* ======================= */
+
+/**
+ * __xpath_pop_node() - remove the last node from xpath string
+ * @xpath: an xpath string
+ *
+ * Return: NB_OK or NB_ERR_NOT_FOUND if nothing left to pop.
+ */
+static int __xpath_pop_node(char *xpath)
+{
+ int len = strlen(xpath);
+ bool abs = xpath[0] == '/';
+ char *slash;
+
+ /* "//" or "/" => NULL */
+ if (abs && (len == 1 || (len == 2 && xpath[1] == '/')))
+ return NB_ERR_NOT_FOUND;
+
+ slash = (char *)frrstr_back_to_char(xpath, '/');
+ /* "/foo/bar/" or "/foo/bar//" => "/foo " */
+ if (slash && slash == &xpath[len - 1]) {
+ xpath[--len] = 0;
+ slash = (char *)frrstr_back_to_char(xpath, '/');
+ if (slash && slash == &xpath[len - 1]) {
+ xpath[--len] = 0;
+ slash = (char *)frrstr_back_to_char(xpath, '/');
+ }
+ }
+ if (!slash)
+ return NB_ERR_NOT_FOUND;
+ *slash = 0;
+ return NB_OK;
+}
+
/**
 * nb_op_xpath_to_trunk() - generate a lyd_node tree (trunk) using an xpath.
 * @xpath_in: xpath query string to build trunk from.
 * @trunk: resulting tree (trunk)
 *
 * Use the longest prefix of @xpath_in as possible to resolve to a tree (trunk).
 * This is logically as if we walked along the xpath string resolving each
 * nodename reference (in particular list nodes) until we could not.
 *
 * Return: error if any, if no error then @trunk contains the tree (trunk).
 */
static enum nb_error nb_op_xpath_to_trunk(const char *xpath_in,
					  struct lyd_node **trunk)
{
	char *xpath = NULL;
	enum nb_error ret = NB_OK;
	LY_ERR err;

	/* Work on a darr copy so we can pop trailing path nodes in place. */
	darr_in_strdup(xpath, xpath_in);
	for (;;) {
		err = lyd_new_path2(NULL, ly_native_ctx, xpath, NULL, 0, 0,
				    LYD_NEW_PATH_UPDATE, NULL, trunk);
		if (err == LY_SUCCESS)
			break;

		/* Couldn't resolve: drop the last path node and retry. */
		ret = __xpath_pop_node(xpath);
		if (ret != NB_OK)
			break;
	}
	darr_free(xpath);
	return ret;
}
+
/**
 * nb_op_ys_finalize_node_info() - finish initializing a node_info entry.
 * @ys: the yield state for this tree walk.
 * @index: index of the entry (== its depth on the node_info stack).
 *
 * Finish initializing the node info based on the xpath string, and previous
 * node_infos on the stack. If this node is a list node, obtain the specific
 * list-entry object.
 *
 * Return: NB_OK, or NB_ERR_NOT_FOUND if the backend can't iterate or can't
 * resolve this specific list entry.
 */
static enum nb_error nb_op_ys_finalize_node_info(struct nb_op_yield_state *ys,
						 uint index)
{
	struct nb_op_node_info *ni = &ys->node_infos[index];
	struct lyd_node_inner *inner = ni->inner;
	struct nb_node *nn = ni->schema->priv;
	/* yielding is only possible when a finish callback was supplied */
	bool yield_ok = ys->finish != NULL;

	ni->has_lookup_next = nn->cbs.lookup_next != NULL;

	/* track the last list_entry until updated by new list node */
	ni->list_entry = index == 0 ? NULL : ni[-1].list_entry;

	/* Assert that we are walking the rightmost branch */
	assert(!inner->parent || &inner->node == inner->parent->child->prev);

	if (CHECK_FLAG(inner->schema->nodetype, LYS_CONTAINER)) {
		/* containers have only zero or one child on a branch of a tree */
		inner = (struct lyd_node_inner *)inner->child;
		assert(!inner || inner->prev == &inner->node);
		/* yields require every ancestor to also support them */
		ni->lookup_next_ok = yield_ok &&
				     (index == 0 || ni[-1].lookup_next_ok);
		return NB_OK;
	}

	assert(CHECK_FLAG(inner->schema->nodetype, LYS_LIST));

	ni->lookup_next_ok = yield_ok && ni->has_lookup_next &&
			     (index == 0 || ni[-1].lookup_next_ok);

	nb_op_get_keys(inner, &ni->keys);

	/* A list entry cannot be present in a tree w/o it's keys */
	assert(ni->keys.num == yang_snode_num_keys(inner->schema));

	/*
	 * Get this nodes opaque list_entry object
	 */

	if (!nn->cbs.lookup_entry) {
		flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
			  "%s: data path doesn't support iteration over operational data: %s",
			  __func__, ys->xpath);
		return NB_ERR_NOT_FOUND;
	}

	/* ni->list_entry starts as the parent entry of this node */
	ni->list_entry = nb_callback_lookup_entry(nn, ni->list_entry, &ni->keys);
	if (ni->list_entry == NULL) {
		flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
			  "%s: list entry lookup failed", __func__);
		return NB_ERR_NOT_FOUND;
	}

	/*
	 * By definition any list element we can get a specific list_entry for
	 * is specific.
	 */
	ni->query_specific_entry = true;

	return NB_OK;
}
+
+/**
+ * nb_op_ys_init_node_infos() - initialize the node info stack from the query.
+ * @ys: the yield state for this tree walk.
+ *
+ * On starting a walk we initialize the node_info stack as deeply as possible
+ * based on specific node references in the query string. We will stop at the
+ * point in the query string that is not specific (e.g., a list element without
+ * it's keys predicate)
+ *
+ * Return: northbound return value (enum nb_error)
+ */
+static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys)
+{
+ struct nb_op_node_info *ni;
+ struct lyd_node_inner *inner;
+ struct lyd_node *node;
+ enum nb_error ret;
+ uint i, len;
+ char *tmp;
+
+ /*
+ * Obtain the trunk of the data node tree of the query.
+ *
+ * These are the nodes from the root that could be specifically
+ * identified with the query string. The trunk ends when a no specific
+ * node could be identified (e.g., a list-node name with no keys).
+ */
+
+ ret = nb_op_xpath_to_trunk(ys->xpath, &node);
+ if (ret || !node) {
+ flog_warn(EC_LIB_LIBYANG,
+ "%s: can't instantiate concrete path using xpath: %s",
+ __func__, ys->xpath);
+ if (!ret)
+ ret = NB_ERR_NOT_FOUND;
+ return ret;
+ }
+ assert(CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST));
+
+ inner = (struct lyd_node_inner *)node;
+ for (len = 1; inner->parent; len++)
+ inner = inner->parent;
+
+
+ darr_append_nz_mt(ys->node_infos, len, MTYPE_NB_NODE_INFOS);
+
+ /*
+ * For each node find the prefix of the xpath query that identified it
+ * -- save the prefix length.
+ */
+ inner = (struct lyd_node_inner *)node;
+ for (i = len; i > 0; i--, inner = inner->parent) {
+ ni = &ys->node_infos[i - 1];
+ ni->inner = inner;
+ ni->schema = inner->schema;
+ /*
+ * NOTE: we could build this by hand with a litte more effort,
+ * but this simple implementation works and won't be expensive
+ * since the number of nodes is small and only done once per
+ * query.
+ */
+ tmp = yang_dnode_get_path(&inner->node, NULL, 0);
+ ni->xpath_len = strlen(tmp);
+
+ /* Replace users supplied xpath with the libyang returned value */
+ if (i == len)
+ darr_in_strdup(ys->xpath, tmp);
+
+ /* The prefix must match the prefix of the stored xpath */
+ assert(!strncmp(tmp, ys->xpath, ni->xpath_len));
+ free(tmp);
+ }
+
+ /*
+ * Obtain the specific list-entry objects for each list node on the
+ * trunk and finish initializing the node_info structs.
+ */
+
+ darr_foreach_i (ys->node_infos, i) {
+ ret = nb_op_ys_finalize_node_info(ys, i);
+ if (ret != NB_OK) {
+ darr_free(ys->node_infos);
+ return ret;
+ }
+ }
+
+ ys->walk_start_level = darr_len(ys->node_infos);
+
+ ys->walk_root_level = (int)ys->walk_start_level - 1;
+
+ return NB_OK;
+}
+
+/* ================ */
+/* End of init code */
+/* ================ */
+
/**
 * nb_op_iter_leaf() - Add leaf data to the get tree results
 * @ys: the yield state for this tree walk.
 * @nb_node: the northbound node representing this leaf.
 * @xpath: the xpath (with key predicates) to this leaf value.
 *
 * Return: northbound return value (enum nb_error)
 */
static enum nb_error nb_op_iter_leaf(struct nb_op_yield_state *ys,
				     const struct nb_node *nb_node,
				     const char *xpath)
{
	const struct lysc_node *snode = nb_node->snode;
	struct nb_op_node_info *ni = darr_last(ys->node_infos);
	struct yang_data *data;
	enum nb_error ret = NB_OK;
	LY_ERR err;

	/* Skip config-only (rw) leaves; we are gathering operational state. */
	if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
		return NB_OK;

	/* Ignore list keys. */
	if (lysc_is_key(snode))
		return NB_OK;

	/* No value from the backend means nothing to add. */
	data = nb_callback_get_elem(nb_node, xpath, ni->list_entry);
	if (data == NULL)
		return NB_OK;

	/* Add a dnode to our tree */
	err = lyd_new_term(&ni->inner->node, snode->module, snode->name,
			   data->value, false, NULL);
	if (err) {
		yang_data_free(data);
		return NB_ERR_RESOURCE;
	}

	/* Deliver the value to the user callback, if one was supplied. */
	if (ys->cb)
		ret = (*ys->cb)(nb_node->snode, ys->translator, data,
				ys->cb_arg);
	yang_data_free(data);

	return ret;
}
+
+static enum nb_error nb_op_iter_leaflist(struct nb_op_yield_state *ys,
+ const struct nb_node *nb_node,
+ const char *xpath)
+{
+ const struct lysc_node *snode = nb_node->snode;
+ struct nb_op_node_info *ni = darr_last(ys->node_infos);
+ const void *list_entry = NULL;
+ enum nb_error ret = NB_OK;
+ LY_ERR err;
+
+ if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
+ return NB_OK;
+
+ do {
+ struct yang_data *data;
+
+ list_entry = nb_callback_get_next(nb_node, ni->list_entry,
+ list_entry);
+ if (!list_entry)
+ /* End of the list. */
+ break;
+
+ data = nb_callback_get_elem(nb_node, xpath, list_entry);
+ if (data == NULL)
+ continue;
+
+ /* Add a dnode to our tree */
+ err = lyd_new_term(&ni->inner->node, snode->module, snode->name,
+ data->value, false, NULL);
+ if (err) {
+ yang_data_free(data);
+ return NB_ERR_RESOURCE;
+ }
+
+ if (ys->cb)
+ ret = (*ys->cb)(nb_node->snode, ys->translator, data,
+ ys->cb_arg);
+ yang_data_free(data);
+ } while (ret == NB_OK && list_entry);
+
+ return ret;
+}
+
+
+static bool nb_op_schema_path_has_predicate(struct nb_op_yield_state *ys,
+ int level)
+{
+ if (level > darr_lasti(ys->query_tokens))
+ return false;
+ return strchr(ys->query_tokens[level], '[') != NULL;
+}
+
+/**
+ * nb_op_empty_container_ok() - determine if should keep empty container node.
+ *
+ * Return: true if the empty container should be kept.
+ */
+static bool nb_op_empty_container_ok(const struct lysc_node *snode,
+ const char *xpath, const void *list_entry)
+{
+ struct nb_node *nn = snode->priv;
+ struct yang_data *data;
+
+ if (!CHECK_FLAG(snode->flags, LYS_PRESENCE))
+ return false;
+
+ if (!nn->cbs.get_elem)
+ return false;
+
+ data = nb_callback_get_elem(nn, xpath, list_entry);
+ if (data) {
+ yang_data_free(data);
+ return true;
+ }
+ return false;
+}
+
/**
 * nb_op_get_child_path() - add child node name to the xpath.
 * @xpath_parent: a darr string for the parent node.
 * @schild: the child schema node.
 * @xpath_child: a previous return value from this function to reuse; may be
 *               the same darr as @xpath_parent (append in place).
 *
 * Return: a darr string "parent-path/[module:]childname"; the module prefix
 * is added when the child's module differs from its parent's (or there is no
 * parent).
 */
static char *nb_op_get_child_path(const char *xpath_parent,
				  const struct lysc_node *schild,
				  char *xpath_child)
{
	/* "/childname" */
	uint space, extra = strlen(schild->name) + 1;
	bool new_mod = (!schild->parent ||
			schild->parent->module != schild->module);
	int n;

	if (new_mod)
		/* "modulename:" */
		extra += strlen(schild->module->name) + 1;
	space = darr_len(xpath_parent) + extra;

	if (xpath_parent == xpath_child)
		/* appending in place: only need to grow capacity */
		darr_ensure_cap(xpath_child, space);
	else
		darr_in_strdup_cap(xpath_child, xpath_parent, space);
	/* snprintf size is extra + 1 to include the terminating NUL */
	if (new_mod)
		n = snprintf(darr_strnul(xpath_child), extra + 1, "/%s:%s",
			     schild->module->name, schild->name);
	else
		n = snprintf(darr_strnul(xpath_child), extra + 1, "/%s",
			     schild->name);
	assert(n == (int)extra);
	/* account for the appended bytes in the darr length ourselves */
	_darr_len(xpath_child) += extra;
	return xpath_child;
}
+
+static bool __is_yielding_node(const struct lysc_node *snode)
+{
+ struct nb_node *nn = snode->priv;
+
+ return nn->cbs.lookup_next != NULL;
+}
+
+static const struct lysc_node *__sib_next(bool yn, const struct lysc_node *sib)
+{
+ for (; sib; sib = sib->next)
+ if (yn == __is_yielding_node(sib))
+ return sib;
+ return NULL;
+}
+
/**
 * nb_op_sib_next() - Return the next sibling to walk to
 * @ys: the yield state for this tree walk.
 * @sib: the currently being visited sibling
 *
 * Return: the next sibling to walk to, walking non-yielding before yielding.
 */
static const struct lysc_node *nb_op_sib_next(struct nb_op_yield_state *ys,
					      const struct lysc_node *sib)
{
	struct lysc_node *parent = sib->parent;
	bool yn = __is_yielding_node(sib);

	/*
	 * If the node info stack is shorter than the schema path then we are
	 * doing specific query still on the node from the schema path (should
	 * match) so just return NULL.
	 */
	if (darr_len(ys->schema_path) > darr_len(ys->node_infos))
		return NULL;
	/*
	 * If sib is on top of the node info stack then
	 * 1) it's a container node -or-
	 * 2) it's a list node that we were walking and we've reach the last entry
	 * 3) if sib is a list and the list was empty we never would have
	 *    pushed sib on the stack so the top of the stack is the parent
	 *
	 * If the query string included this node then we do not process any
	 * siblings as we are not walking all the parent's children just this
	 * specified one given by the query string.
	 */
	if (sib == darr_last(ys->node_infos)->schema &&
	    darr_len(ys->schema_path) >= darr_len(ys->node_infos))
		return NULL;
	/* case (3) */
	else if (sib->nodetype == LYS_LIST &&
		 parent == darr_last(ys->node_infos)->schema &&
		 darr_len(ys->schema_path) > darr_len(ys->node_infos))
		return NULL;

	/* Next sibling of the same (non-)yielding kind, if any... */
	sib = __sib_next(yn, sib->next);
	if (sib)
		return sib;
	/* ...after the yielding siblings there is nothing left... */
	if (yn)
		return NULL;
	/* ...after the non-yielding ones, move on to the yielding siblings. */
	return __sib_next(true, lysc_node_child(parent));
}
+/*
+ * Debugging aid: example expression for walking up from a node_info's inner
+ * node in a debugger, e.g.:
+ * sib_walk((struct lyd_node *)ni->inner->node.parent->parent->parent->parent->parent->parent->parent)
+ */
+
/**
 * nb_op_sib_first() - obtain the first child to walk to
 * @ys: the yield state for this tree walk.
 * @parent: the parent whose child we seek
 * @skip_keys: if should skip over keys
 *
 * Return: the first child to continue the walk to, starting with non-yielding
 * siblings then yielding ones. There should be no more than 1 yielding sibling.
 */
static const struct lysc_node *nb_op_sib_first(struct nb_op_yield_state *ys,
					       const struct lysc_node *parent,
					       bool skip_keys)
{
	const struct lysc_node *sib = lysc_node_child(parent);
	const struct lysc_node *first_sib;

	/*
	 * The top of the node stack points at @parent.
	 *
	 * If the schema path (original query) is longer than our current node
	 * info stack (current xpath location), we are building back up to the
	 * base of the user query, return the next schema node from the query
	 * string (schema_path).
	 */
	assert(darr_last(ys->node_infos)->schema == parent);
	if (darr_lasti(ys->node_infos) < ys->query_base_level)
		return ys->schema_path[darr_lasti(ys->node_infos) + 1];

	/* Key children were already created with the list entry. */
	if (skip_keys)
		while (sib && lysc_is_key(sib))
			sib = sib->next;
	if (!sib)
		return NULL;

	/* Return non-yielding node's first */
	first_sib = sib;
	if (__is_yielding_node(sib)) {
		sib = __sib_next(false, sib);
		if (sib)
			return sib;
	}
	return first_sib;
}
+
+/*
+ * "3-dimensional" walk from base of the tree to the tip in-order.
+ *
+ * The actual tree is only 2-dimensional as list nodes are organized as adjacent
+ * siblings under a common parent perhaps with other siblings to each side;
+ * however, using 3d view here is easier to diagram.
+ *
+ * - A list node is yielding if it has a lookup_next callback.
+ * - All other node types are not yielding.
+ * - There's only one yielding node in a list of children (i.e., siblings).
+ *
+ * We visit all non-yielding children prior to the yielding child.
+ * That way we have the fullest tree possible even when something is deleted
+ * during a yield.
+ * --- child/parent descendant pointers
+ * ... next/prev sibling pointers
+ * o.o list entries pointers
+ * ~~~ diagram extension connector
+ * 1
+ * / \
+ * / \ o~~~~12
+ * / \ . / \
+ * 2.......5 o~~~9 13...14
+ * / \ | . / \
+ * 3...4 6 10...11 Cont Nodes: 1,2,5
+ * / \ List Nodes: 6,9,12
+ * 7...8 Leaf Nodes: 3,4,7,8,10,11,13,14
+ * Schema Leaf A: 3
+ * Schema Leaf B: 4
+ * Schema Leaf C: 7,10,13
+ * Schema Leaf D: 8,11,14
+ */
/**
 * __walk() - walk the schema/data trees gathering operational state.
 * @ys: the yield state for this tree walk.
 * @is_resume: true when continuing a previously yielded walk.
 *
 * Return: NB_OK when the walk completed, NB_YIELD when the walk should be
 * suspended and rescheduled, otherwise an nb_error value.
 */
static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
{
	const struct lysc_node *walk_stem_tip = ys_get_walk_stem_tip(ys);
	const struct lysc_node *sib;
	const void *parent_list_entry = NULL;
	const void *list_entry = NULL;
	struct nb_op_node_info *ni, *pni;
	struct lyd_node *node;
	struct nb_node *nn;
	char *xpath_child = NULL;
	// bool at_query_base;
	bool at_root_level, list_start, is_specific_node;
	enum nb_error ret = NB_OK;
	LY_ERR err;
	int at_clevel;
	uint len;

	/* Start the clock used by the yield check below. */
	monotime(&ys->start_time);

	/* Don't currently support walking all root nodes */
	if (!walk_stem_tip)
		return NB_ERR_NOT_FOUND;

	/*
	 * If we are resuming then start with the list container on top.
	 * Otherwise get the first child of the container we are walking,
	 * starting with non-yielding children.
	 */
	if (is_resume)
		sib = darr_last(ys->node_infos)->schema;
	else {
		/*
		 * Start with non-yielding children first.
		 *
		 * When adding root level walks, the sibling list are the root
		 * level nodes of all modules
		 */
		sib = nb_op_sib_first(ys, walk_stem_tip, true);
		if (!sib)
			return NB_ERR_NOT_FOUND;
	}

	while (true) {
		/* Grab the top container/list node info on the stack */
		at_clevel = darr_lasti(ys->node_infos);
		ni = &ys->node_infos[at_clevel];

		/*
		 * This is the level of the last specific node at init
		 * time. +1 would be the first non-specific list or
		 * non-container if present in the container node.
		 */
		at_root_level = at_clevel == ys->walk_root_level;

		if (!sib) {
			/*
			 * We've reached the end of the siblings inside a
			 * containing node; either a container or a specific
			 * list node entry.
			 *
			 * We handle container node inline; however, for lists
			 * we are only done with a specific entry and need to
			 * move to the next element on the list so we drop down
			 * into the switch for that case.
			 */

			/* Grab the containing node. */
			sib = ni->schema;

			if (sib->nodetype == LYS_CONTAINER) {
				/* If we added an empty container node (no
				 * children) and it's not a presence container
				 * or it's not backed by the get_elem callback,
				 * remove the node from the tree.
				 */
				if (!lyd_child(&ni->inner->node) &&
				    !nb_op_empty_container_ok(sib, ys->xpath,
							      ni->list_entry))
					lyd_free_tree(&ni->inner->node);

				/* If we have returned to our original walk base,
				 * then we are done with the walk.
				 */
				if (at_root_level) {
					ret = NB_OK;
					goto done;
				}
				/*
				 * Grab the sibling of the container we are
				 * about to pop, so we will be mid-walk on the
				 * parent containers children.
				 */
				sib = nb_op_sib_next(ys, sib);

				/* Pop container node to the parent container */
				ys_pop_inner(ys);

				/*
				 * If are were working on a user narrowed path
				 * then we are done with these siblings.
				 */
				if (darr_len(ys->schema_path) >
				    darr_len(ys->node_infos))
					sib = NULL;

				/* Start over */
				continue;
			}
			/*
			 * If we are here we have reached the end of the
			 * children of a list entry node. sib points
			 * at the list node info.
			 */
		}

		/* TODO: old code checked for "first" here and skipped if set */
		if (CHECK_FLAG(sib->nodetype,
			       LYS_LEAF | LYS_LEAFLIST | LYS_CONTAINER))
			xpath_child = nb_op_get_child_path(ys->xpath, sib,
							   xpath_child);
		nn = sib->priv;

		switch (sib->nodetype) {
		case LYS_LEAF:
			/*
			 * If we have a non-specific walk to a specific leaf
			 * (e.g., "..../route-entry/metric") and the leaf value
			 * is not present, then we are left with the data nodes
			 * of the stem of the branch to the missing leaf data.
			 * For containers this will get cleaned up by the
			 * container code above that looks for no children;
			 * however, this doesn't work for lists.
			 *
			 * (FN:A) We need a similar check for empty list
			 * elements. Empty list elements below the
			 * query_base_level (i.e., the schema path length)
			 * should be cleaned up as they don't support anything
			 * the user is querying for, if they are above the
			 * query_base_level then they are part of the walk and
			 * should be kept.
			 */
			ret = nb_op_iter_leaf(ys, nn, xpath_child);
			sib = nb_op_sib_next(ys, sib);
			continue;
		case LYS_LEAFLIST:
			ret = nb_op_iter_leaflist(ys, nn, xpath_child);
			sib = nb_op_sib_next(ys, sib);
			continue;
		case LYS_CONTAINER:
			/* config-only containers are skipped entirely */
			if (CHECK_FLAG(nn->flags, F_NB_NODE_CONFIG_ONLY)) {
				sib = nb_op_sib_next(ys, sib);
				continue;
			}

			node = NULL;
			err = lyd_new_inner(&ni->inner->node, sib->module,
					    sib->name, false, &node);
			if (err) {
				ret = NB_ERR_RESOURCE;
				goto done;
			}

			/* push this container node on top of the stack */
			ni = darr_appendz(ys->node_infos);
			ni->inner = (struct lyd_node_inner *)node;
			ni->schema = node->schema;
			ni->niters = 0;
			ni->nents = 0;
			ni->has_lookup_next = false;
			ni->lookup_next_ok = ni[-1].lookup_next_ok;
			ni->list_entry = ni[-1].list_entry;

			darr_in_strdup(ys->xpath, xpath_child);
			ni->xpath_len = darr_strlen(ys->xpath);

			sib = nb_op_sib_first(ys, sib, false);
			continue;
		case LYS_LIST:

			/*
			 * Notes:
			 *
			 * NOTE: ni->inner may be NULL here if we resumed and it
			 * was gone. ni->schema and ni->keys will still be
			 * valid.
			 *
			 * NOTE: At this point sib is never NULL; however, if it
			 * was NULL at the top of the loop, then we were done
			 * working on a list element's children and will be
			 * attempting to get the next list element here so sib
			 * == ni->schema (i.e., !list_start).
			 *
			 * (FN:A): Before doing this let's remove empty list
			 * elements that are "inside" the query string as they
			 * represent a stem which didn't lead to actual data
			 * being requested by the user -- for example,
			 * ".../route-entry/metric" if metric is not present we
			 * don't want to return an empty route-entry to the
			 * user.
			 */

			node = NULL;
			list_start = ni->schema != sib;
			if (list_start) {
				/*
				 * List iteration: First Element
				 * -----------------------------
				 *
				 * Our node info wasn't on top (wasn't an entry
				 * for sib) so this is a new list iteration, we
				 * will push our node info below. The top is our
				 * parent.
				 */
				if (CHECK_FLAG(nn->flags,
					       F_NB_NODE_CONFIG_ONLY)) {
					sib = nb_op_sib_next(ys, sib);
					continue;
				}
				/* we are now at one level higher */
				at_clevel += 1;
				pni = ni;
				ni = NULL;
			} else {
				/*
				 * List iteration: Next Element
				 * ----------------------------
				 *
				 * This is the case where `sib == NULL` at the
				 * top of the loop, so, we just completed the
				 * walking the children of a list entry, i.e.,
				 * we are done with that list entry.
				 *
				 * `sib` was reset to point at the our list node
				 * at the top of node_infos.
				 *
				 * Within this node_info, `ys->xpath`, `inner`,
				 * `list_entry`, and `xpath_len` are for the
				 * previous list entry, and need to be updated.
				 */
				pni = darr_len(ys->node_infos) > 1 ? &ni[-1]
								   : NULL;
			}

			parent_list_entry = pni ? pni->list_entry : NULL;
			list_entry = ni ? ni->list_entry : NULL;

			/*
			 * Before yielding we check to see if we are doing a
			 * specific list entry instead of a full list iteration.
			 * We do not want to yield during specific list entry
			 * processing.
			 */

			/*
			 * If we are at a list start check to see if the node
			 * has a predicate. If so we will try and fetch the data
			 * node now that we've built part of the tree, if the
			 * predicates are keys or only depend on the tree already
			 * built, it should create the element for us.
			 */
			is_specific_node = false;
			if (list_start &&
			    at_clevel <= darr_lasti(ys->query_tokens) &&
			    nb_op_schema_path_has_predicate(ys, at_clevel)) {
				err = lyd_new_path(&pni->inner->node, NULL,
						   ys->query_tokens[at_clevel],
						   NULL, 0, &node);
				if (!err)
					/* predicate resolved to specific node */
					is_specific_node = true;
				else {
					flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
						  "%s: unable to create node for specific query string: %s",
						  __func__,
						  ys->query_tokens[at_clevel]);
				}
			}

			if (list_entry && ni->query_specific_entry) {
				/*
				 * Ending specific list entry processing.
				 */
				assert(!list_start);
				is_specific_node = true;
				list_entry = NULL;
			}

			/*
			 * Should we yield?
			 *
			 * Don't yield if we have a specific entry.
			 */
			if (!is_specific_node && ni && ni->lookup_next_ok &&
			    // make sure we advance, if the interval is
			    // fast and we are very slow.
			    ((monotime_since(&ys->start_time, NULL) >
				      NB_OP_WALK_INTERVAL_US &&
			      ni->niters) ||
			     (ni->niters + 1) % 10000 == 0)) {
				/* This is a yield supporting list node and
				 * we've been running at least our yield
				 * interval, so yield.
				 *
				 * NOTE: we never yield on list_start, and we
				 * are always about to be doing a get_next.
				 */
				DEBUGD(&nb_dbg_events,
				       "%s: yielding after %u iterations",
				       __func__, ni->niters);

				ni->niters = 0;
				ret = NB_YIELD;
				goto done;
			}

			/*
			 * Now get the backend list_entry opaque object for
			 * this list entry from the backend.
			 */

			if (is_specific_node) {
				/*
				 * Specific List Entry:
				 * --------------------
				 */
				if (list_start) {
					list_entry =
						nb_callback_lookup_node_entry(
							node, parent_list_entry);
					/*
					 * If the node we created from a
					 * specific predicate entry is not
					 * actually there we need to delete the
					 * node from our data tree
					 */
					if (!list_entry) {
						lyd_free_tree(node);
						node = NULL;
					}
				}
			} else if (!list_start && !list_entry &&
				   ni->has_lookup_next) {
				/*
				 * After Yield:
				 * ------------
				 * After a yield the list_entry may have become
				 * invalid, so use lookup_next callback with
				 * parent and keys instead to find next element.
				 */
				list_entry =
					nb_callback_lookup_next(nn,
								parent_list_entry,
								&ni->keys);
			} else {
				/*
				 * Normal List Iteration:
				 * ----------------------
				 * Start (list_entry == NULL) or continue
				 * (list_entry != NULL) the list iteration.
				 */
				/* Obtain [next] list entry. */
				list_entry =
					nb_callback_get_next(nn,
							     parent_list_entry,
							     list_entry);
			}

			/*
			 * (FN:A) Reap empty list element? Check to see if we
			 * should reap an empty list element. We do this if the
			 * empty list element exists at or below the query base
			 * (i.e., it's not part of the walk, but a failed find
			 * on a more specific query e.g., for below the
			 * `route-entry` element for a query
			 * `.../route-entry/metric` where the list element had
			 * no metric value.
			 */
			if (!list_start && ni->inner &&
			    !lyd_child_no_keys(&ni->inner->node) &&
			    /* is this at or below the base? */
			    darr_ilen(ys->node_infos) <= ys->query_base_level)
				lyd_free_tree(&ni->inner->node);

			if (!list_entry) {
				/*
				 * List Iteration Done
				 * -------------------
				 */

				/*
				 * Grab next sibling of the list node
				 */
				if (is_specific_node)
					sib = NULL;
				else
					sib = nb_op_sib_next(ys, sib);

				/*
				 * If we are at the walk root (base) level then
				 * that specifies a list and we are done iterating
				 * the list, so we are done with the walk entirely.
				 */
				if (!sib && at_clevel == ys->walk_root_level) {
					ret = NB_OK;
					goto done;
				}

				/*
				 * Pop the our list node info back to our
				 * parent.
				 *
				 * We only do this if we've already pushed a
				 * node for the current list schema. For
				 * `list_start` this hasn't happened yet, as
				 * would have happened below. So when list_start
				 * is true but list_entry if NULL we
				 * are processing an empty list.
				 */
				if (!list_start)
					ys_pop_inner(ys);

				/*
				 * We should never be below the walk root
				 */
				assert(darr_lasti(ys->node_infos) >=
				       ys->walk_root_level);

				/* Move on to the sibling of the list node */
				continue;
			}

			/*
			 * From here on, we have selected a new top node_info
			 * list entry (either newly pushed or replacing the
			 * previous entry in the walk), and we are filling in
			 * the details.
			 */

			if (list_start) {
				/*
				 * Starting iteration of a list type or
				 * processing a specific entry, push the list
				 * node_info on stack.
				 */
				ni = darr_appendz(ys->node_infos);
				pni = &ni[-1]; /* memory may have moved */
				/*
				 * NOTE(review): pni was just set to &ni[-1]
				 * and is never NULL here, so the
				 * `(!pni && ys->finish)` arm below appears to
				 * be dead code -- confirm intent.
				 */
				ni->has_lookup_next = nn->cbs.lookup_next !=
						      NULL;
				ni->lookup_next_ok = ((!pni && ys->finish) ||
						      pni->lookup_next_ok) &&
						     ni->has_lookup_next;
				ni->query_specific_entry = is_specific_node;
				ni->niters = 0;
				ni->nents = 0;

				/* this will be our predicate-less xpath */
				ys->xpath = nb_op_get_child_path(ys->xpath, sib,
								 ys->xpath);
			} else {
				/*
				 * Reset our xpath to the list node (i.e.,
				 * remove the entry predicates)
				 */
				if (ni->query_specific_entry) {
					flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
						  "%s: unexpected state",
						  __func__);
				}
				assert(!ni->query_specific_entry);
				len = strlen(sib->name) + 1; /* "/sibname" */
				if (pni)
					len += pni->xpath_len;
				darr_setlen(ys->xpath, len + 1);
				ys->xpath[len] = 0;
				ni->xpath_len = len;
			}

			/* Need to get keys. */

			if (!CHECK_FLAG(nn->flags, F_NB_NODE_KEYLESS_LIST)) {
				ret = nb_callback_get_keys(nn, list_entry,
							   &ni->keys);
				if (ret) {
					darr_pop(ys->node_infos);
					ret = NB_ERR_RESOURCE;
					goto done;
				}
			}
			/*
			 * Append predicates to xpath.
			 */
			len = darr_strlen(ys->xpath);
			if (ni->keys.num) {
				yang_get_key_preds(ys->xpath + len, sib,
						   &ni->keys,
						   darr_cap(ys->xpath) - len);
			} else {
				/* add a position predicate (1s based?) */
				darr_ensure_avail(ys->xpath, 10);
				/*
				 * NOTE(review): the size arg below is
				 * darr_cap - len + 1; verify darr reserves
				 * that extra byte, otherwise this permits a
				 * one-byte write past capacity -- confirm.
				 */
				snprintf(ys->xpath + len,
					 darr_cap(ys->xpath) - len + 1, "[%u]",
					 ni->nents + 1);
			}
			darr_setlen(ys->xpath,
				    strlen(ys->xpath + len) + len + 1);
			ni->xpath_len = darr_strlen(ys->xpath);

			/*
			 * Create the new list entry node.
			 */

			if (!node) {
				/* NOTE: can also use lyd_new_list2 here when available */
				err = yang_lyd_new_list(ni[-1].inner, sib,
							&ni->keys,
							(struct lyd_node_inner **)&node);
				if (err) {
					darr_pop(ys->node_infos);
					ret = NB_ERR_RESOURCE;
					goto done;
				}
			}

			/*
			 * Save the new list entry with the list node info
			 */
			ni->inner = (struct lyd_node_inner *)node;
			ni->schema = node->schema;
			ni->list_entry = list_entry;
			ni->niters += 1;
			ni->nents += 1;

			/* Skip over the key children, they've been created. */
			sib = nb_op_sib_first(ys, sib, true);
			continue;

		case LYS_CHOICE:
			/* Container type with no data */
			/*FALLTHROUGH*/
		case LYS_CASE:
			/* Container type with no data */
			/*FALLTHROUGH*/
		default:
			/*FALLTHROUGH*/
		case LYS_ANYXML:
		case LYS_ANYDATA:
			/* These schema types are not currently handled */
			flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
				  "%s: unsupported schema node type: %s",
				  __func__, lys_nodetype2str(sib->nodetype));
			sib = nb_op_sib_next(ys, sib);
			continue;
		}
	}

done:
	darr_free(xpath_child);
	return ret;
}
+
/**
 * nb_op_walk_continue() - event handler that resumes a yielded walk.
 * @thread: the scheduled event; its argument is the walk's yield state.
 *
 * Re-validates the data tree (backend state may have changed while
 * yielded), then either resumes the walk, yields again, or finishes.
 */
static void nb_op_walk_continue(struct event *thread)
{
	struct nb_op_yield_state *ys = EVENT_ARG(thread);
	enum nb_error ret = NB_OK;

	DEBUGD(&nb_dbg_cbs_state, "northbound oper-state: resuming %s",
	       ys->xpath);

	nb_op_resume_data_tree(ys);

	/* if we've popped past the walk start level we're done */
	if (darr_lasti(ys->node_infos) < ys->walk_root_level)
		goto finish;

	/* otherwise we are at a resumable node */
	assert(darr_last(ys->node_infos)->has_lookup_next);

	ret = __walk(ys, true);
	if (ret == NB_YIELD) {
		/* Schedule another continuation; on failure fall through. */
		if (nb_op_yield(ys) != NB_OK) {
			if (ys->should_batch)
				goto stopped;
			else
				goto finish;
		}
		return;
	}
finish:
	/*
	 * NOTE: yields only occur for walks that supplied a finish callback
	 * (lookup_next_ok requires ys->finish), so ys->finish is non-NULL.
	 */
	(*ys->finish)(ys_root_node(ys), ys->finish_arg, ret);
stopped:
	nb_op_free_yield_state(ys, false);
}
+
+/*
+ * __free_siblings() - free all siblings of @this, keeping keys and @this.
+ * @this: data node whose sibling list is pruned.
+ *
+ * Key leafs are preserved because the enclosing list node still needs them;
+ * @this is preserved because the walk is still positioned on it.
+ */
+static void __free_siblings(struct lyd_node *this)
+{
+ struct lyd_node *next, *sib;
+ uint count = 0;
+
+ LY_LIST_FOR_SAFE(lyd_first_sibling(this), next, sib)
+ {
+ if (lysc_is_key(sib->schema))
+ continue;
+ if (sib == this)
+ continue;
+ lyd_free_tree(sib);
+ count++;
+ }
+ DEBUGD(&nb_dbg_events, "NB oper-state: deleted %u siblings", count);
+}
+
+/*
+ * Trim Algorithm:
+ *
+ * Delete final lookup-next list node and subtree, leave stack slot with keys.
+ *
+ * Then walking up the stack, delete all siblings except:
+ * 1. right-most container or list node (must be lookup-next by design)
+ * 2. keys supporting existing parent list node.
+ *
+ * NOTE the topmost node on the stack will be the final lookup-next list node,
+ * as we only yield on lookup-next list nodes.
+ *
+ */
+static void nb_op_trim_yield_state(struct nb_op_yield_state *ys)
+{
+ struct nb_op_node_info *ni;
+ int i = darr_lasti(ys->node_infos);
+
+ assert(i >= 0);
+
+ DEBUGD(&nb_dbg_events, "NB oper-state: start trimming: top: %d", i);
+
+ /* top of stack must be the lookup-next list node we yielded on */
+ ni = &ys->node_infos[i];
+ assert(ni->has_lookup_next);
+
+ DEBUGD(&nb_dbg_events, "NB oper-state: deleting tree at level %d", i);
+ __free_siblings(&ni->inner->node);
+ lyd_free_tree(&ni->inner->node);
+ /* slot keeps its keys so the walk can be resumed from here */
+ ni->inner = NULL;
+
+ /* NOTE(review): loop stops before level 0, so the root level's
+ * siblings are never trimmed -- presumably intentional (the root
+ * must survive for the resumed walk); confirm. */
+ while (--i > 0) {
+ DEBUGD(&nb_dbg_events,
+ "NB oper-state: deleting siblings at level: %d", i);
+ __free_siblings(&ys->node_infos[i].inner->node);
+ }
+ DEBUGD(&nb_dbg_events, "NB oper-state: stop trimming: new top: %d",
+ (int)darr_lasti(ys->node_infos));
+}
+
+/*
+ * nb_op_yield() - suspend the walk and schedule nb_op_walk_continue().
+ * @ys: the walk's yield state.
+ *
+ * In batching mode the partial results are first delivered to the user's
+ * finish callback with NB_YIELD and the delivered data is then trimmed
+ * from the cached tree before the resume timer is armed.
+ *
+ * Return: NB_OK, or the error returned by the finish callback.
+ */
+static enum nb_error nb_op_yield(struct nb_op_yield_state *ys)
+{
+ enum nb_error ret;
+ unsigned long min_us = MAX(1, NB_OP_WALK_INTERVAL_US / 50000);
+ struct timeval tv = { .tv_sec = 0, .tv_usec = min_us };
+
+ /* tv_usec is microseconds; the old message labeled it "s" (seconds) */
+ DEBUGD(&nb_dbg_events,
+ "NB oper-state: yielding %s for %luus (should_batch %d)",
+ ys->xpath, tv.tv_usec, ys->should_batch);
+
+ if (ys->should_batch) {
+ /*
+ * TODO: add ability of finish to influence the timer.
+ * This will allow, for example, flow control based on how long
+ * it takes finish to process the batch.
+ */
+ ret = (*ys->finish)(ys_root_node(ys), ys->finish_arg, NB_YIELD);
+ if (ret != NB_OK)
+ return ret;
+ /* now trim out that data we just "finished" */
+ nb_op_trim_yield_state(ys);
+ }
+
+ event_add_timer_tv(event_loop, nb_op_walk_continue, ys, &tv,
+ &ys->walk_ev);
+ return NB_OK;
+}
+
+/*
+ * nb_op_ys_init_schema_path() - build the schema stack and query tokens.
+ * @ys: yield state; ys->xpath holds the user's query string.
+ * @last: [OUT] the nb_node for the final (deepest) element of the query.
+ *
+ * Populates ys->schema_path (one schema node per path element, root first)
+ * and ys->query_tokens (pointers into a private copy of the query string,
+ * NUL-terminated in place, one per element).
+ *
+ * Return: NB_OK, or NB_ERR if the path is unknown or tokenizing fails.
+ */
+static enum nb_error nb_op_ys_init_schema_path(struct nb_op_yield_state *ys,
+ struct nb_node **last)
+{
+ const struct lysc_node *sn;
+ struct nb_node *nblast;
+ char *s, *s2;
+ int count;
+ uint i;
+
+ /*
+ * Get the schema node stack for the entire query string
+ *
+ * The user might pass in something like "//metric" which may resolve to
+ * more than one schema node ("trunks"). nb_node_find() returns a single
+ * node though. We should expand the functionality to get the set of
+ * nodes that matches the xpath (not path) query and save that set in
+ * the yield state. Then we should do a walk using the users query
+ * string over each schema trunk in the set.
+ */
+ nblast = nb_node_find(ys->xpath);
+ if (!nblast) {
+ flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
+ "%s: unknown data path: %s", __func__, ys->xpath);
+ return NB_ERR;
+ }
+ *last = nblast;
+
+ /*
+ * Create a stack of schema nodes one element per node in the query
+ * path, only the top (last) element may be a non-container type.
+ *
+ * NOTE: appears to be a bug in nb_node linkage where parent can be NULL,
+ * or I'm misunderstanding the code, in any case we use the libyang
+ * linkage to walk which works fine.
+ *
+ * XXX: we don't actually support choice/case yet, they are container
+ * types in the libyang schema, but won't be in data so our length
+ * checking gets messed up.
+ */
+ for (sn = nblast->snode, count = 0; sn; count++, sn = sn->parent)
+ if (sn != nblast->snode)
+ assert(CHECK_FLAG(sn->nodetype,
+ LYS_CONTAINER | LYS_LIST |
+ LYS_CHOICE | LYS_CASE));
+ /* create our arrays */
+ darr_append_n(ys->schema_path, count);
+ darr_append_n(ys->query_tokens, count);
+ for (sn = nblast->snode; sn; sn = sn->parent)
+ ys->schema_path[--count] = sn;
+
+ /*
+ * Now tokenize the query string and get pointers to each token
+ */
+
+ /* Get copy of query string start after initial '/'s */
+ s = ys->xpath;
+ while (*s && *s == '/')
+ s++;
+ ys->query_tokstr = darr_strdup(s);
+ s = ys->query_tokstr;
+
+ darr_foreach_i (ys->schema_path, i) {
+ const char *modname = ys->schema_path[i]->module->name;
+ const char *name = ys->schema_path[i]->name;
+ int nlen = strlen(name);
+ int mnlen = 0;
+
+ while (true) {
+ s2 = strstr(s, name);
+ if (!s2)
+ goto error;
+
+ /* NOTE(review): s2[-1] reads the byte before the match;
+ * if the first token matches at offset 0 of the copied
+ * string this reads before the buffer -- confirm the
+ * query always carries a module prefix there. */
+ if (s2[-1] == ':') {
+ mnlen = strlen(modname) + 1;
+ if (ys->query_tokstr > s2 - mnlen ||
+ strncmp(s2 - mnlen, modname, mnlen - 1))
+ goto error;
+ s2 -= mnlen;
+ nlen += mnlen;
+ }
+
+ s = s2;
+ if ((i == 0 || s[-1] == '/') &&
+ (s[nlen] == 0 || s[nlen] == '[' || s[nlen] == '/'))
+ break;
+ /*
+ * Advance past the incorrect match, must have been
+ * part of previous predicate.
+ */
+ s += nlen;
+ }
+
+ /* NUL terminate previous token and save this one */
+ if (i > 0)
+ s[-1] = 0;
+ ys->query_tokens[i] = s;
+ s += nlen;
+ }
+
+ /* NOTE: need to subtract choice/case nodes when these are supported */
+ ys->query_base_level = darr_lasti(ys->schema_path);
+
+ return NB_OK;
+
+error:
+ darr_free(ys->query_tokstr);
+ darr_free(ys->schema_path);
+ darr_free(ys->query_tokens);
+ return NB_ERR;
+}
+
+
+/**
+ * nb_op_walk_start() - Start walking oper-state directed by query string.
+ * @ys: partially initialized yield state for this walk.
+ *
+ * Return: NB_OK on synchronous completion, NB_YIELD if the walk suspended,
+ * or an NB_ERR* code on failure.
+ */
+static enum nb_error nb_op_walk_start(struct nb_op_yield_state *ys)
+{
+ struct nb_node *nblast;
+ enum nb_error ret;
+
+ /*
+ * Get nb_node path (stack) corresponding to the xpath query
+ */
+ ret = nb_op_ys_init_schema_path(ys, &nblast);
+ if (ret != NB_OK)
+ return ret;
+
+
+ /*
+ * Get the node_info path (stack) corresponding to the uniquely
+ * resolvable data nodes from the beginning of the xpath query.
+ */
+ /* NOTE(review): this initialization may be destined to move elsewhere
+ * (original author note) -- confirm placement before depending on it. */
+ ret = nb_op_ys_init_node_infos(ys);
+ if (ret != NB_OK)
+ return ret;
+
+ return __walk(ys, false);
+}
+
+
+/*
+ * nb_oper_walk() - public entry: start a (possibly yielding) oper-state walk.
+ *
+ * Creates the yield state and starts the walk. If the walk yields, the
+ * opaque yield state is returned so the caller can cancel it later; if it
+ * completes (or fails) synchronously, finish is invoked and NULL returned.
+ */
+void *nb_oper_walk(const char *xpath, struct yang_translator *translator,
+ uint32_t flags, bool should_batch, nb_oper_data_cb cb,
+ void *cb_arg, nb_oper_data_finish_cb finish, void *finish_arg)
+{
+ struct nb_op_yield_state *ys;
+ enum nb_error ret;
+
+ ys = nb_op_create_yield_state(xpath, translator, flags, should_batch,
+ cb, cb_arg, finish, finish_arg);
+
+ ret = nb_op_walk_start(ys);
+ if (ret == NB_YIELD) {
+ if (nb_op_yield(ys) != NB_OK) {
+ if (ys->should_batch)
+ goto stopped;
+ else
+ goto finish;
+ }
+ return ys;
+ }
+finish:
+ /* synchronous completion (or error): deliver result and free state */
+ (void)(*ys->finish)(ys_root_node(ys), ys->finish_arg, ret);
+stopped:
+ nb_op_free_yield_state(ys, false);
+ return NULL;
+}
+
+
+/* Cancel an in-progress walk; @walk is the handle from nb_oper_walk(). */
+void nb_oper_cancel_walk(void *walk)
+{
+ if (walk)
+ nb_op_free_yield_state(walk, false);
+}
+
+
+/* Cancel every outstanding yielded walk (e.g. at shutdown). */
+void nb_oper_cancel_all_walks(void)
+{
+ struct nb_op_yield_state *ys;
+
+ frr_each_safe (nb_op_walks, &nb_op_walks, ys)
+ nb_oper_cancel_walk(ys);
+}
+
+
+/*
+ * The old API -- remove when we've updated the users to yielding.
+ */
+/*
+ * nb_oper_iterate_legacy() - synchronous (non-yielding) oper-state walk.
+ * @xpath: query string.
+ * @translator: optional YANG translator.
+ * @flags: walk flags.
+ * @cb: per-node data callback (may be NULL).
+ * @cb_arg: opaque arg for @cb.
+ * @tree: [OUT] resulting data tree on NB_OK; NULL otherwise (may be NULL).
+ *
+ * Return: NB_OK or an NB_ERR* code; never NB_YIELD (asserted).
+ */
+enum nb_error nb_oper_iterate_legacy(const char *xpath,
+ struct yang_translator *translator,
+ uint32_t flags, nb_oper_data_cb cb,
+ void *cb_arg, struct lyd_node **tree)
+{
+ struct nb_op_yield_state *ys;
+ enum nb_error ret;
+
+ /* should_batch=false and no finish callback: walk cannot yield */
+ ys = nb_op_create_yield_state(xpath, translator, flags, false, cb,
+ cb_arg, NULL, NULL);
+
+ ret = nb_op_walk_start(ys);
+ assert(ret != NB_YIELD);
+
+ if (tree && ret == NB_OK)
+ *tree = ys_root_node(ys);
+ else {
+ /* on error (or no out-param) the built tree is discarded */
+ if (ys_root_node(ys))
+ yang_dnode_free(ys_root_node(ys));
+ if (tree)
+ *tree = NULL;
+ }
+
+ nb_op_free_yield_state(ys, true);
+ return ret;
+}
+
+/* Module init: record the event loop and initialize the walk list. */
+void nb_oper_init(struct event_loop *loop)
+{
+ event_loop = loop;
+ nb_op_walks_init(&nb_op_walks);
+}
+
+/* Module teardown: cancel (and free) all outstanding walks. */
+void nb_oper_terminate(void)
+{
+ nb_oper_cancel_all_walks();
+}
diff --git a/lib/northbound_sysrepo.c b/lib/northbound_sysrepo.c
index 7fd4af8356..535c8b637e 100644
--- a/lib/northbound_sysrepo.c
+++ b/lib/northbound_sysrepo.c
@@ -118,6 +118,9 @@ static int yang_data_frr2sr(struct yang_data *frr_data, sr_val_t *sr_data)
sr_data->type = SR_INT64_T;
sr_data->data.int64_val = yang_str2int64(frr_data->value);
break;
+ case LY_TYPE_LEAFREF:
+ sr_val_set_str_data(sr_data, SR_STRING_T, frr_data->value);
+ break;
case LY_TYPE_STRING:
sr_val_set_str_data(sr_data, SR_STRING_T, frr_data->value);
break;
@@ -137,6 +140,11 @@ static int yang_data_frr2sr(struct yang_data *frr_data, sr_val_t *sr_data)
sr_data->type = SR_UINT64_T;
sr_data->data.uint64_val = yang_str2uint64(frr_data->value);
break;
+ case LY_TYPE_UNION:
+ /* No way to deal with this using un-typed yang_data object */
+ sr_val_set_str_data(sr_data, SR_STRING_T, frr_data->value);
+ break;
+ case LY_TYPE_UNKNOWN:
default:
return -1;
}
@@ -340,6 +348,8 @@ static int frr_sr_config_change_cb(sr_session_ctx_t *session, uint32_t sub_id,
return frr_sr_config_change_cb_apply(session, module_name);
case SR_EV_ABORT:
return frr_sr_config_change_cb_abort(session, module_name);
+ case SR_EV_RPC:
+ case SR_EV_UPDATE:
default:
flog_err(EC_LIB_LIBSYSREPO, "%s: unexpected sysrepo event: %u",
__func__, sr_ev);
@@ -347,39 +357,16 @@ static int frr_sr_config_change_cb(sr_session_ctx_t *session, uint32_t sub_id,
}
}
-static int frr_sr_state_data_iter_cb(const struct lysc_node *snode,
- struct yang_translator *translator,
- struct yang_data *data, void *arg)
-{
- struct lyd_node *dnode = arg;
- LY_ERR ly_errno;
-
- ly_errno = 0;
- ly_errno = lyd_new_path(NULL, ly_native_ctx, data->xpath, data->value,
- 0, &dnode);
- if (ly_errno) {
- flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
- __func__);
- yang_data_free(data);
- return NB_ERR;
- }
-
- yang_data_free(data);
- return NB_OK;
-}
-
/* Callback for state retrieval. */
static int frr_sr_state_cb(sr_session_ctx_t *session, uint32_t sub_id,
const char *module_name, const char *xpath,
const char *request_xpath, uint32_t request_id,
struct lyd_node **parent, void *private_ctx)
{
- struct lyd_node *dnode;
+ struct lyd_node *dnode = NULL;
dnode = *parent;
- if (nb_oper_data_iterate(request_xpath, NULL, 0,
- frr_sr_state_data_iter_cb, dnode)
- != NB_OK) {
+ if (nb_oper_iterate_legacy(request_xpath, NULL, 0, NULL, NULL, &dnode)) {
flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
"%s: failed to obtain operational data [xpath %s]",
__func__, xpath);
diff --git a/lib/subdir.am b/lib/subdir.am
index c4ddb87c1f..977fd9f9aa 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -68,6 +68,7 @@ lib_libfrr_la_SOURCES = \
lib/mgmt_be_client.c \
lib/mgmt_fe_client.c \
lib/mgmt_msg.c \
+ lib/mgmt_msg_native.c \
lib/mlag.c \
lib/module.c \
lib/mpls.c \
@@ -80,6 +81,7 @@ lib_libfrr_la_SOURCES = \
lib/northbound.c \
lib/northbound_cli.c \
lib/northbound_db.c \
+ lib/northbound_oper.c \
lib/ntop.c \
lib/openbsd-tree.c \
lib/pid_output.c \
@@ -256,6 +258,7 @@ pkginclude_HEADERS += \
lib/mgmt_defines.h \
lib/mgmt_fe_client.h \
lib/mgmt_msg.h \
+ lib/mgmt_msg_native.h \
lib/mgmt_pb.h \
lib/module.h \
lib/monotime.h \
diff --git a/lib/vrf.c b/lib/vrf.c
index 808edd4ae4..5537f71254 100644
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -987,6 +987,19 @@ static const void *lib_vrf_lookup_entry(struct nb_cb_lookup_entry_args *args)
return vrf;
}
+/*
+ * lookup_next callback for /frr-vrf:lib/vrf: return the first VRF whose
+ * name sorts strictly after the given key (which need not exist).
+ *
+ * RB_FIND only returns exact matches, so a missing key would have returned
+ * NULL (and the old code then dereferenced it). RB_NFIND returns the
+ * smallest element >= key; skip it only when it is an exact match.
+ */
+static const void *lib_vrf_lookup_next(struct nb_cb_lookup_entry_args *args)
+{
+ const char *vrfname = args->keys->key[0];
+ struct vrf vrfkey, *vrf;
+
+ strlcpy(vrfkey.name, vrfname, sizeof(vrfkey.name));
+ vrf = RB_NFIND(vrf_name_head, &vrfs_by_name, &vrfkey);
+ if (vrf && !strcmp(vrf->name, vrfname))
+ vrf = RB_NEXT(vrf_name_head, vrf);
+
+ return vrf;
+}
+
/*
* XPath: /frr-vrf:lib/vrf/id
*/
@@ -1024,6 +1037,7 @@ const struct frr_yang_module_info frr_vrf_info = {
.get_next = lib_vrf_get_next,
.get_keys = lib_vrf_get_keys,
.lookup_entry = lib_vrf_lookup_entry,
+ .lookup_next = lib_vrf_lookup_next,
},
.priority = NB_DFLT_PRIORITY - 2,
},
diff --git a/lib/vty.c b/lib/vty.c
index 2cfe34f211..3c80403cce 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -157,10 +157,9 @@ static int vty_mgmt_unlock_running_inline(struct vty *vty)
return vty->mgmt_locked_running_ds ? -1 : 0;
}
-void vty_mgmt_resume_response(struct vty *vty, bool success)
+void vty_mgmt_resume_response(struct vty *vty, int ret)
{
uint8_t header[4] = {0, 0, 0, 0};
- int ret = CMD_SUCCESS;
if (!vty->mgmt_req_pending_cmd) {
zlog_err(
@@ -168,14 +167,10 @@ void vty_mgmt_resume_response(struct vty *vty, bool success)
return;
}
- if (!success)
- ret = CMD_WARNING_CONFIG_FAILED;
-
- MGMTD_FE_CLIENT_DBG(
- "resuming CLI cmd after %s on vty session-id: %" PRIu64
- " with '%s'",
- vty->mgmt_req_pending_cmd, vty->mgmt_session_id,
- success ? "succeeded" : "failed");
+ MGMTD_FE_CLIENT_DBG("resuming CLI cmd after %s on vty session-id: %" PRIu64
+ " with '%s'",
+ vty->mgmt_req_pending_cmd, vty->mgmt_session_id,
+ ret == CMD_SUCCESS ? "success" : "failed");
vty->mgmt_req_pending_cmd = NULL;
@@ -3560,7 +3555,8 @@ static void vty_mgmt_ds_lock_notified(struct mgmt_fe_client *client,
if (!is_short_circuit && vty->mgmt_req_pending_cmd) {
assert(!strcmp(vty->mgmt_req_pending_cmd, "MESSAGE_LOCKDS_REQ"));
- vty_mgmt_resume_response(vty, success);
+ vty_mgmt_resume_response(vty,
+ success ? CMD_SUCCESS : CMD_WARNING);
}
}
@@ -3592,7 +3588,8 @@ static void vty_mgmt_set_config_result_notified(
vty_mgmt_unlock_running_inline(vty);
}
- vty_mgmt_resume_response(vty, success);
+ vty_mgmt_resume_response(vty, success ? CMD_SUCCESS
+ : CMD_WARNING_CONFIG_FAILED);
}
static void vty_mgmt_commit_config_result_notified(
@@ -3620,7 +3617,8 @@ static void vty_mgmt_commit_config_result_notified(
vty_out(vty, "MGMTD: %s\n", errmsg_if_any);
}
- vty_mgmt_resume_response(vty, success);
+ vty_mgmt_resume_response(vty, success ? CMD_SUCCESS
+ : CMD_WARNING_CONFIG_FAILED);
}
static int vty_mgmt_get_data_result_notified(
@@ -3640,7 +3638,7 @@ static int vty_mgmt_get_data_result_notified(
client_id, errmsg_if_any ? errmsg_if_any : "Unknown");
vty_out(vty, "ERROR: GET_DATA request failed, Error: %s\n",
errmsg_if_any ? errmsg_if_any : "Unknown");
- vty_mgmt_resume_response(vty, success);
+ vty_mgmt_resume_response(vty, CMD_WARNING);
return -1;
}
@@ -3659,9 +3657,290 @@ static int vty_mgmt_get_data_result_notified(
}
if (next_key < 0) {
vty_out(vty, "]\n");
- vty_mgmt_resume_response(vty, success);
+ vty_mgmt_resume_response(vty,
+ success ? CMD_SUCCESS : CMD_WARNING);
+ }
+
+ return 0;
+}
+
+/* libyang print callback: forward each printed chunk to the vty. */
+static ssize_t vty_mgmt_libyang_print(void *user_data, const void *buf,
+ size_t count)
+{
+ struct vty *vty = user_data;
+
+ vty_out(vty, "%.*s", (int)count, (const char *)buf);
+ return count;
+}
+
+/*
+ * vty_out_yang_error() - print one libyang error item to the vty.
+ * @vty: output vty.
+ * @format: LYD_XML -> RFC 6241 <rpc-error>, LYD_JSON -> JSON object,
+ * anything else -> plain "%% error ..." text.
+ * @ei: the libyang error item to render.
+ *
+ * NOTE(review): the JSON branch does not escape quotes/backslashes in
+ * ei->msg/path/apptag, so a message containing '"' yields invalid JSON --
+ * confirm whether libyang messages can contain such characters.
+ */
+static void vty_out_yang_error(struct vty *vty, LYD_FORMAT format,
+ struct ly_err_item *ei)
+{
+ bool have_apptag = ei->apptag && ei->apptag[0] != 0;
+ bool have_path = ei->path && ei->path[0] != 0;
+ bool have_msg = ei->msg && ei->msg[0] != 0;
+ const char *severity = NULL;
+ const char *evalid = NULL;
+ const char *ecode = NULL;
+ LY_ERR err = ei->no;
+
+ if (ei->level == LY_LLERR)
+ severity = "error";
+ else if (ei->level == LY_LLWRN)
+ severity = "warning";
+
+ /* map the LY_ERR code to a short human-readable string */
+ switch (ei->no) {
+ case LY_SUCCESS:
+ ecode = "ok";
+ break;
+ case LY_EMEM:
+ ecode = "out of memory";
+ break;
+ case LY_ESYS:
+ ecode = "system error";
+ break;
+ case LY_EINVAL:
+ ecode = "invalid value given";
+ break;
+ case LY_EEXIST:
+ ecode = "item exists";
+ break;
+ case LY_ENOTFOUND:
+ ecode = "item not found";
+ break;
+ case LY_EINT:
+ ecode = "operation interrupted";
+ break;
+ case LY_EVALID:
+ ecode = "validation failed";
+ break;
+ case LY_EDENIED:
+ ecode = "access denied";
+ break;
+ case LY_EINCOMPLETE:
+ ecode = "incomplete";
+ break;
+ case LY_ERECOMPILE:
+ ecode = "compile error";
+ break;
+ case LY_ENOT:
+ ecode = "not";
+ break;
+ default:
+ case LY_EPLUGIN:
+ case LY_EOTHER:
+ ecode = "other";
+ break;
+ }
+
+ /* validation errors carry a secondary code worth reporting */
+ if (err == LY_EVALID) {
+ switch (ei->vecode) {
+ case LYVE_SUCCESS:
+ evalid = NULL;
+ break;
+ case LYVE_SYNTAX:
+ evalid = "syntax";
+ break;
+ case LYVE_SYNTAX_YANG:
+ evalid = "yang-syntax";
+ break;
+ case LYVE_SYNTAX_YIN:
+ evalid = "yin-syntax";
+ break;
+ case LYVE_REFERENCE:
+ evalid = "reference";
+ break;
+ case LYVE_XPATH:
+ evalid = "xpath";
+ break;
+ case LYVE_SEMANTICS:
+ evalid = "semantics";
+ break;
+ case LYVE_SYNTAX_XML:
+ evalid = "xml-syntax";
+ break;
+ case LYVE_SYNTAX_JSON:
+ evalid = "json-syntax";
+ break;
+ case LYVE_DATA:
+ evalid = "data";
+ break;
+ default:
+ case LYVE_OTHER:
+ evalid = "other";
+ break;
+ }
+ }
+
+ switch (format) {
+ case LYD_XML:
+ vty_out(vty,
+ "<rpc-error xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">");
+ vty_out(vty, "<error-type>application</error-type>");
+ if (severity)
+ vty_out(vty, "<error-severity>%s</error-severity>",
+ severity);
+ if (ecode)
+ vty_out(vty, "<error-code>%s</error-code>", ecode);
+ if (evalid)
+ vty_out(vty, "<error-validation>%s</error-validation>\n",
+ evalid);
+ if (have_path)
+ vty_out(vty, "<error-path>%s</error-path>\n", ei->path);
+ if (have_apptag)
+ vty_out(vty, "<error-app-tag>%s</error-app-tag>\n",
+ ei->apptag);
+ if (have_msg)
+ vty_out(vty, "<error-message>%s</error-message>\n",
+ ei->msg);
+
+ vty_out(vty, "</rpc-error>");
+ break;
+ case LYD_JSON:
+ vty_out(vty, "{ \"error-type\": \"application\"");
+ if (severity)
+ vty_out(vty, ", \"error-severity\": \"%s\"", severity);
+ if (ecode)
+ vty_out(vty, ", \"error-code\": \"%s\"", ecode);
+ if (evalid)
+ vty_out(vty, ", \"error-validation\": \"%s\"", evalid);
+ if (have_path)
+ vty_out(vty, ", \"error-path\": \"%s\"", ei->path);
+ if (have_apptag)
+ vty_out(vty, ", \"error-app-tag\": \"%s\"", ei->apptag);
+ if (have_msg)
+ vty_out(vty, ", \"error-message\": \"%s\"", ei->msg);
+
+ vty_out(vty, "}");
+ break;
+ case LYD_UNKNOWN:
+ case LYD_LYB:
+ default:
+ vty_out(vty, "%% error");
+ if (severity)
+ vty_out(vty, " severity: %s", severity);
+ if (evalid)
+ vty_out(vty, " invalid: %s", evalid);
+ if (have_path)
+ vty_out(vty, " path: %s", ei->path);
+ if (have_apptag)
+ vty_out(vty, " app-tag: %s", ei->apptag);
+ if (have_msg)
+ vty_out(vty, " msg: %s", ei->msg);
+ break;
+ }
+}
+
+/*
+ * vty_out_yang_errors() - print and clear all pending libyang errors.
+ * @vty: output vty.
+ * @format: output format (see vty_out_yang_error()).
+ *
+ * Return: number of errors printed (0 if none were pending).
+ *
+ * NOTE(review): for JSON this emits a bare "ietf-restconf:errors": [...]
+ * member without enclosing braces -- presumably the caller supplies the
+ * surrounding object; confirm.
+ */
+static uint vty_out_yang_errors(struct vty *vty, LYD_FORMAT format)
+{
+ struct ly_err_item *ei = ly_err_first(ly_native_ctx);
+ uint count;
+
+ if (!ei)
+ return 0;
+
+ if (format == LYD_JSON)
+ vty_out(vty, "\"ietf-restconf:errors\": [ ");
+
+ for (count = 0; ei; count++, ei = ei->next) {
+ if (count)
+ vty_out(vty, ", ");
+ vty_out_yang_error(vty, format, ei);
 }
+ if (format == LYD_JSON)
+ vty_out(vty, " ]");
+
+ ly_err_clean(ly_native_ctx, NULL);
+
+ return count;
+}
+
+
+/*
+ * vty_mgmt_get_tree_result_notified() - FE client callback for GET_TREE.
+ *
+ * If the result arrived as LYB, parse it and re-print in the format the
+ * user originally requested (stashed in vty->mgmt_req_pending_data);
+ * otherwise the result is already in the requested text format and is
+ * emitted as-is. Always resumes the pending CLI command.
+ */
+static int vty_mgmt_get_tree_result_notified(
+ struct mgmt_fe_client *client, uintptr_t user_data, uint64_t client_id,
+ uint64_t session_id, uintptr_t session_ctx, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, LYD_FORMAT result_type, void *result,
+ size_t len, int partial_error)
+{
+ struct vty *vty;
+ struct lyd_node *dnode;
+ int ret = CMD_SUCCESS;
+ LY_ERR err;
+
+ vty = (struct vty *)session_ctx;
+
+ MGMTD_FE_CLIENT_DBG("GET_TREE request %ssucceeded, client 0x%" PRIx64
+ " req-id %" PRIu64,
+ partial_error ? "partially " : "", client_id,
+ req_id);
+
+ assert(result_type == LYD_LYB ||
+ result_type == vty->mgmt_req_pending_data);
+
+ if (vty->mgmt_req_pending_data == LYD_XML && partial_error)
+ vty_out(vty,
+ "<!-- some errors occurred gathering results -->\n");
+
+ if (result_type == LYD_LYB) {
+ /*
+ * parse binary into tree and print in the specified format
+ */
+ result_type = vty->mgmt_req_pending_data;
+
+ err = lyd_parse_data_mem(ly_native_ctx, result, LYD_LYB, 0, 0,
+ &dnode);
+ if (!err)
+ err = lyd_print_clb(vty_mgmt_libyang_print, vty, dnode,
+ result_type, LYD_PRINT_WITHSIBLINGS);
+ /* on parse failure dnode is NULL and lyd_free_all is a no-op */
+ lyd_free_all(dnode);
+
+ if (vty_out_yang_errors(vty, result_type) || err)
+ ret = CMD_WARNING;
+ } else {
+ /*
+ * Print the in-format result
+ */
+ assert(result_type == LYD_XML || result_type == LYD_JSON);
+ /* len - 1: assumes result includes a trailing NUL -- confirm */
+ vty_out(vty, "%.*s\n", (int)len - 1, (const char *)result);
+ }
+
+ vty_mgmt_resume_response(vty, ret);
+
+ return 0;
+}
+
+/*
+ * vty_mgmt_error_notified() - FE client callback for an ERROR reply.
+ *
+ * Reports the error on the vty; when a CLI command is pending it is
+ * resumed with CMD_WARNING (or CMD_SUCCESS for error == 0).
+ *
+ * Fixes: "Erorr" typos and a missing space before "error-str" in the
+ * debug format strings.
+ */
+static int vty_mgmt_error_notified(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
+ uint64_t session_id, uintptr_t session_ctx,
+ uint64_t req_id, int error,
+ const char *errstr)
+{
+ struct vty *vty = (struct vty *)session_ctx;
+ const char *cname = mgmt_fe_client_name(client);
+
+ if (!vty->mgmt_req_pending_cmd) {
+ MGMTD_FE_CLIENT_DBG("Error with no pending command: %d returned for client %s 0x%" PRIx64
+ " session-id %" PRIu64 " req-id %" PRIu64
+ " error-str %s",
+ error, cname, client_id, session_id, req_id,
+ errstr);
+ vty_out(vty,
+ "%% Error %d from MGMTD for %s with no pending command: %s\n",
+ error, cname, errstr);
+ return CMD_WARNING;
+ }
+
+ MGMTD_FE_CLIENT_DBG("Error %d returned for client %s 0x%" PRIx64
+ " session-id %" PRIu64 " req-id %" PRIu64
+ " error-str %s",
+ error, cname, client_id, session_id, req_id, errstr);
+
+ vty_out(vty, "%% %s (for %s, client %s)\n", errstr,
+ vty->mgmt_req_pending_cmd, cname);
+
+ vty_mgmt_resume_response(vty, error ? CMD_WARNING : CMD_SUCCESS);
+
+ return 0;
+}
}
@@ -3672,6 +3951,9 @@ static struct mgmt_fe_client_cbs mgmt_cbs = {
.set_config_notify = vty_mgmt_set_config_result_notified,
.commit_config_notify = vty_mgmt_commit_config_result_notified,
.get_data_notify = vty_mgmt_get_data_result_notified,
+ .get_tree_notify = vty_mgmt_get_tree_result_notified,
+ .error_notify = vty_mgmt_error_notified,
+
};
void vty_init_mgmt_fe(void)
@@ -3893,6 +4175,28 @@ int vty_mgmt_send_get_req(struct vty *vty, bool is_config,
return 0;
}
+/*
+ * vty_mgmt_send_get_tree_req() - send a GET_TREE request to mgmtd.
+ * @vty: requesting vty session.
+ * @result_type: desired output format, stashed for the reply handler.
+ * @xpath: query path.
+ *
+ * Return: 0 on success, -1 if the send failed.
+ */
+int vty_mgmt_send_get_tree_req(struct vty *vty, LYD_FORMAT result_type,
+ const char *xpath)
+{
+ LYD_FORMAT intern_format = result_type;
+
+ vty->mgmt_req_id++;
+
+ if (mgmt_fe_send_get_tree_req(mgmt_fe_client, vty->mgmt_session_id,
+ vty->mgmt_req_id, intern_format, xpath)) {
+ zlog_err("Failed to send GET-TREE to MGMTD session-id: %" PRIu64
+ " req-id %" PRIu64 ".",
+ vty->mgmt_session_id, vty->mgmt_req_id);
+ vty_out(vty, "Failed to send GET-TREE to MGMTD!\n");
+ return -1;
+ }
+
+ /* reply handler uses these to match and format the response */
+ vty->mgmt_req_pending_cmd = "MESSAGE_GET_TREE_REQ";
+ vty->mgmt_req_pending_data = result_type;
+
+ return 0;
+}
+
/* Install vty's own commands like `who' command. */
void vty_init(struct event_loop *master_thread, bool do_command_logging)
{
diff --git a/lib/vty.h b/lib/vty.h
index 1a431fa16a..5866eccde0 100644
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -229,6 +229,7 @@ struct vty {
* CLI command and we are waiting on the reply so we can respond to the
* vty user. */
const char *mgmt_req_pending_cmd;
+ uintptr_t mgmt_req_pending_data;
bool mgmt_locked_candidate_ds;
bool mgmt_locked_running_ds;
/* Need to track when we file-lock in vtysh to re-lock on end/conf t
@@ -419,9 +420,11 @@ extern int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only,
extern int vty_mgmt_send_get_req(struct vty *vty, bool is_config,
Mgmtd__DatastoreId datastore,
const char **xpath_list, int num_req);
+extern int vty_mgmt_send_get_tree_req(struct vty *vty, LYD_FORMAT result_type,
+ const char *xpath);
extern int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
bool lock, bool scok);
-extern void vty_mgmt_resume_response(struct vty *vty, bool success);
+extern void vty_mgmt_resume_response(struct vty *vty, int ret);
static inline bool vty_needs_implicit_commit(struct vty *vty)
{
diff --git a/lib/yang.c b/lib/yang.c
index 131d89cdfa..18d2ac58d3 100644
--- a/lib/yang.c
+++ b/lib/yang.c
@@ -6,6 +6,7 @@
#include <zebra.h>
+#include "darr.h"
#include "log.h"
#include "lib_errors.h"
#include "yang.h"
@@ -363,33 +364,10 @@ unsigned int yang_snode_num_keys(const struct lysc_node *snode)
return count;
}
-void yang_dnode_get_path(const struct lyd_node *dnode, char *xpath,
- size_t xpath_len)
-{
- lyd_path(dnode, LYD_PATH_STD, xpath, xpath_len);
-}
-
-const char *yang_dnode_get_schema_name(const struct lyd_node *dnode,
- const char *xpath_fmt, ...)
+char *yang_dnode_get_path(const struct lyd_node *dnode, char *xpath,
+ size_t xpath_len)
{
- if (xpath_fmt) {
- va_list ap;
- char xpath[XPATH_MAXLEN];
-
- va_start(ap, xpath_fmt);
- vsnprintf(xpath, sizeof(xpath), xpath_fmt, ap);
- va_end(ap);
-
- dnode = yang_dnode_get(dnode, xpath);
- if (!dnode) {
- flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
- "%s: couldn't find %s", __func__, xpath);
- zlog_backtrace(LOG_ERR);
- abort();
- }
- }
-
- return dnode->schema->name;
+ return lyd_path(dnode, LYD_PATH_STD, xpath, xpath_len);
}
struct lyd_node *yang_dnode_get(const struct lyd_node *dnode, const char *xpath)
@@ -673,6 +651,37 @@ static void ly_log_cb(LY_LOG_LEVEL level, const char *msg, const char *path)
zlog(priority, "libyang: %s", msg);
}
+/* lyd_print_clb callback: append printed bytes to a darr dynamic array.
+ * @arg is a uint8_t** to the darr; darr_append_n may reallocate it. */
+static ssize_t yang_print_darr(void *arg, const void *buf, size_t count)
+{
+ uint8_t *dst = darr_append_n(*(uint8_t **)arg, count);
+
+ memcpy(dst, buf, count);
+ return count;
+}
+
+/* Append the printed form of @root to an existing darr; text formats get a
+ * trailing NUL appended so the darr is usable as a C string. */
+LY_ERR yang_print_tree_append(uint8_t **darr, const struct lyd_node *root,
+ LYD_FORMAT format, uint32_t options)
+{
+ LY_ERR err;
+
+ err = lyd_print_clb(yang_print_darr, darr, root, format, options);
+ if (err)
+ zlog_err("Failed to save yang tree: %s", ly_last_errmsg());
+ else if (format != LYD_LYB)
+ *darr_append(*darr) = 0;
+ return err;
+}
+
+/* Print @root into a freshly allocated darr; NULL on failure.
+ * Caller must darr_free() the result. */
+uint8_t *yang_print_tree(const struct lyd_node *root, LYD_FORMAT format,
+ uint32_t options)
+{
+ uint8_t *darr = NULL;
+
+ if (yang_print_tree_append(&darr, root, format, options))
+ return NULL;
+ return darr;
+}
+
const char *yang_print_errors(struct ly_ctx *ly_ctx, char *buf, size_t buf_len)
{
struct ly_err_item *ei;
@@ -713,6 +722,7 @@ struct ly_ctx *yang_ctx_new_setup(bool embedded_modules, bool explicit_compile)
{
struct ly_ctx *ctx = NULL;
const char *yang_models_path = YANG_MODELS_PATH;
+ uint options;
LY_ERR err;
if (access(yang_models_path, R_OK | X_OK)) {
@@ -726,7 +736,7 @@ struct ly_ctx *yang_ctx_new_setup(bool embedded_modules, bool explicit_compile)
YANG_MODELS_PATH);
}
- uint options = LY_CTX_NO_YANGLIBRARY | LY_CTX_DISABLE_SEARCHDIR_CWD;
+ options = LY_CTX_NO_YANGLIBRARY | LY_CTX_DISABLE_SEARCHDIR_CWD;
if (explicit_compile)
options |= LY_CTX_EXPLICIT_COMPILE;
err = ly_ctx_new(yang_models_path, options, &ctx);
@@ -917,3 +927,95 @@ uint32_t yang_get_list_elements_count(const struct lyd_node *node)
} while (node);
return count;
}
+
+/*
+ * yang_get_key_preds() - format "[key='value']" predicates into @s.
+ * @s: output buffer.
+ * @snode: list schema node whose key leafs drive the iteration.
+ * @keys: key values, in schema order; must match the schema's key count.
+ * @space: size of the buffer at @s.
+ *
+ * Return: the number of keys formatted.
+ */
+int yang_get_key_preds(char *s, const struct lysc_node *snode,
+ struct yang_list_keys *keys, ssize_t space)
+{
+ const struct lysc_node_leaf *skey;
+ ssize_t len2, len = 0;
+ ssize_t i = 0;
+
+ LY_FOR_KEYS (snode, skey) {
+ assert(i < keys->num);
+ len2 = snprintf(s + len, space - len, "[%s='%s']", skey->name,
+ keys->key[i]);
+ /* snprintf truncates when its return is >= the size passed;
+ * the old '>' check missed the exact-fit truncation case. */
+ if (len2 >= space - len)
+ len = space;
+ else
+ len += len2;
+ i++;
+ }
+
+ assert(i == keys->num);
+ return i;
+}
+
+/*
+ * yang_get_node_keys() - extract key values from a list data node.
+ * @node: list data node (keys are its first children).
+ * @keys: [OUT] filled with the key strings.
+ *
+ * Return: NB_OK, or NB_ERR if a key has no value or the schema defines
+ * more keys than @keys can hold (previously this overflowed the array).
+ */
+int yang_get_node_keys(struct lyd_node *node, struct yang_list_keys *keys)
+{
+ struct lyd_node *child = lyd_child(node);
+
+ keys->num = 0;
+ for (; child && lysc_is_key(child->schema); child = child->next) {
+ const char *value = lyd_get_value(child);
+
+ if (!value)
+ return NB_ERR;
+ /* guard: don't write past the fixed-size key array */
+ if ((size_t)keys->num >= array_size(keys->key))
+ return NB_ERR;
+ strlcpy(keys->key[keys->num], value,
+ sizeof(keys->key[keys->num]));
+ keys->num++;
+ }
+ return NB_OK;
+}
+
+/*
+ * yang_lyd_new_list() - create a new list data node with the given keys.
+ * @parent: parent node for the new list entry.
+ * @snode: list schema node.
+ * @list_keys: key values (0..LIST_MAXKEYS of them).
+ * @node: [OUT] the created list node.
+ *
+ * lyd_new_list() is variadic in the keys, so the key count is unrolled
+ * into a switch; key counts above LIST_MAXKEYS fall through to LY_EINVAL.
+ *
+ * Return: LY_ERR from lyd_new_list(), or LY_EINVAL for too many keys.
+ */
+LY_ERR yang_lyd_new_list(struct lyd_node_inner *parent,
+ const struct lysc_node *snode,
+ const struct yang_list_keys *list_keys,
+ struct lyd_node_inner **node)
+{
+ struct lyd_node *pnode = &parent->node;
+ struct lyd_node **nodepp = (struct lyd_node **)node;
+ const char(*keys)[LIST_MAXKEYLEN] = list_keys->key;
+
+ /*
+ * When
+ * https://github.com/CESNET/libyang/commit/2c1e327c7c2dd3ba12d466a4ebcf62c1c44116c4
+ * is released in libyang we should add a configure.ac check for the
+ * lyd_new_list3 function and use it here.
+ */
+ switch (list_keys->num) {
+ case 0:
+ return lyd_new_list(pnode, snode->module, snode->name, false,
+ nodepp);
+ case 1:
+ return lyd_new_list(pnode, snode->module, snode->name, false,
+ nodepp, keys[0]);
+ case 2:
+ return lyd_new_list(pnode, snode->module, snode->name, false,
+ nodepp, keys[0], keys[1]);
+ case 3:
+ return lyd_new_list(pnode, snode->module, snode->name, false,
+ nodepp, keys[0], keys[1], keys[2]);
+ case 4:
+ return lyd_new_list(pnode, snode->module, snode->name, false,
+ nodepp, keys[0], keys[1], keys[2], keys[3]);
+ case 5:
+ return lyd_new_list(pnode, snode->module, snode->name, false,
+ nodepp, keys[0], keys[1], keys[2], keys[3],
+ keys[4]);
+ case 6:
+ return lyd_new_list(pnode, snode->module, snode->name, false,
+ nodepp, keys[0], keys[1], keys[2], keys[3],
+ keys[4], keys[5]);
+ case 7:
+ return lyd_new_list(pnode, snode->module, snode->name, false,
+ nodepp, keys[0], keys[1], keys[2], keys[3],
+ keys[4], keys[5], keys[6]);
+ case 8:
+ return lyd_new_list(pnode, snode->module, snode->name, false,
+ nodepp, keys[0], keys[1], keys[2], keys[3],
+ keys[4], keys[5], keys[6], keys[7]);
+ }
+ _Static_assert(LIST_MAXKEYS == 8, "max key mismatch in switch unroll");
+ /*NOTREACHED*/
+ return LY_EINVAL;
+}
diff --git a/lib/yang.h b/lib/yang.h
index 37369c09bf..3ce584b347 100644
--- a/lib/yang.h
+++ b/lib/yang.h
@@ -317,30 +317,16 @@ extern unsigned int yang_snode_num_keys(const struct lysc_node *snode);
* libyang data node to be processed.
*
* xpath
- * Pointer to previously allocated buffer.
+ * Pointer to previously allocated buffer or NULL.
*
* xpath_len
- * Size of the xpath buffer.
- */
-extern void yang_dnode_get_path(const struct lyd_node *dnode, char *xpath,
- size_t xpath_len);
-
-/*
- * Return the schema name of the given libyang data node.
- *
- * dnode
- * libyang data node.
+ * Size of the xpath buffer if xpath non-NULL.
*
- * xpath_fmt
- * Optional XPath expression (absolute or relative) to specify a different
- * data node to operate on in the same data tree.
- *
- * Returns:
- * Schema name of the libyang data node.
+ * If xpath is NULL, the returned string (if non-NULL) needs to be free()d by
+ * the caller.
*/
-extern const char *yang_dnode_get_schema_name(const struct lyd_node *dnode,
- const char *xpath_fmt, ...)
- PRINTFRR(2, 3);
+extern char *yang_dnode_get_path(const struct lyd_node *dnode, char *xpath,
+ size_t xpath_len);
/*
* Find a libyang data node by its YANG data path.
@@ -600,6 +586,39 @@ extern struct ly_ctx *yang_ctx_new_setup(bool embedded_modules,
*/
extern void yang_debugging_set(bool enable);
+
+/*
+ * "Print" the yang tree in `root` into dynamic sized array.
+ *
+ * Args:
+ * root: root of the subtree to "print" along with siblings.
+ * format: LYD_FORMAT of output (see lyd_print_mem)
+ * options: printing options (see lyd_print_mem)
+ *
+ * Return:
+ * A darr dynamic array with the "printed" output or NULL on failure.
+ */
+extern uint8_t *yang_print_tree(const struct lyd_node *root, LYD_FORMAT format,
+ uint32_t options);
+
+/*
+ * "Print" the yang tree in `root` into an existing dynamic sized array.
+ *
+ * This function does not initialize or free the dynamic array, the array can
+ * already existing data, the tree will be appended to this data.
+ *
+ * Args:
+ * darr: existing `uint8_t *`, dynamic array.
+ * root: root of the subtree to "print" along with siblings.
+ * format: LYD_FORMAT of output (see lyd_print_mem)
+ * options: printing options (see lyd_print_mem)
+ *
+ * Return:
+ * LY_ERR from underlying calls.
+ */
+extern LY_ERR yang_print_tree_append(uint8_t **darr, const struct lyd_node *root,
+ LYD_FORMAT format, uint32_t options);
+
/*
* Print libyang error messages into the provided buffer.
*
@@ -693,6 +712,18 @@ bool yang_is_last_list_dnode(const struct lyd_node *dnode);
/* API to check if the given node is last node in the data tree level */
bool yang_is_last_level_dnode(const struct lyd_node *dnode);
+/* Create a YANG predicate string based on the keys */
+extern int yang_get_key_preds(char *s, const struct lysc_node *snode,
+ struct yang_list_keys *keys, ssize_t space);
+
+/* Get YANG keys from an existing dnode */
+extern int yang_get_node_keys(struct lyd_node *node, struct yang_list_keys *keys);
+
+/* Create a new list lyd_node using `yang_list_keys` */
+extern LY_ERR yang_lyd_new_list(struct lyd_node_inner *parent,
+ const struct lysc_node *snode,
+ const struct yang_list_keys *keys,
+ struct lyd_node_inner **node);
#ifdef __cplusplus
}
#endif
diff --git a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c
index ed93244b83..0d678452f7 100644
--- a/mgmtd/mgmt_be_adapter.c
+++ b/mgmtd/mgmt_be_adapter.c
@@ -15,6 +15,7 @@
#include "network.h"
#include "libfrr.h"
#include "mgmt_msg.h"
+#include "mgmt_msg_native.h"
#include "mgmt_pb.h"
#include "mgmtd/mgmt.h"
#include "mgmtd/mgmt_memory.h"
@@ -34,6 +35,7 @@
/* ---------- */
const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1] = {
+ [MGMTD_BE_CLIENT_ID_ZEBRA] = "zebra",
#ifdef HAVE_STATICD
[MGMTD_BE_CLIENT_ID_STATICD] = "staticd",
#endif
@@ -72,7 +74,16 @@ static const char *const *be_client_xpaths[MGMTD_BE_CLIENT_ID_MAX] = {
#endif
};
-static const char *const *be_client_oper_xpaths[MGMTD_BE_CLIENT_ID_MAX] = {};
+static const char *const zebra_oper_xpaths[] = {
+ "/frr-interface:lib/interface",
+ "/frr-vrf:lib/vrf/frr-zebra:zebra",
+ "/frr-zebra:zebra",
+ NULL,
+};
+
+static const char *const *be_client_oper_xpaths[MGMTD_BE_CLIENT_ID_MAX] = {
+ [MGMTD_BE_CLIENT_ID_ZEBRA] = zebra_oper_xpaths,
+};
/*
* We would like to have a better ADT than one with O(n) comparisons
@@ -102,8 +113,7 @@ mgmt_be_adapter_sched_init_event(struct mgmt_be_client_adapter *adapter);
static bool be_is_client_interested(const char *xpath,
enum mgmt_be_client_id id, bool config);
-
-static const char *mgmt_be_client_id2name(enum mgmt_be_client_id id)
+const char *mgmt_be_client_id2name(enum mgmt_be_client_id id)
{
if (id > MGMTD_BE_CLIENT_ID_MAX)
return "invalid client id";
@@ -287,7 +297,6 @@ mgmt_be_adapter_cleanup_old_conn(struct mgmt_be_client_adapter *adapter)
}
}
-
static int mgmt_be_adapter_send_msg(struct mgmt_be_client_adapter *adapter,
Mgmtd__BeMessage *be_msg)
{
@@ -410,17 +419,11 @@ mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
be_msg->cfg_apply_reply->success,
be_msg->cfg_apply_reply->error_if_any, adapter);
break;
- case MGMTD__BE_MESSAGE__MESSAGE_GET_REPLY:
- /*
- * TODO: Add handling code in future.
- */
- break;
/*
* NOTE: The following messages are always sent from MGMTD to
* Backend clients only and/or need not be handled on MGMTd.
*/
case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY:
- case MGMTD__BE_MESSAGE__MESSAGE_GET_REQ:
case MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ:
case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ:
case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ:
@@ -503,12 +506,77 @@ int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
return mgmt_be_adapter_send_msg(adapter, &be_msg);
}
+int mgmt_be_send_native(enum mgmt_be_client_id id, void *msg)
+{
+ struct mgmt_be_client_adapter *adapter = mgmt_be_get_adapter_by_id(id);
+
+ if (!adapter)
+ return -1;
+
+ return mgmt_msg_native_send_msg(adapter->conn, msg, false);
+}
+
+/*
+ * Handle a native encoded message from a backend client.
+ *
+ * Decodes the message by its `code` field and forwards the payload to the
+ * transaction module. `msg->refer_id` carries the txn-id the reply refers to.
+ */
+static void be_adapter_handle_native_msg(struct mgmt_be_client_adapter *adapter,
+					 struct mgmt_msg_header *msg,
+					 size_t msg_len)
+{
+	struct mgmt_msg_tree_data *tree_msg;
+	struct mgmt_msg_error *error_msg;
+
+	/* Dispatch on the native message code. */
+
+	switch (msg->code) {
+	case MGMT_MSG_CODE_ERROR:
+		error_msg = (typeof(error_msg))msg;
+		MGMTD_BE_ADAPTER_DBG("Got ERROR from '%s' txn-id %" PRIx64,
+				     adapter->name, msg->refer_id);
+
+		/* Forward the reply to the txn module */
+		mgmt_txn_notify_error(adapter, msg->refer_id, msg->req_id,
+				      error_msg->error, error_msg->errstr);
+
+		break;
+	case MGMT_MSG_CODE_TREE_DATA:
+		/* tree data from a backend client */
+		tree_msg = (typeof(tree_msg))msg;
+		MGMTD_BE_ADAPTER_DBG("Got TREE_DATA from '%s' txn-id %" PRIx64,
+				     adapter->name, msg->refer_id);
+
+		/* Forward the reply to the txn module */
+		mgmt_txn_notify_tree_data_reply(adapter, tree_msg, msg_len);
+		break;
+	default:
+		MGMTD_BE_ADAPTER_ERR("unknown native message txn-id %" PRIu64
+				     " req-id %" PRIu64
+				     " code %u from BE client for adapter %s",
+				     msg->refer_id, msg->req_id, msg->code,
+				     adapter->name);
+		break;
+	}
+}
+
+
static void mgmt_be_adapter_process_msg(uint8_t version, uint8_t *data,
size_t len, struct msg_conn *conn)
{
struct mgmt_be_client_adapter *adapter = conn->user;
- Mgmtd__BeMessage *be_msg = mgmtd__be_message__unpack(NULL, len, data);
+ Mgmtd__BeMessage *be_msg;
+
+ if (version == MGMT_MSG_VERSION_NATIVE) {
+ struct mgmt_msg_header *msg = (typeof(msg))data;
+
+ if (len >= sizeof(*msg))
+ be_adapter_handle_native_msg(adapter, msg, len);
+ else
+ MGMTD_BE_ADAPTER_ERR("native message to adapter %s too short %zu",
+ adapter->name, len);
+ return;
+ }
+ be_msg = mgmtd__be_message__unpack(NULL, len, data);
if (!be_msg) {
MGMTD_BE_ADAPTER_DBG(
"Failed to decode %zu bytes for adapter: %s", len,
@@ -662,11 +730,13 @@ struct msg_conn *mgmt_be_create_adapter(int conn_fd, union sockunion *from)
mgmt_be_adapters_add_tail(&mgmt_be_adapters, adapter);
RB_INIT(nb_config_cbs, &adapter->cfg_chgs);
- adapter->conn = msg_server_conn_create(
- mgmt_loop, conn_fd, mgmt_be_adapter_notify_disconnect,
- mgmt_be_adapter_process_msg, MGMTD_BE_MAX_NUM_MSG_PROC,
- MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN, adapter,
- "BE-adapter");
+ adapter->conn = msg_server_conn_create(mgmt_loop, conn_fd,
+ mgmt_be_adapter_notify_disconnect,
+ mgmt_be_adapter_process_msg,
+ MGMTD_BE_MAX_NUM_MSG_PROC,
+ MGMTD_BE_MAX_NUM_MSG_WRITE,
+ MGMTD_BE_MSG_MAX_LEN, adapter,
+ "BE-adapter");
adapter->conn->debug = DEBUG_MODE_CHECK(&mgmt_debug_be, DEBUG_MODE_ALL);
diff --git a/mgmtd/mgmt_be_adapter.h b/mgmtd/mgmt_be_adapter.h
index e06ee115f0..96e807f6c4 100644
--- a/mgmtd/mgmt_be_adapter.h
+++ b/mgmtd/mgmt_be_adapter.h
@@ -30,6 +30,7 @@ enum mgmt_be_client_id {
#ifdef HAVE_STATICD
MGMTD_BE_CLIENT_ID_STATICD,
#endif
+ MGMTD_BE_CLIENT_ID_ZEBRA,
MGMTD_BE_CLIENT_ID_MAX
};
#define MGMTD_BE_CLIENT_ID_MIN 0
@@ -149,6 +150,9 @@ mgmt_be_get_adapter_by_name(const char *name);
extern struct mgmt_be_client_adapter *
mgmt_be_get_adapter_by_id(enum mgmt_be_client_id id);
+/* Get the client name given a client ID */
+extern const char *mgmt_be_client_id2name(enum mgmt_be_client_id id);
+
/* Toggle debug on or off for connected clients. */
extern void mgmt_be_adapter_toggle_client_debug(bool set);
@@ -211,6 +215,19 @@ extern void mgmt_be_adapter_status_write(struct vty *vty);
*/
extern void mgmt_be_xpath_register_write(struct vty *vty);
+
+/**
+ * Send a native message to a backend client
+ *
+ * Args:
+ * adapter: the client to send the message to.
+ * msg: a native message from mgmt_msg_native_alloc_msg()
+ *
+ * Return:
+ * Any return value from msg_conn_send_msg().
+ */
+extern int mgmt_be_send_native(enum mgmt_be_client_id id, void *msg);
+
/**
* Lookup the clients which are subscribed to a given `xpath`
* and the way they are subscribed.
diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c
index d613b7467a..87c67491b6 100644
--- a/mgmtd/mgmt_fe_adapter.c
+++ b/mgmtd/mgmt_fe_adapter.c
@@ -8,11 +8,13 @@
*/
#include <zebra.h>
+#include "darr.h"
#include "sockopt.h"
#include "network.h"
#include "libfrr.h"
#include "mgmt_fe_client.h"
#include "mgmt_msg.h"
+#include "mgmt_msg_native.h"
#include "mgmt_pb.h"
#include "hash.h"
#include "jhash.h"
@@ -254,6 +256,15 @@ void mgmt_fe_adapter_toggle_client_debug(bool set)
adapter->conn->debug = set;
}
+static struct mgmt_fe_session_ctx *fe_adapter_session_by_txn_id(uint64_t txn_id)
+{
+ uint64_t session_id = mgmt_txn_get_session_id(txn_id);
+
+ if (session_id == MGMTD_SESSION_ID_NONE)
+ return NULL;
+ return mgmt_session_id2ctx(session_id);
+}
+
static struct mgmt_fe_session_ctx *
mgmt_fe_create_session(struct mgmt_fe_client_adapter *adapter,
uint64_t client_id)
@@ -281,6 +292,14 @@ mgmt_fe_create_session(struct mgmt_fe_client_adapter *adapter,
return session;
}
+static int fe_adapter_send_native_msg(struct mgmt_fe_client_adapter *adapter,
+ void *msg, size_t len,
+ bool short_circuit_ok)
+{
+ return msg_conn_send_msg(adapter->conn, MGMT_MSG_VERSION_NATIVE, msg,
+ len, NULL, short_circuit_ok);
+}
+
static int fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
Mgmtd__FeMessage *fe_msg, bool short_circuit_ok)
{
@@ -478,6 +497,28 @@ static int fe_adapter_send_get_reply(struct mgmt_fe_session_ctx *session,
return fe_adapter_send_msg(session->adapter, &fe_msg, false);
}
+static int fe_adapter_send_error(struct mgmt_fe_session_ctx *session,
+ uint64_t req_id, bool short_circuit_ok,
+ int16_t error, const char *errfmt, ...)
+ PRINTFRR(5, 6);
+
+static int fe_adapter_send_error(struct mgmt_fe_session_ctx *session,
+ uint64_t req_id, bool short_circuit_ok,
+ int16_t error, const char *errfmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, errfmt);
+ ret = vmgmt_msg_native_send_error(session->adapter->conn,
+ session->session_id, req_id,
+ short_circuit_ok, error, errfmt, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+
static void mgmt_fe_session_cfg_txn_clnup(struct event *thread)
{
struct mgmt_fe_session_ctx *session;
@@ -748,14 +789,8 @@ static int mgmt_fe_session_handle_get_req_msg(struct mgmt_fe_session_ctx *sessio
struct nb_config *cfg_root = NULL;
Mgmtd__DatastoreId ds_id = get_req->ds_id;
uint64_t req_id = get_req->req_id;
- bool is_cfg = get_req->config;
- bool ds_ok = true;
-
- if (is_cfg && ds_id != MGMTD_DS_CANDIDATE && ds_id != MGMTD_DS_RUNNING)
- ds_ok = false;
- else if (!is_cfg && ds_id != MGMTD_DS_OPERATIONAL)
- ds_ok = false;
- if (!ds_ok) {
+
+ if (ds_id != MGMTD_DS_CANDIDATE && ds_id != MGMTD_DS_RUNNING) {
fe_adapter_send_get_reply(session, ds_id, req_id, false, NULL,
"get-req on unsupported datastore");
return 0;
@@ -791,8 +826,7 @@ static int mgmt_fe_session_handle_get_req_msg(struct mgmt_fe_session_ctx *sessio
/*
* Get a copy of the datastore config root, avoids locking.
*/
- if (is_cfg)
- cfg_root = nb_config_dup(mgmt_ds_get_nb_config(ds_ctx));
+ cfg_root = nb_config_dup(mgmt_ds_get_nb_config(ds_ctx));
/*
* Create a GET request under the transaction.
@@ -988,9 +1022,8 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
break;
case MGMTD__FE_MESSAGE__MESSAGE_GET_REQ:
session = mgmt_session_id2ctx(fe_msg->get_req->session_id);
- MGMTD_FE_ADAPTER_DBG("Got GET_REQ (iscfg %d) for DS:%s (xpaths: %d) on session-id %" PRIu64
+ MGMTD_FE_ADAPTER_DBG("Got GET_REQ for DS:%s (xpaths: %d) on session-id %" PRIu64
" from '%s'",
- (int)fe_msg->get_req->config,
mgmt_ds_id2name(fe_msg->get_req->ds_id),
(int)fe_msg->get_req->n_data,
fe_msg->get_req->session_id, adapter->name);
@@ -1028,12 +1061,186 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
return 0;
}
+/**
+ * Send result of get-tree request back to the FE client.
+ *
+ * Args:
+ * session: the session.
+ * req_id: the request ID.
+ * short_circuit_ok: if allowed to short circuit the message.
+ *	result_type: LYD_FORMAT for the sent output.
+ * tree: the tree to send, can be NULL which will send an empty tree.
+ * partial_error: if an error occurred during gathering results.
+ *
+ * Return:
+ * Any error that occurs -- the message is likely not sent if non-zero.
+ */
+static int fe_adapter_send_tree_data(struct mgmt_fe_session_ctx *session,
+ uint64_t req_id, bool short_circuit_ok,
+ uint8_t result_type,
+ const struct lyd_node *tree,
+ int partial_error)
+
+{
+ struct mgmt_msg_tree_data *msg;
+ struct lyd_node *empty = NULL;
+ uint8_t *buf = NULL;
+ int ret = 0;
+
+ darr_append_n(buf, offsetof(typeof(*msg), result));
+ msg = (typeof(msg))buf;
+ msg->refer_id = session->session_id;
+ msg->req_id = req_id;
+ msg->code = MGMT_MSG_CODE_TREE_DATA;
+ msg->partial_error = partial_error;
+ msg->result_type = result_type;
+
+ if (!tree) {
+ empty = yang_dnode_new(ly_native_ctx, false);
+ tree = empty;
+ }
+
+ ret = yang_print_tree_append(&buf, tree, result_type,
+ (LYD_PRINT_WD_EXPLICIT |
+ LYD_PRINT_WITHSIBLINGS));
+ /* buf may have been reallocated and moved */
+ msg = (typeof(msg))buf;
+
+
+ if (ret != LY_SUCCESS) {
+ MGMTD_FE_ADAPTER_ERR("Error building get-tree result for client %s session-id %" PRIu64
+ " req-id %" PRIu64
+ " scok %d result type %u",
+ session->adapter->name, session->session_id,
+ req_id, short_circuit_ok, result_type);
+ goto done;
+ }
+
+ MGMTD_FE_ADAPTER_DBG("Sending get-tree result from adapter %s to session-id %" PRIu64
+ " req-id %" PRIu64 " scok %d result type %u len %u",
+ session->adapter->name, session->session_id, req_id,
+ short_circuit_ok, result_type, darr_len(buf));
+
+ ret = fe_adapter_send_native_msg(session->adapter, buf, darr_len(buf),
+ short_circuit_ok);
+done:
+ if (empty)
+ yang_dnode_free(empty);
+ darr_free(buf);
+
+ return ret;
+}
+
+/**
+ * Handle a get-tree message from the FE client.
+ *
+ * Looks up which backend clients provide the requested xpath, creates a SHOW
+ * transaction for the session and issues a get-tree request under it. On any
+ * failure an error (or an empty tree when no backend owns the xpath) is sent
+ * back to the client.
+ */
+static void fe_adapter_handle_get_tree(struct mgmt_fe_session_ctx *session,
+				       void *data, size_t len)
+{
+	struct mgmt_msg_get_tree *msg = data;
+	uint64_t req_id = msg->req_id;
+	uint64_t clients;
+	int ret;
+
+	MGMTD_FE_ADAPTER_DBG("Received get-tree request from client %s for session-id %" PRIu64
+			     " req-id %" PRIu64,
+			     session->adapter->name, session->session_id,
+			     msg->req_id);
+
+	/* Only one transaction per session at a time. */
+	if (session->txn_id != MGMTD_TXN_ID_NONE) {
+		fe_adapter_send_error(session, req_id, false, -EINPROGRESS,
+				      "Transaction in progress txn-id: %" PRIu64
+				      " for session-id: %" PRIu64,
+				      session->txn_id, session->session_id);
+		return;
+	}
+
+	clients = mgmt_be_interested_clients(msg->xpath, false);
+	if (!clients) {
+		MGMTD_FE_ADAPTER_DBG("No backends provide xpath: %s for txn-id: %" PRIu64
+				     " session-id: %" PRIu64,
+				     msg->xpath, session->txn_id,
+				     session->session_id);
+
+		/* No backend owns this xpath; reply with an empty tree. */
+		fe_adapter_send_tree_data(session, req_id, false,
+					  msg->result_type, NULL, 0);
+		return;
+	}
+
+	/* Start a SHOW Transaction */
+	session->txn_id = mgmt_create_txn(session->session_id,
+					  MGMTD_TXN_TYPE_SHOW);
+	if (session->txn_id == MGMTD_TXN_ID_NONE) {
+		fe_adapter_send_error(session, req_id, false, -EINVAL,
+				      "failed to create a 'show' txn");
+		return;
+	}
+
+	MGMTD_FE_ADAPTER_DBG("Created new show txn-id: %" PRIu64
+			     " for session-id: %" PRIu64,
+			     session->txn_id, session->session_id);
+
+	/* Create a GET-TREE request under the transaction */
+	ret = mgmt_txn_send_get_tree_oper(session->txn_id, req_id, clients,
+					  msg->result_type, msg->xpath);
+	if (ret) {
+		/* destroy the just created txn */
+		mgmt_destroy_txn(&session->txn_id);
+		fe_adapter_send_error(session, req_id, false, -EINVAL,
+				      "failed to create get-tree request");
+	}
+}
+
+/**
+ * Handle a native encoded message from the FE client.
+ */
+static void fe_adapter_handle_native_msg(struct mgmt_fe_client_adapter *adapter,
+ struct mgmt_msg_header *msg,
+ size_t msg_len)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ session = mgmt_session_id2ctx(msg->refer_id);
+ if (!session) {
+ MGMTD_FE_ADAPTER_ERR("adapter %s: recv msg unknown session-id %" PRIu64,
+ adapter->name, msg->refer_id);
+ return;
+ }
+ assert(session->adapter == adapter);
+
+ switch (msg->code) {
+ case MGMT_MSG_CODE_GET_TREE:
+ fe_adapter_handle_get_tree(session, msg, msg_len);
+ break;
+ default:
+ MGMTD_FE_ADAPTER_ERR("unknown native message session-id %" PRIu64
+ " req-id %" PRIu64
+ " code %u to FE adapter %s",
+ msg->refer_id, msg->req_id, msg->code,
+ adapter->name);
+ break;
+ }
+}
+
+
static void mgmt_fe_adapter_process_msg(uint8_t version, uint8_t *data,
size_t len, struct msg_conn *conn)
{
struct mgmt_fe_client_adapter *adapter = conn->user;
- Mgmtd__FeMessage *fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
+ Mgmtd__FeMessage *fe_msg;
+
+ if (version == MGMT_MSG_VERSION_NATIVE) {
+ struct mgmt_msg_header *msg = (typeof(msg))data;
+
+ if (len >= sizeof(*msg))
+ fe_adapter_handle_native_msg(adapter, msg, len);
+ else
+ MGMTD_FE_ADAPTER_ERR("native message to adapter %s too short %zu",
+ adapter->name, len);
+ return;
+ }
+ fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
if (!fe_msg) {
MGMTD_FE_ADAPTER_DBG(
"Failed to decode %zu bytes for adapter: %s", len,
@@ -1209,8 +1416,54 @@ int mgmt_fe_send_get_reply(uint64_t session_id, uint64_t txn_id,
error_if_any);
}
-struct mgmt_setcfg_stats *
-mgmt_fe_get_session_setcfg_stats(uint64_t session_id)
+int mgmt_fe_adapter_send_tree_data(uint64_t session_id, uint64_t txn_id,
+ uint64_t req_id, LYD_FORMAT result_type,
+ const struct lyd_node *tree,
+ int partial_error, bool short_circuit_ok)
+{
+ struct mgmt_fe_session_ctx *session;
+ int ret;
+
+ session = mgmt_session_id2ctx(session_id);
+ if (!session || session->txn_id != txn_id)
+ return -1;
+
+ ret = fe_adapter_send_tree_data(session, req_id, short_circuit_ok,
+ result_type, tree, partial_error);
+
+ mgmt_destroy_txn(&session->txn_id);
+
+ return ret;
+}
+
+/**
+ * Send an error back to the FE client and cleanup any in-progress txn.
+ */
+int mgmt_fe_adapter_txn_error(uint64_t txn_id, uint64_t req_id,
+ bool short_circuit_ok, int16_t error,
+ const char *errstr)
+{
+ struct mgmt_fe_session_ctx *session;
+ int ret;
+
+ session = fe_adapter_session_by_txn_id(txn_id);
+ if (!session) {
+ MGMTD_FE_ADAPTER_ERR("failed sending error for txn-id %" PRIu64
+ " session not found",
+ txn_id);
+ return -ENOENT;
+ }
+
+
+ ret = fe_adapter_send_error(session, req_id, false, error, "%s", errstr);
+
+ mgmt_destroy_txn(&session->txn_id);
+
+ return ret;
+}
+
+
+struct mgmt_setcfg_stats *mgmt_fe_get_session_setcfg_stats(uint64_t session_id)
{
struct mgmt_fe_session_ctx *session;
diff --git a/mgmtd/mgmt_fe_adapter.h b/mgmtd/mgmt_fe_adapter.h
index 1172262a45..09d64415bc 100644
--- a/mgmtd/mgmt_fe_adapter.h
+++ b/mgmtd/mgmt_fe_adapter.h
@@ -138,6 +138,52 @@ extern int mgmt_fe_send_get_reply(uint64_t session_id, uint64_t txn_id,
Mgmtd__YangDataReply *data_resp,
const char *error_if_any);
+/**
+ * Send get-tree data reply back to client.
+ *
+ * This also cleans up and frees the transaction.
+ *
+ * Args:
+ * session_id: the session.
+ * txn_id: the txn_id this data pertains to
+ * req_id: the req id for the get_tree message
+ * result_type: the format of the result data.
+ * tree: the results.
+ *	partial_error: non-zero if there were errors while gathering results.
+ * short_circuit_ok: True if OK to short-circuit the call.
+ *
+ * Return:
+ * the return value from the underlying send function.
+ *
+ */
+extern int mgmt_fe_adapter_send_tree_data(uint64_t session_id, uint64_t txn_id,
+ uint64_t req_id,
+ LYD_FORMAT result_type,
+ const struct lyd_node *tree,
+ int partial_error,
+ bool short_circuit_ok);
+
+/**
+ * Send an error back to the FE client using native messaging.
+ *
+ * This also cleans up and frees the transaction.
+ *
+ * Args:
+ *	txn_id: the txn_id this error pertains to.
+ *	req_id: the request id this error pertains to.
+ *	short_circuit_ok: True if OK to short-circuit the call.
+ *	error: An integer error value.
+ *	errstr: the error string to send back to the FE client.
+ *
+ * Return:
+ * the return value from the underlying send function.
+ *
+ */
+extern int mgmt_fe_adapter_txn_error(uint64_t txn_id, uint64_t req_id,
+ bool short_circuit_ok, int16_t error,
+ const char *errstr);
+
+
/* Fetch frontend client session set-config stats */
extern struct mgmt_setcfg_stats *
mgmt_fe_get_session_setcfg_stats(uint64_t session_id);
diff --git a/mgmtd/mgmt_history.c b/mgmtd/mgmt_history.c
index d4069325ca..ddc5a1844e 100644
--- a/mgmtd/mgmt_history.c
+++ b/mgmtd/mgmt_history.c
@@ -261,7 +261,9 @@ failed_unlock:
void mgmt_history_rollback_complete(bool success)
{
- vty_mgmt_resume_response(rollback_vty, success);
+ vty_mgmt_resume_response(rollback_vty,
+ success ? CMD_SUCCESS
+ : CMD_WARNING_CONFIG_FAILED);
rollback_vty = NULL;
}
diff --git a/mgmtd/mgmt_main.c b/mgmtd/mgmt_main.c
index b58b93c71d..f0fb7f8a7b 100644
--- a/mgmtd/mgmt_main.c
+++ b/mgmtd/mgmt_main.c
@@ -189,12 +189,33 @@ static void mgmt_vrf_terminate(void)
extern const struct frr_yang_module_info frr_staticd_info;
#endif
+
+/*
+ * These are stub info structs that are used to load the modules used by backend
+ * clients into mgmtd. The modules are used by libyang in order to support
+ * parsing binary data returns from the backend.
+ */
+const struct frr_yang_module_info zebra_info = {
+ .name = "frr-zebra",
+ .ignore_cbs = true,
+ .nodes = { { .xpath = NULL } },
+};
+
+const struct frr_yang_module_info affinity_map_info = {
+ .name = "frr-affinity-map",
+ .ignore_cbs = true,
+ .nodes = { { .xpath = NULL } },
+};
+
+const struct frr_yang_module_info zebra_route_map_info = {
+ .name = "frr-zebra-route-map",
+ .ignore_cbs = true,
+ .nodes = { { .xpath = NULL } },
+};
+
/*
* List of YANG modules to be loaded in the process context of
* MGMTd.
- *
- * NOTE: In future this will also include the YANG modules of
- * all individual Backend clients.
*/
static const struct frr_yang_module_info *const mgmt_yang_modules[] = {
&frr_filter_info,
@@ -202,11 +223,15 @@ static const struct frr_yang_module_info *const mgmt_yang_modules[] = {
&frr_route_map_info,
&frr_routing_info,
&frr_vrf_info,
-/*
- * YANG module info supported by backend clients get added here.
- * NOTE: Always set .ignore_cbs true for to avoid validating
- * backend configuration northbound callbacks during loading.
- */
+
+ /*
+ * YANG module info used by backend clients get added here.
+ */
+
+ &zebra_info,
+ &affinity_map_info,
+ &zebra_route_map_info,
+
#ifdef HAVE_STATICD
&frr_staticd_info,
#endif
diff --git a/mgmtd/mgmt_memory.c b/mgmtd/mgmt_memory.c
index b2a0f0e848..0fce61aa97 100644
--- a/mgmtd/mgmt_memory.c
+++ b/mgmtd/mgmt_memory.c
@@ -29,5 +29,6 @@ DEFINE_MTYPE(MGMTD, MGMTD_TXN_SETCFG_REQ, "txn set-config requests");
DEFINE_MTYPE(MGMTD, MGMTD_TXN_COMMCFG_REQ, "txn commit-config requests");
DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REQ, "txn get-data requests");
DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REPLY, "txn get-data replies");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETTREE_REQ, "txn get-tree requests");
DEFINE_MTYPE(MGMTD, MGMTD_TXN_CFG_BATCH, "txn config batches");
DEFINE_MTYPE(MGMTD, MGMTD_CMT_INFO, "commit info");
diff --git a/mgmtd/mgmt_memory.h b/mgmtd/mgmt_memory.h
index 06518e3838..d5b6aa632e 100644
--- a/mgmtd/mgmt_memory.h
+++ b/mgmtd/mgmt_memory.h
@@ -23,6 +23,7 @@ DECLARE_MTYPE(MGMTD_TXN_SETCFG_REQ);
DECLARE_MTYPE(MGMTD_TXN_COMMCFG_REQ);
DECLARE_MTYPE(MGMTD_TXN_GETDATA_REQ);
DECLARE_MTYPE(MGMTD_TXN_GETDATA_REPLY);
+DECLARE_MTYPE(MGMTD_TXN_GETTREE_REQ);
DECLARE_MTYPE(MGMTD_TXN_CFG_BATCH);
DECLARE_MTYPE(MGMTD_BE_ADAPTER_MSG_BUF);
DECLARE_MTYPE(MGMTD_CMT_INFO);
diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c
index c2dca2aea1..7e625c73ec 100644
--- a/mgmtd/mgmt_txn.c
+++ b/mgmtd/mgmt_txn.c
@@ -7,9 +7,12 @@
*/
#include <zebra.h>
+#include "darr.h"
#include "hash.h"
#include "jhash.h"
#include "libfrr.h"
+#include "mgmt_msg.h"
+#include "mgmt_msg_native.h"
#include "mgmtd/mgmt.h"
#include "mgmtd/mgmt_memory.h"
#include "mgmtd/mgmt_txn.h"
@@ -26,8 +29,9 @@ enum mgmt_txn_event {
MGMTD_TXN_PROC_SETCFG = 1,
MGMTD_TXN_PROC_COMMITCFG,
MGMTD_TXN_PROC_GETCFG,
- MGMTD_TXN_PROC_GETDATA,
+ MGMTD_TXN_PROC_GETTREE,
MGMTD_TXN_COMMITCFG_TIMEOUT,
+ MGMTD_TXN_GETTREE_TIMEOUT,
MGMTD_TXN_CLEANUP
};
@@ -166,6 +170,16 @@ struct mgmt_get_data_req {
int total_reply;
};
+
+struct txn_req_get_tree {
+ char *xpath; /* xpath of tree to get */
+ uint8_t result_type; /* LYD_FORMAT for results */
+ uint64_t sent_clients; /* Bitmask of clients sent req to */
+ uint64_t recv_clients; /* Bitmask of clients recv reply from */
+	int32_t partial_error;	 /* an error recorded while gathering results */
+ struct lyd_node *client_results; /* result tree from clients */
+};
+
struct mgmt_txn_req {
struct mgmt_txn_ctx *txn;
enum mgmt_txn_event req_event;
@@ -173,6 +187,7 @@ struct mgmt_txn_req {
union {
struct mgmt_set_cfg_req *set_cfg;
struct mgmt_get_data_req *get_data;
+ struct txn_req_get_tree *get_tree;
struct mgmt_commit_cfg_req commit_cfg;
} req;
@@ -196,7 +211,9 @@ struct mgmt_txn_ctx {
struct event *proc_comm_cfg;
struct event *proc_get_cfg;
struct event *proc_get_data;
+ struct event *proc_get_tree;
struct event *comm_cfg_timeout;
+ struct event *get_tree_timeout;
struct event *clnup;
/* List of backend adapters involved in this transaction */
@@ -206,6 +223,10 @@ struct mgmt_txn_ctx {
struct mgmt_txns_item list_linkage;
+ /* TODO: why do we need unique lists for each type of transaction since
+ * a transaction is of only 1 type?
+ */
+
/*
* List of pending set-config requests for a given
* transaction/session. Just one list for requests
@@ -221,13 +242,9 @@ struct mgmt_txn_ctx {
*/
struct mgmt_txn_reqs_head get_cfg_reqs;
/*
- * List of pending get-data requests for a given
- * transaction/session Two lists, one for requests
- * not processed at all, and one for requests that
- * has been sent to backend for processing.
+ * List of pending get-tree requests.
*/
- struct mgmt_txn_reqs_head get_data_reqs;
- struct mgmt_txn_reqs_head pending_get_datas;
+ struct mgmt_txn_reqs_head get_tree_reqs;
/*
* There will always be one commit-config allowed for a given
* transaction/session. No need to maintain lists for it.
@@ -386,17 +403,16 @@ static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
" txn-id: %" PRIu64 " session-id: %" PRIu64,
txn_req->req_id, txn->txn_id, txn->session_id);
break;
- case MGMTD_TXN_PROC_GETDATA:
- txn_req->req.get_data =
- XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
- sizeof(struct mgmt_get_data_req));
- assert(txn_req->req.get_data);
- mgmt_txn_reqs_add_tail(&txn->get_data_reqs, txn_req);
- MGMTD_TXN_DBG("Added a new GETDATA req-id: %" PRIu64
+ case MGMTD_TXN_PROC_GETTREE:
+ txn_req->req.get_tree = XCALLOC(MTYPE_MGMTD_TXN_GETTREE_REQ,
+ sizeof(struct txn_req_get_tree));
+ mgmt_txn_reqs_add_tail(&txn->get_tree_reqs, txn_req);
+ MGMTD_TXN_DBG("Added a new GETTREE req-id: %" PRIu64
" txn-id: %" PRIu64 " session-id: %" PRIu64,
txn_req->req_id, txn->txn_id, txn->session_id);
break;
case MGMTD_TXN_COMMITCFG_TIMEOUT:
+ case MGMTD_TXN_GETTREE_TIMEOUT:
case MGMTD_TXN_CLEANUP:
break;
}
@@ -496,24 +512,17 @@ static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
break;
- case MGMTD_TXN_PROC_GETDATA:
- for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
- indx++) {
- if ((*txn_req)->req.get_data->xpaths[indx])
- free((void *)(*txn_req)
- ->req.get_data->xpaths[indx]);
- }
- pending_list = &(*txn_req)->txn->pending_get_datas;
- req_list = &(*txn_req)->txn->get_data_reqs;
- MGMTD_TXN_DBG("Deleting GETDATA req-id: %" PRIu64
- " txn-id: %" PRIu64,
+ case MGMTD_TXN_PROC_GETTREE:
+ MGMTD_TXN_DBG("Deleting GETTREE req-id: %" PRIu64
+ " of txn-id: %" PRIu64,
(*txn_req)->req_id, (*txn_req)->txn->txn_id);
- if ((*txn_req)->req.get_data->reply)
- XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
- (*txn_req)->req.get_data->reply);
- XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
+ req_list = &(*txn_req)->txn->get_tree_reqs;
+ lyd_free_all((*txn_req)->req.get_tree->client_results);
+ XFREE(MTYPE_MGMTD_XPATH, (*txn_req)->req.get_tree->xpath);
+ XFREE(MTYPE_MGMTD_TXN_GETTREE_REQ, (*txn_req)->req.get_tree);
break;
case MGMTD_TXN_COMMITCFG_TIMEOUT:
+ case MGMTD_TXN_GETTREE_TIMEOUT:
case MGMTD_TXN_CLEANUP:
break;
}
@@ -1260,6 +1269,66 @@ static void mgmt_txn_cfg_commit_timedout(struct event *thread)
"Operation on the backend timed-out. Aborting commit!");
}
+
+/*
+ * Complete a get-tree request: cancel the backend timeout, forward the
+ * collected result tree to the FE adapter, then free the request.
+ *
+ * NOTE: the original ordering freed `txn_req` (and with it `get_tree`) before
+ * the error path read `txn_req->req_id` and `get_tree->result_type` — a
+ * use-after-free. The request is now freed only after all uses.
+ */
+static int txn_get_tree_data_done(struct mgmt_txn_ctx *txn,
+				  struct mgmt_txn_req *txn_req)
+{
+	struct txn_req_get_tree *get_tree = txn_req->req.get_tree;
+	uint64_t req_id = txn_req->req_id;
+	int ret = 0;
+
+	/* cancel timer and send reply onward */
+	EVENT_OFF(txn->get_tree_timeout);
+
+	ret = mgmt_fe_adapter_send_tree_data(txn->session_id, txn->txn_id,
+					     req_id, get_tree->result_type,
+					     get_tree->client_results,
+					     get_tree->partial_error, false);
+
+	if (ret) {
+		MGMTD_TXN_ERR("Error saving the results of GETTREE for txn-id %" PRIu64
+			      " req_id %" PRIu64 " to requested type %u",
+			      txn->txn_id, req_id, get_tree->result_type);
+
+		(void)mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false,
+						ret,
+						"Error converting results of GETTREE");
+	}
+
+	/* we're done with the request; this also frees `get_tree` */
+	mgmt_txn_req_free(&txn_req);
+
+	return ret;
+}
+
+
+static void txn_get_tree_timeout(struct event *thread)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+
+ txn_req = (struct mgmt_txn_req *)EVENT_ARG(thread);
+ txn = txn_req->txn;
+
+ assert(txn);
+ assert(txn->type == MGMTD_TXN_TYPE_SHOW);
+
+
+ MGMTD_TXN_ERR("Backend timeout txn-id: %" PRIu64 " ending get-tree",
+ txn->txn_id);
+
+ /*
+ * Send a get-tree data reply.
+ *
+ * NOTE: The transaction cleanup will be triggered from Front-end
+ * adapter.
+ */
+
+ txn_req->req.get_tree->partial_error = -ETIMEDOUT;
+ txn_get_tree_data_done(txn, txn_req);
+}
+
/*
* Send CFG_APPLY_REQs to all the backend client.
*
@@ -1474,20 +1543,10 @@ static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
txn_req->txn->session_id, txn_req->req_id);
}
break;
- case MGMTD_TXN_PROC_GETDATA:
- if (mgmt_fe_send_get_reply(txn_req->txn->session_id,
- txn_req->txn->txn_id, get_req->ds_id,
- txn_req->req_id, MGMTD_SUCCESS,
- data_reply, NULL) != 0) {
- MGMTD_TXN_ERR("Failed to send GET-DATA-REPLY txn-id: %" PRIu64
- " session-id: %" PRIu64
- " req-id: %" PRIu64,
- txn_req->txn->txn_id,
- txn_req->txn->session_id, txn_req->req_id);
- }
- break;
case MGMTD_TXN_PROC_SETCFG:
case MGMTD_TXN_PROC_COMMITCFG:
+ case MGMTD_TXN_PROC_GETTREE:
+ case MGMTD_TXN_GETTREE_TIMEOUT:
case MGMTD_TXN_COMMITCFG_TIMEOUT:
case MGMTD_TXN_CLEANUP:
MGMTD_TXN_ERR("Invalid Txn-Req-Event %u", txn_req->req_event);
@@ -1500,10 +1559,8 @@ static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
mgmt_reset_get_data_reply_buf(get_req);
}
-static void mgmt_txn_iter_and_send_get_cfg_reply(const char *xpath,
- struct lyd_node *node,
- struct nb_node *nb_node,
- void *ctx)
+static void txn_iter_get_config_data_cb(const char *xpath, struct lyd_node *node,
+ struct nb_node *nb_node, void *ctx)
{
struct mgmt_txn_req *txn_req;
struct mgmt_get_data_req *get_req;
@@ -1518,8 +1575,7 @@ static void mgmt_txn_iter_and_send_get_cfg_reply(const char *xpath,
if (!(node->schema->nodetype & LYD_NODE_TERM))
return;
- assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG ||
- txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
+ assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
get_req = txn_req->req.get_data;
assert(get_req);
@@ -1581,7 +1637,7 @@ static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
*/
if (mgmt_ds_iter_data(get_data->ds_id, root,
get_data->xpaths[indx],
- mgmt_txn_iter_and_send_get_cfg_reply,
+ txn_iter_get_config_data_cb,
(void *)txn_req) == -1) {
MGMTD_TXN_DBG("Invalid Xpath '%s",
get_data->xpaths[indx]);
@@ -1664,54 +1720,6 @@ static void mgmt_txn_process_get_cfg(struct event *thread)
}
}
-static void mgmt_txn_process_get_data(struct event *thread)
-{
- struct mgmt_txn_ctx *txn;
- struct mgmt_txn_req *txn_req;
- int num_processed = 0;
-
- txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
- assert(txn);
-
- MGMTD_TXN_DBG("Processing %zu GET_DATA requests txn-id: %" PRIu64
- " session-id: %" PRIu64,
- mgmt_txn_reqs_count(&txn->get_data_reqs), txn->txn_id,
- txn->session_id);
-
- FOREACH_TXN_REQ_IN_LIST (&txn->get_data_reqs, txn_req) {
- assert(txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
-
- /*
- * TODO: Trigger GET procedures for Backend
- * For now return back error.
- */
- mgmt_fe_send_get_reply(txn->session_id, txn->txn_id,
- txn_req->req.get_data->ds_id,
- txn_req->req_id, MGMTD_INTERNAL_ERROR,
- NULL, "GET-DATA is not supported yet!");
- /*
- * Delete the txn request.
- * Note: The following will remove it from the list
- * as well.
- */
- mgmt_txn_req_free(&txn_req);
-
- /*
- * Else the transaction would have been already deleted or
- * moved to corresponding pending list. No need to delete it.
- */
- num_processed++;
- if (num_processed == MGMTD_TXN_MAX_NUM_GETDATA_PROC)
- break;
- }
-
- if (mgmt_txn_reqs_count(&txn->get_data_reqs)) {
- MGMTD_TXN_DBG("Processed maximum number of Get-Data requests (%d/%d). Rescheduling for rest.",
- num_processed, MGMTD_TXN_MAX_NUM_GETDATA_PROC);
- mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
- }
-}
-
static struct mgmt_txn_ctx *
mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
enum mgmt_txn_type type)
@@ -1733,7 +1741,7 @@ static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
/*
* For 'CONFIG' transaction check if one is already created
- * or not.
+ * or not. TODO: figure out what code counts on this and fix it.
*/
if (type == MGMTD_TXN_TYPE_CONFIG && mgmt_txn_mm->cfg_txn) {
if (mgmt_config_txn_in_progress() == session_id)
@@ -1749,10 +1757,10 @@ static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
txn->session_id = session_id;
txn->type = type;
mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
+ /* TODO: why do we need N lists for one transaction */
mgmt_txn_reqs_init(&txn->set_cfg_reqs);
mgmt_txn_reqs_init(&txn->get_cfg_reqs);
- mgmt_txn_reqs_init(&txn->get_data_reqs);
- mgmt_txn_reqs_init(&txn->pending_get_datas);
+ mgmt_txn_reqs_init(&txn->get_tree_reqs);
txn->commit_cfg_req = NULL;
txn->refcount = 0;
if (!mgmt_txn_mm->next_txn_id)
@@ -1834,6 +1842,13 @@ static inline struct mgmt_txn_ctx *mgmt_txn_id2ctx(uint64_t txn_id)
return txn;
}
+uint64_t mgmt_txn_get_session_id(uint64_t txn_id)
+{
+ struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
+
+ return txn ? txn->session_id : MGMTD_SESSION_ID_NONE;
+}
+
static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file, int line)
{
txn->refcount++;
@@ -1859,6 +1874,7 @@ static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
EVENT_OFF((*txn)->proc_get_data);
EVENT_OFF((*txn)->proc_comm_cfg);
EVENT_OFF((*txn)->comm_cfg_timeout);
+ EVENT_OFF((*txn)->get_tree_timeout);
hash_release(mgmt_txn_mm->txn_hash, *txn);
mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);
@@ -1922,19 +1938,24 @@ static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg, txn,
&tv, &txn->proc_get_cfg);
break;
- case MGMTD_TXN_PROC_GETDATA:
- event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data, txn,
- &tv, &txn->proc_get_data);
- break;
case MGMTD_TXN_COMMITCFG_TIMEOUT:
- event_add_timer_msec(mgmt_txn_tm, mgmt_txn_cfg_commit_timedout,
- txn, MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
- &txn->comm_cfg_timeout);
+ event_add_timer(mgmt_txn_tm, mgmt_txn_cfg_commit_timedout, txn,
+ MGMTD_TXN_CFG_COMMIT_MAX_DELAY_SEC,
+ &txn->comm_cfg_timeout);
+ break;
+ case MGMTD_TXN_GETTREE_TIMEOUT:
+ event_add_timer(mgmt_txn_tm, txn_get_tree_timeout, txn,
+ MGMTD_TXN_GET_TREE_MAX_DELAY_SEC,
+ &txn->get_tree_timeout);
break;
case MGMTD_TXN_CLEANUP:
tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
&txn->clnup);
+ break;
+ case MGMTD_TXN_PROC_GETTREE:
+ assert(!"code bug do not register this event");
+ break;
}
}
@@ -2314,8 +2335,7 @@ int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
if (!txn)
return -1;
- req_event = cfg_root ? MGMTD_TXN_PROC_GETCFG : MGMTD_TXN_PROC_GETDATA;
-
+ req_event = MGMTD_TXN_PROC_GETCFG;
txn_req = mgmt_txn_req_alloc(txn, req_id, req_event);
txn_req->req.get_data->ds_id = ds_id;
txn_req->req.get_data->cfg_root = cfg_root;
@@ -2333,6 +2353,203 @@ int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
return 0;
}
+
+/**
+ * Send get-tree requests to each client indicated in `clients` bitmask, which
+ * has registered operational state that matches the given `xpath`
+ */
+int mgmt_txn_send_get_tree_oper(uint64_t txn_id, uint64_t req_id,
+ uint64_t clients, LYD_FORMAT result_type,
+ const char *xpath)
+{
+ struct mgmt_msg_get_tree *msg;
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+ struct txn_req_get_tree *get_tree;
+ enum mgmt_be_client_id id;
+ ssize_t slen = strlen(xpath);
+ int ret;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn)
+ return -1;
+
+ /* If error in this function below here, be sure to free the req */
+ txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETTREE);
+ get_tree = txn_req->req.get_tree;
+ get_tree->result_type = result_type;
+ get_tree->xpath = XSTRDUP(MTYPE_MGMTD_XPATH, xpath);
+
+ msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_get_tree, slen + 1,
+ MTYPE_MSG_NATIVE_GET_TREE);
+ msg->refer_id = txn_id;
+ msg->req_id = req_id;
+ msg->code = MGMT_MSG_CODE_GET_TREE;
+ /* Always operate with the binary format in the backend */
+ msg->result_type = LYD_LYB;
+ strlcpy(msg->xpath, xpath, slen + 1);
+
+ assert(clients);
+ FOREACH_BE_CLIENT_BITS (id, clients) {
+ ret = mgmt_be_send_native(id, msg);
+ if (ret) {
+ MGMTD_TXN_ERR("Could not send get-tree message to backend client %s",
+ mgmt_be_client_id2name(id));
+ continue;
+ }
+
+ MGMTD_TXN_DBG("Sent get-tree req to backend client %s",
+ mgmt_be_client_id2name(id));
+
+ /* record that we sent the request to the client */
+ get_tree->sent_clients |= (1u << id);
+ }
+
+ mgmt_msg_native_free_msg(msg);
+
+ /* Start timeout timer - pulled out of register event code so we can
+ * pass a different arg
+ */
+ event_add_timer(mgmt_txn_tm, txn_get_tree_timeout, txn_req,
+ MGMTD_TXN_GET_TREE_MAX_DELAY_SEC,
+ &txn->get_tree_timeout);
+ return 0;
+}
+
+/*
+ * Error reply from the backend client.
+ */
+int mgmt_txn_notify_error(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t req_id, int error,
+ const char *errstr)
+{
+ enum mgmt_be_client_id id = adapter->id;
+ struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
+ struct txn_req_get_tree *get_tree;
+ struct mgmt_txn_req *txn_req;
+
+ if (!txn) {
+ MGMTD_TXN_ERR("Error reply from %s cannot find txn-id %" PRIu64,
+ adapter->name, txn_id);
+ return -1;
+ }
+
+ /* Find the request. */
+ FOREACH_TXN_REQ_IN_LIST (&txn->get_tree_reqs, txn_req)
+ if (txn_req->req_id == req_id)
+ break;
+ if (!txn_req) {
+ MGMTD_TXN_ERR("Error reply from %s for txn-id %" PRIu64
+ " cannot find req_id %" PRIu64,
+ adapter->name, txn_id, req_id);
+ return -1;
+ }
+
+ MGMTD_TXN_ERR("Error reply from %s for txn-id %" PRIu64
+ " req_id %" PRIu64,
+ adapter->name, txn_id, req_id);
+
+ switch (txn_req->req_event) {
+ case MGMTD_TXN_PROC_GETTREE:
+ get_tree = txn_req->req.get_tree;
+ get_tree->recv_clients |= (1u << id);
+ get_tree->partial_error = error;
+
+ /* check if done yet */
+ if (get_tree->recv_clients != get_tree->sent_clients)
+ return 0;
+ return txn_get_tree_data_done(txn, txn_req);
+
+ /* non-native message events */
+ case MGMTD_TXN_PROC_SETCFG:
+ case MGMTD_TXN_PROC_COMMITCFG:
+ case MGMTD_TXN_PROC_GETCFG:
+ case MGMTD_TXN_COMMITCFG_TIMEOUT:
+ case MGMTD_TXN_GETTREE_TIMEOUT:
+ case MGMTD_TXN_CLEANUP:
+ default:
+ assert(!"non-native req event in native error path");
+ return -1;
+ }
+}
+
+/*
+ * Get-tree data from the backend client.
+ */
+int mgmt_txn_notify_tree_data_reply(struct mgmt_be_client_adapter *adapter,
+ struct mgmt_msg_tree_data *data_msg,
+ size_t msg_len)
+{
+ uint64_t txn_id = data_msg->refer_id;
+ uint64_t req_id = data_msg->req_id;
+
+ enum mgmt_be_client_id id = adapter->id;
+ struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
+ struct mgmt_txn_req *txn_req;
+ struct txn_req_get_tree *get_tree;
+ struct lyd_node *tree = NULL;
+ LY_ERR err;
+
+ if (!txn) {
+ MGMTD_TXN_ERR("GETTREE reply from %s for a missing txn-id %" PRIu64,
+ adapter->name, txn_id);
+ return -1;
+ }
+
+ /* Find the request. */
+ FOREACH_TXN_REQ_IN_LIST (&txn->get_tree_reqs, txn_req)
+ if (txn_req->req_id == req_id)
+ break;
+ if (!txn_req) {
+ MGMTD_TXN_ERR("GETTREE reply from %s for txn-id %" PRIu64
+ " missing req_id %" PRIu64,
+ adapter->name, txn_id, req_id);
+ return -1;
+ }
+
+ get_tree = txn_req->req.get_tree;
+
+ /* store the result */
+ err = lyd_parse_data_mem(ly_native_ctx, (const char *)data_msg->result,
+ data_msg->result_type,
+ LYD_PARSE_STRICT | LYD_PARSE_ONLY,
+ 0 /*LYD_VALIDATE_OPERATIONAL*/, &tree);
+ if (err) {
+ MGMTD_TXN_ERR("GETTREE reply from %s for txn-id %" PRIu64
+ " req_id %" PRIu64
+ " error parsing result of type %u",
+ adapter->name, txn_id, req_id,
+ data_msg->result_type);
+ }
+ if (!err) {
+ /* TODO: we could merge ly_errs here if it's not binary */
+
+ if (!get_tree->client_results)
+ get_tree->client_results = tree;
+ else
+ err = lyd_merge_siblings(&get_tree->client_results,
+ tree, LYD_MERGE_DESTRUCT);
+ if (err) {
+ MGMTD_TXN_ERR("GETTREE reply from %s for txn-id %" PRIu64
+ " req_id %" PRIu64 " error merging result",
+ adapter->name, txn_id, req_id);
+ }
+ }
+ if (!get_tree->partial_error)
+ get_tree->partial_error = (data_msg->partial_error
+ ? data_msg->partial_error
+ : (int)err);
+
+ if (!data_msg->more)
+ get_tree->recv_clients |= (1u << id);
+
+ /* check if done yet */
+ if (get_tree->recv_clients != get_tree->sent_clients)
+ return 0;
+
+ return txn_get_tree_data_done(txn, txn_req);
+}
+
void mgmt_txn_status_write(struct vty *vty)
{
struct mgmt_txn_ctx *txn;
diff --git a/mgmtd/mgmt_txn.h b/mgmtd/mgmt_txn.h
index a89d5fb939..4aa0677755 100644
--- a/mgmtd/mgmt_txn.h
+++ b/mgmtd/mgmt_txn.h
@@ -9,21 +9,19 @@
#ifndef _FRR_MGMTD_TXN_H_
#define _FRR_MGMTD_TXN_H_
+#include "lib/mgmt_msg_native.h"
#include "mgmtd/mgmt_be_adapter.h"
#include "mgmtd/mgmt.h"
#include "mgmtd/mgmt_ds.h"
-#define MGMTD_TXN_PROC_DELAY_MSEC 5
#define MGMTD_TXN_PROC_DELAY_USEC 10
#define MGMTD_TXN_MAX_NUM_SETCFG_PROC 128
#define MGMTD_TXN_MAX_NUM_GETCFG_PROC 128
#define MGMTD_TXN_MAX_NUM_GETDATA_PROC 128
-#define MGMTD_TXN_SEND_CFGVALIDATE_DELAY_MSEC 100
-#define MGMTD_TXN_SEND_CFGAPPLY_DELAY_MSEC 100
-#define MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC 30000 /* 30 seconds */
+#define MGMTD_TXN_CFG_COMMIT_MAX_DELAY_SEC 600
+#define MGMTD_TXN_GET_TREE_MAX_DELAY_SEC 600
-#define MGMTD_TXN_CLEANUP_DELAY_MSEC 100
#define MGMTD_TXN_CLEANUP_DELAY_USEC 10
#define MGMTD_TXN_ID_NONE 0
@@ -80,6 +78,12 @@ extern void mgmt_txn_destroy(void);
*/
extern uint64_t mgmt_config_txn_in_progress(void);
+/**
+ * Get the session ID associated with the given ``txn-id``.
+ *
+ */
+extern uint64_t mgmt_txn_get_session_id(uint64_t txn_id);
+
/*
* Create transaction.
*
@@ -190,6 +194,23 @@ extern int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
Mgmtd__YangGetDataReq **data_req,
size_t num_reqs);
+
+/**
+ * Send get-tree to the backend `clients`.
+ *
+ * Args:
+ * txn_id: Transaction identifier.
+ * req_id: FE client request identifier.
+ * clients: Bitmask of clients to send get-tree to.
+ * result_type: LYD_FORMAT result format.
+ * xpath: The xpath to get the tree from.
+ * Return:
+ * 0 on success.
+ */
+extern int mgmt_txn_send_get_tree_oper(uint64_t txn_id, uint64_t req_id,
+ uint64_t clients, LYD_FORMAT result_type,
+ const char *xpath);
+
/*
* Notifiy backend adapter on connection.
*/
@@ -228,6 +249,34 @@ mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
char *error_if_any,
struct mgmt_be_client_adapter *adapter);
+
+/**
+ * Process an error reply from a backend client to our get-tree request.
+ *
+ * Args:
+ * adapter: The adapter that received the result.
+ * txn_id: The transaction for this get-tree request.
+ * req_id: The request ID for this transaction.
+ * error: the integer error value (negative)
+ * errstr: the string description of the error.
+ */
+int mgmt_txn_notify_error(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t req_id, int error,
+ const char *errstr);
+
+/**
+ * Process a reply from a backend client to our get-tree request
+ *
+ * Args:
+ * adapter: The adapter that received the result.
+ * data_msg: The message from the backend.
+ * msg_len: Total length of the message.
+ */
+
+extern int mgmt_txn_notify_tree_data_reply(struct mgmt_be_client_adapter *adapter,
+ struct mgmt_msg_tree_data *data_msg,
+ size_t msg_len);
+
/*
* Dump transaction status to vty.
*/
diff --git a/mgmtd/mgmt_vty.c b/mgmtd/mgmt_vty.c
index 3116ccbaf7..64abb462c3 100644
--- a/mgmtd/mgmt_vty.c
+++ b/mgmtd/mgmt_vty.c
@@ -199,22 +199,32 @@ DEFPY(show_mgmt_get_config, show_mgmt_get_config_cmd,
}
DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
- "show mgmt get-data [candidate|operational|running]$dsname WORD$path",
- SHOW_STR MGMTD_STR
- "Get data from a specific datastore\n"
- "Candidate datastore\n"
- "Operational datastore (default)\n"
- "Running datastore\n"
- "XPath expression specifying the YANG data path\n")
+ "show mgmt get-data WORD$path [json|xml]$fmt",
+ SHOW_STR
+ MGMTD_STR
+ "Get data from the operational datastore\n"
+ "XPath expression specifying the YANG data root\n"
+ "JSON output format\n"
+ "XML output format\n")
{
- const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
- Mgmtd__DatastoreId datastore = MGMTD_DS_OPERATIONAL;
+ LYD_FORMAT format = (fmt && fmt[0] == 'x') ? LYD_XML : LYD_JSON;
+ int plen = strlen(path);
+ char *xpath = NULL;
+
+ /* get rid of extraneous trailing slash-* or single '/' unless root */
+ if (plen > 2 && ((path[plen - 2] == '/' && path[plen - 1] == '*') ||
+ (path[plen - 2] != '/' && path[plen - 1] == '/'))) {
+ plen = path[plen - 1] == '/' ? plen - 1 : plen - 2;
+ xpath = XSTRDUP(MTYPE_TMP, path);
+ xpath[plen] = 0;
+ path = xpath;
+ }
- if (dsname)
- datastore = mgmt_ds_name2id(dsname);
+ vty_mgmt_send_get_tree_req(vty, format, path);
+
+ if (xpath)
+ XFREE(MTYPE_TMP, xpath);
- xpath_list[0] = path;
- vty_mgmt_send_get_req(vty, false, datastore, xpath_list, 1);
return CMD_SUCCESS;
}
diff --git a/python/xref2vtysh.py b/python/xref2vtysh.py
index 0a7e28ec7a..75fff8ddd9 100644
--- a/python/xref2vtysh.py
+++ b/python/xref2vtysh.py
@@ -37,7 +37,7 @@ daemon_flags = {
"lib/filter_cli.c": "VTYSH_ACL",
"lib/if.c": "VTYSH_INTERFACE",
"lib/keychain.c": "VTYSH_KEYS",
- "lib/mgmt_be_client.c": "VTYSH_STATICD",
+ "lib/mgmt_be_client.c": "VTYSH_STATICD|VTYSH_ZEBRA",
"lib/mgmt_fe_client.c": "VTYSH_MGMTD",
"lib/lib_vty.c": "VTYSH_ALL",
"lib/log_vty.c": "VTYSH_ALL",
diff --git a/tests/lib/subdir.am b/tests/lib/subdir.am
index 6c1be50201..9247ac3358 100644
--- a/tests/lib/subdir.am
+++ b/tests/lib/subdir.am
@@ -162,6 +162,7 @@ tests_lib_test_darr_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_darr_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_darr_LDADD = $(ALL_TESTS_LDADD)
tests_lib_test_darr_SOURCES = tests/lib/test_darr.c
+EXTRA_DIST += tests/lib/test_darr.py
check_PROGRAMS += tests/lib/test_graph
diff --git a/tests/lib/test_darr.c b/tests/lib/test_darr.c
index 9150aed09d..74aedac4b7 100644
--- a/tests/lib/test_darr.c
+++ b/tests/lib/test_darr.c
@@ -14,7 +14,8 @@
* [x] - darr_append_n
* [x] - darr_append_nz
* [x] - darr_cap
- * [-] - darr_ensure_cap
+ * [x] - darr_ensure_avail
+ * [x] - darr_ensure_cap
* [x] - darr_ensure_i
* [x] - darr_foreach_i
* [x] - darr_foreach_p
@@ -23,6 +24,15 @@
* [ ] - darr_insertz
* [x] - darr_insert_n
* [x] - darr_insert_nz
+ * [x] - darr_in_sprintf
+ * [x] - darr_in_strcat
+ * [x] - darr_in_strcat_tail
+ * [ ] - darr_in_strcatf
+ * [ ] - darr_in_vstrcatf
+ * [x] - darr_in_strdup
+ * [x] - darr_in_strdup_cap
+ * [-] - darr_in_vsprintf
+ * [x] - darr_lasti
* [x] - darr_maxi
* [x] - darr_pop
* [x] - darr_push
@@ -31,6 +41,13 @@
* [x] - darr_remove_n
* [x] - darr_reset
* [x] - darr_setlen
+ * [x] - darr_set_strlen
+ * [x] - darr_sprintf
+ * [x] - darr_strdup
+ * [x] - darr_strdup_cap
+ * [x] - darr_strlen
+ * [x] - darr_strnul
+ * [ ] - darr_vsprintf
*/
static void test_int(void)
@@ -43,6 +60,11 @@ static void test_int(void)
int *dap;
uint i;
+ assert(darr_len(da1) == 0);
+ assert(darr_lasti(da1) == -1);
+ assert(darr_last(da1) == NULL);
+ assert(darr_end(da1) == NULL);
+
darr_ensure_i(da1, 0);
da1[0] = 0;
assert(darr_len(da1) == 1);
@@ -57,9 +79,11 @@ static void test_int(void)
da1[i] = i;
assert(darr_len(da1) == 5);
+ assert(darr_lasti(da1) == 4);
/* minimum non-pow2 array size for long long and smaller */
assert(darr_cap(da1) == 8);
assert(!memcmp(da1, a1, sizeof(a1)));
+ assert(&da1[darr_lasti(da1)] == darr_last(da1));
/* reverse the numbers */
darr_foreach_p (da1, dap)
@@ -185,6 +209,20 @@ static void test_struct(void)
assert(darr_cap(da1) == 8);
assert(!memcmp(da1, a1, sizeof(a1)));
+ assert(darr_cap(da1) - darr_len(da1) == 3);
+ darr_ensure_avail(da1, 2);
+ assert(darr_cap(da1) == 8);
+ darr_ensure_avail(da1, 3);
+ assert(darr_cap(da1) == 8);
+ darr_ensure_avail(da1, 4);
+ assert(darr_cap(da1) == 16);
+
+ darr_ensure_cap(da1, 16);
+ assert(darr_cap(da1) == 16);
+
+ darr_ensure_cap(da1, 20);
+ assert(darr_cap(da1) == 32);
+
darr_append_n(da1, 100);
assert(darr_len(da1) == 105);
@@ -272,8 +310,113 @@ static void test_struct(void)
darr_free(da2);
}
+static void test_string(void)
+{
+ const char *src = "ABCDE";
+ const char *add = "FGHIJ";
+ uint srclen = strlen(src);
+ uint addlen = strlen(add);
+ char *da1 = NULL;
+ char *da2 = NULL;
+
+ assert(darr_strlen(da1) == 0);
+
+ da1 = darr_strdup(src);
+ assert(darr_strlen(da1) == strlen(da1));
+ assert(darr_strlen(da1) == srclen);
+ assert(darr_len(da1) == srclen + 1);
+ assert(darr_ilen(da1) == (int)srclen + 1);
+ assert(darr_cap(da1) >= 8);
+ assert(darr_last(da1) == darr_strnul(da1));
+ assert(darr_strnul(da1) == da1 + darr_strlen(da1));
+
+ da2 = da1;
+ darr_in_strdup(da1, src);
+ assert(da1 == da2);
+ assert(darr_strlen(da1) == strlen(da1));
+ assert(darr_strlen(da1) == srclen);
+ assert(darr_len(da1) == srclen + 1);
+ darr_free(da1);
+ assert(da1 == NULL);
+
+ da1 = darr_strdup_cap(src, 128);
+ assert(darr_strlen(da1) == srclen);
+ assert(darr_cap(da1) >= 128);
+
+ darr_in_strdup_cap(da1, src, 256);
+ assert(darr_strlen(da1) == srclen);
+ assert(darr_cap(da1) >= 256);
+ darr_free(da1);
+
+ da1 = darr_strdup_cap(add, 2);
+ assert(darr_strlen(da1) == addlen);
+ assert(darr_cap(da1) >= 8);
+
+ darr_in_strdup(da1, "ab");
+ darr_in_strcat(da1, "/");
+ darr_in_strcat(da1, "foo");
+ assert(!strcmp("ab/foo", da1));
+ darr_free(da1);
+
+ da1 = darr_in_strcat(da1, "ab");
+ darr_in_strcat(da1, "/");
+ darr_in_strcat(da1, "foo");
+ assert(!strcmp("ab/foo", da1));
+
+ darr_set_strlen(da1, 5);
+ assert(!strcmp("ab/fo", da1));
+ darr_set_strlen(da1, 1);
+ assert(!strcmp("a", da1));
+
+ darr_in_strdup(da1, "ab");
+ da2 = darr_strdup(add);
+ darr_in_strcat_tail(da1, da2);
+ assert(!strcmp("abHIJ", da1));
+ assert(darr_strlen(da1) == 5);
+ assert(darr_len(da1) == 6);
+ darr_free(da1);
+ darr_free(da2);
+
+ da1 = darr_strdup("abcde");
+ da2 = darr_strdup(add);
+ darr_in_strcat_tail(da1, da2);
+ assert(!strcmp("abcde", da1));
+ assert(darr_strlen(da1) == 5);
+ assert(darr_len(da1) == 6);
+ darr_free(da1);
+ darr_free(da2);
+
+ da1 = darr_sprintf("0123456789: %08X", 0xDEADBEEF);
+ assert(!strcmp(da1, "0123456789: DEADBEEF"));
+ assert(darr_strlen(da1) == 20);
+ assert(darr_cap(da1) == 128);
+ da2 = da1;
+ darr_in_sprintf(da1, "9876543210: %08x", 0x0BADF00D);
+ assert(da1 == da2);
+ assert(!strcmp("9876543210: 0badf00d", da2));
+ darr_free(da1);
+ da2 = NULL;
+
+ da1 = NULL;
+ darr_in_sprintf(da1, "0123456789: %08X", 0xDEADBEEF);
+ assert(!strcmp(da1, "0123456789: DEADBEEF"));
+ assert(darr_strlen(da1) == 20);
+ assert(darr_cap(da1) == 128);
+ darr_free(da1);
+
+ da1 = darr_sprintf("0123456789: %08x", 0xDEADBEEF);
+ darr_in_strcatf(da1, " 9876543210: %08x", 0x0BADF00D);
+ assert(!strcmp("0123456789: deadbeef 9876543210: 0badf00d", da1));
+ darr_free(da1);
+
+ da1 = darr_in_strcatf(da1, "0123456789: %08x", 0xDEADBEEF);
+ assert(!strcmp("0123456789: deadbeef", da1));
+ darr_free(da1);
+}
+
int main(int argc, char **argv)
{
test_int();
test_struct();
+ test_string();
}
diff --git a/tests/lib/test_darr.py b/tests/lib/test_darr.py
new file mode 100644
index 0000000000..dea3bdf785
--- /dev/null
+++ b/tests/lib/test_darr.py
@@ -0,0 +1,8 @@
+import frrtest
+
+
+class TestDarr(frrtest.TestMultiOut):
+ program = "./test_darr"
+
+
+TestDarr.exit_cleanly()
diff --git a/tests/topotests/mgmt_fe_client/fe_client.py b/tests/topotests/mgmt_fe_client/fe_client.py
new file mode 100644
index 0000000000..04b4184e5b
--- /dev/null
+++ b/tests/topotests/mgmt_fe_client/fe_client.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# November 27 2023, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2023, LabN Consulting, L.L.C.
+#
+# noqa: E501
+#
+import argparse
+import errno
+import logging
+import os
+import socket
+import sys
+import time
+from pathlib import Path
+
+import mgmt_pb2
+
+MGMT_MSG_MARKER_PROTOBUF = b"\000###"
+MGMT_MSG_MARKER_NATIVE = b"\001###"
+
+
+def __parse_args():
+ MPATH = "/var/run/frr/mgmtd_fe.sock"
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--verbose", action="store_true", help="Be verbose")
+ parser.add_argument("--server", default=MPATH, help="path to server socket")
+ args = parser.parse_args()
+
+ level = logging.DEBUG if args.verbose else logging.INFO
+ logging.basicConfig(level=level, format="%(asctime)s %(levelname)s: %(message)s")
+
+ return args
+
+
+def __server_connect(spath):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ logging.debug("Connecting to server on %s", spath)
+ while ec := sock.connect_ex(str(spath)):
+ logging.warn("retry server connection in .5s (%s)", os.strerror(ec))
+ time.sleep(0.5)
+ logging.info("Connected to server on %s", spath)
+ return sock
+
+
+def mgmt_pb_recv_msg(sock, msg):
+ """Receive a mgmtd protobuf message from a stream socket."""
+ marker = sock.recv(4)
+ assert marker in (MGMT_MSG_MARKER_PROTOBUF, MGMT_MSG_MARKER_NATIVE)
+
+ msize = int.from_bytes(sock.recv(4), byteorder="big")
+ mdata = sock.recv(msize)
+
+ msg.ParseFromString(mdata)
+ return msg
+
+
+def mgmt_pb_send_msg(sock, msg):
+ """Send a mgmtd protobuf message from a stream socket."""
+ marker = MGMT_MSG_MARKER_PROTOBUF
+ mdata = msg.SerializeToString()
+ msize = int.to_bytes(len(mdata), byteorder="big", length=4)
+ sock.send(marker)
+ sock.send(msize)
+ sock.send(mdata)
+
+
+def create_session(sock):
+ req = mgmt_pb2.FeRegisterReq()
+ req.client_name = "test-client"
+ mgmt_pb_send_msg(sock, req)
+ logging.debug("Sent FeRegisterReq: %s", req)
+
+ req = mgmt_pb2.FeSessionReq()
+ req.create = 1
+ req.client_conn_id = 1
+ mgmt_pb_send_msg(sock, req)
+ logging.debug("Sent FeSessionReq: %s", req)
+
+ reply = mgmt_pb_recv_msg(sock, mgmt_pb2.FeSessionReply())
+ logging.debug("Received FeSessionReply: %s", reply)
+
+
+def __main():
+ args = __parse_args()
+ sock = __server_connect(Path(args.server))
+ create_session(sock)
+
+
+def main():
+ try:
+ __main()
+ except KeyboardInterrupt:
+ logging.info("Exiting")
+ except Exception as error:
+ logging.error("Unexpected error exiting: %s", error, exc_info=True)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tests/topotests/mgmt_fe_client/mgmt_pb2.py b/tests/topotests/mgmt_fe_client/mgmt_pb2.py
new file mode 100644
index 0000000000..0aa8803f7f
--- /dev/null
+++ b/tests/topotests/mgmt_fe_client/mgmt_pb2.py
@@ -0,0 +1,1990 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: mgmt.proto
+
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='mgmt.proto',
+ package='mgmtd',
+ syntax='proto2',
+ serialized_options=None,
+ create_key=_descriptor._internal_create_key,
+ serialized_pb=b'\n\nmgmt.proto\x12\x05mgmtd\"\x1e\n\rYangDataXPath\x12\r\n\x05xpath\x18\x01 \x02(\t\"3\n\rYangDataValue\x12\x19\n\x0f\x65ncoded_str_val\x18\x64 \x01(\tH\x00\x42\x07\n\x05value\">\n\x08YangData\x12\r\n\x05xpath\x18\x01 \x02(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.mgmtd.YangDataValue\"X\n\x0eYangCfgDataReq\x12\x1d\n\x04\x64\x61ta\x18\x01 \x02(\x0b\x32\x0f.mgmtd.YangData\x12\'\n\x08req_type\x18\x02 \x02(\x0e\x32\x15.mgmtd.CfgDataReqType\"B\n\x0eYangGetDataReq\x12\x1d\n\x04\x64\x61ta\x18\x01 \x02(\x0b\x32\x0f.mgmtd.YangData\x12\x11\n\tnext_indx\x18\x02 \x02(\x03\"R\n\x0e\x42\x65SubscribeReq\x12\x13\n\x0b\x63lient_name\x18\x01 \x02(\t\x12\x18\n\x10subscribe_xpaths\x18\x02 \x02(\x08\x12\x11\n\txpath_reg\x18\x03 \x03(\t\"#\n\x10\x42\x65SubscribeReply\x12\x0f\n\x07success\x18\x01 \x02(\x08\"*\n\x08\x42\x65TxnReq\x12\x0e\n\x06txn_id\x18\x01 \x02(\x04\x12\x0e\n\x06\x63reate\x18\x02 \x02(\x08\"=\n\nBeTxnReply\x12\x0e\n\x06txn_id\x18\x01 \x02(\x04\x12\x0e\n\x06\x63reate\x18\x02 \x02(\x08\x12\x0f\n\x07success\x18\x03 \x02(\x08\"b\n\x12\x42\x65\x43\x66gDataCreateReq\x12\x0e\n\x06txn_id\x18\x01 \x02(\x04\x12\'\n\x08\x64\x61ta_req\x18\x02 \x03(\x0b\x32\x15.mgmtd.YangCfgDataReq\x12\x13\n\x0b\x65nd_of_data\x18\x03 \x02(\x08\"M\n\x14\x42\x65\x43\x66gDataCreateReply\x12\x0e\n\x06txn_id\x18\x01 \x02(\x04\x12\x0f\n\x07success\x18\x02 \x02(\x08\x12\x14\n\x0c\x65rror_if_any\x18\x03 \x01(\t\"#\n\x11\x42\x65\x43\x66gDataApplyReq\x12\x0e\n\x06txn_id\x18\x01 \x02(\x04\"L\n\x13\x42\x65\x43\x66gDataApplyReply\x12\x0e\n\x06txn_id\x18\x01 \x02(\x04\x12\x0f\n\x07success\x18\x02 \x02(\x08\x12\x14\n\x0c\x65rror_if_any\x18\x03 \x01(\t\"A\n\rYangDataReply\x12\x1d\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32\x0f.mgmtd.YangData\x12\x11\n\tnext_indx\x18\x02 \x02(\x03\"\x94\x03\n\tBeMessage\x12+\n\nsubscr_req\x18\x02 \x01(\x0b\x32\x15.mgmtd.BeSubscribeReqH\x00\x12/\n\x0csubscr_reply\x18\x03 \x01(\x0b\x32\x17.mgmtd.BeSubscribeReplyH\x00\x12\"\n\x07txn_req\x18\x04 
\x01(\x0b\x32\x0f.mgmtd.BeTxnReqH\x00\x12&\n\ttxn_reply\x18\x05 \x01(\x0b\x32\x11.mgmtd.BeTxnReplyH\x00\x12\x31\n\x0c\x63\x66g_data_req\x18\x06 \x01(\x0b\x32\x19.mgmtd.BeCfgDataCreateReqH\x00\x12\x35\n\x0e\x63\x66g_data_reply\x18\x07 \x01(\x0b\x32\x1b.mgmtd.BeCfgDataCreateReplyH\x00\x12\x31\n\rcfg_apply_req\x18\x08 \x01(\x0b\x32\x18.mgmtd.BeCfgDataApplyReqH\x00\x12\x35\n\x0f\x63\x66g_apply_reply\x18\t \x01(\x0b\x32\x1a.mgmtd.BeCfgDataApplyReplyH\x00\x42\t\n\x07message\"$\n\rFeRegisterReq\x12\x13\n\x0b\x63lient_name\x18\x01 \x02(\t\"T\n\x0c\x46\x65SessionReq\x12\x0e\n\x06\x63reate\x18\x01 \x02(\x08\x12\x18\n\x0e\x63lient_conn_id\x18\x02 \x01(\x04H\x00\x12\x14\n\nsession_id\x18\x03 \x01(\x04H\x00\x42\x04\n\x02id\"]\n\x0e\x46\x65SessionReply\x12\x0e\n\x06\x63reate\x18\x01 \x02(\x08\x12\x0f\n\x07success\x18\x02 \x02(\x08\x12\x16\n\x0e\x63lient_conn_id\x18\x03 \x01(\x04\x12\x12\n\nsession_id\x18\x04 \x02(\x04\"b\n\x0b\x46\x65LockDsReq\x12\x12\n\nsession_id\x18\x01 \x02(\x04\x12\x0e\n\x06req_id\x18\x02 \x02(\x04\x12!\n\x05\x64s_id\x18\x03 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12\x0c\n\x04lock\x18\x04 \x02(\x08\"\x8b\x01\n\rFeLockDsReply\x12\x12\n\nsession_id\x18\x01 \x02(\x04\x12\x0e\n\x06req_id\x18\x02 \x02(\x04\x12!\n\x05\x64s_id\x18\x03 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12\x0c\n\x04lock\x18\x04 \x02(\x08\x12\x0f\n\x07success\x18\x05 \x02(\x08\x12\x14\n\x0c\x65rror_if_any\x18\x06 \x01(\t\"\xbf\x01\n\x0e\x46\x65SetConfigReq\x12\x12\n\nsession_id\x18\x01 \x02(\x04\x12!\n\x05\x64s_id\x18\x02 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12\x0e\n\x06req_id\x18\x03 \x02(\x04\x12#\n\x04\x64\x61ta\x18\x04 \x03(\x0b\x32\x15.mgmtd.YangCfgDataReq\x12\x17\n\x0fimplicit_commit\x18\x05 \x02(\x08\x12(\n\x0c\x63ommit_ds_id\x18\x06 \x02(\x0e\x32\x12.mgmtd.DatastoreId\"\x99\x01\n\x10\x46\x65SetConfigReply\x12\x12\n\nsession_id\x18\x01 \x02(\x04\x12!\n\x05\x64s_id\x18\x02 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12\x0e\n\x06req_id\x18\x03 \x02(\x04\x12\x0f\n\x07success\x18\x04 
\x02(\x08\x12\x17\n\x0fimplicit_commit\x18\x05 \x02(\x08\x12\x14\n\x0c\x65rror_if_any\x18\x06 \x01(\t\"\xab\x01\n\x11\x46\x65\x43ommitConfigReq\x12\x12\n\nsession_id\x18\x01 \x02(\x04\x12%\n\tsrc_ds_id\x18\x02 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12%\n\tdst_ds_id\x18\x03 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12\x0e\n\x06req_id\x18\x04 \x02(\x04\x12\x15\n\rvalidate_only\x18\x05 \x02(\x08\x12\r\n\x05\x61\x62ort\x18\x06 \x02(\x08\"\xd4\x01\n\x13\x46\x65\x43ommitConfigReply\x12\x12\n\nsession_id\x18\x01 \x02(\x04\x12%\n\tsrc_ds_id\x18\x02 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12%\n\tdst_ds_id\x18\x03 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12\x0e\n\x06req_id\x18\x04 \x02(\x04\x12\x15\n\rvalidate_only\x18\x05 \x02(\x08\x12\x0f\n\x07success\x18\x06 \x02(\x08\x12\r\n\x05\x61\x62ort\x18\x07 \x02(\x08\x12\x14\n\x0c\x65rror_if_any\x18\x08 \x01(\t\"\x86\x01\n\x08\x46\x65GetReq\x12\x12\n\nsession_id\x18\x01 \x02(\x04\x12\x0e\n\x06\x63onfig\x18\x02 \x02(\x08\x12!\n\x05\x64s_id\x18\x03 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12\x0e\n\x06req_id\x18\x04 \x02(\x04\x12#\n\x04\x64\x61ta\x18\x05 \x03(\x0b\x32\x15.mgmtd.YangGetDataReq\"\xae\x01\n\nFeGetReply\x12\x12\n\nsession_id\x18\x01 \x02(\x04\x12\x0e\n\x06\x63onfig\x18\x02 \x02(\x08\x12!\n\x05\x64s_id\x18\x03 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12\x0e\n\x06req_id\x18\x04 \x02(\x04\x12\x0f\n\x07success\x18\x05 \x02(\x08\x12\x14\n\x0c\x65rror_if_any\x18\x06 \x01(\t\x12\"\n\x04\x64\x61ta\x18\x07 \x01(\x0b\x32\x14.mgmtd.YangDataReply\"0\n\x0f\x46\x65NotifyDataReq\x12\x1d\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32\x0f.mgmtd.YangData\"\x9c\x01\n\x13\x46\x65RegisterNotifyReq\x12\x12\n\nsession_id\x18\x01 \x02(\x04\x12!\n\x05\x64s_id\x18\x02 \x02(\x0e\x32\x12.mgmtd.DatastoreId\x12\x14\n\x0cregister_req\x18\x03 \x02(\x08\x12\x0e\n\x06req_id\x18\x04 \x02(\x04\x12(\n\ndata_xpath\x18\x05 \x03(\x0b\x32\x14.mgmtd.YangDataXPath\"\xf0\x04\n\tFeMessage\x12,\n\x0cregister_req\x18\x02 \x01(\x0b\x32\x14.mgmtd.FeRegisterReqH\x00\x12*\n\x0bsession_req\x18\x03 
\x01(\x0b\x32\x13.mgmtd.FeSessionReqH\x00\x12.\n\rsession_reply\x18\x04 \x01(\x0b\x32\x15.mgmtd.FeSessionReplyH\x00\x12(\n\nlockds_req\x18\x05 \x01(\x0b\x32\x12.mgmtd.FeLockDsReqH\x00\x12,\n\x0clockds_reply\x18\x06 \x01(\x0b\x32\x14.mgmtd.FeLockDsReplyH\x00\x12+\n\nsetcfg_req\x18\x07 \x01(\x0b\x32\x15.mgmtd.FeSetConfigReqH\x00\x12/\n\x0csetcfg_reply\x18\x08 \x01(\x0b\x32\x17.mgmtd.FeSetConfigReplyH\x00\x12/\n\x0b\x63ommcfg_req\x18\t \x01(\x0b\x32\x18.mgmtd.FeCommitConfigReqH\x00\x12\x33\n\rcommcfg_reply\x18\n \x01(\x0b\x32\x1a.mgmtd.FeCommitConfigReplyH\x00\x12\"\n\x07get_req\x18\x0b \x01(\x0b\x32\x0f.mgmtd.FeGetReqH\x00\x12&\n\tget_reply\x18\x0c \x01(\x0b\x32\x11.mgmtd.FeGetReplyH\x00\x12\x31\n\x0fnotify_data_req\x18\x0f \x01(\x0b\x32\x16.mgmtd.FeNotifyDataReqH\x00\x12\x33\n\rregnotify_req\x18\x10 \x01(\x0b\x32\x1a.mgmtd.FeRegisterNotifyReqH\x00\x42\t\n\x07message*B\n\x0e\x43\x66gDataReqType\x12\x11\n\rREQ_TYPE_NONE\x10\x00\x12\x0c\n\x08SET_DATA\x10\x01\x12\x0f\n\x0b\x44\x45LETE_DATA\x10\x02*`\n\x0b\x44\x61tastoreId\x12\x0b\n\x07\x44S_NONE\x10\x00\x12\x0e\n\nRUNNING_DS\x10\x01\x12\x10\n\x0c\x43\x41NDIDATE_DS\x10\x02\x12\x12\n\x0eOPERATIONAL_DS\x10\x03\x12\x0e\n\nSTARTUP_DS\x10\x04'
+)
+
+_CFGDATAREQTYPE = _descriptor.EnumDescriptor(
+ name='CfgDataReqType',
+ full_name='mgmtd.CfgDataReqType',
+ filename=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='REQ_TYPE_NONE', index=0, number=0,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='SET_DATA', index=1, number=1,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='DELETE_DATA', index=2, number=2,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ ],
+ containing_type=None,
+ serialized_options=None,
+ serialized_start=3674,
+ serialized_end=3740,
+)
+_sym_db.RegisterEnumDescriptor(_CFGDATAREQTYPE)
+
+CfgDataReqType = enum_type_wrapper.EnumTypeWrapper(_CFGDATAREQTYPE)
+_DATASTOREID = _descriptor.EnumDescriptor(
+ name='DatastoreId',
+ full_name='mgmtd.DatastoreId',
+ filename=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='DS_NONE', index=0, number=0,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='RUNNING_DS', index=1, number=1,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='CANDIDATE_DS', index=2, number=2,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='OPERATIONAL_DS', index=3, number=3,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='STARTUP_DS', index=4, number=4,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ ],
+ containing_type=None,
+ serialized_options=None,
+ serialized_start=3742,
+ serialized_end=3838,
+)
+_sym_db.RegisterEnumDescriptor(_DATASTOREID)
+
+DatastoreId = enum_type_wrapper.EnumTypeWrapper(_DATASTOREID)
+REQ_TYPE_NONE = 0
+SET_DATA = 1
+DELETE_DATA = 2
+DS_NONE = 0
+RUNNING_DS = 1
+CANDIDATE_DS = 2
+OPERATIONAL_DS = 3
+STARTUP_DS = 4
+
+
+
+_YANGDATAXPATH = _descriptor.Descriptor(
+ name='YangDataXPath',
+ full_name='mgmtd.YangDataXPath',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='xpath', full_name='mgmtd.YangDataXPath.xpath', index=0,
+ number=1, type=9, cpp_type=9, label=2,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=21,
+ serialized_end=51,
+)
+
+
+_YANGDATAVALUE = _descriptor.Descriptor(
+ name='YangDataValue',
+ full_name='mgmtd.YangDataValue',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='encoded_str_val', full_name='mgmtd.YangDataValue.encoded_str_val', index=0,
+ number=100, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ _descriptor.OneofDescriptor(
+ name='value', full_name='mgmtd.YangDataValue.value',
+ index=0, containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[]),
+ ],
+ serialized_start=53,
+ serialized_end=104,
+)
+
+
+_YANGDATA = _descriptor.Descriptor(
+ name='YangData',
+ full_name='mgmtd.YangData',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='xpath', full_name='mgmtd.YangData.xpath', index=0,
+ number=1, type=9, cpp_type=9, label=2,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='mgmtd.YangData.value', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=106,
+ serialized_end=168,
+)
+
+
+_YANGCFGDATAREQ = _descriptor.Descriptor(
+ name='YangCfgDataReq',
+ full_name='mgmtd.YangCfgDataReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='data', full_name='mgmtd.YangCfgDataReq.data', index=0,
+ number=1, type=11, cpp_type=10, label=2,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='req_type', full_name='mgmtd.YangCfgDataReq.req_type', index=1,
+ number=2, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=170,
+ serialized_end=258,
+)
+
+
+_YANGGETDATAREQ = _descriptor.Descriptor(
+ name='YangGetDataReq',
+ full_name='mgmtd.YangGetDataReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='data', full_name='mgmtd.YangGetDataReq.data', index=0,
+ number=1, type=11, cpp_type=10, label=2,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='next_indx', full_name='mgmtd.YangGetDataReq.next_indx', index=1,
+ number=2, type=3, cpp_type=2, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=260,
+ serialized_end=326,
+)
+
+
+_BESUBSCRIBEREQ = _descriptor.Descriptor(
+ name='BeSubscribeReq',
+ full_name='mgmtd.BeSubscribeReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='client_name', full_name='mgmtd.BeSubscribeReq.client_name', index=0,
+ number=1, type=9, cpp_type=9, label=2,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='subscribe_xpaths', full_name='mgmtd.BeSubscribeReq.subscribe_xpaths', index=1,
+ number=2, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='xpath_reg', full_name='mgmtd.BeSubscribeReq.xpath_reg', index=2,
+ number=3, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=328,
+ serialized_end=410,
+)
+
+
+_BESUBSCRIBEREPLY = _descriptor.Descriptor(
+ name='BeSubscribeReply',
+ full_name='mgmtd.BeSubscribeReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='success', full_name='mgmtd.BeSubscribeReply.success', index=0,
+ number=1, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=412,
+ serialized_end=447,
+)
+
+
+_BETXNREQ = _descriptor.Descriptor(
+ name='BeTxnReq',
+ full_name='mgmtd.BeTxnReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='txn_id', full_name='mgmtd.BeTxnReq.txn_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='create', full_name='mgmtd.BeTxnReq.create', index=1,
+ number=2, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=449,
+ serialized_end=491,
+)
+
+
+_BETXNREPLY = _descriptor.Descriptor(
+ name='BeTxnReply',
+ full_name='mgmtd.BeTxnReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='txn_id', full_name='mgmtd.BeTxnReply.txn_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='create', full_name='mgmtd.BeTxnReply.create', index=1,
+ number=2, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='success', full_name='mgmtd.BeTxnReply.success', index=2,
+ number=3, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=493,
+ serialized_end=554,
+)
+
+
+_BECFGDATACREATEREQ = _descriptor.Descriptor(
+ name='BeCfgDataCreateReq',
+ full_name='mgmtd.BeCfgDataCreateReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='txn_id', full_name='mgmtd.BeCfgDataCreateReq.txn_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='data_req', full_name='mgmtd.BeCfgDataCreateReq.data_req', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='end_of_data', full_name='mgmtd.BeCfgDataCreateReq.end_of_data', index=2,
+ number=3, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=556,
+ serialized_end=654,
+)
+
+
+_BECFGDATACREATEREPLY = _descriptor.Descriptor(
+ name='BeCfgDataCreateReply',
+ full_name='mgmtd.BeCfgDataCreateReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='txn_id', full_name='mgmtd.BeCfgDataCreateReply.txn_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='success', full_name='mgmtd.BeCfgDataCreateReply.success', index=1,
+ number=2, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='error_if_any', full_name='mgmtd.BeCfgDataCreateReply.error_if_any', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=656,
+ serialized_end=733,
+)
+
+
+_BECFGDATAAPPLYREQ = _descriptor.Descriptor(
+ name='BeCfgDataApplyReq',
+ full_name='mgmtd.BeCfgDataApplyReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='txn_id', full_name='mgmtd.BeCfgDataApplyReq.txn_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=735,
+ serialized_end=770,
+)
+
+
+_BECFGDATAAPPLYREPLY = _descriptor.Descriptor(
+ name='BeCfgDataApplyReply',
+ full_name='mgmtd.BeCfgDataApplyReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='txn_id', full_name='mgmtd.BeCfgDataApplyReply.txn_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='success', full_name='mgmtd.BeCfgDataApplyReply.success', index=1,
+ number=2, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='error_if_any', full_name='mgmtd.BeCfgDataApplyReply.error_if_any', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=772,
+ serialized_end=848,
+)
+
+
+_YANGDATAREPLY = _descriptor.Descriptor(
+ name='YangDataReply',
+ full_name='mgmtd.YangDataReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='data', full_name='mgmtd.YangDataReply.data', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='next_indx', full_name='mgmtd.YangDataReply.next_indx', index=1,
+ number=2, type=3, cpp_type=2, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=850,
+ serialized_end=915,
+)
+
+
+_BEMESSAGE = _descriptor.Descriptor(
+ name='BeMessage',
+ full_name='mgmtd.BeMessage',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='subscr_req', full_name='mgmtd.BeMessage.subscr_req', index=0,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='subscr_reply', full_name='mgmtd.BeMessage.subscr_reply', index=1,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='txn_req', full_name='mgmtd.BeMessage.txn_req', index=2,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='txn_reply', full_name='mgmtd.BeMessage.txn_reply', index=3,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='cfg_data_req', full_name='mgmtd.BeMessage.cfg_data_req', index=4,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='cfg_data_reply', full_name='mgmtd.BeMessage.cfg_data_reply', index=5,
+ number=7, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='cfg_apply_req', full_name='mgmtd.BeMessage.cfg_apply_req', index=6,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='cfg_apply_reply', full_name='mgmtd.BeMessage.cfg_apply_reply', index=7,
+ number=9, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ _descriptor.OneofDescriptor(
+ name='message', full_name='mgmtd.BeMessage.message',
+ index=0, containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[]),
+ ],
+ serialized_start=918,
+ serialized_end=1322,
+)
+
+
+_FEREGISTERREQ = _descriptor.Descriptor(
+ name='FeRegisterReq',
+ full_name='mgmtd.FeRegisterReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='client_name', full_name='mgmtd.FeRegisterReq.client_name', index=0,
+ number=1, type=9, cpp_type=9, label=2,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1324,
+ serialized_end=1360,
+)
+
+
+_FESESSIONREQ = _descriptor.Descriptor(
+ name='FeSessionReq',
+ full_name='mgmtd.FeSessionReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='create', full_name='mgmtd.FeSessionReq.create', index=0,
+ number=1, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='client_conn_id', full_name='mgmtd.FeSessionReq.client_conn_id', index=1,
+ number=2, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeSessionReq.session_id', index=2,
+ number=3, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ _descriptor.OneofDescriptor(
+ name='id', full_name='mgmtd.FeSessionReq.id',
+ index=0, containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[]),
+ ],
+ serialized_start=1362,
+ serialized_end=1446,
+)
+
+
+_FESESSIONREPLY = _descriptor.Descriptor(
+ name='FeSessionReply',
+ full_name='mgmtd.FeSessionReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='create', full_name='mgmtd.FeSessionReply.create', index=0,
+ number=1, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='success', full_name='mgmtd.FeSessionReply.success', index=1,
+ number=2, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='client_conn_id', full_name='mgmtd.FeSessionReply.client_conn_id', index=2,
+ number=3, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeSessionReply.session_id', index=3,
+ number=4, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1448,
+ serialized_end=1541,
+)
+
+
+_FELOCKDSREQ = _descriptor.Descriptor(
+ name='FeLockDsReq',
+ full_name='mgmtd.FeLockDsReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeLockDsReq.session_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='req_id', full_name='mgmtd.FeLockDsReq.req_id', index=1,
+ number=2, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='ds_id', full_name='mgmtd.FeLockDsReq.ds_id', index=2,
+ number=3, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='lock', full_name='mgmtd.FeLockDsReq.lock', index=3,
+ number=4, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1543,
+ serialized_end=1641,
+)
+
+
+_FELOCKDSREPLY = _descriptor.Descriptor(
+ name='FeLockDsReply',
+ full_name='mgmtd.FeLockDsReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeLockDsReply.session_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='req_id', full_name='mgmtd.FeLockDsReply.req_id', index=1,
+ number=2, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='ds_id', full_name='mgmtd.FeLockDsReply.ds_id', index=2,
+ number=3, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='lock', full_name='mgmtd.FeLockDsReply.lock', index=3,
+ number=4, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='success', full_name='mgmtd.FeLockDsReply.success', index=4,
+ number=5, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='error_if_any', full_name='mgmtd.FeLockDsReply.error_if_any', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1644,
+ serialized_end=1783,
+)
+
+
+_FESETCONFIGREQ = _descriptor.Descriptor(
+ name='FeSetConfigReq',
+ full_name='mgmtd.FeSetConfigReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeSetConfigReq.session_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='ds_id', full_name='mgmtd.FeSetConfigReq.ds_id', index=1,
+ number=2, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='req_id', full_name='mgmtd.FeSetConfigReq.req_id', index=2,
+ number=3, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='data', full_name='mgmtd.FeSetConfigReq.data', index=3,
+ number=4, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='implicit_commit', full_name='mgmtd.FeSetConfigReq.implicit_commit', index=4,
+ number=5, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='commit_ds_id', full_name='mgmtd.FeSetConfigReq.commit_ds_id', index=5,
+ number=6, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1786,
+ serialized_end=1977,
+)
+
+
+_FESETCONFIGREPLY = _descriptor.Descriptor(
+ name='FeSetConfigReply',
+ full_name='mgmtd.FeSetConfigReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeSetConfigReply.session_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='ds_id', full_name='mgmtd.FeSetConfigReply.ds_id', index=1,
+ number=2, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='req_id', full_name='mgmtd.FeSetConfigReply.req_id', index=2,
+ number=3, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='success', full_name='mgmtd.FeSetConfigReply.success', index=3,
+ number=4, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='implicit_commit', full_name='mgmtd.FeSetConfigReply.implicit_commit', index=4,
+ number=5, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='error_if_any', full_name='mgmtd.FeSetConfigReply.error_if_any', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1980,
+ serialized_end=2133,
+)
+
+
+_FECOMMITCONFIGREQ = _descriptor.Descriptor(
+ name='FeCommitConfigReq',
+ full_name='mgmtd.FeCommitConfigReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeCommitConfigReq.session_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='src_ds_id', full_name='mgmtd.FeCommitConfigReq.src_ds_id', index=1,
+ number=2, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='dst_ds_id', full_name='mgmtd.FeCommitConfigReq.dst_ds_id', index=2,
+ number=3, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='req_id', full_name='mgmtd.FeCommitConfigReq.req_id', index=3,
+ number=4, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='validate_only', full_name='mgmtd.FeCommitConfigReq.validate_only', index=4,
+ number=5, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='abort', full_name='mgmtd.FeCommitConfigReq.abort', index=5,
+ number=6, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2136,
+ serialized_end=2307,
+)
+
+
+_FECOMMITCONFIGREPLY = _descriptor.Descriptor(
+ name='FeCommitConfigReply',
+ full_name='mgmtd.FeCommitConfigReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeCommitConfigReply.session_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='src_ds_id', full_name='mgmtd.FeCommitConfigReply.src_ds_id', index=1,
+ number=2, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='dst_ds_id', full_name='mgmtd.FeCommitConfigReply.dst_ds_id', index=2,
+ number=3, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='req_id', full_name='mgmtd.FeCommitConfigReply.req_id', index=3,
+ number=4, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='validate_only', full_name='mgmtd.FeCommitConfigReply.validate_only', index=4,
+ number=5, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='success', full_name='mgmtd.FeCommitConfigReply.success', index=5,
+ number=6, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='abort', full_name='mgmtd.FeCommitConfigReply.abort', index=6,
+ number=7, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='error_if_any', full_name='mgmtd.FeCommitConfigReply.error_if_any', index=7,
+ number=8, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2310,
+ serialized_end=2522,
+)
+
+
+_FEGETREQ = _descriptor.Descriptor(
+ name='FeGetReq',
+ full_name='mgmtd.FeGetReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeGetReq.session_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='config', full_name='mgmtd.FeGetReq.config', index=1,
+ number=2, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='ds_id', full_name='mgmtd.FeGetReq.ds_id', index=2,
+ number=3, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='req_id', full_name='mgmtd.FeGetReq.req_id', index=3,
+ number=4, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='data', full_name='mgmtd.FeGetReq.data', index=4,
+ number=5, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2525,
+ serialized_end=2659,
+)
+
+
+_FEGETREPLY = _descriptor.Descriptor(
+ name='FeGetReply',
+ full_name='mgmtd.FeGetReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeGetReply.session_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='config', full_name='mgmtd.FeGetReply.config', index=1,
+ number=2, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='ds_id', full_name='mgmtd.FeGetReply.ds_id', index=2,
+ number=3, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='req_id', full_name='mgmtd.FeGetReply.req_id', index=3,
+ number=4, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='success', full_name='mgmtd.FeGetReply.success', index=4,
+ number=5, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='error_if_any', full_name='mgmtd.FeGetReply.error_if_any', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='data', full_name='mgmtd.FeGetReply.data', index=6,
+ number=7, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2662,
+ serialized_end=2836,
+)
+
+
+_FENOTIFYDATAREQ = _descriptor.Descriptor(
+ name='FeNotifyDataReq',
+ full_name='mgmtd.FeNotifyDataReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='data', full_name='mgmtd.FeNotifyDataReq.data', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2838,
+ serialized_end=2886,
+)
+
+
+_FEREGISTERNOTIFYREQ = _descriptor.Descriptor(
+ name='FeRegisterNotifyReq',
+ full_name='mgmtd.FeRegisterNotifyReq',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='session_id', full_name='mgmtd.FeRegisterNotifyReq.session_id', index=0,
+ number=1, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='ds_id', full_name='mgmtd.FeRegisterNotifyReq.ds_id', index=1,
+ number=2, type=14, cpp_type=8, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='register_req', full_name='mgmtd.FeRegisterNotifyReq.register_req', index=2,
+ number=3, type=8, cpp_type=7, label=2,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='req_id', full_name='mgmtd.FeRegisterNotifyReq.req_id', index=3,
+ number=4, type=4, cpp_type=4, label=2,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='data_xpath', full_name='mgmtd.FeRegisterNotifyReq.data_xpath', index=4,
+ number=5, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2889,
+ serialized_end=3045,
+)
+
+
+_FEMESSAGE = _descriptor.Descriptor(
+ name='FeMessage',
+ full_name='mgmtd.FeMessage',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='register_req', full_name='mgmtd.FeMessage.register_req', index=0,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='session_req', full_name='mgmtd.FeMessage.session_req', index=1,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='session_reply', full_name='mgmtd.FeMessage.session_reply', index=2,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='lockds_req', full_name='mgmtd.FeMessage.lockds_req', index=3,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='lockds_reply', full_name='mgmtd.FeMessage.lockds_reply', index=4,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='setcfg_req', full_name='mgmtd.FeMessage.setcfg_req', index=5,
+ number=7, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='setcfg_reply', full_name='mgmtd.FeMessage.setcfg_reply', index=6,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='commcfg_req', full_name='mgmtd.FeMessage.commcfg_req', index=7,
+ number=9, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='commcfg_reply', full_name='mgmtd.FeMessage.commcfg_reply', index=8,
+ number=10, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='get_req', full_name='mgmtd.FeMessage.get_req', index=9,
+ number=11, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='get_reply', full_name='mgmtd.FeMessage.get_reply', index=10,
+ number=12, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='notify_data_req', full_name='mgmtd.FeMessage.notify_data_req', index=11,
+ number=15, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='regnotify_req', full_name='mgmtd.FeMessage.regnotify_req', index=12,
+ number=16, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto2',
+ extension_ranges=[],
+ oneofs=[
+ _descriptor.OneofDescriptor(
+ name='message', full_name='mgmtd.FeMessage.message',
+ index=0, containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[]),
+ ],
+ serialized_start=3048,
+ serialized_end=3672,
+)
+
+_YANGDATAVALUE.oneofs_by_name['value'].fields.append(
+ _YANGDATAVALUE.fields_by_name['encoded_str_val'])
+_YANGDATAVALUE.fields_by_name['encoded_str_val'].containing_oneof = _YANGDATAVALUE.oneofs_by_name['value']
+_YANGDATA.fields_by_name['value'].message_type = _YANGDATAVALUE
+_YANGCFGDATAREQ.fields_by_name['data'].message_type = _YANGDATA
+_YANGCFGDATAREQ.fields_by_name['req_type'].enum_type = _CFGDATAREQTYPE
+_YANGGETDATAREQ.fields_by_name['data'].message_type = _YANGDATA
+_BECFGDATACREATEREQ.fields_by_name['data_req'].message_type = _YANGCFGDATAREQ
+_YANGDATAREPLY.fields_by_name['data'].message_type = _YANGDATA
+_BEMESSAGE.fields_by_name['subscr_req'].message_type = _BESUBSCRIBEREQ
+_BEMESSAGE.fields_by_name['subscr_reply'].message_type = _BESUBSCRIBEREPLY
+_BEMESSAGE.fields_by_name['txn_req'].message_type = _BETXNREQ
+_BEMESSAGE.fields_by_name['txn_reply'].message_type = _BETXNREPLY
+_BEMESSAGE.fields_by_name['cfg_data_req'].message_type = _BECFGDATACREATEREQ
+_BEMESSAGE.fields_by_name['cfg_data_reply'].message_type = _BECFGDATACREATEREPLY
+_BEMESSAGE.fields_by_name['cfg_apply_req'].message_type = _BECFGDATAAPPLYREQ
+_BEMESSAGE.fields_by_name['cfg_apply_reply'].message_type = _BECFGDATAAPPLYREPLY
+_BEMESSAGE.oneofs_by_name['message'].fields.append(
+ _BEMESSAGE.fields_by_name['subscr_req'])
+_BEMESSAGE.fields_by_name['subscr_req'].containing_oneof = _BEMESSAGE.oneofs_by_name['message']
+_BEMESSAGE.oneofs_by_name['message'].fields.append(
+ _BEMESSAGE.fields_by_name['subscr_reply'])
+_BEMESSAGE.fields_by_name['subscr_reply'].containing_oneof = _BEMESSAGE.oneofs_by_name['message']
+_BEMESSAGE.oneofs_by_name['message'].fields.append(
+ _BEMESSAGE.fields_by_name['txn_req'])
+_BEMESSAGE.fields_by_name['txn_req'].containing_oneof = _BEMESSAGE.oneofs_by_name['message']
+_BEMESSAGE.oneofs_by_name['message'].fields.append(
+ _BEMESSAGE.fields_by_name['txn_reply'])
+_BEMESSAGE.fields_by_name['txn_reply'].containing_oneof = _BEMESSAGE.oneofs_by_name['message']
+_BEMESSAGE.oneofs_by_name['message'].fields.append(
+ _BEMESSAGE.fields_by_name['cfg_data_req'])
+_BEMESSAGE.fields_by_name['cfg_data_req'].containing_oneof = _BEMESSAGE.oneofs_by_name['message']
+_BEMESSAGE.oneofs_by_name['message'].fields.append(
+ _BEMESSAGE.fields_by_name['cfg_data_reply'])
+_BEMESSAGE.fields_by_name['cfg_data_reply'].containing_oneof = _BEMESSAGE.oneofs_by_name['message']
+_BEMESSAGE.oneofs_by_name['message'].fields.append(
+ _BEMESSAGE.fields_by_name['cfg_apply_req'])
+_BEMESSAGE.fields_by_name['cfg_apply_req'].containing_oneof = _BEMESSAGE.oneofs_by_name['message']
+_BEMESSAGE.oneofs_by_name['message'].fields.append(
+ _BEMESSAGE.fields_by_name['cfg_apply_reply'])
+_BEMESSAGE.fields_by_name['cfg_apply_reply'].containing_oneof = _BEMESSAGE.oneofs_by_name['message']
+_FESESSIONREQ.oneofs_by_name['id'].fields.append(
+ _FESESSIONREQ.fields_by_name['client_conn_id'])
+_FESESSIONREQ.fields_by_name['client_conn_id'].containing_oneof = _FESESSIONREQ.oneofs_by_name['id']
+_FESESSIONREQ.oneofs_by_name['id'].fields.append(
+ _FESESSIONREQ.fields_by_name['session_id'])
+_FESESSIONREQ.fields_by_name['session_id'].containing_oneof = _FESESSIONREQ.oneofs_by_name['id']
+_FELOCKDSREQ.fields_by_name['ds_id'].enum_type = _DATASTOREID
+_FELOCKDSREPLY.fields_by_name['ds_id'].enum_type = _DATASTOREID
+_FESETCONFIGREQ.fields_by_name['ds_id'].enum_type = _DATASTOREID
+_FESETCONFIGREQ.fields_by_name['data'].message_type = _YANGCFGDATAREQ
+_FESETCONFIGREQ.fields_by_name['commit_ds_id'].enum_type = _DATASTOREID
+_FESETCONFIGREPLY.fields_by_name['ds_id'].enum_type = _DATASTOREID
+_FECOMMITCONFIGREQ.fields_by_name['src_ds_id'].enum_type = _DATASTOREID
+_FECOMMITCONFIGREQ.fields_by_name['dst_ds_id'].enum_type = _DATASTOREID
+_FECOMMITCONFIGREPLY.fields_by_name['src_ds_id'].enum_type = _DATASTOREID
+_FECOMMITCONFIGREPLY.fields_by_name['dst_ds_id'].enum_type = _DATASTOREID
+_FEGETREQ.fields_by_name['ds_id'].enum_type = _DATASTOREID
+_FEGETREQ.fields_by_name['data'].message_type = _YANGGETDATAREQ
+_FEGETREPLY.fields_by_name['ds_id'].enum_type = _DATASTOREID
+_FEGETREPLY.fields_by_name['data'].message_type = _YANGDATAREPLY
+_FENOTIFYDATAREQ.fields_by_name['data'].message_type = _YANGDATA
+_FEREGISTERNOTIFYREQ.fields_by_name['ds_id'].enum_type = _DATASTOREID
+_FEREGISTERNOTIFYREQ.fields_by_name['data_xpath'].message_type = _YANGDATAXPATH
+_FEMESSAGE.fields_by_name['register_req'].message_type = _FEREGISTERREQ
+_FEMESSAGE.fields_by_name['session_req'].message_type = _FESESSIONREQ
+_FEMESSAGE.fields_by_name['session_reply'].message_type = _FESESSIONREPLY
+_FEMESSAGE.fields_by_name['lockds_req'].message_type = _FELOCKDSREQ
+_FEMESSAGE.fields_by_name['lockds_reply'].message_type = _FELOCKDSREPLY
+_FEMESSAGE.fields_by_name['setcfg_req'].message_type = _FESETCONFIGREQ
+_FEMESSAGE.fields_by_name['setcfg_reply'].message_type = _FESETCONFIGREPLY
+_FEMESSAGE.fields_by_name['commcfg_req'].message_type = _FECOMMITCONFIGREQ
+_FEMESSAGE.fields_by_name['commcfg_reply'].message_type = _FECOMMITCONFIGREPLY
+_FEMESSAGE.fields_by_name['get_req'].message_type = _FEGETREQ
+_FEMESSAGE.fields_by_name['get_reply'].message_type = _FEGETREPLY
+_FEMESSAGE.fields_by_name['notify_data_req'].message_type = _FENOTIFYDATAREQ
+_FEMESSAGE.fields_by_name['regnotify_req'].message_type = _FEREGISTERNOTIFYREQ
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['register_req'])
+_FEMESSAGE.fields_by_name['register_req'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['session_req'])
+_FEMESSAGE.fields_by_name['session_req'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['session_reply'])
+_FEMESSAGE.fields_by_name['session_reply'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['lockds_req'])
+_FEMESSAGE.fields_by_name['lockds_req'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['lockds_reply'])
+_FEMESSAGE.fields_by_name['lockds_reply'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['setcfg_req'])
+_FEMESSAGE.fields_by_name['setcfg_req'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['setcfg_reply'])
+_FEMESSAGE.fields_by_name['setcfg_reply'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['commcfg_req'])
+_FEMESSAGE.fields_by_name['commcfg_req'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['commcfg_reply'])
+_FEMESSAGE.fields_by_name['commcfg_reply'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['get_req'])
+_FEMESSAGE.fields_by_name['get_req'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['get_reply'])
+_FEMESSAGE.fields_by_name['get_reply'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['notify_data_req'])
+_FEMESSAGE.fields_by_name['notify_data_req'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+_FEMESSAGE.oneofs_by_name['message'].fields.append(
+ _FEMESSAGE.fields_by_name['regnotify_req'])
+_FEMESSAGE.fields_by_name['regnotify_req'].containing_oneof = _FEMESSAGE.oneofs_by_name['message']
+DESCRIPTOR.message_types_by_name['YangDataXPath'] = _YANGDATAXPATH
+DESCRIPTOR.message_types_by_name['YangDataValue'] = _YANGDATAVALUE
+DESCRIPTOR.message_types_by_name['YangData'] = _YANGDATA
+DESCRIPTOR.message_types_by_name['YangCfgDataReq'] = _YANGCFGDATAREQ
+DESCRIPTOR.message_types_by_name['YangGetDataReq'] = _YANGGETDATAREQ
+DESCRIPTOR.message_types_by_name['BeSubscribeReq'] = _BESUBSCRIBEREQ
+DESCRIPTOR.message_types_by_name['BeSubscribeReply'] = _BESUBSCRIBEREPLY
+DESCRIPTOR.message_types_by_name['BeTxnReq'] = _BETXNREQ
+DESCRIPTOR.message_types_by_name['BeTxnReply'] = _BETXNREPLY
+DESCRIPTOR.message_types_by_name['BeCfgDataCreateReq'] = _BECFGDATACREATEREQ
+DESCRIPTOR.message_types_by_name['BeCfgDataCreateReply'] = _BECFGDATACREATEREPLY
+DESCRIPTOR.message_types_by_name['BeCfgDataApplyReq'] = _BECFGDATAAPPLYREQ
+DESCRIPTOR.message_types_by_name['BeCfgDataApplyReply'] = _BECFGDATAAPPLYREPLY
+DESCRIPTOR.message_types_by_name['YangDataReply'] = _YANGDATAREPLY
+DESCRIPTOR.message_types_by_name['BeMessage'] = _BEMESSAGE
+DESCRIPTOR.message_types_by_name['FeRegisterReq'] = _FEREGISTERREQ
+DESCRIPTOR.message_types_by_name['FeSessionReq'] = _FESESSIONREQ
+DESCRIPTOR.message_types_by_name['FeSessionReply'] = _FESESSIONREPLY
+DESCRIPTOR.message_types_by_name['FeLockDsReq'] = _FELOCKDSREQ
+DESCRIPTOR.message_types_by_name['FeLockDsReply'] = _FELOCKDSREPLY
+DESCRIPTOR.message_types_by_name['FeSetConfigReq'] = _FESETCONFIGREQ
+DESCRIPTOR.message_types_by_name['FeSetConfigReply'] = _FESETCONFIGREPLY
+DESCRIPTOR.message_types_by_name['FeCommitConfigReq'] = _FECOMMITCONFIGREQ
+DESCRIPTOR.message_types_by_name['FeCommitConfigReply'] = _FECOMMITCONFIGREPLY
+DESCRIPTOR.message_types_by_name['FeGetReq'] = _FEGETREQ
+DESCRIPTOR.message_types_by_name['FeGetReply'] = _FEGETREPLY
+DESCRIPTOR.message_types_by_name['FeNotifyDataReq'] = _FENOTIFYDATAREQ
+DESCRIPTOR.message_types_by_name['FeRegisterNotifyReq'] = _FEREGISTERNOTIFYREQ
+DESCRIPTOR.message_types_by_name['FeMessage'] = _FEMESSAGE
+DESCRIPTOR.enum_types_by_name['CfgDataReqType'] = _CFGDATAREQTYPE
+DESCRIPTOR.enum_types_by_name['DatastoreId'] = _DATASTOREID
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+YangDataXPath = _reflection.GeneratedProtocolMessageType('YangDataXPath', (_message.Message,), {
+ 'DESCRIPTOR' : _YANGDATAXPATH,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.YangDataXPath)
+ })
+_sym_db.RegisterMessage(YangDataXPath)
+
+YangDataValue = _reflection.GeneratedProtocolMessageType('YangDataValue', (_message.Message,), {
+ 'DESCRIPTOR' : _YANGDATAVALUE,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.YangDataValue)
+ })
+_sym_db.RegisterMessage(YangDataValue)
+
+YangData = _reflection.GeneratedProtocolMessageType('YangData', (_message.Message,), {
+ 'DESCRIPTOR' : _YANGDATA,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.YangData)
+ })
+_sym_db.RegisterMessage(YangData)
+
+YangCfgDataReq = _reflection.GeneratedProtocolMessageType('YangCfgDataReq', (_message.Message,), {
+ 'DESCRIPTOR' : _YANGCFGDATAREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.YangCfgDataReq)
+ })
+_sym_db.RegisterMessage(YangCfgDataReq)
+
+YangGetDataReq = _reflection.GeneratedProtocolMessageType('YangGetDataReq', (_message.Message,), {
+ 'DESCRIPTOR' : _YANGGETDATAREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.YangGetDataReq)
+ })
+_sym_db.RegisterMessage(YangGetDataReq)
+
+BeSubscribeReq = _reflection.GeneratedProtocolMessageType('BeSubscribeReq', (_message.Message,), {
+ 'DESCRIPTOR' : _BESUBSCRIBEREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.BeSubscribeReq)
+ })
+_sym_db.RegisterMessage(BeSubscribeReq)
+
+BeSubscribeReply = _reflection.GeneratedProtocolMessageType('BeSubscribeReply', (_message.Message,), {
+ 'DESCRIPTOR' : _BESUBSCRIBEREPLY,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.BeSubscribeReply)
+ })
+_sym_db.RegisterMessage(BeSubscribeReply)
+
+BeTxnReq = _reflection.GeneratedProtocolMessageType('BeTxnReq', (_message.Message,), {
+ 'DESCRIPTOR' : _BETXNREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.BeTxnReq)
+ })
+_sym_db.RegisterMessage(BeTxnReq)
+
+BeTxnReply = _reflection.GeneratedProtocolMessageType('BeTxnReply', (_message.Message,), {
+ 'DESCRIPTOR' : _BETXNREPLY,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.BeTxnReply)
+ })
+_sym_db.RegisterMessage(BeTxnReply)
+
+BeCfgDataCreateReq = _reflection.GeneratedProtocolMessageType('BeCfgDataCreateReq', (_message.Message,), {
+ 'DESCRIPTOR' : _BECFGDATACREATEREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.BeCfgDataCreateReq)
+ })
+_sym_db.RegisterMessage(BeCfgDataCreateReq)
+
+BeCfgDataCreateReply = _reflection.GeneratedProtocolMessageType('BeCfgDataCreateReply', (_message.Message,), {
+ 'DESCRIPTOR' : _BECFGDATACREATEREPLY,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.BeCfgDataCreateReply)
+ })
+_sym_db.RegisterMessage(BeCfgDataCreateReply)
+
+BeCfgDataApplyReq = _reflection.GeneratedProtocolMessageType('BeCfgDataApplyReq', (_message.Message,), {
+ 'DESCRIPTOR' : _BECFGDATAAPPLYREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.BeCfgDataApplyReq)
+ })
+_sym_db.RegisterMessage(BeCfgDataApplyReq)
+
+BeCfgDataApplyReply = _reflection.GeneratedProtocolMessageType('BeCfgDataApplyReply', (_message.Message,), {
+ 'DESCRIPTOR' : _BECFGDATAAPPLYREPLY,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.BeCfgDataApplyReply)
+ })
+_sym_db.RegisterMessage(BeCfgDataApplyReply)
+
+YangDataReply = _reflection.GeneratedProtocolMessageType('YangDataReply', (_message.Message,), {
+ 'DESCRIPTOR' : _YANGDATAREPLY,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.YangDataReply)
+ })
+_sym_db.RegisterMessage(YangDataReply)
+
+BeMessage = _reflection.GeneratedProtocolMessageType('BeMessage', (_message.Message,), {
+ 'DESCRIPTOR' : _BEMESSAGE,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.BeMessage)
+ })
+_sym_db.RegisterMessage(BeMessage)
+
+FeRegisterReq = _reflection.GeneratedProtocolMessageType('FeRegisterReq', (_message.Message,), {
+ 'DESCRIPTOR' : _FEREGISTERREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeRegisterReq)
+ })
+_sym_db.RegisterMessage(FeRegisterReq)
+
+FeSessionReq = _reflection.GeneratedProtocolMessageType('FeSessionReq', (_message.Message,), {
+ 'DESCRIPTOR' : _FESESSIONREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeSessionReq)
+ })
+_sym_db.RegisterMessage(FeSessionReq)
+
+FeSessionReply = _reflection.GeneratedProtocolMessageType('FeSessionReply', (_message.Message,), {
+ 'DESCRIPTOR' : _FESESSIONREPLY,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeSessionReply)
+ })
+_sym_db.RegisterMessage(FeSessionReply)
+
+FeLockDsReq = _reflection.GeneratedProtocolMessageType('FeLockDsReq', (_message.Message,), {
+ 'DESCRIPTOR' : _FELOCKDSREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeLockDsReq)
+ })
+_sym_db.RegisterMessage(FeLockDsReq)
+
+FeLockDsReply = _reflection.GeneratedProtocolMessageType('FeLockDsReply', (_message.Message,), {
+ 'DESCRIPTOR' : _FELOCKDSREPLY,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeLockDsReply)
+ })
+_sym_db.RegisterMessage(FeLockDsReply)
+
+FeSetConfigReq = _reflection.GeneratedProtocolMessageType('FeSetConfigReq', (_message.Message,), {
+ 'DESCRIPTOR' : _FESETCONFIGREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeSetConfigReq)
+ })
+_sym_db.RegisterMessage(FeSetConfigReq)
+
+FeSetConfigReply = _reflection.GeneratedProtocolMessageType('FeSetConfigReply', (_message.Message,), {
+ 'DESCRIPTOR' : _FESETCONFIGREPLY,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeSetConfigReply)
+ })
+_sym_db.RegisterMessage(FeSetConfigReply)
+
+FeCommitConfigReq = _reflection.GeneratedProtocolMessageType('FeCommitConfigReq', (_message.Message,), {
+ 'DESCRIPTOR' : _FECOMMITCONFIGREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeCommitConfigReq)
+ })
+_sym_db.RegisterMessage(FeCommitConfigReq)
+
+FeCommitConfigReply = _reflection.GeneratedProtocolMessageType('FeCommitConfigReply', (_message.Message,), {
+ 'DESCRIPTOR' : _FECOMMITCONFIGREPLY,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeCommitConfigReply)
+ })
+_sym_db.RegisterMessage(FeCommitConfigReply)
+
+FeGetReq = _reflection.GeneratedProtocolMessageType('FeGetReq', (_message.Message,), {
+ 'DESCRIPTOR' : _FEGETREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeGetReq)
+ })
+_sym_db.RegisterMessage(FeGetReq)
+
+FeGetReply = _reflection.GeneratedProtocolMessageType('FeGetReply', (_message.Message,), {
+ 'DESCRIPTOR' : _FEGETREPLY,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeGetReply)
+ })
+_sym_db.RegisterMessage(FeGetReply)
+
+FeNotifyDataReq = _reflection.GeneratedProtocolMessageType('FeNotifyDataReq', (_message.Message,), {
+ 'DESCRIPTOR' : _FENOTIFYDATAREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeNotifyDataReq)
+ })
+_sym_db.RegisterMessage(FeNotifyDataReq)
+
+FeRegisterNotifyReq = _reflection.GeneratedProtocolMessageType('FeRegisterNotifyReq', (_message.Message,), {
+ 'DESCRIPTOR' : _FEREGISTERNOTIFYREQ,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeRegisterNotifyReq)
+ })
+_sym_db.RegisterMessage(FeRegisterNotifyReq)
+
+FeMessage = _reflection.GeneratedProtocolMessageType('FeMessage', (_message.Message,), {
+ 'DESCRIPTOR' : _FEMESSAGE,
+ '__module__' : 'mgmt_pb2'
+ # @@protoc_insertion_point(class_scope:mgmtd.FeMessage)
+ })
+_sym_db.RegisterMessage(FeMessage)
+
+
+# @@protoc_insertion_point(module_scope)
diff --git a/tests/topotests/mgmt_fe_client/oper.py b/tests/topotests/mgmt_fe_client/oper.py
new file mode 120000
index 0000000000..924439251a
--- /dev/null
+++ b/tests/topotests/mgmt_fe_client/oper.py
@@ -0,0 +1 @@
+../mgmt_oper/oper.py \ No newline at end of file
diff --git a/tests/topotests/mgmt_fe_client/r1/frr.conf b/tests/topotests/mgmt_fe_client/r1/frr.conf
new file mode 100644
index 0000000000..cf8ba160f4
--- /dev/null
+++ b/tests/topotests/mgmt_fe_client/r1/frr.conf
@@ -0,0 +1,23 @@
+log timestamp precision 6
+log file frr.log
+
+no debug memstats-at-exit
+
+debug northbound notifications
+debug northbound libyang
+debug northbound events
+debug northbound callbacks
+
+debug mgmt backend datastore frontend transaction
+debug mgmt client frontend
+debug mgmt client backend
+
+interface r1-eth0
+ ip address 1.1.1.1/24
+exit
+
+interface r1-eth1 vrf red
+ ip address 3.3.3.1/24
+exit
+ip route 11.11.11.11/32 1.1.1.2
+!ip route 13.13.13.13/32 3.3.3.2 vrf red \ No newline at end of file
diff --git a/tests/topotests/mgmt_fe_client/test_client.py b/tests/topotests/mgmt_fe_client/test_client.py
new file mode 100644
index 0000000000..6268d2f123
--- /dev/null
+++ b/tests/topotests/mgmt_fe_client/test_client.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+# Copyright (c) 2019-2020 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+# noqa: E501
+#
+"""
+Test static route functionality
+"""
+import pytest
+from lib.topogen import Topogen
+from oper import check_kernel_32
+
+pytestmark = [pytest.mark.staticd]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {"s1": ("r1",), "s2": ("r1",)}
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ # Setup VRF red
+ router.net.add_l3vrf("red", 10)
+ router.net.add_loop("lo-red")
+ router.net.attach_iface_to_l3vrf("lo-red", "red")
+ router.net.attach_iface_to_l3vrf(rname + "-eth1", "red")
+ router.load_frr_config("frr.conf")
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def test_oper_simple(tgen):
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"].net
+ check_kernel_32(r1, "11.11.11.11", 1, "")
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json
new file mode 100644
index 0000000000..435d7336fc
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json
@@ -0,0 +1,576 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "12.12.12.12/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "2.2.2.2",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json
new file mode 100644
index 0000000000..1a1f6480fa
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json
@@ -0,0 +1,1145 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "12.12.12.12/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "2.2.2.2",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "name": "red",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "13.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "13.13.13.13/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "3.3.3.2",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "14.14.14.14/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "4.4.4.2",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2003:333::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2003:333::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2003:333::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2003:333::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json
new file mode 100644
index 0000000000..cfabd49c45
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json
@@ -0,0 +1,576 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "red",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "13.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "13.13.13.13/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "3.3.3.2",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "14.14.14.14/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "4.4.4.2",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2003:333::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2003:333::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2003:333::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2003:333::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json
new file mode 100644
index 0000000000..2e2b8ec7ad
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json
@@ -0,0 +1,572 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "12.12.12.12/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "2.2.2.2",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json
new file mode 100644
index 0000000000..2e2b8ec7ad
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json
@@ -0,0 +1,572 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "12.12.12.12/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "2.2.2.2",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib.json b/tests/topotests/mgmt_oper/oper-results/result-lib.json
new file mode 100644
index 0000000000..1a1f6480fa
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib.json
@@ -0,0 +1,1145 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "12.12.12.12/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "2.2.2.2",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "name": "red",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "13.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "13.13.13.13/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "3.3.3.2",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "14.14.14.14/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "4.4.4.2",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "4.4.4.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2003:333::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2003:333::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2003:333::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2003:333::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth2",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2004:4444::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth3",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json
new file mode 100644
index 0000000000..956d3a8922
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json
@@ -0,0 +1,225 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "12.12.12.12/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "2.2.2.2",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json
new file mode 100644
index 0000000000..2e2b8ec7ad
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json
@@ -0,0 +1,572 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "12.12.12.12/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "2.2.2.2",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ },
+ {
+ "prefix": "2001:1111::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2001:1111::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::/64",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2002:2222::1/128",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/oper.py b/tests/topotests/mgmt_oper/oper.py
new file mode 100644
index 0000000000..e3386067bc
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper.py
@@ -0,0 +1,258 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# October 29 2023, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2023, LabN Consulting, L.L.C.
+#
+
+import datetime
+import ipaddress
+import json
+import logging
+import math
+import os
+import pprint
+import re
+
+from lib.common_config import retry, step
+from lib.topolog import logger
+from lib.topotest import json_cmp as tt_json_cmp
+
+try:
+ from deepdiff import DeepDiff as dd_json_cmp
+except ImportError:
+ dd_json_cmp = None
+
+
def json_cmp(got, expect, exact_match):
    """Compare received JSON `got` against expected JSON `expect`.

    Uses DeepDiff when the optional `deepdiff` package imported at module
    scope is available, otherwise falls back to topotest's `json_cmp`.

    Returns None when the two match (under the selected semantics),
    otherwise a truthy diff description suitable for logging.
    """
    if dd_json_cmp:
        if exact_match:
            # Exact comparison: any difference in either direction counts.
            deep_diff = dd_json_cmp(expect, got)
            # Convert DeepDiff completely into dicts or lists at all levels
            json_diff = json.loads(deep_diff.to_json())
        else:
            # Subset comparison: extra fields/objects in `got` are allowed,
            # so strip "added" entries out of the diff below.
            json_diff = dd_json_cmp(expect, got, ignore_order=True)
            # Convert DeepDiff completely into dicts or lists at all levels
            # json_diff = json.loads(deep_diff.to_json())
            # Remove new fields in json object from diff
            if json_diff.get("dictionary_item_added") is not None:
                del json_diff["dictionary_item_added"]
            # Remove new json objects in json array from diff
            if (new_items := json_diff.get("iterable_item_added")) is not None:
                new_item_paths = list(new_items.keys())
                for path in new_item_paths:
                    if type(new_items[path]) is dict:
                        del new_items[path]
                if len(new_items) == 0:
                    del json_diff["iterable_item_added"]
        # Normalize an empty diff ({} or similar falsy value) to None so
        # callers can simply check `is None` for a match.
        if not json_diff:
            json_diff = None
    else:
        json_diff = tt_json_cmp(got, expect, exact_match)
        # NOTE(review): str() turns a matching (None) result into the truthy
        # string "None" — callers of this fallback path appear to want a
        # printable diff, but verify no caller does `is None` on it.
        json_diff = str(json_diff)
    return json_diff
+
+
def enable_debug(router):
    # Enable northbound configuration-callback debug logging on `router`
    # (a topotest router object exposing vtysh_cmd()).
    router.vtysh_cmd("debug northbound callbacks configuration")
+
+
def disable_debug(router):
    # Disable the northbound configuration-callback debug logging that
    # enable_debug() turned on.
    router.vtysh_cmd("no debug northbound callbacks configuration")
+
+
def do_oper_test(tgen, query_results):
    """Run each operational-state query on r1 and compare to canned results.

    tgen: topogen object; "r1" must be a gear whose .net runs vtysh.
    query_results: iterable of (query-string, expected-json-file-path) pairs.

    Raises AssertionError on the first query whose (normalized) output does
    not match the expected file; logs the diff before asserting.
    """
    r1 = tgen.gears["r1"].net

    # The sed filters "rub out" values that vary from run to run (phy
    # addresses, uptimes, vrf ids, interface indexes, internal ids) so the
    # output can be compared against the static expected-result files.
    qcmd = (
        r"vtysh -c 'show mgmt get-data {}' "
        r"""| sed -e 's/"phy-address": ".*"/"phy-address": "rubout"/'"""
        r"""| sed -e 's/"uptime": ".*"/"uptime": "rubout"/'"""
        r"""| sed -e 's/"vrf": "[0-9]*"/"vrf": "rubout"/'"""
        r"""| sed -e 's/"if-index": [0-9][0-9]*/"if-index": "rubout"/'"""
        r"""| sed -e 's/"id": [0-9][0-9]*/"id": "rubout"/'"""
    )

    # Only reset the step counter for the first query.
    doreset = True
    for qr in query_results:
        step(f"Perform query '{qr[0]}'", reset=doreset)
        if doreset:
            doreset = False
        # Use a context manager so the expected-results file descriptor is
        # closed promptly (the original open(...).read() leaked it).
        with open(qr[1], encoding="ascii") as efile:
            expected = efile.read()
        output = r1.cmd_nostatus(qcmd.format(qr[0]))

        try:
            ojson = json.loads(output)
        except json.decoder.JSONDecodeError as error:
            logging.error("Error decoding json: %s\noutput:\n%s", error, output)
            raise

        try:
            ejson = json.loads(expected)
        except json.decoder.JSONDecodeError as error:
            logging.error(
                "Error decoding json exp result: %s\noutput:\n%s", error, expected
            )
            raise

        # Removed stray debug leftover `dd_json_cmp = None` which shadowed
        # the module-level import and unconditionally disabled the DeepDiff
        # comparison path below.
        if dd_json_cmp:
            cmpout = json_cmp(ojson, ejson, exact_match=True)
            if cmpout:
                logging.warning(
                    "-------DIFF---------\n%s\n---------DIFF----------",
                    pprint.pformat(cmpout),
                )
        else:
            cmpout = tt_json_cmp(ojson, ejson, exact=True)
            if cmpout:
                logging.warning(
                    "-------EXPECT--------\n%s\n------END-EXPECT------",
                    json.dumps(ejson, indent=4),
                )
                logging.warning(
                    "--------GOT----------\n%s\n-------END-GOT--------",
                    json.dumps(ojson, indent=4),
                )

        assert cmpout is None
+
+
def get_ip_networks(super_prefix, count):
    """Return the first `count` equal-size subnets of `super_prefix`.

    The prefix length is extended by ceil(log2(count)) bits so at least
    `count` subnets exist; only the first `count` are returned as a tuple
    of ip_network objects.
    """
    # (count - 1).bit_length() == ceil(log2(count)) for count >= 1, computed
    # exactly in integer arithmetic.  The original float-based
    # math.log(count, 2) could misround for large powers of two, yielding an
    # off-by-one prefixlen_diff (and raised ValueError for count == 0, which
    # now simply yields an empty result).
    prefixlen_diff = (count - 1).bit_length() if count >= 1 else 0
    network = ipaddress.ip_network(super_prefix)
    return tuple(network.subnets(prefixlen_diff))[0:count]
+
+
@retry(retry_timeout=30, initial_wait=0.1)
def check_kernel(r1, super_prefix, count, add, is_blackhole, vrf, matchvia):
    """Assert that the `count` generated subnets of `super_prefix` are
    present (add=True) or absent (add=False) in r1's kernel routing table,
    matching `matchvia` (or a blackhole route) when present."""
    vrfstr = f" vrf {vrf}" if vrf else ""
    family = "-6" if ipaddress.ip_network(super_prefix).version == 6 else "-4"
    kernel = r1.cmd_raises(f"ip {family} route show{vrfstr}")

    for net in get_ip_networks(super_prefix, count):
        netstr = str(net)

        if not add:
            assert netstr not in kernel
            continue

        if is_blackhole:
            route = f"blackhole {netstr} proto (static|196) metric 20"
        else:
            route = f"{netstr}(?: nhid [0-9]+)? {matchvia} proto (static|196) metric 20"
        assert re.search(route, kernel), f"Failed to find \n'{route}'\n in \n'{kernel}'"
+
+
def addrgen(a, count, step=1):
    """Generate successive IP addresses starting at `a`.

    Yields one address per item of range(0, count, step), each `step`
    greater than the previous one.
    """
    current = a
    for _ in range(0, count, step):
        yield current
        current = current + step
+
+
@retry(retry_timeout=30, initial_wait=0.1)
def check_kernel_32(r1, start_addr, count, vrf, step=1):
    """Assert that `count` host addresses starting at `start_addr` (spaced
    `step` apart) all appear in r1's kernel routing table."""
    first = ipaddress.ip_address(start_addr)
    vrfstr = f" vrf {vrf}" if vrf else ""
    family = "-6" if first.version == 6 else "-4"
    kernel = r1.cmd_raises(f"ip {family} route show{vrfstr}")

    nentries = kernel.count("\n")
    logging.info("checking kernel routing table%s: (%s entries)", vrfstr, nentries)

    for addr in addrgen(first, count, step):
        assert str(addr) in kernel, f"Failed to find '{addr}' in {nentries} entries"
+
+
def do_config(
    r1,
    count,
    add=True,
    do_ipv6=False,
    via=None,
    vrf=None,
    use_cli=False,
):
    """Add or remove `count` static routes on r1 and verify them in the kernel.

    r1: topotest router net object (provides logdir, name, cmd_raises).
    count: number of subnets to generate under the chosen super-prefix.
    add: True to add routes, False to remove them.
    do_ipv6: generate IPv6 routes instead of IPv4.
    via: nexthop — "blackhole", an interface name, or None to pick a
        default gateway/interface based on `vrf` and `do_ipv6`.
    vrf: VRF name to configure the routes in (None for default VRF).
    use_cli: load config via interactive vtysh (stdin) instead of vtysh -f.

    Raises on vtysh failure or (via check_kernel's retry) if the kernel
    table does not reach the expected state.
    """
    optype = "adding" if add else "removing"
    iptype = "IPv6" if do_ipv6 else "IPv4"

    #
    # Set the route details: super-prefix and the expected "via" match
    # string used later when checking the kernel table.
    #

    if vrf:
        super_prefix = "2111::/48" if do_ipv6 else "111.0.0.0/8"
    else:
        super_prefix = "2055::/48" if do_ipv6 else "55.0.0.0/8"

    matchvia = ""
    if via == "blackhole":
        # Blackhole routes have no "via"; check_kernel matches them specially.
        pass
    elif via:
        matchvia = f"dev {via}"
    else:
        # No nexthop given: pick the default peer address for the VRF/AF
        # (assumes the topology's r1-eth0/r1-eth1 addressing — see frr.conf).
        if vrf:
            via = "2102::2" if do_ipv6 else "3.3.3.2"
            matchvia = f"via {via} dev r1-eth1"
        else:
            via = "2101::2" if do_ipv6 else "1.1.1.2"
            matchvia = f"via {via} dev r1-eth0"

    vrfdbg = " in vrf {}".format(vrf) if vrf else ""
    logger.debug("{} {} static {} routes{}".format(optype, count, iptype, vrfdbg))

    #
    # Generate config file in a retrievable place (the router's log dir)
    # so it can be inspected after a test failure.
    #

    config_file = os.path.join(
        r1.logdir, r1.name, "{}-routes-{}.conf".format(iptype.lower(), optype)
    )
    with open(config_file, "w") as f:
        if use_cli:
            # Interactive vtysh needs an explicit "configure terminal".
            f.write("configure terminal\n")
        if vrf:
            f.write("vrf {}\n".format(vrf))

        for i, net in enumerate(get_ip_networks(super_prefix, count)):
            if add:
                f.write("ip route {} {}\n".format(net, via))
            else:
                f.write("no ip route {} {}\n".format(net, via))

    #
    # Load config file, timing how long the load takes.
    #

    if use_cli:
        load_command = 'vtysh < "{}"'.format(config_file)
    else:
        load_command = 'vtysh -f "{}"'.format(config_file)
    tstamp = datetime.datetime.now()
    output = r1.cmd_raises(load_command)
    delta = (datetime.datetime.now() - tstamp).total_seconds()

    #
    # Verify the results are in the kernel (retries until timeout).
    #

    check_kernel(r1, super_prefix, count, add, via == "blackhole", vrf, matchvia)

    optyped = "added" if add else "removed"
    logger.debug(
        "{} {} {} static routes under {}{} in {}s".format(
            optyped, count, iptype.lower(), super_prefix, vrfdbg, delta
        )
    )
diff --git a/tests/topotests/mgmt_oper/r1/frr-scale.conf b/tests/topotests/mgmt_oper/r1/frr-scale.conf
new file mode 100644
index 0000000000..237d013aec
--- /dev/null
+++ b/tests/topotests/mgmt_oper/r1/frr-scale.conf
@@ -0,0 +1,25 @@
+log timestamp precision 6
+log file frr.log
+
+no debug memstats-at-exit
+
+! debug northbound libyang
+! debug northbound callbacks
+
+debug northbound notifications
+debug northbound events
+
+debug mgmt backend datastore frontend transaction
+debug mgmt client frontend
+debug mgmt client backend
+
+interface r1-eth0
+ ip address 1.1.1.1/24
+exit
+
+interface r1-eth1 vrf red
+ ip address 3.3.3.1/24
+exit
+
+ip route 11.11.11.11/32 1.1.1.2
+ip route 13.13.13.13/32 3.3.3.2 vrf red \ No newline at end of file
diff --git a/tests/topotests/mgmt_oper/r1/frr-simple.conf b/tests/topotests/mgmt_oper/r1/frr-simple.conf
new file mode 100644
index 0000000000..cf8ba160f4
--- /dev/null
+++ b/tests/topotests/mgmt_oper/r1/frr-simple.conf
@@ -0,0 +1,23 @@
+log timestamp precision 6
+log file frr.log
+
+no debug memstats-at-exit
+
+debug northbound notifications
+debug northbound libyang
+debug northbound events
+debug northbound callbacks
+
+debug mgmt backend datastore frontend transaction
+debug mgmt client frontend
+debug mgmt client backend
+
+interface r1-eth0
+ ip address 1.1.1.1/24
+exit
+
+interface r1-eth1 vrf red
+ ip address 3.3.3.1/24
+exit
+ip route 11.11.11.11/32 1.1.1.2
+!ip route 13.13.13.13/32 3.3.3.2 vrf red \ No newline at end of file
diff --git a/tests/topotests/mgmt_oper/r1/frr.conf b/tests/topotests/mgmt_oper/r1/frr.conf
new file mode 100644
index 0000000000..72a67bf020
--- /dev/null
+++ b/tests/topotests/mgmt_oper/r1/frr.conf
@@ -0,0 +1,41 @@
+log timestamp precision 6
+log file frr.log
+
+no debug memstats-at-exit
+
+debug northbound notifications
+debug northbound libyang
+debug northbound events
+debug northbound callbacks
+
+debug mgmt backend datastore frontend transaction
+debug mgmt client frontend
+debug mgmt client backend
+
+interface r1-eth0
+ ip address 1.1.1.1/24
+ ipv6 address 2001:1111::1/64
+exit
+
+interface r1-eth1
+ ip address 2.2.2.1/24
+ ipv6 address 2002:2222::1/64
+exit
+
+interface r1-eth2 vrf red
+ ip address 3.3.3.1/24
+ ipv6 address 2003:333::1/64
+exit
+
+interface r1-eth3 vrf red
+ ip address 4.4.4.1/24
+ ipv6 address 2004:4444::1/64
+exit
+
+ip route 11.0.0.0/8 Null0
+ip route 11.11.11.11/32 1.1.1.2
+ip route 12.12.12.12/32 2.2.2.2
+
+ip route 13.0.0.0/8 Null0 vrf red
+ip route 13.13.13.13/32 3.3.3.2 vrf red
+ip route 14.14.14.14/32 4.4.4.2 vrf red \ No newline at end of file
diff --git a/tests/topotests/mgmt_oper/simple-results/result-empty.json b/tests/topotests/mgmt_oper/simple-results/result-empty.json
new file mode 100644
index 0000000000..2c63c08510
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-empty.json
@@ -0,0 +1,2 @@
+{
+}
diff --git a/tests/topotests/mgmt_oper/simple-results/result-intf-state-mtu.json b/tests/topotests/mgmt_oper/simple-results/result-intf-state-mtu.json
new file mode 100644
index 0000000000..60359716d7
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-intf-state-mtu.json
@@ -0,0 +1,12 @@
+{
+ "frr-interface:lib": {
+ "interface": [
+ {
+ "name": "r1-eth0",
+ "state": {
+ "mtu": 1500
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/mgmt_oper/simple-results/result-intf-state.json b/tests/topotests/mgmt_oper/simple-results/result-intf-state.json
new file mode 100644
index 0000000000..981df024cd
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-intf-state.json
@@ -0,0 +1,17 @@
+{
+ "frr-interface:lib": {
+ "interface": [
+ {
+ "name": "r1-eth0",
+ "state": {
+ "if-index": "rubout",
+ "mtu": 1500,
+ "mtu6": 1500,
+ "speed": 10000,
+ "metric": 0,
+ "phy-address": "rubout"
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json
new file mode 100644
index 0000000000..cea4bf5a6b
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json
@@ -0,0 +1,193 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json
new file mode 100644
index 0000000000..75414ca045
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json
@@ -0,0 +1,350 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "name": "red",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json
new file mode 100644
index 0000000000..05382316a3
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json
@@ -0,0 +1,164 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "red",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json
new file mode 100644
index 0000000000..4f40820bb6
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json
@@ -0,0 +1,189 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json
new file mode 100644
index 0000000000..4f40820bb6
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json
@@ -0,0 +1,189 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib.json b/tests/topotests/mgmt_oper/simple-results/result-lib.json
new file mode 100644
index 0000000000..75414ca045
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib.json
@@ -0,0 +1,350 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "name": "red",
+ "state": {
+ "id": "rubout",
+ "active": true
+ },
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "3.3.3.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "3.3.3.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 10,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-ipv4-unicast.json b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-ipv4-unicast.json
new file mode 100644
index 0000000000..7ce60c3bdb
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-ipv4-unicast.json
@@ -0,0 +1,110 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json
new file mode 100644
index 0000000000..4f40820bb6
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json
@@ -0,0 +1,189 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv4-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "internal-flags": 8,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ },
+ {
+ "afi-safi-name": "frr-routing:ipv6-multicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "::/0"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-route-nokey.json b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-route-nokey.json
new file mode 100644
index 0000000000..7ce60c3bdb
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-route-nokey.json
@@ -0,0 +1,110 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-route-prefix.json b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-route-prefix.json
new file mode 100644
index 0000000000..833d418f9a
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-route-prefix.json
@@ -0,0 +1,50 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/simple-results/result-singleton-metric.json b/tests/topotests/mgmt_oper/simple-results/result-singleton-metric.json
new file mode 100644
index 0000000000..b3a7df54ea
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-singleton-metric.json
@@ -0,0 +1,30 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "metric": 0
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/mgmt_oper/test_oper.py b/tests/topotests/mgmt_oper/test_oper.py
new file mode 100644
index 0000000000..e8d5cfb50b
--- /dev/null
+++ b/tests/topotests/mgmt_oper/test_oper.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+# Copyright (c) 2019-2020 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+"""
+Test operational state query functionality
+"""
+
+import ipaddress
+import math
+import time
+
+import pytest
+from lib.topogen import Topogen
+from oper import check_kernel_32, do_oper_test
+
+try:
+ from deepdiff import DeepDiff as dd_json_cmp
+except ImportError:
+ dd_json_cmp = None
+
+pytestmark = [pytest.mark.staticd]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {"s1": ("r1",), "s2": ("r1",), "s3": ("r1",), "s4": ("r1",)}
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ # Setup VRF red
+ router.net.add_l3vrf("red", 10)
+ router.net.add_loop("lo-red")
+ router.net.attach_iface_to_l3vrf("lo-red", "red")
+ router.net.attach_iface_to_l3vrf(rname + "-eth2", "red")
+ router.net.attach_iface_to_l3vrf(rname + "-eth3", "red")
+ router.load_frr_config("frr.conf")
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def get_ip_networks(super_prefix, count):
+ count_log2 = math.log(count, 2)
+ if count_log2 != int(count_log2):
+ count_log2 = int(count_log2) + 1
+ else:
+ count_log2 = int(count_log2)
+ network = ipaddress.ip_network(super_prefix)
+ return tuple(network.subnets(count_log2))[0:count]
+
+
+def test_oper(tgen):
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ query_results = [
+ ("/frr-vrf:lib", "oper-results/result-lib.json"),
+ ("/frr-vrf:lib/vrf", "oper-results/result-lib-vrf-nokey.json"),
+ (
+ '/frr-vrf:lib/vrf[name="default"]',
+ "oper-results/result-lib-vrf-default.json",
+ ),
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra',
+ "oper-results/result-lib-vrf-zebra.json",
+ ),
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs',
+ "oper-results/result-lib-vrf-zebra-ribs.json",
+ ),
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib',
+ "oper-results/result-ribs-rib-nokeys.json",
+ ),
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]',
+ "oper-results/result-ribs-rib-ipv4-unicast.json",
+ ),
+ ]
+
+ r1 = tgen.gears["r1"].net
+ check_kernel_32(r1, "11.11.11.11", 1, "")
+ check_kernel_32(r1, "12.12.12.12", 1, "")
+ check_kernel_32(r1, "13.13.13.13", 1, "red")
+ check_kernel_32(r1, "14.14.14.14", 1, "red")
+ time.sleep(2)
+ do_oper_test(tgen, query_results)
+
+
+to_gen_new_results = """
+scriptdir=~chopps/w/frr/tests/topotests/mgmt_oper
+resdir=${scriptdir}/oper-results
+vtysh -c 'show mgmt get-data /frr-vrf:lib' > ${resdir}/result-lib.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf' > ${resdir}/result-lib-vrf-nokey.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]' > ${resdir}/result-lib-vrf-default.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="red"]' > ${resdir}/result-lib-vrf-red.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra' > ${resdir}/result-lib-vrf-zebra.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs' > ${resdir}/result-lib-vrf-zebra-ribs.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib' > ${resdir}/result-ribs-rib-nokeys.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]' > ${resdir}/result-ribs-rib-ipv4-unicast.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/route' > ${resdir}/result-ribs-rib-route-nokey.json
+
+for f in ${resdir}/result-*; do
+ sed -i -e 's/"uptime": ".*"/"uptime": "rubout"/;s/"id": [0-9][0-9]*/"id": "rubout"/' $f
+ sed -i -e 's/"if-index": [0-9][0-9]*/"if-index": "rubout"/' $f
+ sed -i -e 's,"vrf": "[0-9]*","vrf": "rubout",' $f
+done
+"""  # noqa: E501
+# should not differ
+# diff result-lib.json result-lib-vrf-nokey.json
+# diff result-lib-vrf-zebra.json result-lib-vrf-zebra-ribs.json
diff --git a/tests/topotests/mgmt_oper/test_querying.py b/tests/topotests/mgmt_oper/test_querying.py
new file mode 100644
index 0000000000..e53ea52c98
--- /dev/null
+++ b/tests/topotests/mgmt_oper/test_querying.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+# Copyright (c) 2019-2020 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+# noqa: E501
+#
+"""
+Test various query types
+"""
+import json
+import logging
+
+import pytest
+from lib.common_config import step
+from lib.topogen import Topogen
+from oper import check_kernel_32
+
+pytestmark = [pytest.mark.staticd]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {"s1": ("r1",), "s2": ("r1",)}
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ # Setup VRF red
+ router.net.add_l3vrf("red", 10)
+ router.net.add_loop("lo-red")
+ router.net.attach_iface_to_l3vrf("lo-red", "red")
+ router.net.attach_iface_to_l3vrf(rname + "-eth1", "red")
+ router.load_frr_config("frr-simple.conf")
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def test_oper_simple(tgen):
+ """This test is useful for doing manual testing"""
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ query_results = [
+ # Specific list entry after non-specific lists
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/'
+ 'route/route-entry[protocol="connected"]',
+ # crashes: All specific until the end, then walk
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/'
+ 'route[prefix="1.1.1.0/24"]/route-entry[protocol="connected"]',
+ # Does nothing: Root level query
+ "//metric",
+ # specific leaf after non-specific lists
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/'
+ "route/route-entry/metric",
+ # All specific until the end generic.
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/'
+ 'route[prefix="1.1.1.0/24"]/route-entry',
+ # All specific until the penultimate generic with a specific leaf child.
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/'
+ 'route[prefix="1.1.1.0/24"]/route-entry/metric',
+ # All generic until the end (middle) specific with unspecified
+ # children below to walk.
+ '/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route[prefix="1.1.1.0/24"]',
+ # All generic until the end which is a specific leaf.
+ "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/route-entry/metric",
+ ]
+ # query_results = [
+ # '/frr-interface:lib/frr-interface:interface/frr-zebra:zebra/ip-addrs[frr-rt:address-family="frr-rt:ipv4"][prefix="1.1.1.1/24"]'
+ # ]
+
+ r1 = tgen.gears["r1"].net
+ check_kernel_32(r1, "11.11.11.11", 1, "")
+
+ step("Oper test start", reset=True)
+
+ for qr in query_results:
+ step(f"Perform query '{qr}'")
+ try:
+ output = r1.cmd_nostatus(f"vtysh -c 'show mgmt get-data {qr}'")
+ except Exception as error:
+ logging.error("Error sending query: %s: %s", qr, error)
+ continue
+
+ try:
+ ojson = json.loads(output)
+ logging.info("'%s': generates:\n%s", qr, ojson)
+ except json.decoder.JSONDecodeError as error:
+ logging.error("Error decoding json: %s\noutput:\n%s", error, output)
diff --git a/tests/topotests/mgmt_oper/test_scale.py b/tests/topotests/mgmt_oper/test_scale.py
new file mode 100644
index 0000000000..d7a0e25ad8
--- /dev/null
+++ b/tests/topotests/mgmt_oper/test_scale.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+# Copyright (c) 2019-2020 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+# noqa: E501
+#
+"""
+Test operational state queries at scale
+"""
+import logging
+import time
+
+import pytest
+from lib.common_config import step
+from lib.topogen import Topogen, TopoRouter
+from oper import check_kernel_32
+
+pytestmark = [pytest.mark.staticd]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {"s1": ("r1",), "s2": ("r1",)}
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ # Setup VRF red
+ router.net.add_l3vrf("red", 10)
+ router.net.add_loop("lo-red")
+ router.net.attach_iface_to_l3vrf("lo-red", "red")
+ router.net.attach_iface_to_l3vrf(rname + "-eth1", "red")
+ router.load_frr_config("frr-scale.conf")
+ router.load_config(TopoRouter.RD_SHARP, "")
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def test_oper_simple(tgen):
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"].net
+
+ time.sleep(2)
+ count = 20 * 1000
+
+ vrf = None # "red"
+ check_kernel_32(r1, "11.11.11.11", 1, vrf)
+
+ step("Found 11.11.11.11 in kernel adding sharpd routes")
+ r1.cmd_raises(f"vtysh -c 'sharp install routes 20.0.0.0 nexthop 1.1.1.2 {count}'")
+ check_kernel_32(r1, "20.0.0.0", count, vrf, 1000)
+
+ step(f"All {count} routes installed in kernel, continuing")
+ output = r1.cmd_raises("vtysh -c 'show mgmt get-data /frr-vrf:lib'")
+    step(f"Got output: {output}")
diff --git a/tests/topotests/mgmt_oper/test_simple.py b/tests/topotests/mgmt_oper/test_simple.py
new file mode 100644
index 0000000000..f3d64e156a
--- /dev/null
+++ b/tests/topotests/mgmt_oper/test_simple.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+# Copyright (c) 2019-2020 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+# noqa: E501
+#
+"""
+Test simple operational state queries
+"""
+import pytest
+from lib.topogen import Topogen
+from oper import check_kernel_32, do_oper_test
+
+pytestmark = [pytest.mark.staticd]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {"s1": ("r1",), "s2": ("r1",)}
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ # Setup VRF red
+ router.net.add_l3vrf("red", 10)
+ router.net.add_loop("lo-red")
+ router.net.attach_iface_to_l3vrf("lo-red", "red")
+ router.net.attach_iface_to_l3vrf(rname + "-eth1", "red")
+ router.load_frr_config("frr-simple.conf")
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def test_oper_simple(tgen):
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ query_results = [
+ ("/frr-vrf:lib", "simple-results/result-lib.json"),
+ ("/frr-vrf:lib/vrf", "simple-results/result-lib-vrf-nokey.json"),
+ (
+ '/frr-vrf:lib/vrf[name="default"]',
+ "simple-results/result-lib-vrf-default.json",
+ ),
+ ('/frr-vrf:lib/vrf[name="red"]', "simple-results/result-lib-vrf-red.json"),
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra',
+ "simple-results/result-lib-vrf-zebra.json",
+ ),
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs',
+ "simple-results/result-lib-vrf-zebra-ribs.json",
+ ),
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib',
+ "simple-results/result-ribs-rib-nokeys.json",
+ ),
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]',
+ "simple-results/result-ribs-rib-ipv4-unicast.json",
+ ),
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/route',
+ "simple-results/result-ribs-rib-route-nokey.json",
+ ),
+ # Missing entry
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/'
+ 'route[prefix="1.1.0.0/24"]',
+ "simple-results/result-empty.json",
+ ),
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/'
+ 'route[prefix="1.1.1.0/24"]',
+ "simple-results/result-ribs-rib-route-prefix.json",
+ ),
+ # Leaf reference
+ (
+ '/frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/'
+ 'rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/'
+ 'route[prefix="1.1.1.0/24"]/route-entry[protocol="connected"]/metric',
+ "simple-results/result-singleton-metric.json",
+ ),
+ # Interface state
+ (
+ '/frr-interface:lib/interface[name="r1-eth0"]/state',
+ "simple-results/result-intf-state.json",
+ ),
+ (
+ '/frr-interface:lib/interface[name="r1-eth0"]/state/mtu',
+ "simple-results/result-intf-state-mtu.json",
+ ),
+ ]
+
+ r1 = tgen.gears["r1"].net
+ check_kernel_32(r1, "11.11.11.11", 1, "")
+ do_oper_test(tgen, query_results)
+
+
+to_gen_new_results = """
+scriptdir=~chopps/w/frr/tests/topotests/mgmt_oper
+resdir=${scriptdir}/simple-results
+vtysh -c 'show mgmt get-data /frr-vrf:lib' > ${resdir}/result-lib.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf' > ${resdir}/result-lib-vrf-nokey.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]' > ${resdir}/result-lib-vrf-default.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="red"]' > ${resdir}/result-lib-vrf-red.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra' > ${resdir}/result-lib-vrf-zebra.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs' > ${resdir}/result-lib-vrf-zebra-ribs.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib' > ${resdir}/result-ribs-rib-nokeys.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]' > ${resdir}/result-ribs-rib-ipv4-unicast.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/route' > ${resdir}/result-ribs-rib-route-nokey.json
+
+vtysh -c 'show mgmt get-data /frr-interface:lib/interface[name="r1-eth0"]/state' > ${resdir}/result-intf-state.json
+vtysh -c 'show mgmt get-data /frr-interface:lib/interface[name="r1-eth0"]/state/mtu' > ${resdir}/result-intf-state-mtu.json
+
+for f in ${resdir}/result-*; do
+ sed -i -e 's/"uptime": ".*"/"uptime": "rubout"/;s/"id": [0-9][0-9]*/"id": "rubout"/' $f
+ sed -i -e 's/"phy-address": ".*"/"phy-address": "rubout"/' $f
+ sed -i -e 's/"if-index": [0-9][0-9]*/"if-index": "rubout"/' $f
+ sed -i -e 's,"vrf": "[0-9]*","vrf": "rubout",' $f
+done
+"""  # noqa: E501
+
+# Example commands:
+# show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/route[prefix="1.1.0.0/24"] # noqa: E501
+# show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]/route[prefix="1.1.1.0/24"] # noqa: E501
diff --git a/tests/topotests/static_simple/r1/mgmtd.conf b/tests/topotests/static_simple/r1/mgmtd.conf
index 0f9f97ca1a..dd5761aa84 100644
--- a/tests/topotests/static_simple/r1/mgmtd.conf
+++ b/tests/topotests/static_simple/r1/mgmtd.conf
@@ -1 +1,11 @@
log timestamp precision 3
+
+! way too noisy
+! debug northbound libyang
+
+debug northbound notifications
+debug northbound events
+debug northbound callbacks
+debug mgmt backend datastore frontend transaction
+debug mgmt client frontend
+debug mgmt client backend
diff --git a/tests/topotests/static_simple/r1/zebra.conf b/tests/topotests/static_simple/r1/zebra.conf
index ec827617ab..e3a44362b5 100644
--- a/tests/topotests/static_simple/r1/zebra.conf
+++ b/tests/topotests/static_simple/r1/zebra.conf
@@ -1,5 +1,15 @@
log timestamp precision 3
+! way too noisy
+! debug northbound libyang
+
+debug northbound notifications
+debug northbound events
+debug northbound callbacks
+debug mgmt backend datastore frontend transaction
+debug mgmt client frontend
+debug mgmt client backend
+
interface r1-eth0
ip address 101.0.0.1/24
ipv6 address 2101::1/64
diff --git a/tests/topotests/static_simple/test_static_simple.py b/tests/topotests/static_simple/test_static_simple.py
index fd87224b57..f862d81239 100644
--- a/tests/topotests/static_simple/test_static_simple.py
+++ b/tests/topotests/static_simple/test_static_simple.py
@@ -40,6 +40,8 @@ def tgen(request):
router.net.add_loop("lo-red")
router.net.attach_iface_to_l3vrf("lo-red", "red")
router.net.attach_iface_to_l3vrf(rname + "-eth1", "red")
+ #
+ # router.load_frr_config("frr.conf")
# and select daemons to run
router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
router.load_config(TopoRouter.RD_MGMTD)
@@ -181,10 +183,11 @@ def guts(tgen, vrf, use_cli):
r1 = tgen.routers()["r1"]
- step("add via gateway", reset=True)
- do_config(r1, 1, True, False, vrf=vrf, use_cli=use_cli)
- step("remove via gateway")
- do_config(r1, 1, False, False, vrf=vrf, use_cli=use_cli)
+ count = 10
+ step(f"add {count} via gateway", reset=True)
+ do_config(r1, count, True, False, vrf=vrf, use_cli=use_cli)
+ step(f"remove {count} via gateway")
+ do_config(r1, count, False, False, vrf=vrf, use_cli=use_cli)
via = f"lo-{vrf}" if vrf else "lo"
step("add via loopback")
diff --git a/zebra/debug.c b/zebra/debug.c
index 68bedaf057..cf1701be19 100644
--- a/zebra/debug.c
+++ b/zebra/debug.c
@@ -7,6 +7,7 @@
#include <zebra.h>
#include "command.h"
#include "debug.h"
+#include "mgmt_be_client.h"
#include "zebra/debug_clippy.c"
@@ -846,4 +847,7 @@ void zebra_debug_init(void)
install_element(CONFIG_NODE, &no_debug_zebra_pbr_cmd);
install_element(CONFIG_NODE, &debug_zebra_mlag_cmd);
install_element(CONFIG_NODE, &debug_zebra_evpn_mh_cmd);
+
+ /* Init mgmtd backend client debug commands. */
+ mgmt_be_client_lib_vty_init();
}
diff --git a/zebra/main.c b/zebra/main.c
index 604d8974b3..b0a5a23284 100644
--- a/zebra/main.c
+++ b/zebra/main.c
@@ -25,6 +25,7 @@
#include "affinitymap.h"
#include "routemap.h"
#include "routing_nb.h"
+#include "mgmt_be_client.h"
#include "zebra/zebra_router.h"
#include "zebra/zebra_errors.h"
@@ -58,6 +59,8 @@ pid_t pid;
/* Pacify zclient.o in libfrr, which expects this variable. */
struct event_loop *master;
+struct mgmt_be_client *mgmt_be_client;
+
/* Route retain mode flag. */
int retain_mode = 0;
@@ -142,6 +145,10 @@ static void sigint(void)
zlog_notice("Terminating on signal");
+ nb_oper_cancel_all_walks();
+ mgmt_be_client_destroy(mgmt_be_client);
+ mgmt_be_client = NULL;
+
atomic_store_explicit(&zrouter.in_shutdown, true,
memory_order_relaxed);
@@ -430,6 +437,8 @@ int main(int argc, char **argv)
zebra_ns_init();
router_id_cmd_init();
zebra_vty_init();
+ mgmt_be_client = mgmt_be_client_create("zebra", NULL, 0,
+ zrouter.master);
access_list_init();
prefix_list_init();
diff --git a/zebra/zebra_nb.c b/zebra/zebra_nb.c
index a93dbbb008..7cdcaedd7e 100644
--- a/zebra/zebra_nb.c
+++ b/zebra/zebra_nb.c
@@ -434,6 +434,7 @@ const struct frr_yang_module_info frr_zebra_info = {
.get_next = lib_vrf_zebra_ribs_rib_get_next,
.get_keys = lib_vrf_zebra_ribs_rib_get_keys,
.lookup_entry = lib_vrf_zebra_ribs_rib_lookup_entry,
+ .lookup_next = lib_vrf_zebra_ribs_rib_lookup_next,
}
},
{
@@ -454,6 +455,7 @@ const struct frr_yang_module_info frr_zebra_info = {
.get_next = lib_vrf_zebra_ribs_rib_route_get_next,
.get_keys = lib_vrf_zebra_ribs_rib_route_get_keys,
.lookup_entry = lib_vrf_zebra_ribs_rib_route_lookup_entry,
+ .lookup_next = lib_vrf_zebra_ribs_rib_route_lookup_next,
}
},
{
diff --git a/zebra/zebra_nb.h b/zebra/zebra_nb.h
index 80d2aaa6fe..6762ebd314 100644
--- a/zebra/zebra_nb.h
+++ b/zebra/zebra_nb.h
@@ -125,6 +125,8 @@ const void *lib_vrf_zebra_ribs_rib_get_next(struct nb_cb_get_next_args *args);
int lib_vrf_zebra_ribs_rib_get_keys(struct nb_cb_get_keys_args *args);
const void *
lib_vrf_zebra_ribs_rib_lookup_entry(struct nb_cb_lookup_entry_args *args);
+const void *
+lib_vrf_zebra_ribs_rib_lookup_next(struct nb_cb_lookup_entry_args *args);
struct yang_data *
lib_vrf_zebra_ribs_rib_afi_safi_name_get_elem(struct nb_cb_get_elem_args *args);
struct yang_data *
@@ -134,6 +136,8 @@ lib_vrf_zebra_ribs_rib_route_get_next(struct nb_cb_get_next_args *args);
int lib_vrf_zebra_ribs_rib_route_get_keys(struct nb_cb_get_keys_args *args);
const void *
lib_vrf_zebra_ribs_rib_route_lookup_entry(struct nb_cb_lookup_entry_args *args);
+const void *
+lib_vrf_zebra_ribs_rib_route_lookup_next(struct nb_cb_lookup_entry_args *args);
struct yang_data *
lib_vrf_zebra_ribs_rib_route_prefix_get_elem(struct nb_cb_get_elem_args *args);
struct yang_data *lib_vrf_zebra_ribs_rib_route_protocol_get_elem(
diff --git a/zebra/zebra_nb_state.c b/zebra/zebra_nb_state.c
index ba537475cb..00df9bfc55 100644
--- a/zebra/zebra_nb_state.c
+++ b/zebra/zebra_nb_state.c
@@ -156,6 +156,8 @@ const void *lib_vrf_zebra_ribs_rib_get_next(struct nb_cb_get_next_args *args)
safi_t safi;
zvrf = zebra_vrf_lookup_by_id(vrf->vrf_id);
+ if (!zvrf)
+ return NULL;
if (args->list_entry == NULL) {
afi = AFI_IP;
@@ -167,7 +169,8 @@ const void *lib_vrf_zebra_ribs_rib_get_next(struct nb_cb_get_next_args *args)
} else {
zrt = RB_NEXT(zebra_router_table_head, zrt);
/* vrf_id/ns_id do not match, only walk for the given VRF */
- while (zrt && zrt->ns_id != zvrf->zns->ns_id)
+ while (zrt && (zrt->tableid != zvrf->table_id ||
+ zrt->ns_id != zvrf->zns->ns_id))
zrt = RB_NEXT(zebra_router_table_head, zrt);
}
@@ -198,6 +201,8 @@ lib_vrf_zebra_ribs_rib_lookup_entry(struct nb_cb_lookup_entry_args *args)
uint32_t table_id = 0;
zvrf = zebra_vrf_lookup_by_id(vrf->vrf_id);
+ if (!zvrf)
+ return NULL;
yang_afi_safi_identity2value(args->keys->key[0], &afi, &safi);
table_id = yang_str2uint32(args->keys->key[1]);
@@ -208,6 +213,28 @@ lib_vrf_zebra_ribs_rib_lookup_entry(struct nb_cb_lookup_entry_args *args)
return zebra_router_find_zrt(zvrf, table_id, afi, safi);
}
+/*
+ * Northbound 'lookup_next' callback for /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib.
+ *
+ * Returns the first zebra_router_table sorting strictly after the given
+ * (afi-safi-name, table-id) keys, or NULL.  Unlike 'lookup_entry', the keys
+ * need not name an existing entry, so oper-state walks can resume from an
+ * arbitrary position.
+ */
+const void *
+lib_vrf_zebra_ribs_rib_lookup_next(struct nb_cb_lookup_entry_args *args)
+{
+	struct vrf *vrf = (struct vrf *)args->parent_list_entry;
+	struct zebra_vrf *zvrf;
+	afi_t afi;
+	safi_t safi;
+	uint32_t table_id = 0;
+
+	/* The VRF may have no zebra-specific state yet; nothing to walk. */
+	zvrf = zebra_vrf_lookup_by_id(vrf->vrf_id);
+	if (!zvrf)
+		return NULL;
+
+	yang_afi_safi_identity2value(args->keys->key[0], &afi, &safi);
+	table_id = yang_str2uint32(args->keys->key[1]);
+	/* table_id 0 assume vrf's table_id. */
+	if (!table_id)
+		table_id = zvrf->table_id;
+
+	return zebra_router_find_next_zrt(zvrf, table_id, afi, safi);
+}
+
/*
* XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/afi-safi-name
*/
@@ -285,6 +312,25 @@ lib_vrf_zebra_ribs_rib_route_lookup_entry(struct nb_cb_lookup_entry_args *args)
return rn;
}
+/*
+ * Northbound 'lookup_next' callback for the rib 'route' list: return the
+ * route node sorting at-or-after the given prefix key, or NULL when the
+ * table is exhausted.
+ */
+const void *
+lib_vrf_zebra_ribs_rib_route_lookup_next(struct nb_cb_lookup_entry_args *args)
+{
+	const struct zebra_router_table *zrt = args->parent_list_entry;
+	struct route_node *node;
+	struct prefix key;
+
+	/* Parse the list key into a prefix and fetch the next route node. */
+	yang_str2prefix(args->keys->key[0], &key);
+	node = route_table_get_next(zrt->table, &key);
+	if (node == NULL)
+		return NULL;
+
+	/* Drop the lock taken by route_table_get_next() before returning. */
+	route_unlock_node(node);
+	return node;
+}
+
/*
* XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib/route/prefix
*/
diff --git a/zebra/zebra_router.c b/zebra/zebra_router.c
index 6271d029fb..3fd4e6eb1f 100644
--- a/zebra/zebra_router.c
+++ b/zebra/zebra_router.c
@@ -70,6 +70,26 @@ struct zebra_router_table *zebra_router_find_zrt(struct zebra_vrf *zvrf,
return zrt;
}
+/*
+ * Find the zebra_router_table sorting strictly after (afi, safi, tableid)
+ * within the VRF's namespace, or NULL when none follows.
+ *
+ * Fix: RB_NFIND returns NULL when no entry sorts at or after the key;
+ * the original dereferenced zrt->afi unconditionally, crashing on an
+ * empty/exhausted tree.  Guard the exact-match check with a NULL test.
+ */
+struct zebra_router_table *zebra_router_find_next_zrt(struct zebra_vrf *zvrf,
+						      uint32_t tableid,
+						      afi_t afi, safi_t safi)
+{
+	struct zebra_router_table finder;
+	struct zebra_router_table *zrt;
+
+	/* Build a key for an ordered (>=) lookup in the global table tree. */
+	memset(&finder, 0, sizeof(finder));
+	finder.afi = afi;
+	finder.safi = safi;
+	finder.tableid = tableid;
+	finder.ns_id = zvrf->zns->ns_id;
+	zrt = RB_NFIND(zebra_router_table_head, &zrouter.tables, &finder);
+	/* On an exact match, step past it so we yield the strictly-next one. */
+	if (zrt && zrt->afi == afi && zrt->safi == safi &&
+	    zrt->tableid == tableid && zrt->ns_id == finder.ns_id)
+		zrt = RB_NEXT(zebra_router_table_head, zrt);
+
+	return zrt;
+}
+
struct route_table *zebra_router_find_table(struct zebra_vrf *zvrf,
uint32_t tableid, afi_t afi,
safi_t safi)
diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h
index a926369ef8..3041707439 100644
--- a/zebra/zebra_router.h
+++ b/zebra/zebra_router.h
@@ -250,6 +250,9 @@ extern void zebra_router_terminate(void);
extern struct zebra_router_table *zebra_router_find_zrt(struct zebra_vrf *zvrf,
uint32_t tableid,
afi_t afi, safi_t safi);
+extern struct zebra_router_table *
+zebra_router_find_next_zrt(struct zebra_vrf *zvrf, uint32_t tableid, afi_t afi,
+ safi_t safi);
extern struct route_table *zebra_router_find_table(struct zebra_vrf *zvrf,
uint32_t tableid, afi_t afi,
safi_t safi);