Diffstat (limited to 'lib')
-rw-r--r--  lib/atomlist.c  348
-rw-r--r--  lib/atomlist.h  356
-rw-r--r--  lib/bfd.c  19
-rw-r--r--  lib/bfd.h  10
-rw-r--r--  lib/checksum.c  18
-rw-r--r--  lib/checksum.h  27
-rw-r--r--  lib/command.c  21
-rw-r--r--  lib/command.h  1
-rw-r--r--  lib/compiler.h  94
-rw-r--r--  lib/distribute.c  2
-rw-r--r--  lib/ferr.c  4
-rw-r--r--  lib/fifo.h  66
-rw-r--r--  lib/frratomic.h  7
-rw-r--r--  lib/frrlua.c (renamed from lib/lua.c)  2
-rw-r--r--  lib/frrlua.h (renamed from lib/lua.h)  6
-rw-r--r--  lib/frrstr.c  42
-rw-r--r--  lib/frrstr.h  41
-rw-r--r--  lib/grammar_sandbox_main.c  2
-rw-r--r--  lib/hash.c  4
-rw-r--r--  lib/hash.h  6
-rw-r--r--  lib/if.c  60
-rw-r--r--  lib/if.h  7
-rw-r--r--  lib/if_rmap.c  2
-rw-r--r--  lib/ipaddr.h  3
-rw-r--r--  lib/json.c  5
-rw-r--r--  lib/json.h  2
-rw-r--r--  lib/lib_errors.c  6
-rw-r--r--  lib/lib_errors.h  1
-rw-r--r--  lib/libfrr.c  7
-rw-r--r--  lib/linklist.c  15
-rw-r--r--  lib/linklist.h  20
-rw-r--r--  lib/memory.h  2
-rw-r--r--  lib/nexthop.c  206
-rw-r--r--  lib/nexthop.h  9
-rw-r--r--  lib/northbound.c  251
-rw-r--r--  lib/northbound.h  75
-rw-r--r--  lib/northbound_cli.c  154
-rw-r--r--  lib/northbound_confd.c  8
-rw-r--r--  lib/northbound_grpc.cpp  936
-rw-r--r--  lib/northbound_sysrepo.c  12
-rw-r--r--  lib/openbsd-tree.c  12
-rw-r--r--  lib/openbsd-tree.h  54
-rw-r--r--  lib/prefix.c  2
-rw-r--r--  lib/prefix.h  2
-rw-r--r--  lib/privs.c  2
-rw-r--r--  lib/qobj.c  46
-rw-r--r--  lib/qobj.h  7
-rw-r--r--  lib/route_types.txt  2
-rw-r--r--  lib/routemap.c  89
-rw-r--r--  lib/routemap.h  10
-rw-r--r--  lib/seqlock.c  167
-rw-r--r--  lib/seqlock.h  106
-rw-r--r--  lib/sockunion.c  19
-rw-r--r--  lib/sockunion.h  2
-rw-r--r--  lib/subdir.am  29
-rw-r--r--  lib/table.c  75
-rw-r--r--  lib/table.h  53
-rw-r--r--  lib/thread.c  142
-rw-r--r--  lib/thread.h  15
-rw-r--r--  lib/typerb.c  472
-rw-r--r--  lib/typerb.h  190
-rw-r--r--  lib/typesafe.c  555
-rw-r--r--  lib/typesafe.h  866
-rw-r--r--  lib/vrf.c  21
-rw-r--r--  lib/vty.c  47
-rw-r--r--  lib/vty.h  5
-rw-r--r--  lib/wheel.c  2
-rw-r--r--  lib/wheel.h  8
-rw-r--r--  lib/workqueue.c  2
-rw-r--r--  lib/yang.c  45
-rw-r--r--  lib/yang_translator.c  53
-rw-r--r--  lib/yang_wrappers.c  8
-rw-r--r--  lib/zclient.c  29
-rw-r--r--  lib/zclient.h  103
-rw-r--r--  lib/zebra.h  47
75 files changed, 5301 insertions, 843 deletions
diff --git a/lib/atomlist.c b/lib/atomlist.c
new file mode 100644
index 0000000000..8169ba9eb4
--- /dev/null
+++ b/lib/atomlist.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2016-2018 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "atomlist.h"
+
+void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item)
+{
+ atomptr_t prevval;
+ atomptr_t i = atomptr_i(item);
+
+ atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* updating ->last is possible here, but makes the code considerably
+ * more complicated... let's not.
+ */
+ prevval = ATOMPTR_NULL;
+ item->next = ATOMPTR_NULL;
+
+ /* head-insert atomically
+ * release barrier: item + item->next writes must be completed
+ */
+ while (!atomic_compare_exchange_weak_explicit(&h->first, &prevval, i,
+ memory_order_release, memory_order_relaxed))
+ atomic_store_explicit(&item->next, prevval,
+ memory_order_relaxed);
+}
+
+void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item)
+{
+ atomptr_t prevval = ATOMPTR_NULL;
+ atomptr_t i = atomptr_i(item);
+ atomptr_t hint;
+ struct atomlist_item *prevptr;
+ _Atomic atomptr_t *prev;
+
+ item->next = ATOMPTR_NULL;
+
+ atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* place new item into ->last
+ * release: item writes completed; acquire: DD barrier on hint
+ */
+ hint = atomic_exchange_explicit(&h->last, i, memory_order_acq_rel);
+
+ while (1) {
+ if (atomptr_p(hint) == NULL)
+ prev = &h->first;
+ else
+ prev = &atomlist_itemp(hint)->next;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_consume);
+ prevptr = atomlist_itemp(prevval);
+ if (prevptr == NULL)
+ break;
+
+ prev = &prevptr->next;
+ } while (prevptr);
+
+ /* last item is being deleted - start over */
+ if (atomptr_l(prevval)) {
+ hint = ATOMPTR_NULL;
+ continue;
+ }
+
+ /* no barrier - item->next is NULL and was so in xchg above */
+ if (!atomic_compare_exchange_strong_explicit(prev, &prevval, i,
+ memory_order_consume,
+ memory_order_consume)) {
+ hint = prevval;
+ continue;
+ }
+ break;
+ }
+}
+
+static void atomlist_del_core(struct atomlist_head *h,
+ struct atomlist_item *item,
+ _Atomic atomptr_t *hint,
+ atomptr_t next)
+{
+ _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
+ atomptr_t prevval, updval;
+ struct atomlist_item *prevptr;
+
+ /* drop us off "last" if needed. no r/w to barrier. */
+ prevval = atomptr_i(item);
+ atomic_compare_exchange_strong_explicit(&h->last, &prevval,
+ ATOMPTR_NULL,
+ memory_order_relaxed, memory_order_relaxed);
+
+ atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* the following code should be identical (except sort<>list) to
+ * atomsort_del_hint()
+ */
+ while (1) {
+ upd = NULL;
+ updval = ATOMPTR_LOCK;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_consume);
+
+ /* track the beginning of a chain of deleted items
+ * this is necessary to make this lock-free; we can
+ * complete deletions started by other threads.
+ */
+ if (!atomptr_l(prevval)) {
+ updval = prevval;
+ upd = prev;
+ }
+
+ prevptr = atomlist_itemp(prevval);
+ if (prevptr == item)
+ break;
+
+ prev = &prevptr->next;
+ } while (prevptr);
+
+ if (prevptr != item)
+ /* another thread completed our deletion */
+ return;
+
+ if (!upd || atomptr_l(updval)) {
+ /* failed to find non-deleted predecessor...
+ * have to try again
+ */
+ prev = &h->first;
+ continue;
+ }
+
+ if (!atomic_compare_exchange_strong_explicit(upd, &updval,
+ next, memory_order_consume,
+ memory_order_consume)) {
+ /* prev doesn't point to item anymore, something
+ * was inserted. continue at same position forward.
+ */
+ continue;
+ }
+ break;
+ }
+}
+
+void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
+ _Atomic atomptr_t *hint)
+{
+ atomptr_t next;
+
+ /* mark ourselves in-delete - full barrier */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_acquire);
+ assert(!atomptr_l(next)); /* delete race on same item */
+
+ atomlist_del_core(h, item, hint, next);
+}
+
+struct atomlist_item *atomlist_pop(struct atomlist_head *h)
+{
+ struct atomlist_item *item;
+ atomptr_t next;
+
+ /* grab head of the list - and remember it in "next" for the
+ * actual delete below. No matter what, the head of the list is
+ * where we start deleting because either it's our item, or it's
+ * some delete-marked items and then our item.
+ */
+ next = atomic_load_explicit(&h->first, memory_order_consume);
+
+ do {
+ item = atomlist_itemp(next);
+ if (!item)
+ return NULL;
+
+ /* try to mark deletion */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_acquire);
+
+ } while (atomptr_l(next));
+ /* if loop is taken: delete race on same item (another pop or del)
+ * => proceed to next item
+ * if loop exited here: we have our item selected and marked
+ */
+ atomlist_del_core(h, item, &h->first, next);
+ return item;
+}
+
+struct atomsort_item *atomsort_add(struct atomsort_head *h,
+ struct atomsort_item *item, int (*cmpfn)(
+ const struct atomsort_item *,
+ const struct atomsort_item *))
+{
+ _Atomic atomptr_t *prev;
+ atomptr_t prevval;
+ atomptr_t i = atomptr_i(item);
+ struct atomsort_item *previtem;
+ int cmpval;
+
+ do {
+ prev = &h->first;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_acquire);
+ previtem = atomptr_p(prevval);
+
+ if (!previtem || (cmpval = cmpfn(previtem, item)) > 0)
+ break;
+ if (cmpval == 0)
+ return previtem;
+
+ prev = &previtem->next;
+ } while (1);
+
+ if (atomptr_l(prevval))
+ continue;
+
+ item->next = prevval;
+ if (atomic_compare_exchange_strong_explicit(prev, &prevval, i,
+ memory_order_release, memory_order_relaxed))
+ break;
+ } while (1);
+
+ atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
+ return NULL;
+}
+
+static void atomsort_del_core(struct atomsort_head *h,
+ struct atomsort_item *item, _Atomic atomptr_t *hint,
+ atomptr_t next)
+{
+ _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
+ atomptr_t prevval, updval;
+ struct atomsort_item *prevptr;
+
+ atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* the following code should be identical (except sort<>list) to
+ * atomlist_del_core()
+ */
+ while (1) {
+ upd = NULL;
+ updval = ATOMPTR_LOCK;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_consume);
+
+ /* track the beginning of a chain of deleted items
+ * this is necessary to make this lock-free; we can
+ * complete deletions started by other threads.
+ */
+ if (!atomptr_l(prevval)) {
+ updval = prevval;
+ upd = prev;
+ }
+
+ prevptr = atomsort_itemp(prevval);
+ if (prevptr == item)
+ break;
+
+ prev = &prevptr->next;
+ } while (prevptr);
+
+ if (prevptr != item)
+ /* another thread completed our deletion */
+ return;
+
+ if (!upd || atomptr_l(updval)) {
+ /* failed to find non-deleted predecessor...
+ * have to try again
+ */
+ prev = &h->first;
+ continue;
+ }
+
+ if (!atomic_compare_exchange_strong_explicit(upd, &updval,
+ next, memory_order_relaxed,
+ memory_order_relaxed)) {
+ /* prev doesn't point to item anymore, something
+ * was inserted. continue at same position forward.
+ */
+ continue;
+ }
+ break;
+ }
+}
+
+void atomsort_del_hint(struct atomsort_head *h, struct atomsort_item *item,
+ _Atomic atomptr_t *hint)
+{
+ atomptr_t next;
+
+ /* mark ourselves in-delete - full barrier */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_seq_cst);
+ assert(!atomptr_l(next)); /* delete race on same item */
+
+ atomsort_del_core(h, item, hint, next);
+}
+
+struct atomsort_item *atomsort_pop(struct atomsort_head *h)
+{
+ struct atomsort_item *item;
+ atomptr_t next;
+
+ /* grab head of the list - and remember it in "next" for the
+ * actual delete below. No matter what, the head of the list is
+ * where we start deleting because either it's our item, or it's
+ * some delete-marked items and then our item.
+ */
+ next = atomic_load_explicit(&h->first, memory_order_consume);
+
+ do {
+ item = atomsort_itemp(next);
+ if (!item)
+ return NULL;
+
+ /* try to mark deletion */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_acquire);
+
+ } while (atomptr_l(next));
+ /* if loop is taken: delete race on same item (another pop or del)
+ * => proceed to next item
+ * if loop exited here: we have our item selected and marked
+ */
+ atomsort_del_core(h, item, &h->first, next);
+ return item;
+}
diff --git a/lib/atomlist.h b/lib/atomlist.h
new file mode 100644
index 0000000000..e4098ccb54
--- /dev/null
+++ b/lib/atomlist.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _FRR_ATOMLIST_H
+#define _FRR_ATOMLIST_H
+
+#include "typesafe.h"
+#include "frratomic.h"
+
+/* pointer with lock/deleted/invalid bit in lowest bit
+ *
+ * for atomlist/atomsort, "locked" means "this pointer can't be updated, the
+ * item is being deleted". it is permissible to assume the item will indeed
+ * be deleted (as there are no replace/etc. ops in this).
+ *
+ * in general, lowest 2/3 bits on 32/64bit architectures are available for
+ * uses like this; the only thing that will really break this is putting an
+ * atomlist_item in a struct with "packed" attribute. (it'll break
+ * immediately and consistently.) -- don't do that.
+ *
+ * ATOMPTR_USER is currently unused (and available for atomic hash or skiplist
+ * implementations.)
+ */
+typedef uintptr_t atomptr_t;
+#define ATOMPTR_MASK (UINTPTR_MAX - 3)
+#define ATOMPTR_LOCK (1)
+#define ATOMPTR_USER (2)
+#define ATOMPTR_NULL (0)
+
+static inline atomptr_t atomptr_i(void *val)
+{
+ atomptr_t atomval = (atomptr_t)val;
+
+ assert(!(atomval & ATOMPTR_LOCK));
+ return atomval;
+}
+static inline void *atomptr_p(atomptr_t val)
+{
+ return (void *)(val & ATOMPTR_MASK);
+}
+static inline bool atomptr_l(atomptr_t val)
+{
+ return (bool)(val & ATOMPTR_LOCK);
+}
+static inline bool atomptr_u(atomptr_t val)
+{
+ return (bool)(val & ATOMPTR_USER);
+}
+
+
+/* the problem with find(), find_gteq() and find_lt() on atomic lists is that
+ * they're neither an "acquire" nor a "release" operation; the element that
+ * was found is still on the list and doesn't change ownership. Therefore,
+ * an atomic transition in ownership state can't be implemented.
+ *
+ * Contrast this with add() or pop(): both function calls atomically transfer
+ * ownership of an item to or from the list, which makes them "acquire" /
+ * "release" operations.
+ *
+ * What can be implemented atomically is a "find_pop()", i.e. try to locate an
+ * item and atomically try to remove it if found. It's not currently
+ * implemented but can be added when needed.
+ *
+ * Either way - for find(), generally speaking, if you need to use find() on
+ * a list then the whole thing probably isn't well-suited to atomic
+ * implementation and you'll need to have extra locks around to make it work
+ * correctly.
+ */
+#ifdef WNO_ATOMLIST_UNSAFE_FIND
+# define atomic_find_warn
+#else
+# define atomic_find_warn __attribute__((_DEPRECATED( \
+ "WARNING: find() on atomic lists cannot be atomic by principle; " \
+ "check code to make sure usage pattern is OK and if it is, use " \
+ "#define WNO_ATOMLIST_UNSAFE_FIND")))
+#endif
+
+
+/* single-linked list, unsorted/arbitrary.
+ * can be used as queue with add_tail / pop
+ *
+ * all operations are lock-free, but not neccessarily wait-free. this means
+ * that there is no state where the system as a whole stops making process,
+ * but it *is* possible that a *particular* thread is delayed by some time.
+ *
+ * the only way for this to happen is for other threads to continuously make
+ * updates. an inactive / blocked / deadlocked other thread cannot cause such
+ * delays, and to cause such delays a thread must be heavily hitting the list -
+ * it's a rather theoretical concern.
+ */
+
+/* don't use these structs directly */
+struct atomlist_item {
+ _Atomic atomptr_t next;
+};
+#define atomlist_itemp(val) ((struct atomlist_item *)atomptr_p(val))
+
+struct atomlist_head {
+ _Atomic atomptr_t first, last;
+ _Atomic size_t count;
+};
+
+/* use as:
+ *
+ * PREDECL_ATOMLIST(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_ATOMLIST(namelist, struct name, nlitem)
+ */
+#define PREDECL_ATOMLIST(prefix) \
+struct prefix ## _head { struct atomlist_head ah; }; \
+struct prefix ## _item { struct atomlist_item ai; };
+
+#define INIT_ATOMLIST(var) { }
+
+#define DECLARE_ATOMLIST(prefix, type, field) \
+macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
+{ atomlist_add_head(&h->ah, &item->field.ai); } \
+macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
+{ atomlist_add_tail(&h->ah, &item->field.ai); } \
+macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
+ _Atomic atomptr_t *hint) \
+{ atomlist_del_hint(&h->ah, &item->field.ai, hint); } \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ atomlist_del_hint(&h->ah, &item->field.ai, NULL); } \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ char *p = (char *)atomlist_pop(&h->ah); \
+ return p ? (type *)(p - offsetof(type, field)) : NULL; } \
+macro_inline type *prefix ## _first(struct prefix##_head *h) \
+{ char *p = atomptr_p(atomic_load_explicit(&h->ah.first, \
+ memory_order_acquire)); \
+ return p ? (type *)(p - offsetof(type, field)) : NULL; } \
+macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ char *p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
+ memory_order_acquire)); \
+ return p ? (type *)(p - offsetof(type, field)) : NULL; } \
+macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ return item ? prefix##_next(h, item) : NULL; } \
+macro_inline size_t prefix ## _count(struct prefix##_head *h) \
+{ return atomic_load_explicit(&h->ah.count, memory_order_relaxed); } \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ assert(prefix ## _count(h) == 0); \
+ memset(h, 0, sizeof(*h)); \
+} \
+/* ... */
+
+/* add_head:
+ * - contention on ->first pointer
+ * - return implies completion
+ */
+void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item);
+
+/* add_tail:
+ * - concurrent add_tail can cause wait but has progress guarantee
+ * - return does NOT imply completion. completion is only guaranteed after
+ * all other add_tail operations that started before this add_tail have
+ * completed as well.
+ */
+void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item);
+
+/* del/del_hint:
+ *
+ * OWNER MUST HOLD REFERENCE ON ITEM TO BE DELETED, ENSURING NO OTHER THREAD
+ * WILL TRY TO DELETE THE SAME ITEM. DELETING INCLUDES pop().
+ *
+ * as with all deletions, threads that started reading earlier may still hold
+ * pointers to the deleted item. completion is however guaranteed for all
+ * reads starting later.
+ */
+void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
+ _Atomic atomptr_t *hint);
+
+/* pop:
+ *
+ * as with all deletions, threads that started reading earlier may still hold
+ * pointers to the deleted item. completion is however guaranteed for all
+ * reads starting later.
+ */
+struct atomlist_item *atomlist_pop(struct atomlist_head *h);
+
+
+
+struct atomsort_item {
+ _Atomic atomptr_t next;
+};
+#define atomsort_itemp(val) ((struct atomsort_item *)atomptr_p(val))
+
+struct atomsort_head {
+ _Atomic atomptr_t first;
+ _Atomic size_t count;
+};
+
+#define _PREDECL_ATOMSORT(prefix) \
+struct prefix ## _head { struct atomsort_head ah; }; \
+struct prefix ## _item { struct atomsort_item ai; };
+
+#define INIT_ATOMSORT_UNIQ(var) { }
+#define INIT_ATOMSORT_NONUNIQ(var) { }
+
+#define _DECLARE_ATOMSORT(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ assert(h->ah.count == 0); \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ struct atomsort_item *p; \
+ p = atomsort_add(&h->ah, &item->field.ai, cmpfn_uq); \
+ return container_of_null(p, type, field.ai); \
+} \
+macro_inline type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ struct atomsort_item *p; \
+ p = atomptr_p(atomic_load_explicit(&h->ah.first, \
+ memory_order_acquire)); \
+ return container_of_null(p, type, field.ai); \
+} \
+macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct atomsort_item *p; \
+ p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
+ memory_order_acquire)); \
+ return container_of_null(p, type, field.ai); \
+} \
+macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ return item ? prefix##_next(h, item) : NULL; \
+} \
+atomic_find_warn \
+macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ type *p = prefix ## _first(h); \
+ while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
+ p = prefix ## _next(h, p); \
+ return p; \
+} \
+atomic_find_warn \
+macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ type *p = prefix ## _first(h), *prev = NULL; \
+ while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
+ p = prefix ## _next(h, (prev = p)); \
+ return prev; \
+} \
+macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
+ _Atomic atomptr_t *hint) \
+{ \
+ atomsort_del_hint(&h->ah, &item->field.ai, hint); \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ atomsort_del_hint(&h->ah, &item->field.ai, NULL); \
+} \
+macro_inline size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return atomic_load_explicit(&h->ah.count, memory_order_relaxed); \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct atomsort_item *p = atomsort_pop(&h->ah); \
+ return p ? container_of(p, type, field.ai) : NULL; \
+} \
+/* ... */
+
+#define PREDECL_ATOMSORT_UNIQ(prefix) \
+ _PREDECL_ATOMSORT(prefix)
+#define DECLARE_ATOMSORT_UNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
+ const struct atomsort_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.ai), \
+ container_of(b, type, field.ai)); \
+} \
+ \
+_DECLARE_ATOMSORT(prefix, type, field, \
+ prefix ## __cmp, prefix ## __cmp) \
+ \
+atomic_find_warn \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ type *p = prefix ## _first(h); \
+ int cmpval = 0; \
+ while (p && (cmpval = cmpfn(p, item)) < 0) \
+ p = prefix ## _next(h, p); \
+ if (!p || cmpval > 0) \
+ return NULL; \
+ return p; \
+} \
+/* ... */
+
+#define PREDECL_ATOMSORT_NONUNIQ(prefix) \
+ _PREDECL_ATOMSORT(prefix)
+#define DECLARE_ATOMSORT_NONUNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
+ const struct atomsort_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.ai), \
+ container_of(b, type, field.ai)); \
+} \
+macro_inline int prefix ## __cmp_uq(const struct atomsort_item *a, \
+ const struct atomsort_item *b) \
+{ \
+ int cmpval = cmpfn(container_of(a, type, field.ai), \
+ container_of(b, type, field.ai)); \
+ if (cmpval) \
+ return cmpval; \
+ if (a < b) \
+ return -1; \
+ if (a > b) \
+ return 1; \
+ return 0; \
+} \
+ \
+_DECLARE_ATOMSORT(prefix, type, field, \
+ prefix ## __cmp, prefix ## __cmp_uq) \
+/* ... */
+
+struct atomsort_item *atomsort_add(struct atomsort_head *h,
+ struct atomsort_item *item, int (*cmpfn)(
+ const struct atomsort_item *,
+ const struct atomsort_item *));
+
+void atomsort_del_hint(struct atomsort_head *h,
+ struct atomsort_item *item, _Atomic atomptr_t *hint);
+
+struct atomsort_item *atomsort_pop(struct atomsort_head *h);
+
+#endif /* _FRR_ATOMLIST_H */
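
Expanding the "use as:" comment in the header into a complete instantiation (hypothetical struct and list names), giving a lock-free multi-producer queue:

	PREDECL_ATOMLIST(msglist)

	struct msg {
		struct msglist_item item;	/* embedded list linkage */
		int payload;
	};

	DECLARE_ATOMLIST(msglist, struct msg, item)

	static struct msglist_head queue;	/* zero-initialized == empty */

	static void enqueue(struct msg *m)
	{
		msglist_add_tail(&queue, m);	/* lock-free enqueue */
	}

	static struct msg *dequeue(void)
	{
		return msglist_pop(&queue);	/* NULL when empty */
	}
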
diff --git a/lib/bfd.c b/lib/bfd.c
index 7e27a64de7..00dbd1b3d1 100644
--- a/lib/bfd.c
+++ b/lib/bfd.c
@@ -127,8 +127,8 @@ void bfd_set_param(struct bfd_info **bfd_info, uint32_t min_rx, uint32_t min_tx,
*/
void bfd_peer_sendmsg(struct zclient *zclient, struct bfd_info *bfd_info,
int family, void *dst_ip, void *src_ip, char *if_name,
- int ttl, int multihop, int command, int set_flag,
- vrf_id_t vrf_id)
+ int ttl, int multihop, int cbit, int command,
+ int set_flag, vrf_id_t vrf_id)
{
struct stream *s;
int ret;
@@ -208,6 +208,11 @@ void bfd_peer_sendmsg(struct zclient *zclient, struct bfd_info *bfd_info,
stream_putc(s, 0);
}
}
+ /* cbit */
+ if (cbit)
+ stream_putc(s, 1);
+ else
+ stream_putc(s, 0);
stream_putw_at(s, 0, stream_get_endp(s));
@@ -253,11 +258,13 @@ const char *bfd_get_command_dbg_str(int command)
*/
struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp,
struct prefix *sp, int *status,
+ int *remote_cbit,
vrf_id_t vrf_id)
{
unsigned int ifindex;
struct interface *ifp = NULL;
int plen;
+ int local_remote_cbit;
/* Get interface index. */
ifindex = stream_getl(s);
@@ -292,6 +299,9 @@ struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp,
stream_get(&sp->u.prefix, s, plen);
sp->prefixlen = stream_getc(s);
}
+ local_remote_cbit = stream_getc(s);
+ if (remote_cbit)
+ *remote_cbit = local_remote_cbit;
return ifp;
}
@@ -433,7 +443,8 @@ void bfd_show_info(struct vty *vty, struct bfd_info *bfd_info, int multihop,
* bfd_client_sendmsg - Format and send a client register
* command to Zebra to be forwarded to BFD
*/
-void bfd_client_sendmsg(struct zclient *zclient, int command)
+void bfd_client_sendmsg(struct zclient *zclient, int command,
+ vrf_id_t vrf_id)
{
struct stream *s;
int ret;
@@ -450,7 +461,7 @@ void bfd_client_sendmsg(struct zclient *zclient, int command)
s = zclient->obuf;
stream_reset(s);
- zclient_create_header(s, command, VRF_DEFAULT);
+ zclient_create_header(s, command, vrf_id);
stream_putl(s, getpid());
diff --git a/lib/bfd.h b/lib/bfd.h
index a93875c4cf..e4781f4eaf 100644
--- a/lib/bfd.h
+++ b/lib/bfd.h
@@ -48,6 +48,8 @@ struct bfd_gbl {
#define BFD_FLAG_PARAM_CFG (1 << 0) /* parameters have been configured */
#define BFD_FLAG_BFD_REG (1 << 1) /* Peer registered with BFD */
#define BFD_FLAG_BFD_TYPE_MULTIHOP (1 << 2) /* Peer registered with BFD as multihop */
+#define BFD_FLAG_BFD_CBIT_ON (1 << 3) /* Peer registered with CBIT set to on */
+#define BFD_FLAG_BFD_CHECK_CONTROLPLANE (1 << 4) /* BFD and controlplane daemon are linked */
#define BFD_STATUS_UNKNOWN (1 << 0) /* BFD session status never received */
#define BFD_STATUS_DOWN (1 << 1) /* BFD session status is down */
@@ -83,13 +85,14 @@ extern void bfd_set_param(struct bfd_info **bfd_info, uint32_t min_rx,
int *command);
extern void bfd_peer_sendmsg(struct zclient *zclient, struct bfd_info *bfd_info,
int family, void *dst_ip, void *src_ip,
- char *if_name, int ttl, int multihop, int command,
- int set_flag, vrf_id_t vrf_id);
+ char *if_name, int ttl, int multihop, int cbit,
+ int command, int set_flag, vrf_id_t vrf_id);
extern const char *bfd_get_command_dbg_str(int command);
extern struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp,
struct prefix *sp, int *status,
+ int *remote_cbit,
vrf_id_t vrf_id);
const char *bfd_get_status_str(int status);
@@ -102,7 +105,8 @@ extern void bfd_show_info(struct vty *vty, struct bfd_info *bfd_info,
int multihop, int extra_space, bool use_json,
json_object *json_obj);
-extern void bfd_client_sendmsg(struct zclient *zclient, int command);
+extern void bfd_client_sendmsg(struct zclient *zclient, int command,
+ vrf_id_t vrf_id);
extern void bfd_gbl_init(void);
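
A rough sketch of how a caller adapts to the new signatures (variable values hypothetical, command constants as declared in zclient.h):

	/* client registration now targets a specific VRF */
	bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, vrf_id);

	/* peer registration gains the C-bit argument between multihop
	 * and command */
	bfd_peer_sendmsg(zclient, bfd_info, AF_INET, &dst_ip, &src_ip,
			 ifname, ttl, multihop, cbit,
			 ZEBRA_BFD_DEST_REGISTER, set_flag, vrf_id);
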
diff --git a/lib/checksum.c b/lib/checksum.c
index 18e3850474..3473370041 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -46,6 +46,24 @@ int /* return checksum in low-order 16 bits */
return (answer);
}
+int in_cksum_with_ph4(struct ipv4_ph *ph, void *data, int nbytes)
+{
+ uint8_t dat[sizeof(struct ipv4_ph) + nbytes];
+
+ memcpy(dat, ph, sizeof(struct ipv4_ph));
+ memcpy(dat + sizeof(struct ipv4_ph), data, nbytes);
+ return in_cksum(dat, sizeof(dat));
+}
+
+int in_cksum_with_ph6(struct ipv6_ph *ph, void *data, int nbytes)
+{
+ uint8_t dat[sizeof(struct ipv6_ph) + nbytes];
+
+ memcpy(dat, ph, sizeof(struct ipv6_ph));
+ memcpy(dat + sizeof(struct ipv6_ph), data, nbytes);
+ return in_cksum(dat, sizeof(dat));
+}
+
/* Fletcher Checksum -- Refer to RFC1008. */
#define MODX 4102U /* 5802 should be fine */
diff --git a/lib/checksum.h b/lib/checksum.h
index 7d50371439..56771d4f24 100644
--- a/lib/checksum.h
+++ b/lib/checksum.h
@@ -1,8 +1,33 @@
+#include <stdint.h>
+#include <netinet/in.h>
+
#ifdef __cplusplus
extern "C" {
#endif
-extern int in_cksum(void *, int);
+
+/* IPv4 pseudoheader */
+struct ipv4_ph {
+ struct in_addr src;
+ struct in_addr dst;
+ uint8_t rsvd;
+ uint8_t proto;
+ uint16_t len;
+} __attribute__((packed));
+
+/* IPv6 pseudoheader */
+struct ipv6_ph {
+ struct in6_addr src;
+ struct in6_addr dst;
+ uint32_t ulpl;
+ uint8_t zero[3];
+ uint8_t next_hdr;
+} __attribute__((packed));
+
+extern int in_cksum(void *data, int nbytes);
+extern int in_cksum_with_ph4(struct ipv4_ph *ph, void *data, int nbytes);
+extern int in_cksum_with_ph6(struct ipv6_ph *ph, void *data, int nbytes);
+
#define FLETCHER_CHECKSUM_VALIDATE 0xffff
extern uint16_t fletcher_checksum(uint8_t *, const size_t len,
const uint16_t offset);
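
A brief sketch of the new pseudoheader helpers, here computing a UDP-style checksum over a payload (function and variables illustrative only):

	static uint16_t udp4_cksum(struct in_addr src, struct in_addr dst,
				   void *payload, uint16_t payload_len)
	{
		struct ipv4_ph ph = {
			.src = src,		/* network byte order */
			.dst = dst,
			.rsvd = 0,
			.proto = IPPROTO_UDP,
			.len = htons(payload_len),
		};

		return in_cksum_with_ph4(&ph, payload, payload_len);
	}
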
diff --git a/lib/command.c b/lib/command.c
index 559457c119..18426e0c51 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -149,6 +149,7 @@ const char *node_names[] = {
"bfd", /* BFD_NODE */
"bfd peer", /* BFD_PEER_NODE */
"openfabric", // OPENFABRIC_NODE
+ "vrrp", /* VRRP_NODE */
};
/* clang-format on */
@@ -332,7 +333,7 @@ int argv_find(struct cmd_token **argv, int argc, const char *text, int *index)
return found;
}
-static unsigned int cmd_hash_key(void *p)
+static unsigned int cmd_hash_key(const void *p)
{
int size = sizeof(p);
@@ -1386,7 +1387,7 @@ int config_from_file(struct vty *vty, FILE *fp, unsigned int *line_num)
/* Configuration from terminal */
DEFUN (config_terminal,
config_terminal_cmd,
- "configure terminal",
+ "configure [terminal]",
"Configuration from vty interface\n"
"Configuration terminal\n")
{
@@ -1705,12 +1706,16 @@ static int vty_write_config(struct vty *vty)
vty_out(vty, "frr defaults %s\n", DFLT_NAME);
vty_out(vty, "!\n");
- for (i = 0; i < vector_active(cmdvec); i++)
- if ((node = vector_slot(cmdvec, i)) && node->func
- && (node->vtysh || vty->type != VTY_SHELL)) {
- if ((*node->func)(vty))
- vty_out(vty, "!\n");
- }
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ for (i = 0; i < vector_active(cmdvec); i++)
+ if ((node = vector_slot(cmdvec, i)) && node->func
+ && (node->vtysh || vty->type != VTY_SHELL)) {
+ if ((*node->func)(vty))
+ vty_out(vty, "!\n");
+ }
+ }
+ pthread_rwlock_unlock(&running_config->lock);
if (vty->type == VTY_TERM) {
vty_out(vty, "end\n");
diff --git a/lib/command.h b/lib/command.h
index a5f9616dbf..d96ec97e67 100644
--- a/lib/command.h
+++ b/lib/command.h
@@ -147,6 +147,7 @@ enum node_type {
BFD_NODE, /* BFD protocol mode. */
BFD_PEER_NODE, /* BFD peer configuration mode. */
OPENFABRIC_NODE, /* OpenFabric router configuration node */
+ VRRP_NODE, /* VRRP node */
NODE_TYPE_MAX, /* maximum */
};
diff --git a/lib/compiler.h b/lib/compiler.h
index cb4f7531ec..c2e57db7f8 100644
--- a/lib/compiler.h
+++ b/lib/compiler.h
@@ -32,6 +32,7 @@ extern "C" {
# define _FALLTHROUGH __attribute__((fallthrough));
#endif
# define _CONSTRUCTOR(x) constructor(x)
+# define _DEPRECATED(x) deprecated(x)
#elif defined(__GNUC__)
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
# define _RET_NONNULL , returns_nonnull
@@ -41,6 +42,9 @@ extern "C" {
# define _DESTRUCTOR(x) destructor(x)
# define _ALLOC_SIZE(x) alloc_size(x)
#endif
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+# define _DEPRECATED(x) deprecated(x)
+#endif
#if __GNUC__ >= 7
# define _FALLTHROUGH __attribute__((fallthrough));
#endif
@@ -68,6 +72,22 @@ extern "C" {
#ifndef _FALLTHROUGH
#define _FALLTHROUGH
#endif
+#ifndef _DEPRECATED
+#define _DEPRECATED(x) deprecated
+#endif
+
+/* pure = function does not modify memory & return value is the same if
+ * memory hasn't changed (=> allows compiler to optimize)
+ *
+ * Mostly autodetected by the compiler if function body is available (i.e.
+ * static inline functions in headers). Since that implies it should only be
+ * used in headers for non-inline functions, the "extern" is included here.
+ */
+#define ext_pure extern __attribute__((pure))
+
+/* for helper functions defined inside macros */
+#define macro_inline static inline __attribute__((unused))
+#define macro_pure static inline __attribute__((unused, pure))
/*
* for warnings on macros, put in the macro content like this:
@@ -92,6 +112,80 @@ extern "C" {
#define CPP_NOTICE(text)
#endif
+/* MAX / MIN are not commonly defined, but useful */
+/* note: glibc sys/param.h has #define MIN(a,b) (((a)<(b))?(a):(b)) */
+#ifdef MAX
+#undef MAX
+#endif
+#define MAX(a, b) \
+ ({ \
+ typeof(a) _max_a = (a); \
+ typeof(b) _max_b = (b); \
+ _max_a > _max_b ? _max_a : _max_b; \
+ })
+#ifdef MIN
+#undef MIN
+#endif
+#define MIN(a, b) \
+ ({ \
+ typeof(a) _min_a = (a); \
+ typeof(b) _min_b = (b); \
+ _min_a < _min_b ? _min_a : _min_b; \
+ })
+
+#ifndef offsetof
+#ifdef __compiler_offsetof
+#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE,MEMBER)
+#else
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+#endif
+
+/* this variant of container_of() retains 'const' on pointers without needing
+ * to be told to do so. The following will all work without warning:
+ *
+ * struct member *p;
+ * const struct member *cp;
+ *
+ * const struct cont *x = container_of(cp, struct cont, member);
+ * const struct cont *x = container_of(cp, const struct cont, member);
+ * const struct cont *x = container_of(p, struct cont, member);
+ * const struct cont *x = container_of(p, const struct cont, member);
+ * struct cont *x = container_of(p, struct cont, member);
+ *
+ * but the following will generate warnings about stripping const:
+ *
+ * struct cont *x = container_of(cp, struct cont, member);
+ * struct cont *x = container_of(cp, const struct cont, member);
+ * struct cont *x = container_of(p, const struct cont, member);
+ */
+#ifdef container_of
+#undef container_of
+#endif
+#define container_of(ptr, type, member) \
+ (__builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(&((type *)0)->member), \
+ typeof(ptr)) \
+ || __builtin_types_compatible_p(void *, typeof(ptr)), \
+ ({ \
+ typeof(((type *)0)->member) *__mptr = (void *)(ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); \
+ }), \
+ ({ \
+ typeof(((const type *)0)->member) *__mptr = (ptr); \
+ (const type *)((const char *)__mptr - \
+ offsetof(type, member)); \
+ }) \
+ ))
+
+#define container_of_null(ptr, type, member) \
+ ({ \
+ typeof(ptr) _tmp = (ptr); \
+ _tmp ? container_of(_tmp, type, member) : NULL; \
+ })
+
+#define array_size(ar) (sizeof(ar) / sizeof(ar[0]))
+
#ifdef __cplusplus
}
#endif
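
The const-preservation documented in the comment above can be seen in a fragment like this (types hypothetical):

	struct member { int v; };
	struct cont { int x; struct member m; };

	static const struct cont *cont_of(const struct member *cp)
	{
		/* const pointer in, const container out - no cast, no warning */
		return container_of(cp, struct cont, m);
	}

	static struct cont *cont_of_mut(struct member *p)
	{
		return container_of(p, struct cont, m);	/* mutable stays mutable */
	}
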
diff --git a/lib/distribute.c b/lib/distribute.c
index be40bd2603..2aa6b927fb 100644
--- a/lib/distribute.c
+++ b/lib/distribute.c
@@ -131,7 +131,7 @@ static struct distribute *distribute_get(struct distribute_ctx *ctx,
return ret;
}
-static unsigned int distribute_hash_make(void *arg)
+static unsigned int distribute_hash_make(const void *arg)
{
const struct distribute *dist = arg;
diff --git a/lib/ferr.c b/lib/ferr.c
index d7fa1a84f8..65c0cf886d 100644
--- a/lib/ferr.c
+++ b/lib/ferr.c
@@ -72,9 +72,9 @@ static bool ferr_hash_cmp(const void *a, const void *b)
return f_a->code == f_b->code;
}
-static inline unsigned int ferr_hash_key(void *a)
+static inline unsigned int ferr_hash_key(const void *a)
{
- struct log_ref *f = a;
+ const struct log_ref *f = a;
return f->code;
}
diff --git a/lib/fifo.h b/lib/fifo.h
deleted file mode 100644
index 6f9c59b5c1..0000000000
--- a/lib/fifo.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* FIFO common header.
- * Copyright (C) 2015 Kunihiro Ishiguro
- *
- * This file is part of Quagga.
- *
- * Quagga is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * Quagga is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; see the file COPYING; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __LIB_FIFO_H__
-#define __LIB_FIFO_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* FIFO -- first in first out structure and macros. */
-struct fifo {
- struct fifo *next;
- struct fifo *prev;
-};
-
-#define FIFO_INIT(F) \
- do { \
- struct fifo *Xfifo = (struct fifo *)(F); \
- Xfifo->next = Xfifo->prev = Xfifo; \
- } while (0)
-
-#define FIFO_ADD(F, N) \
- do { \
- struct fifo *Xfifo = (struct fifo *)(F); \
- struct fifo *Xnode = (struct fifo *)(N); \
- Xnode->next = Xfifo; \
- Xnode->prev = Xfifo->prev; \
- Xfifo->prev = Xfifo->prev->next = Xnode; \
- } while (0)
-
-#define FIFO_DEL(N) \
- do { \
- struct fifo *Xnode = (struct fifo *)(N); \
- Xnode->prev->next = Xnode->next; \
- Xnode->next->prev = Xnode->prev; \
- } while (0)
-
-#define FIFO_HEAD(F) \
- ((((struct fifo *)(F))->next == (struct fifo *)(F)) ? NULL : (F)->next)
-
-#define FIFO_EMPTY(F) (((struct fifo *)(F))->next == (struct fifo *)(F))
-
-#define FIFO_TOP(F) (FIFO_EMPTY(F) ? NULL : ((struct fifo *)(F))->next)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __LIB_FIFO_H__ */
diff --git a/lib/frratomic.h b/lib/frratomic.h
index e86030f83c..1e28253f2b 100644
--- a/lib/frratomic.h
+++ b/lib/frratomic.h
@@ -80,6 +80,9 @@ typedef std::atomic<uint_fast32_t> atomic_uint_fast32_t;
#define atomic_compare_exchange_weak_explicit(atom, expect, desire, mem1, \
mem2) \
__atomic_compare_exchange_n(atom, expect, desire, 1, mem1, mem2)
+#define atomic_compare_exchange_strong_explicit(atom, expect, desire, mem1, \
+ mem2) \
+ __atomic_compare_exchange_n(atom, expect, desire, 0, mem1, mem2)
/* gcc 4.1 and newer,
* clang 3.3 (possibly older)
@@ -152,7 +155,7 @@ typedef std::atomic<uint_fast32_t> atomic_uint_fast32_t;
rval; \
})
-#define atomic_compare_exchange_weak_explicit(atom, expect, desire, mem1, \
+#define atomic_compare_exchange_strong_explicit(atom, expect, desire, mem1, \
mem2) \
({ \
typeof(atom) _atom = (atom); \
@@ -166,6 +169,8 @@ typedef std::atomic<uint_fast32_t> atomic_uint_fast32_t;
*_expect = rval; \
ret; \
})
+#define atomic_compare_exchange_weak_explicit \
+ atomic_compare_exchange_strong_explicit
#define atomic_fetch_and_explicit(ptr, val, mem) \
({ \
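
A typical retry loop over the strong compare-and-swap added here - an increment-if-nonzero refcount, as a sketch (names and semantics hypothetical):

	static bool ref_acquire(_Atomic uint32_t *refcnt)
	{
		uint32_t old = atomic_load_explicit(refcnt, memory_order_relaxed);

		do {
			if (old == 0)
				return false;	/* object already being torn down */
		} while (!atomic_compare_exchange_strong_explicit(
				refcnt, &old, old + 1,
				memory_order_acquire, memory_order_relaxed));

		return true;	/* on failure, "old" was reloaded by the CAS */
	}
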
diff --git a/lib/lua.c b/lib/frrlua.c
index 3d701a9364..b7d8eea6e8 100644
--- a/lib/lua.c
+++ b/lib/frrlua.c
@@ -26,7 +26,7 @@
#if defined(HAVE_LUA)
#include "prefix.h"
-#include "lua.h"
+#include "frrlua.h"
#include "log.h"
static int lua_zlog_debug(lua_State *L)
diff --git a/lib/lua.h b/lib/frrlua.h
index a864ab30e0..374eb70311 100644
--- a/lib/lua.h
+++ b/lib/frrlua.h
@@ -25,9 +25,9 @@
#if defined(HAVE_LUA)
-#include <lua.h>
-#include <lualib.h>
-#include <lauxlib.h>
+#include "lua.h"
+#include "lualib.h"
+#include "lauxlib.h"
#ifdef __cplusplus
extern "C" {
diff --git a/lib/frrstr.c b/lib/frrstr.c
index fd337073f8..c575c0b568 100644
--- a/lib/frrstr.c
+++ b/lib/frrstr.c
@@ -152,7 +152,33 @@ void frrstr_strvec_free(vector v)
vector_free(v);
}
-bool begins_with(const char *str, const char *prefix)
+char *frrstr_replace(const char *str, const char *find, const char *replace)
+{
+ char *ch;
+ char *nustr = XSTRDUP(MTYPE_TMP, str);
+
+ size_t findlen = strlen(find);
+ size_t repllen = strlen(replace);
+
+ while ((ch = strstr(nustr, find))) {
+ if (repllen > findlen) {
+ size_t nusz = strlen(nustr) + repllen - findlen + 1;
+ nustr = XREALLOC(MTYPE_TMP, nustr, nusz);
+ ch = strstr(nustr, find);
+ }
+
+ size_t nustrlen = strlen(nustr);
+ size_t taillen = (nustr + nustrlen) - (ch + findlen);
+
+ memmove(ch + findlen + (repllen - findlen), ch + findlen,
+ taillen + 1);
+ memcpy(ch, replace, repllen);
+ }
+
+ return nustr;
+}
+
+bool frrstr_startswith(const char *str, const char *prefix)
{
if (!str || !prefix)
return false;
@@ -166,6 +192,20 @@ bool begins_with(const char *str, const char *prefix)
return strncmp(str, prefix, lenprefix) == 0;
}
+bool frrstr_endswith(const char *str, const char *suffix)
+{
+ if (!str || !suffix)
+ return false;
+
+ size_t lenstr = strlen(str);
+ size_t lensuffix = strlen(suffix);
+
+ if (lensuffix > lenstr)
+ return false;
+
+ return strncmp(&str[lenstr - lensuffix], suffix, lensuffix) == 0;
+}
+
int all_digit(const char *str)
{
for (; *str != '\0'; str++)
diff --git a/lib/frrstr.h b/lib/frrstr.h
index 8b591849a3..3a935c90cb 100644
--- a/lib/frrstr.h
+++ b/lib/frrstr.h
@@ -88,6 +88,29 @@ void frrstr_filter_vec(vector v, regex_t *filter);
void frrstr_strvec_free(vector v);
/*
+ * Given a string, replaces all occurrences of a substring with a different
+ * string. The result is a new string. The original string is not modified.
+ *
+ * If 'replace' is longer than 'find', this function performs N+1 allocations,
+ * where N is the number of times 'find' occurs in 'str'. If 'replace' is equal
+ * in length or shorter than 'find', only 1 allocation is performed.
+ *
+ * str
+ * String to perform replacement on.
+ *
+ * find
+ * Substring to replace.
+ *
+ * replace
+ * String to replace 'find' with.
+ *
+ * Returns:
+ * A new string, allocated with MTYPE_TMP, that is the result of performing
+ * the replacement on 'str'. This must be freed by the caller.
+ */
+char *frrstr_replace(const char *str, const char *find, const char *replace);
+
+/*
* Prefix match for string.
*
* str
@@ -97,9 +120,23 @@ void frrstr_strvec_free(vector v);
* prefix to look for
*
* Returns:
- * true str starts with prefix, false otherwise
+ * true if str starts with prefix, false otherwise
+ */
+bool frrstr_startswith(const char *str, const char *prefix);
+
+/*
+ * Suffix match for string.
+ *
+ * str
+ * string to check for suffix match
+ *
+ * suffix
+ * suffix to look for
+ *
+ * Returns:
+ * true if str ends with suffix, false otherwise
*/
-bool begins_with(const char *str, const char *prefix);
+bool frrstr_endswith(const char *str, const char *suffix);
/*
* Check the string only contains digit characters.
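
Usage sketch for the three helpers (illustrative; the frrstr_replace() result is MTYPE_TMP and must be freed by the caller):

	static void demo(struct vty *vty)
	{
		char *s = frrstr_replace("10.0.0.1/32", "/32", "");

		/* s == "10.0.0.1" */
		if (frrstr_startswith(s, "10.") && !frrstr_endswith(s, "/32"))
			vty_out(vty, "%s\n", s);

		XFREE(MTYPE_TMP, s);
	}
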
diff --git a/lib/grammar_sandbox_main.c b/lib/grammar_sandbox_main.c
index c9c942f9bf..38494fb007 100644
--- a/lib/grammar_sandbox_main.c
+++ b/lib/grammar_sandbox_main.c
@@ -58,6 +58,8 @@ int main(int argc, char **argv)
vty_init(master);
memory_init();
+ yang_init();
+ nb_init(master, NULL, 0);
vty_stdio(vty_do_exit);
diff --git a/lib/hash.c b/lib/hash.c
index 884c8f22af..fad7de5138 100644
--- a/lib/hash.c
+++ b/lib/hash.c
@@ -37,7 +37,7 @@ static pthread_mutex_t _hashes_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct list *_hashes;
struct hash *hash_create_size(unsigned int size,
- unsigned int (*hash_key)(void *),
+ unsigned int (*hash_key)(const void *),
bool (*hash_cmp)(const void *, const void *),
const char *name)
{
@@ -66,7 +66,7 @@ struct hash *hash_create_size(unsigned int size,
return hash;
}
-struct hash *hash_create(unsigned int (*hash_key)(void *),
+struct hash *hash_create(unsigned int (*hash_key)(const void *),
bool (*hash_cmp)(const void *, const void *),
const char *name)
{
diff --git a/lib/hash.h b/lib/hash.h
index 60c412b8e0..c56a98d50c 100644
--- a/lib/hash.h
+++ b/lib/hash.h
@@ -79,7 +79,7 @@ struct hash {
unsigned int max_size;
/* Key make function. */
- unsigned int (*hash_key)(void *);
+ unsigned int (*hash_key)(const void *);
/* Data compare function. */
bool (*hash_cmp)(const void *, const void *);
@@ -123,7 +123,7 @@ struct hash {
* Returns:
* a new hash table
*/
-extern struct hash *hash_create(unsigned int (*hash_key)(void *),
+extern struct hash *hash_create(unsigned int (*hash_key)(const void *),
bool (*hash_cmp)(const void *, const void *),
const char *name);
@@ -158,7 +158,7 @@ extern struct hash *hash_create(unsigned int (*hash_key)(void *),
* a new hash table
*/
extern struct hash *
-hash_create_size(unsigned int size, unsigned int (*hash_key)(void *),
+hash_create_size(unsigned int size, unsigned int (*hash_key)(const void *),
bool (*hash_cmp)(const void *, const void *),
const char *name);
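
With the const-qualified signature, key functions no longer need to cast away const; a conforming example (struct, seed and peer_hash_cmp are illustrative):

	struct peer { uint32_t id; };	/* hypothetical */

	static unsigned int peer_hash_key(const void *arg)
	{
		const struct peer *peer = arg;	/* const in, no cast needed */

		return jhash_1word(peer->id, 0x55aa5a5a);
	}

	/* hash_create(peer_hash_key, peer_hash_cmp, "Peer Hash"); */
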
diff --git a/lib/if.c b/lib/if.c
index 86b850c059..3f489e0c3e 100644
--- a/lib/if.c
+++ b/lib/if.c
@@ -187,18 +187,21 @@ void if_update_to_new_vrf(struct interface *ifp, vrf_id_t vrf_id)
if (yang_module_find("frr-interface")) {
struct lyd_node *if_dnode;
- if_dnode = yang_dnode_get(
- running_config->dnode,
- "/frr-interface:lib/interface[name='%s'][vrf='%s']/vrf",
- ifp->name, old_vrf->name);
- if (if_dnode) {
- yang_dnode_change_leaf(if_dnode, vrf->name);
- running_config->version++;
+ pthread_rwlock_wrlock(&running_config->lock);
+ {
+ if_dnode = yang_dnode_get(
+ running_config->dnode,
+ "/frr-interface:lib/interface[name='%s'][vrf='%s']/vrf",
+ ifp->name, old_vrf->name);
+ if (if_dnode) {
+ yang_dnode_change_leaf(if_dnode, vrf->name);
+ running_config->version++;
+ }
}
+ pthread_rwlock_unlock(&running_config->lock);
}
}
-
/* Delete interface structure. */
void if_delete_retain(struct interface *ifp)
{
@@ -386,6 +389,34 @@ struct interface *if_lookup_prefix(struct prefix *prefix, vrf_id_t vrf_id)
return NULL;
}
+size_t if_lookup_by_hwaddr(const uint8_t *hw_addr, size_t addrsz,
+ struct interface ***result, vrf_id_t vrf_id)
+{
+ struct vrf *vrf = vrf_lookup_by_id(vrf_id);
+
+ struct list *rs = list_new();
+ struct interface *ifp;
+
+ FOR_ALL_INTERFACES (vrf, ifp) {
+ if (ifp->hw_addr_len == (int)addrsz
+ && !memcmp(hw_addr, ifp->hw_addr, addrsz))
+ listnode_add(rs, ifp);
+ }
+
+ if (rs->count) {
+ *result = XCALLOC(MTYPE_TMP,
+ sizeof(struct interface *) * rs->count);
+ list_to_array(rs, (void **)*result, rs->count);
+ }
+
+ int count = rs->count;
+
+ list_delete(&rs);
+
+ return count;
+}
+
+
/* Get interface by name if given name interface doesn't exist create
one. */
struct interface *if_get_by_name(const char *name, vrf_id_t vrf_id)
@@ -873,6 +904,19 @@ struct connected *connected_add_by_prefix(struct interface *ifp,
return ifc;
}
+struct connected *connected_get_linklocal(struct interface *ifp)
+{
+ struct listnode *n;
+ struct connected *c = NULL;
+
+ for (ALL_LIST_ELEMENTS_RO(ifp->connected, n, c)) {
+ if (c->address->family == AF_INET6
+ && IN6_IS_ADDR_LINKLOCAL(&c->address->u.prefix6))
+ break;
+ }
+ return c;
+}
+
#if 0 /* this route_table of struct connected's is unused \
* however, it would be good to use a route_table rather than \
* a list.. \
diff --git a/lib/if.h b/lib/if.h
index d26d4dd68b..2dc1a7b2de 100644
--- a/lib/if.h
+++ b/lib/if.h
@@ -225,6 +225,10 @@ struct interface {
not work as expected.
*/
ifindex_t ifindex;
+ /*
+ * ifindex of parent interface, if any
+ */
+ ifindex_t link_ifindex;
#define IFINDEX_INTERNAL 0
/* Zebra internal interface status */
@@ -482,6 +486,8 @@ extern struct connected *if_lookup_address(void *matchaddr, int family,
vrf_id_t vrf_id);
extern struct interface *if_lookup_prefix(struct prefix *prefix,
vrf_id_t vrf_id);
+size_t if_lookup_by_hwaddr(const uint8_t *hw_addr, size_t addrsz,
+ struct interface ***result, vrf_id_t vrf_id);
/* These 3 functions are to be used when the ifname argument is terminated
by a '\0' character: */
@@ -540,6 +546,7 @@ extern struct connected *connected_lookup_prefix_exact(struct interface *,
extern struct nbr_connected *nbr_connected_new(void);
extern void nbr_connected_free(struct nbr_connected *);
struct nbr_connected *nbr_connected_check(struct interface *, struct prefix *);
+struct connected *connected_get_linklocal(struct interface *ifp);
/* link parameters */
struct if_link_params *if_link_params_get(struct interface *);
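
The new MAC-address lookup hands ownership of the result array (MTYPE_TMP) to the caller, who must free it; a sketch (MAC buffer and consumer hypothetical):

	static void visit_by_mac(const uint8_t *mac)
	{
		struct interface **ifps = NULL;
		size_t i, n = if_lookup_by_hwaddr(mac, 6, &ifps, VRF_DEFAULT);

		for (i = 0; i < n; i++)
			inspect(ifps[i]);	/* hypothetical consumer */

		if (ifps)
			XFREE(MTYPE_TMP, ifps);
	}
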
diff --git a/lib/if_rmap.c b/lib/if_rmap.c
index b0802da961..ca6f512ec6 100644
--- a/lib/if_rmap.c
+++ b/lib/if_rmap.c
@@ -107,7 +107,7 @@ static struct if_rmap *if_rmap_get(struct if_rmap_ctx *ctx, const char *ifname)
return ret;
}
-static unsigned int if_rmap_hash_make(void *data)
+static unsigned int if_rmap_hash_make(const void *data)
{
const struct if_rmap *if_rmap = data;
diff --git a/lib/ipaddr.h b/lib/ipaddr.h
index f4ddadc66e..1c2399fdd3 100644
--- a/lib/ipaddr.h
+++ b/lib/ipaddr.h
@@ -56,6 +56,9 @@ struct ipaddr {
#define SET_IPADDR_V4(p) (p)->ipa_type = IPADDR_V4
#define SET_IPADDR_V6(p) (p)->ipa_type = IPADDR_V6
+#define IPADDRSZ(p) \
+ (IS_IPADDR_V4((p)) ? sizeof(struct in_addr) : sizeof(struct in6_addr))
+
static inline int str2ipaddr(const char *str, struct ipaddr *ip)
{
int ret;
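
IPADDRSZ() keeps address copies family-agnostic (field layout per this header; buf must hold sizeof(struct in6_addr)):

	static void copy_addr(const struct ipaddr *ip, uint8_t *buf)
	{
		/* copies 4 or 16 bytes depending on ip->ipa_type */
		memcpy(buf, &ip->ip, IPADDRSZ(ip));
	}
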
diff --git a/lib/json.c b/lib/json.c
index 4ea20ba178..efc3794040 100644
--- a/lib/json.c
+++ b/lib/json.c
@@ -64,6 +64,11 @@ void json_object_boolean_true_add(struct json_object *obj, const char *key)
json_object_object_add(obj, key, json_object_new_boolean(1));
}
+void json_object_boolean_add(struct json_object *obj, const char *key, bool val)
+{
+ json_object_object_add(obj, key, json_object_new_boolean(val));
+}
+
struct json_object *json_object_lock(struct json_object *obj)
{
return json_object_get(obj);
diff --git a/lib/json.h b/lib/json.h
index a5251662be..c4d566b318 100644
--- a/lib/json.h
+++ b/lib/json.h
@@ -61,6 +61,8 @@ extern void json_object_string_add(struct json_object *obj, const char *key,
const char *s);
extern void json_object_int_add(struct json_object *obj, const char *key,
int64_t i);
+void json_object_boolean_add(struct json_object *obj, const char *key,
+ bool val);
extern void json_object_boolean_false_add(struct json_object *obj,
const char *key);
extern void json_object_boolean_true_add(struct json_object *obj,
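
The new wrapper collapses the existing true/false pair into a single call:

	/* before: */
	if (enabled)
		json_object_boolean_true_add(json, "enabled");
	else
		json_object_boolean_false_add(json, "enabled");

	/* after: */
	json_object_boolean_add(json, "enabled", enabled);
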
diff --git a/lib/lib_errors.c b/lib/lib_errors.c
index 5f6c25b770..b6c764d873 100644
--- a/lib/lib_errors.c
+++ b/lib/lib_errors.c
@@ -333,6 +333,12 @@ static struct log_ref ferr_lib_err[] = {
.suggestion = "Open an Issue with all relevant log files and restart FRR"
},
{
+ .code = EC_LIB_GRPC_INIT,
+ .title = "gRPC initialization error",
+ .description = "Upon startup FRR failed to properly initialize and startup the gRPC northbound plugin",
+ .suggestion = "Check if the gRPC libraries are installed correctly in the system.",
+ },
+ {
.code = EC_LIB_NB_CB_CONFIG_ABORT,
.title = "A northbound configuration callback has failed in the ABORT phase",
.description = "A callback used to process a configuration change has returned an error while trying to abort a change",
diff --git a/lib/lib_errors.h b/lib/lib_errors.h
index fc405c2098..39b39fb065 100644
--- a/lib/lib_errors.h
+++ b/lib/lib_errors.h
@@ -80,6 +80,7 @@ enum lib_log_refs {
EC_LIB_SYSREPO_INIT,
EC_LIB_SYSREPO_DATA_CONVERT,
EC_LIB_LIBSYSREPO,
+ EC_LIB_GRPC_INIT,
EC_LIB_ID_CONSISTENCY,
EC_LIB_ID_EXHAUST,
};
diff --git a/lib/libfrr.c b/lib/libfrr.c
index 0d4c8d6c0f..5970e70a6b 100644
--- a/lib/libfrr.c
+++ b/lib/libfrr.c
@@ -830,7 +830,12 @@ static int frr_config_read_in(struct thread *t)
/*
* Update the shared candidate after reading the startup configuration.
*/
- nb_config_replace(vty_shared_candidate_config, running_config, true);
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ nb_config_replace(vty_shared_candidate_config, running_config,
+ true);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
return 0;
}
diff --git a/lib/linklist.c b/lib/linklist.c
index 6cb639b27c..e8ba9edc12 100644
--- a/lib/linklist.c
+++ b/lib/linklist.c
@@ -391,3 +391,18 @@ struct listnode *listnode_add_force(struct list **list, void *val)
*list = list_new();
return listnode_add(*list, val);
}
+
+void **list_to_array(struct list *list, void **arr, size_t arrlen)
+{
+ struct listnode *ln;
+ void *vp;
+ size_t idx = 0;
+
+ for (ALL_LIST_ELEMENTS_RO(list, ln, vp)) {
+ arr[idx++] = vp;
+ if (idx == arrlen)
+ break;
+ }
+
+ return arr;
+}
diff --git a/lib/linklist.h b/lib/linklist.h
index 8761bc3d16..da42aa2688 100644
--- a/lib/linklist.h
+++ b/lib/linklist.h
@@ -240,6 +240,26 @@ extern void list_sort(struct list *list,
int (*cmp)(const void **, const void **));
/*
+ * Convert a list to an array of void pointers.
+ *
+ * Starts from the list head and ends either on the last node of the list or
+ * when the provided array cannot store any more elements.
+ *
+ * list
+ * list to convert
+ *
+ * arr
+ * Pre-allocated array of void *
+ *
+ * arrlen
+ * Number of elements in arr
+ *
+ * Returns:
+ * arr
+ */
+void **list_to_array(struct list *list, void **arr, size_t arrlen);
+
+/*
* Delete a list and NULL its pointer.
*
* If non-null, list->del is called with each data element.
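
A sketch of list_to_array() - the caller sizes the buffer, and the conversion stops at whichever of list end or buffer end comes first (consumer hypothetical):

	static void walk_as_array(struct list *list)
	{
		void *arr[16];
		size_t i, n = MIN(listcount(list), array_size(arr));

		list_to_array(list, arr, array_size(arr));

		for (i = 0; i < n; i++)
			handle(arr[i]);	/* hypothetical consumer */
	}
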
diff --git a/lib/memory.h b/lib/memory.h
index 91a02b7966..0002ea3349 100644
--- a/lib/memory.h
+++ b/lib/memory.h
@@ -26,8 +26,6 @@
extern "C" {
#endif
-#define array_size(ar) (sizeof(ar) / sizeof(ar[0]))
-
#if defined(HAVE_MALLOC_SIZE) && !defined(HAVE_MALLOC_USABLE_SIZE)
#define malloc_usable_size(x) malloc_size(x)
#define HAVE_MALLOC_USABLE_SIZE
diff --git a/lib/nexthop.c b/lib/nexthop.c
index 8e16e70590..57a2f1daaa 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -36,40 +36,132 @@
DEFINE_MTYPE_STATIC(LIB, NEXTHOP, "Nexthop")
DEFINE_MTYPE_STATIC(LIB, NH_LABEL, "Nexthop label")
-/* check if nexthops are same, non-recursive */
-int nexthop_same_no_recurse(const struct nexthop *next1,
- const struct nexthop *next2)
+static int _nexthop_labels_cmp(const struct nexthop *nh1,
+ const struct nexthop *nh2)
{
- if (next1->type != next2->type)
+ const struct mpls_label_stack *nhl1 = NULL;
+ const struct mpls_label_stack *nhl2 = NULL;
+
+ nhl1 = nh1->nh_label;
+ nhl2 = nh2->nh_label;
+
+ /* No labels is a match */
+ if (!nhl1 && !nhl2)
return 0;
- switch (next1->type) {
+ if (nhl1 && !nhl2)
+ return 1;
+
+ if (nhl2 && !nhl1)
+ return -1;
+
+ if (nhl1->num_labels > nhl2->num_labels)
+ return 1;
+
+ if (nhl1->num_labels < nhl2->num_labels)
+ return -1;
+
+ return memcmp(nhl1->label, nhl2->label, nhl1->num_labels);
+}
+
+static int _nexthop_g_addr_cmp(enum nexthop_types_t type,
+ const union g_addr *addr1,
+ const union g_addr *addr2)
+{
+ int ret = 0;
+
+ switch (type) {
case NEXTHOP_TYPE_IPV4:
case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (!IPV4_ADDR_SAME(&next1->gate.ipv4, &next2->gate.ipv4))
- return 0;
- if (next1->ifindex && (next1->ifindex != next2->ifindex))
- return 0;
+ ret = IPV4_ADDR_CMP(&addr1->ipv4, &addr2->ipv4);
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ ret = IPV6_ADDR_CMP(&addr1->ipv6, &addr2->ipv6);
break;
case NEXTHOP_TYPE_IFINDEX:
- if (next1->ifindex != next2->ifindex)
- return 0;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ /* No addr here */
break;
+ }
+
+ return ret;
+}
+
+static int _nexthop_gateway_cmp(const struct nexthop *nh1,
+ const struct nexthop *nh2)
+{
+ return _nexthop_g_addr_cmp(nh1->type, &nh1->gate, &nh2->gate);
+}
+
+static int _nexthop_source_cmp(const struct nexthop *nh1,
+ const struct nexthop *nh2)
+{
+ return _nexthop_g_addr_cmp(nh1->type, &nh1->src, &nh2->src);
+}
+
+static int _nexthop_cmp_no_labels(const struct nexthop *next1,
+ const struct nexthop *next2)
+{
+ int ret = 0;
+
+ if (next1->vrf_id < next2->vrf_id)
+ return -1;
+
+ if (next1->vrf_id > next2->vrf_id)
+ return 1;
+
+ if (next1->type < next2->type)
+ return -1;
+
+ if (next1->type > next2->type)
+ return 1;
+
+ switch (next1->type) {
+ case NEXTHOP_TYPE_IPV4:
case NEXTHOP_TYPE_IPV6:
- if (!IPV6_ADDR_SAME(&next1->gate.ipv6, &next2->gate.ipv6))
- return 0;
+ ret = _nexthop_gateway_cmp(next1, next2);
+ if (ret != 0)
+ return ret;
break;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (!IPV6_ADDR_SAME(&next1->gate.ipv6, &next2->gate.ipv6))
- return 0;
- if (next1->ifindex != next2->ifindex)
- return 0;
+ ret = _nexthop_gateway_cmp(next1, next2);
+ if (ret != 0)
+ return ret;
+ /* Intentional Fall-Through */
+ case NEXTHOP_TYPE_IFINDEX:
+ if (next1->ifindex < next2->ifindex)
+ return -1;
+
+ if (next1->ifindex > next2->ifindex)
+ return 1;
break;
- default:
- /* do nothing */
+ case NEXTHOP_TYPE_BLACKHOLE:
+ if (next1->bh_type < next2->bh_type)
+ return -1;
+
+ if (next1->bh_type > next2->bh_type)
+ return 1;
break;
}
- return 1;
+
+ ret = _nexthop_source_cmp(next1, next2);
+
+ return ret;
+}
+
+int nexthop_cmp(const struct nexthop *next1, const struct nexthop *next2)
+{
+ int ret = 0;
+
+ ret = _nexthop_cmp_no_labels(next1, next2);
+ if (ret != 0)
+ return ret;
+
+ ret = _nexthop_labels_cmp(next1, next2);
+
+ return ret;
}
int nexthop_same_firsthop(struct nexthop *next1, struct nexthop *next2)
@@ -121,27 +213,12 @@ const char *nexthop_type_to_str(enum nexthop_types_t nh_type)
/*
* Check if the labels match for the 2 nexthops specified.
*/
-int nexthop_labels_match(const struct nexthop *nh1, const struct nexthop *nh2)
+bool nexthop_labels_match(const struct nexthop *nh1, const struct nexthop *nh2)
{
- const struct mpls_label_stack *nhl1, *nhl2;
-
- nhl1 = nh1->nh_label;
- nhl2 = nh2->nh_label;
-
- /* No labels is a match */
- if (!nhl1 && !nhl2)
- return 1;
-
- if (!nhl1 || !nhl2)
- return 0;
-
- if (nhl1->num_labels != nhl2->num_labels)
- return 0;
-
- if (memcmp(nhl1->label, nhl2->label, nhl1->num_labels))
- return 0;
+ if (_nexthop_labels_cmp(nh1, nh2) != 0)
+ return false;
- return 1;
+ return true;
}
struct nexthop *nexthop_new(void)
@@ -180,45 +257,28 @@ bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2)
if (nh1 == nh2)
return true;
- if (nh1->vrf_id != nh2->vrf_id)
+ if (nexthop_cmp(nh1, nh2) != 0)
return false;
- if (nh1->type != nh2->type)
+ return true;
+}
+
+bool nexthop_same_no_labels(const struct nexthop *nh1,
+ const struct nexthop *nh2)
+{
+ if (nh1 && !nh2)
return false;
- switch (nh1->type) {
- case NEXTHOP_TYPE_IFINDEX:
- if (nh1->ifindex != nh2->ifindex)
- return false;
- break;
- case NEXTHOP_TYPE_IPV4:
- if (nh1->gate.ipv4.s_addr != nh2->gate.ipv4.s_addr)
- return false;
- break;
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (nh1->gate.ipv4.s_addr != nh2->gate.ipv4.s_addr)
- return false;
- if (nh1->ifindex != nh2->ifindex)
- return false;
- break;
- case NEXTHOP_TYPE_IPV6:
- if (memcmp(&nh1->gate.ipv6, &nh2->gate.ipv6, 16))
- return false;
- break;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (memcmp(&nh1->gate.ipv6, &nh2->gate.ipv6, 16))
- return false;
- if (nh1->ifindex != nh2->ifindex)
- return false;
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- if (nh1->bh_type != nh2->bh_type)
- return false;
- break;
- }
+ if (!nh1 && nh2)
+ return false;
+
+ if (nh1 == nh2)
+ return true;
+
+ if (_nexthop_cmp_no_labels(nh1, nh2) != 0)
+ return false;
- /* Compare labels too (if present) */
- return (!!nexthop_labels_match(nh1, nh2));
+ return true;
}
/* Update nexthop with label information. */
diff --git a/lib/nexthop.h b/lib/nexthop.h
index 663acaeb69..5b6c12d4ef 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -139,12 +139,13 @@ void nexthop_del_labels(struct nexthop *);
uint32_t nexthop_hash(const struct nexthop *nexthop);
extern bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2);
+extern bool nexthop_same_no_labels(const struct nexthop *nh1,
+ const struct nexthop *nh2);
+extern int nexthop_cmp(const struct nexthop *nh1, const struct nexthop *nh2);
extern const char *nexthop_type_to_str(enum nexthop_types_t nh_type);
-extern int nexthop_same_no_recurse(const struct nexthop *next1,
- const struct nexthop *next2);
-extern int nexthop_labels_match(const struct nexthop *nh1,
- const struct nexthop *nh2);
+extern bool nexthop_labels_match(const struct nexthop *nh1,
+ const struct nexthop *nh2);
extern int nexthop_same_firsthop(struct nexthop *next1, struct nexthop *next2);
extern const char *nexthop2str(const struct nexthop *nexthop,
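
nexthop_cmp() yields a memcmp-style total order (vrf_id, then type, then the type-specific fields, then the label stack), so it can back generic sorting directly. A minimal sketch; the qsort() wrapper is ours, not part of this change:

    #include <stdlib.h>
    #include "nexthop.h"

    /* qsort() passes pointers to the array elements, i.e. struct nexthop **. */
    static int nexthop_ptr_cmp(const void *a, const void *b)
    {
    	const struct nexthop *na = *(const struct nexthop *const *)a;
    	const struct nexthop *nb = *(const struct nexthop *const *)b;

    	return nexthop_cmp(na, nb);
    }

    /* usage: qsort(nhops, num_nhops, sizeof(nhops[0]), nexthop_ptr_cmp); */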
diff --git a/lib/northbound.c b/lib/northbound.c
index 5e031ac2ce..dbf90c58d4 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -40,6 +40,21 @@ struct nb_config *running_config;
/* Hash table of user pointers associated with configuration entries. */
static struct hash *running_config_entries;
+/* Management lock for the running configuration. */
+static struct {
+ /* Mutex protecting this structure. */
+ pthread_mutex_t mtx;
+
+ /* Actual lock. */
+ bool locked;
+
+ /* Northbound client who owns this lock. */
+ enum nb_client owner_client;
+
+ /* Northbound user who owns this lock. */
+ const void *owner_user;
+} running_config_mgmt_lock;
+
/*
* Global lock used to prevent multiple configuration transactions from
* happening concurrently.
@@ -51,6 +66,7 @@ static int nb_callback_configuration(const enum nb_event event,
static struct nb_transaction *nb_transaction_new(struct nb_config *config,
struct nb_config_cbs *changes,
enum nb_client client,
+ const void *user,
const char *comment);
static void nb_transaction_free(struct nb_transaction *transaction);
static int nb_transaction_process(enum nb_event event,
@@ -248,6 +264,7 @@ struct nb_config *nb_config_new(struct lyd_node *dnode)
else
config->dnode = yang_dnode_new(ly_native_ctx, true);
config->version = 0;
+ pthread_rwlock_init(&config->lock, NULL);
return config;
}
@@ -256,6 +273,7 @@ void nb_config_free(struct nb_config *config)
{
if (config->dnode)
yang_dnode_free(config->dnode);
+ pthread_rwlock_destroy(&config->lock);
XFREE(MTYPE_NB_CONFIG, config);
}
@@ -266,6 +284,7 @@ struct nb_config *nb_config_dup(const struct nb_config *config)
dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
dup->dnode = yang_dnode_dup(config->dnode);
dup->version = config->version;
+ pthread_rwlock_init(&dup->lock, NULL);
return dup;
}
@@ -513,17 +532,28 @@ int nb_candidate_edit(struct nb_config *candidate,
bool nb_candidate_needs_update(const struct nb_config *candidate)
{
- if (candidate->version < running_config->version)
- return true;
+ bool ret = false;
- return false;
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ if (candidate->version < running_config->version)
+ ret = true;
+ }
+ pthread_rwlock_unlock(&running_config->lock);
+
+ return ret;
}
int nb_candidate_update(struct nb_config *candidate)
{
struct nb_config *updated_config;
- updated_config = nb_config_dup(running_config);
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ updated_config = nb_config_dup(running_config);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
+
if (nb_config_merge(updated_config, candidate, true) != NB_OK)
return NB_ERR;
@@ -575,15 +605,20 @@ int nb_candidate_validate(struct nb_config *candidate)
return NB_ERR_VALIDATION;
RB_INIT(nb_config_cbs, &changes);
- nb_config_diff(running_config, candidate, &changes);
- ret = nb_candidate_validate_changes(candidate, &changes);
- nb_config_diff_del_changes(&changes);
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ nb_config_diff(running_config, candidate, &changes);
+ ret = nb_candidate_validate_changes(candidate, &changes);
+ nb_config_diff_del_changes(&changes);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
return ret;
}
int nb_candidate_commit_prepare(struct nb_config *candidate,
- enum nb_client client, const char *comment,
+ enum nb_client client, const void *user,
+ const char *comment,
struct nb_transaction **transaction)
{
struct nb_config_cbs changes;
@@ -596,25 +631,36 @@ int nb_candidate_commit_prepare(struct nb_config *candidate,
}
RB_INIT(nb_config_cbs, &changes);
- nb_config_diff(running_config, candidate, &changes);
- if (RB_EMPTY(nb_config_cbs, &changes))
- return NB_ERR_NO_CHANGES;
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ nb_config_diff(running_config, candidate, &changes);
+ if (RB_EMPTY(nb_config_cbs, &changes)) {
+ pthread_rwlock_unlock(&running_config->lock);
+ return NB_ERR_NO_CHANGES;
+ }
- if (nb_candidate_validate_changes(candidate, &changes) != NB_OK) {
- flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
- "%s: failed to validate candidate configuration",
- __func__);
- nb_config_diff_del_changes(&changes);
- return NB_ERR_VALIDATION;
- }
+ if (nb_candidate_validate_changes(candidate, &changes)
+ != NB_OK) {
+ flog_warn(
+ EC_LIB_NB_CANDIDATE_INVALID,
+ "%s: failed to validate candidate configuration",
+ __func__);
+ nb_config_diff_del_changes(&changes);
+ pthread_rwlock_unlock(&running_config->lock);
+ return NB_ERR_VALIDATION;
+ }
- *transaction = nb_transaction_new(candidate, &changes, client, comment);
- if (*transaction == NULL) {
- flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
- "%s: failed to create transaction", __func__);
- nb_config_diff_del_changes(&changes);
- return NB_ERR_LOCKED;
+ *transaction = nb_transaction_new(candidate, &changes, client,
+ user, comment);
+ if (*transaction == NULL) {
+ flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
+ "%s: failed to create transaction", __func__);
+ nb_config_diff_del_changes(&changes);
+ pthread_rwlock_unlock(&running_config->lock);
+ return NB_ERR_LOCKED;
+ }
}
+ pthread_rwlock_unlock(&running_config->lock);
return nb_transaction_process(NB_EV_PREPARE, *transaction);
}
@@ -633,7 +679,11 @@ void nb_candidate_commit_apply(struct nb_transaction *transaction,
/* Replace running by candidate. */
transaction->config->version++;
- nb_config_replace(running_config, transaction->config, true);
+ pthread_rwlock_wrlock(&running_config->lock);
+ {
+ nb_config_replace(running_config, transaction->config, true);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
/* Record transaction. */
if (save_transaction
@@ -645,13 +695,13 @@ void nb_candidate_commit_apply(struct nb_transaction *transaction,
}
int nb_candidate_commit(struct nb_config *candidate, enum nb_client client,
- bool save_transaction, const char *comment,
- uint32_t *transaction_id)
+ const void *user, bool save_transaction,
+ const char *comment, uint32_t *transaction_id)
{
struct nb_transaction *transaction = NULL;
int ret;
- ret = nb_candidate_commit_prepare(candidate, client, comment,
+ ret = nb_candidate_commit_prepare(candidate, client, user, comment,
&transaction);
/*
* Apply the changes if the preparation phase succeeded. Otherwise abort
@@ -666,6 +716,60 @@ int nb_candidate_commit(struct nb_config *candidate, enum nb_client client,
return ret;
}
+int nb_running_lock(enum nb_client client, const void *user)
+{
+ int ret = -1;
+
+ pthread_mutex_lock(&running_config_mgmt_lock.mtx);
+ {
+ if (!running_config_mgmt_lock.locked) {
+ running_config_mgmt_lock.locked = true;
+ running_config_mgmt_lock.owner_client = client;
+ running_config_mgmt_lock.owner_user = user;
+ ret = 0;
+ }
+ }
+ pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
+
+ return ret;
+}
+
+int nb_running_unlock(enum nb_client client, const void *user)
+{
+ int ret = -1;
+
+ pthread_mutex_lock(&running_config_mgmt_lock.mtx);
+ {
+ if (running_config_mgmt_lock.locked
+ && running_config_mgmt_lock.owner_client == client
+ && running_config_mgmt_lock.owner_user == user) {
+ running_config_mgmt_lock.locked = false;
+ running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
+ running_config_mgmt_lock.owner_user = NULL;
+ ret = 0;
+ }
+ }
+ pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
+
+ return ret;
+}
+
+int nb_running_lock_check(enum nb_client client, const void *user)
+{
+ int ret = -1;
+
+ pthread_mutex_lock(&running_config_mgmt_lock.mtx);
+ {
+ if (!running_config_mgmt_lock.locked
+ || (running_config_mgmt_lock.owner_client == client
+ && running_config_mgmt_lock.owner_user == user))
+ ret = 0;
+ }
+ pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
+
+ return ret;
+}
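
The management lock is advisory and keyed on the (client, user) pair; nb_transaction_new() below refuses to create a transaction while another client/user holds it. A hedged sketch of the intended client flow, where the session pointer is an arbitrary user cookie:

    if (nb_running_lock(NB_CLIENT_GRPC, session) != 0)
    	return -1; /* running config locked by someone else */

    ret = nb_candidate_commit(candidate, NB_CLIENT_GRPC, session, true,
    			      "commit under lock", &transaction_id);

    nb_running_unlock(NB_CLIENT_GRPC, session);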
+
static void nb_log_callback(const enum nb_event event,
enum nb_operation operation, const char *xpath,
const char *value)
@@ -812,13 +916,20 @@ int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
return nb_node->cbs.rpc(xpath, input, output);
}
-static struct nb_transaction *nb_transaction_new(struct nb_config *config,
- struct nb_config_cbs *changes,
- enum nb_client client,
- const char *comment)
+static struct nb_transaction *
+nb_transaction_new(struct nb_config *config, struct nb_config_cbs *changes,
+ enum nb_client client, const void *user, const char *comment)
{
struct nb_transaction *transaction;
+ if (nb_running_lock_check(client, user)) {
+ flog_warn(
+ EC_LIB_NB_TRANSACTION_CREATION_FAILED,
+ "%s: running configuration is locked by another client",
+ __func__);
+ return NULL;
+ }
+
if (transaction_in_progress) {
flog_warn(
EC_LIB_NB_TRANSACTION_CREATION_FAILED,
@@ -852,40 +963,52 @@ static int nb_transaction_process(enum nb_event event,
{
struct nb_config_cb *cb;
- RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
- struct nb_config_change *change = (struct nb_config_change *)cb;
- int ret;
-
- /*
- * Only try to release resources that were allocated
- * successfully.
- */
- if (event == NB_EV_ABORT && change->prepare_ok == false)
- break;
+ /*
+ * Need to lock the running configuration since transaction->changes
+ * can contain pointers to data nodes from the running configuration.
+ */
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
+ struct nb_config_change *change =
+ (struct nb_config_change *)cb;
+ int ret;
- /* Call the appropriate callback. */
- ret = nb_callback_configuration(event, change);
- switch (event) {
- case NB_EV_PREPARE:
- if (ret != NB_OK)
- return ret;
- change->prepare_ok = true;
- break;
- case NB_EV_ABORT:
- case NB_EV_APPLY:
/*
- * At this point it's not possible to reject the
- * transaction anymore, so any failure here can lead to
- * inconsistencies and should be treated as a bug.
- * Operations prone to errors, like validations and
- * resource allocations, should be performed during the
- * 'prepare' phase.
+ * Only try to release resources that were allocated
+ * successfully.
*/
- break;
- default:
- break;
+ if (event == NB_EV_ABORT && change->prepare_ok == false)
+ break;
+
+ /* Call the appropriate callback. */
+ ret = nb_callback_configuration(event, change);
+ switch (event) {
+ case NB_EV_PREPARE:
+ if (ret != NB_OK) {
+ pthread_rwlock_unlock(
+ &running_config->lock);
+ return ret;
+ }
+ change->prepare_ok = true;
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ /*
+ * At this point it's not possible to reject the
+ * transaction anymore, so any failure here can
+ * lead to inconsistencies and should be treated
+ * as a bug. Operations prone to errors, like
+ * validations and resource allocations, should
+ * be performed during the 'prepare' phase.
+ */
+ break;
+ default:
+ break;
+ }
}
}
+ pthread_rwlock_unlock(&running_config->lock);
return NB_OK;
}
@@ -1531,7 +1654,7 @@ static bool running_config_entry_cmp(const void *value1, const void *value2)
return strmatch(c1->xpath, c2->xpath);
}
-static unsigned int running_config_entry_key_make(void *value)
+static unsigned int running_config_entry_key_make(const void *value)
{
return string_hash_make(value);
}
@@ -1704,6 +1827,8 @@ const char *nb_client_name(enum nb_client client)
return "ConfD";
case NB_CLIENT_SYSREPO:
return "Sysrepo";
+ case NB_CLIENT_GRPC:
+ return "gRPC";
default:
return "unknown";
}
@@ -1761,6 +1886,7 @@ void nb_init(struct thread_master *tm,
running_config_entries = hash_create(running_config_entry_key_make,
running_config_entry_cmp,
"Running Configuration Entries");
+ pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);
/* Initialize the northbound CLI. */
nb_cli_init(tm);
@@ -1778,4 +1904,5 @@ void nb_terminate(void)
hash_clean(running_config_entries, running_config_entry_free);
hash_free(running_config_entries);
nb_config_free(running_config);
+ pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
}
diff --git a/lib/northbound.h b/lib/northbound.h
index 14f27c1d41..8f6753506b 100644
--- a/lib/northbound.h
+++ b/lib/northbound.h
@@ -414,15 +414,28 @@ enum nb_error {
/* Northbound clients. */
enum nb_client {
- NB_CLIENT_CLI = 0,
+ NB_CLIENT_NONE = 0,
+ NB_CLIENT_CLI,
NB_CLIENT_CONFD,
NB_CLIENT_SYSREPO,
+ NB_CLIENT_GRPC,
};
/* Northbound configuration. */
struct nb_config {
+ /* Configuration data. */
struct lyd_node *dnode;
+
+ /* Configuration version. */
uint32_t version;
+
+ /*
+ * Lock protecting this structure. The use of this lock is always
+ * necessary when reading or modifying the global running configuration.
+ * For candidate configurations, use of this lock is optional depending
+ * on the threading scheme of the northbound plugin.
+ */
+ pthread_rwlock_t lock;
};
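
Every direct reader of the global running_config now takes this rwlock, and the critical section is wrapped in a bare brace block purely for readability; pointers into the tree must not escape it. A reader sketch following that convention:

    pthread_rwlock_rdlock(&running_config->lock);
    {
    	/* running_config->dnode may be inspected here, but pointers
    	 * into it must not outlive the critical section. */
    	nb_cli_show_config(vty, running_config, format, translator, false);
    }
    pthread_rwlock_unlock(&running_config->lock);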
/* Northbound configuration callback. */
@@ -662,6 +675,9 @@ extern int nb_candidate_validate(struct nb_config *candidate);
* client
* Northbound client performing the commit.
*
+ * user
+ * Northbound user performing the commit (can be NULL).
+ *
* comment
* Optional comment describing the commit.
*
@@ -682,7 +698,7 @@ extern int nb_candidate_validate(struct nb_config *candidate);
* - NB_ERR for other errors.
*/
extern int nb_candidate_commit_prepare(struct nb_config *candidate,
- enum nb_client client,
+ enum nb_client client, const void *user,
const char *comment,
struct nb_transaction **transaction);
@@ -727,6 +743,9 @@ extern void nb_candidate_commit_apply(struct nb_transaction *transaction,
* client
* Northbound client performing the commit.
*
+ * user
+ * Northbound user performing the commit (can be NULL).
+ *
* save_transaction
* Specify whether the transaction should be recorded in the transactions log
* or not.
@@ -748,11 +767,57 @@ extern void nb_candidate_commit_apply(struct nb_transaction *transaction,
* - NB_ERR for other errors.
*/
extern int nb_candidate_commit(struct nb_config *candidate,
- enum nb_client client, bool save_transaction,
- const char *comment, uint32_t *transaction_id);
+ enum nb_client client, const void *user,
+ bool save_transaction, const char *comment,
+ uint32_t *transaction_id);
+
+/*
+ * Lock the running configuration.
+ *
+ * client
+ * Northbound client.
+ *
+ * user
+ * Northbound user (can be NULL).
+ *
+ * Returns:
+ * 0 on success, -1 when the running configuration is already locked.
+ */
+extern int nb_running_lock(enum nb_client client, const void *user);
+
+/*
+ * Unlock the running configuration.
+ *
+ * client
+ * Northbound client.
+ *
+ * user
+ * Northbound user (can be NULL).
+ *
+ * Returns:
+ * 0 on success, -1 when the running configuration is already unlocked or
+ * locked by another client/user.
+ */
+extern int nb_running_unlock(enum nb_client client, const void *user);
+
+/*
+ * Check if the running configuration is locked or not for the given
+ * client/user.
+ *
+ * client
+ * Northbound client.
+ *
+ * user
+ * Northbound user (can be NULL).
+ *
+ * Returns:
+ * 0 if the running configuration is unlocked or if the client/user owns the
+ * lock, -1 otherwise.
+ */
+extern int nb_running_lock_check(enum nb_client client, const void *user);
/*
- * Iterate over operetional data.
+ * Iterate over operational data.
*
* xpath
* Data path of the YANG data we want to iterate over.
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index 48faa7595a..ae1b0578a0 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -185,7 +185,7 @@ int nb_cli_apply_changes(struct vty *vty, const char *xpath_base_fmt, ...)
/* Do an implicit "commit" when using the classic CLI mode. */
if (frr_get_cli_mode() == FRR_CLI_CLASSIC) {
ret = nb_candidate_commit(vty->candidate_config, NB_CLIENT_CLI,
- false, NULL, NULL);
+ vty, false, NULL, NULL);
if (ret != NB_OK && ret != NB_ERR_NO_CHANGES) {
vty_out(vty, "%% Configuration failed: %s.\n\n",
nb_err_name(ret));
@@ -193,8 +193,13 @@ int nb_cli_apply_changes(struct vty *vty, const char *xpath_base_fmt, ...)
"Please check the logs for more details.\n");
/* Regenerate candidate for consistency. */
- nb_config_replace(vty->candidate_config, running_config,
- true);
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ nb_config_replace(vty->candidate_config,
+ running_config, true);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
+
return CMD_WARNING_CONFIG_FAILED;
}
}
@@ -237,7 +242,7 @@ int nb_cli_confirmed_commit_rollback(struct vty *vty)
/* Perform the rollback. */
ret = nb_candidate_commit(
- vty->confirmed_commit_rollback, NB_CLIENT_CLI, true,
+ vty->confirmed_commit_rollback, NB_CLIENT_CLI, vty, true,
"Rollback to previous configuration - confirmed commit has timed out",
&transaction_id);
if (ret == NB_OK)
@@ -291,11 +296,6 @@ static int nb_cli_commit(struct vty *vty, bool force,
return CMD_SUCCESS;
}
- if (vty_exclusive_lock != NULL && vty_exclusive_lock != vty) {
- vty_out(vty, "%% Configuration is locked by another VTY.\n\n");
- return CMD_WARNING;
- }
-
/* "force" parameter. */
if (!force && nb_candidate_needs_update(vty->candidate_config)) {
vty_out(vty,
@@ -307,7 +307,12 @@ static int nb_cli_commit(struct vty *vty, bool force,
/* "confirm" parameter. */
if (confirmed_timeout) {
- vty->confirmed_commit_rollback = nb_config_dup(running_config);
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ vty->confirmed_commit_rollback =
+ nb_config_dup(running_config);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
vty->t_confirmed_commit_timeout = NULL;
thread_add_timer(master, nb_cli_confirmed_commit_timeout, vty,
@@ -315,14 +320,19 @@ static int nb_cli_commit(struct vty *vty, bool force,
&vty->t_confirmed_commit_timeout);
}
- ret = nb_candidate_commit(vty->candidate_config, NB_CLIENT_CLI, true,
- comment, &transaction_id);
+ ret = nb_candidate_commit(vty->candidate_config, NB_CLIENT_CLI, vty,
+ true, comment, &transaction_id);
/* Map northbound return code to CLI return code. */
switch (ret) {
case NB_OK:
- nb_config_replace(vty->candidate_config_base, running_config,
- true);
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ nb_config_replace(vty->candidate_config_base,
+ running_config, true);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
+
vty_out(vty,
"%% Configuration committed successfully (Transaction ID #%u).\n\n",
transaction_id);
@@ -687,7 +697,12 @@ DEFPY (config_update,
return CMD_WARNING;
}
- nb_config_replace(vty->candidate_config_base, running_config, true);
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ nb_config_replace(vty->candidate_config_base, running_config,
+ true);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
vty_out(vty, "%% Candidate configuration updated successfully.\n\n");
@@ -787,8 +802,12 @@ DEFPY (show_config_running,
}
}
- nb_cli_show_config(vty, running_config, format, translator,
- !!with_defaults);
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ nb_cli_show_config(vty, running_config, format, translator,
+ !!with_defaults);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
return CMD_SUCCESS;
}
@@ -902,57 +921,68 @@ DEFPY (show_config_compare,
struct nb_config *config2, *config_transaction2 = NULL;
int ret = CMD_WARNING;
- if (c1_candidate)
- config1 = vty->candidate_config;
- else if (c1_running)
- config1 = running_config;
- else {
- config_transaction1 = nb_db_transaction_load(c1_tid);
- if (!config_transaction1) {
- vty_out(vty, "%% Transaction %u does not exist\n\n",
- (unsigned int)c1_tid);
- goto exit;
+ /*
+	 * For simplicity, lock the running configuration regardless of
+	 * whether it's going to be used or not.
+ */
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ if (c1_candidate)
+ config1 = vty->candidate_config;
+ else if (c1_running)
+ config1 = running_config;
+ else {
+ config_transaction1 = nb_db_transaction_load(c1_tid);
+ if (!config_transaction1) {
+ vty_out(vty,
+ "%% Transaction %u does not exist\n\n",
+ (unsigned int)c1_tid);
+ goto exit;
+ }
+ config1 = config_transaction1;
}
- config1 = config_transaction1;
- }
- if (c2_candidate)
- config2 = vty->candidate_config;
- else if (c2_running)
- config2 = running_config;
- else {
- config_transaction2 = nb_db_transaction_load(c2_tid);
- if (!config_transaction2) {
- vty_out(vty, "%% Transaction %u does not exist\n\n",
- (unsigned int)c2_tid);
- goto exit;
+ if (c2_candidate)
+ config2 = vty->candidate_config;
+ else if (c2_running)
+ config2 = running_config;
+ else {
+ config_transaction2 = nb_db_transaction_load(c2_tid);
+ if (!config_transaction2) {
+ vty_out(vty,
+ "%% Transaction %u does not exist\n\n",
+ (unsigned int)c2_tid);
+ goto exit;
+ }
+ config2 = config_transaction2;
}
- config2 = config_transaction2;
- }
- if (json)
- format = NB_CFG_FMT_JSON;
- else if (xml)
- format = NB_CFG_FMT_XML;
- else
- format = NB_CFG_FMT_CMDS;
+ if (json)
+ format = NB_CFG_FMT_JSON;
+ else if (xml)
+ format = NB_CFG_FMT_XML;
+ else
+ format = NB_CFG_FMT_CMDS;
- if (translator_family) {
- translator = yang_translator_find(translator_family);
- if (!translator) {
- vty_out(vty, "%% Module translator \"%s\" not found\n",
- translator_family);
- goto exit;
+ if (translator_family) {
+ translator = yang_translator_find(translator_family);
+ if (!translator) {
+ vty_out(vty,
+ "%% Module translator \"%s\" not found\n",
+ translator_family);
+ goto exit;
+ }
}
- }
- ret = nb_cli_show_config_compare(vty, config1, config2, format,
- translator);
-exit:
- if (config_transaction1)
- nb_config_free(config_transaction1);
- if (config_transaction2)
- nb_config_free(config_transaction2);
+ ret = nb_cli_show_config_compare(vty, config1, config2, format,
+ translator);
+ exit:
+ if (config_transaction1)
+ nb_config_free(config_transaction1);
+ if (config_transaction2)
+ nb_config_free(config_transaction2);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
return ret;
}
@@ -1509,7 +1539,7 @@ static int nb_cli_rollback_configuration(struct vty *vty,
snprintf(comment, sizeof(comment), "Rollback to transaction %u",
transaction_id);
- ret = nb_candidate_commit(candidate, NB_CLIENT_CLI, true, comment,
+ ret = nb_candidate_commit(candidate, NB_CLIENT_CLI, vty, true, comment,
NULL);
nb_config_free(candidate);
switch (ret) {
diff --git a/lib/northbound_confd.c b/lib/northbound_confd.c
index 0df286511e..e9669fc7e1 100644
--- a/lib/northbound_confd.c
+++ b/lib/northbound_confd.c
@@ -289,7 +289,11 @@ static int frr_confd_cdb_read_cb_prepare(int fd, int *subp, int reslen)
struct cdb_iter_args iter_args;
int ret;
- candidate = nb_config_dup(running_config);
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ candidate = nb_config_dup(running_config);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
/* Iterate over all configuration changes. */
iter_args.candidate = candidate;
@@ -322,7 +326,7 @@ static int frr_confd_cdb_read_cb_prepare(int fd, int *subp, int reslen)
*/
transaction = NULL;
ret = nb_candidate_commit_prepare(candidate, NB_CLIENT_CONFD, NULL,
- &transaction);
+ NULL, &transaction);
if (ret != NB_OK && ret != NB_ERR_NO_CHANGES) {
enum confd_errcode errcode;
const char *errmsg;
diff --git a/lib/northbound_grpc.cpp b/lib/northbound_grpc.cpp
new file mode 100644
index 0000000000..a55da23dd1
--- /dev/null
+++ b/lib/northbound_grpc.cpp
@@ -0,0 +1,936 @@
+//
+// Copyright (C) 2019 NetDEF, Inc.
+// Renato Westphal
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 2 of the License, or (at your option)
+// any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+// more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; see the file COPYING; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+//
+
+#include <zebra.h>
+
+#include "log.h"
+#include "libfrr.h"
+#include "version.h"
+#include "command.h"
+#include "lib_errors.h"
+#include "northbound.h"
+#include "northbound_db.h"
+
+#include <iostream>
+#include <sstream>
+#include <memory>
+#include <string>
+
+#include <grpcpp/grpcpp.h>
+#include "grpc/frr-northbound.grpc.pb.h"
+
+#define GRPC_DEFAULT_PORT 50051
+
+/*
+ * NOTE: we can't use the FRR debugging infrastructure here since it uses
+ * atomics and C++ has a different atomics API. Enable gRPC debugging
+ * unconditionally until we figure out a way to solve this problem.
+ */
+static bool nb_dbg_client_grpc = true;
+
+static pthread_t grpc_pthread;
+
+class NorthboundImpl final : public frr::Northbound::Service
+{
+ public:
+ NorthboundImpl(void)
+ {
+ _nextCandidateId = 0;
+ }
+
+	~NorthboundImpl(void)
+	{
+		// Delete candidates. delete_candidate() erases map entries,
+		// so don't hold an iterator across the call.
+		while (!_candidates.empty())
+			delete_candidate(&_candidates.begin()->second);
+	}
+
+ grpc::Status
+ GetCapabilities(grpc::ServerContext *context,
+ frr::GetCapabilitiesRequest const *request,
+ frr::GetCapabilitiesResponse *response) override
+ {
+ if (nb_dbg_client_grpc)
+ zlog_debug("received RPC GetCapabilities()");
+
+ // Response: string frr_version = 1;
+ response->set_frr_version(FRR_VERSION);
+
+ // Response: bool rollback_support = 2;
+#ifdef HAVE_CONFIG_ROLLBACKS
+ response->set_rollback_support(true);
+#else
+ response->set_rollback_support(false);
+#endif
+
+ // Response: repeated ModuleData supported_modules = 3;
+ struct yang_module *module;
+ RB_FOREACH (module, yang_modules, &yang_modules) {
+ auto m = response->add_supported_modules();
+
+ m->set_name(module->name);
+ if (module->info->rev_size)
+ m->set_revision(module->info->rev[0].date);
+ m->set_organization(module->info->org);
+ }
+
+ // Response: repeated Encoding supported_encodings = 4;
+ response->add_supported_encodings(frr::JSON);
+ response->add_supported_encodings(frr::XML);
+
+ return grpc::Status::OK;
+ }
+
+ grpc::Status Get(grpc::ServerContext *context,
+ frr::GetRequest const *request,
+ grpc::ServerWriter<frr::GetResponse> *writer) override
+ {
+ // Request: DataType type = 1;
+ int type = request->type();
+ // Request: Encoding encoding = 2;
+ frr::Encoding encoding = request->encoding();
+ // Request: bool with_defaults = 3;
+ bool with_defaults = request->with_defaults();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC Get(type: %u, encoding: %u, with_defaults: %u)",
+ type, encoding, with_defaults);
+
+ // Request: repeated string path = 4;
+ auto paths = request->path();
+ for (const std::string &path : paths) {
+ frr::GetResponse response;
+ grpc::Status status;
+
+ // Response: int64 timestamp = 1;
+ response.set_timestamp(time(NULL));
+
+ // Response: DataTree data = 2;
+ auto *data = response.mutable_data();
+ data->set_encoding(request->encoding());
+ status = get_path(data, path, type,
+ encoding2lyd_format(encoding),
+ with_defaults);
+
+ // Something went wrong...
+ if (!status.ok())
+ return status;
+
+ writer->Write(response);
+ }
+
+ if (nb_dbg_client_grpc)
+ zlog_debug("received RPC Get() end");
+
+ return grpc::Status::OK;
+ }
+
+ grpc::Status
+ CreateCandidate(grpc::ServerContext *context,
+ frr::CreateCandidateRequest const *request,
+ frr::CreateCandidateResponse *response) override
+ {
+ if (nb_dbg_client_grpc)
+ zlog_debug("received RPC CreateCandidate()");
+
+ struct candidate *candidate = create_candidate();
+ if (!candidate)
+ return grpc::Status(
+ grpc::StatusCode::RESOURCE_EXHAUSTED,
+ "Can't create candidate configuration");
+
+ // Response: uint32 candidate_id = 1;
+ response->set_candidate_id(candidate->id);
+
+ return grpc::Status::OK;
+ }
+
+ grpc::Status
+ DeleteCandidate(grpc::ServerContext *context,
+ frr::DeleteCandidateRequest const *request,
+ frr::DeleteCandidateResponse *response) override
+ {
+ // Request: uint32 candidate_id = 1;
+ uint32_t candidate_id = request->candidate_id();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC DeleteCandidate(candidate_id: %u)",
+ candidate_id);
+
+ struct candidate *candidate = get_candidate(candidate_id);
+ if (!candidate)
+ return grpc::Status(
+ grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found");
+
+ delete_candidate(candidate);
+
+ return grpc::Status::OK;
+ }
+
+ grpc::Status
+ UpdateCandidate(grpc::ServerContext *context,
+ frr::UpdateCandidateRequest const *request,
+ frr::UpdateCandidateResponse *response) override
+ {
+ // Request: uint32 candidate_id = 1;
+ uint32_t candidate_id = request->candidate_id();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC UpdateCandidate(candidate_id: %u)",
+ candidate_id);
+
+ struct candidate *candidate = get_candidate(candidate_id);
+ if (!candidate)
+ return grpc::Status(
+ grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found");
+
+ if (candidate->transaction)
+ return grpc::Status(
+ grpc::StatusCode::FAILED_PRECONDITION,
+ "candidate is in the middle of a transaction");
+
+ if (nb_candidate_update(candidate->config) != NB_OK)
+ return grpc::Status(
+ grpc::StatusCode::INTERNAL,
+ "failed to update candidate configuration");
+
+ return grpc::Status::OK;
+ }
+
+ grpc::Status
+ EditCandidate(grpc::ServerContext *context,
+ frr::EditCandidateRequest const *request,
+ frr::EditCandidateResponse *response) override
+ {
+ // Request: uint32 candidate_id = 1;
+ uint32_t candidate_id = request->candidate_id();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC EditCandidate(candidate_id: %u)",
+ candidate_id);
+
+ struct candidate *candidate = get_candidate(candidate_id);
+ if (!candidate)
+ return grpc::Status(
+ grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found");
+
+ // Create a copy of the candidate. For consistency, we need to
+ // ensure that either all changes are accepted or none are (in
+ // the event of an error).
+ struct nb_config *candidate_tmp =
+ nb_config_dup(candidate->config);
+
+ auto pvs = request->update();
+ for (const frr::PathValue &pv : pvs) {
+ if (yang_dnode_edit(candidate_tmp->dnode, pv.path(),
+ pv.value())
+ != 0) {
+ nb_config_free(candidate_tmp);
+ return grpc::Status(
+ grpc::StatusCode::INVALID_ARGUMENT,
+ "Failed to update \"" + pv.path()
+ + "\"");
+ }
+ }
+
+ pvs = request->delete_();
+ for (const frr::PathValue &pv : pvs) {
+ if (yang_dnode_delete(candidate_tmp->dnode, pv.path())
+ != 0) {
+ nb_config_free(candidate_tmp);
+ return grpc::Status(
+ grpc::StatusCode::INVALID_ARGUMENT,
+ "Failed to remove \"" + pv.path()
+ + "\"");
+ }
+ }
+
+ // No errors, accept all changes.
+ nb_config_replace(candidate->config, candidate_tmp, false);
+
+ return grpc::Status::OK;
+ }
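
EditCandidate stages all edits on a scratch duplicate and swaps it in only when every edit succeeded, keeping the candidate all-or-nothing. The same idiom in C as a sketch, where apply_all_edits() is a hypothetical helper:

    struct nb_config *tmp = nb_config_dup(candidate->config);

    if (apply_all_edits(tmp) != 0) {	/* hypothetical helper */
    	nb_config_free(tmp);		/* candidate stays untouched */
    	return -1;
    }
    nb_config_replace(candidate->config, tmp, false);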
+
+ grpc::Status
+ LoadToCandidate(grpc::ServerContext *context,
+ frr::LoadToCandidateRequest const *request,
+ frr::LoadToCandidateResponse *response) override
+ {
+ // Request: uint32 candidate_id = 1;
+ uint32_t candidate_id = request->candidate_id();
+ // Request: LoadType type = 2;
+ int load_type = request->type();
+ // Request: DataTree config = 3;
+ auto config = request->config();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC LoadToCandidate(candidate_id: %u)",
+ candidate_id);
+
+ struct candidate *candidate = get_candidate(candidate_id);
+ if (!candidate)
+ return grpc::Status(
+ grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found");
+
+ struct lyd_node *dnode = dnode_from_data_tree(&config, true);
+ if (!dnode)
+ return grpc::Status(
+ grpc::StatusCode::INTERNAL,
+ "Failed to parse the configuration");
+
+ struct nb_config *loaded_config = nb_config_new(dnode);
+
+ if (load_type == frr::LoadToCandidateRequest::REPLACE)
+ nb_config_replace(candidate->config, loaded_config,
+ false);
+ else if (nb_config_merge(candidate->config, loaded_config,
+ false)
+ != NB_OK)
+ return grpc::Status(
+ grpc::StatusCode::INTERNAL,
+ "Failed to merge the loaded configuration");
+
+ return grpc::Status::OK;
+ }
+
+ grpc::Status Commit(grpc::ServerContext *context,
+ frr::CommitRequest const *request,
+ frr::CommitResponse *response) override
+ {
+ // Request: uint32 candidate_id = 1;
+ uint32_t candidate_id = request->candidate_id();
+ // Request: Phase phase = 2;
+ int phase = request->phase();
+ // Request: string comment = 3;
+ const std::string comment = request->comment();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug("received RPC Commit(candidate_id: %u)",
+ candidate_id);
+
+ // Find candidate configuration.
+ struct candidate *candidate = get_candidate(candidate_id);
+ if (!candidate)
+ return grpc::Status(
+ grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found");
+
+ int ret = NB_OK;
+ uint32_t transaction_id = 0;
+
+ // Check for misuse of the two-phase commit protocol.
+ switch (phase) {
+ case frr::CommitRequest::PREPARE:
+ case frr::CommitRequest::ALL:
+ if (candidate->transaction)
+ return grpc::Status(
+ grpc::StatusCode::FAILED_PRECONDITION,
+ "pending transaction in progress");
+ break;
+ case frr::CommitRequest::ABORT:
+ case frr::CommitRequest::APPLY:
+ if (!candidate->transaction)
+ return grpc::Status(
+ grpc::StatusCode::FAILED_PRECONDITION,
+ "no transaction in progress");
+ break;
+ default:
+ break;
+ }
+
+ // Execute the user request.
+ switch (phase) {
+ case frr::CommitRequest::VALIDATE:
+ ret = nb_candidate_validate(candidate->config);
+ break;
+ case frr::CommitRequest::PREPARE:
+ ret = nb_candidate_commit_prepare(
+ candidate->config, NB_CLIENT_GRPC, NULL,
+ comment.c_str(), &candidate->transaction);
+ break;
+ case frr::CommitRequest::ABORT:
+ nb_candidate_commit_abort(candidate->transaction);
+ break;
+ case frr::CommitRequest::APPLY:
+ nb_candidate_commit_apply(candidate->transaction, true,
+ &transaction_id);
+ break;
+ case frr::CommitRequest::ALL:
+ ret = nb_candidate_commit(
+ candidate->config, NB_CLIENT_GRPC, NULL, true,
+ comment.c_str(), &transaction_id);
+ break;
+ }
+
+ // Map northbound error codes to gRPC error codes.
+ switch (ret) {
+ case NB_ERR_NO_CHANGES:
+ return grpc::Status(
+ grpc::StatusCode::ABORTED,
+ "No configuration changes detected");
+ case NB_ERR_LOCKED:
+ return grpc::Status(
+ grpc::StatusCode::UNAVAILABLE,
+ "There's already a transaction in progress");
+ case NB_ERR_VALIDATION:
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "Validation error");
+ case NB_ERR_RESOURCE:
+ return grpc::Status(
+ grpc::StatusCode::RESOURCE_EXHAUSTED,
+				"Failed to allocate resources");
+ case NB_ERR:
+ return grpc::Status(grpc::StatusCode::INTERNAL,
+ "Internal error");
+ default:
+ break;
+ }
+
+ // Response: uint32 transaction_id = 1;
+ if (transaction_id)
+ response->set_transaction_id(transaction_id);
+
+ return grpc::Status::OK;
+ }
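
The PREPARE/APPLY/ABORT phases map one-to-one onto the C API; driving the protocol by hand looks roughly like what nb_candidate_commit() itself does:

    struct nb_transaction *transaction = NULL;
    uint32_t transaction_id = 0;
    int ret;

    ret = nb_candidate_commit_prepare(candidate, NB_CLIENT_GRPC, NULL,
    				      "two-phase sketch", &transaction);
    if (ret == NB_OK)
    	nb_candidate_commit_apply(transaction, true, &transaction_id);
    else if (transaction != NULL)
    	nb_candidate_commit_abort(transaction);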
+
+ grpc::Status
+ ListTransactions(grpc::ServerContext *context,
+ frr::ListTransactionsRequest const *request,
+ grpc::ServerWriter<frr::ListTransactionsResponse>
+ *writer) override
+ {
+ if (nb_dbg_client_grpc)
+ zlog_debug("received RPC ListTransactions()");
+
+ nb_db_transactions_iterate(list_transactions_cb, writer);
+
+ return grpc::Status::OK;
+ }
+
+ grpc::Status
+ GetTransaction(grpc::ServerContext *context,
+ frr::GetTransactionRequest const *request,
+ frr::GetTransactionResponse *response) override
+ {
+ struct nb_config *nb_config;
+
+ // Request: uint32 transaction_id = 1;
+ uint32_t transaction_id = request->transaction_id();
+ // Request: Encoding encoding = 2;
+ frr::Encoding encoding = request->encoding();
+ // Request: bool with_defaults = 3;
+ bool with_defaults = request->with_defaults();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug(
+ "received RPC GetTransaction(transaction_id: %u, encoding: %u)",
+ transaction_id, encoding);
+
+ // Load configuration from the transactions database.
+ nb_config = nb_db_transaction_load(transaction_id);
+ if (!nb_config)
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "Transaction not found");
+
+ // Response: DataTree config = 1;
+ auto config = response->mutable_config();
+ config->set_encoding(encoding);
+
+ // Dump data using the requested format.
+ if (data_tree_from_dnode(config, nb_config->dnode,
+ encoding2lyd_format(encoding),
+ with_defaults)
+ != 0) {
+ nb_config_free(nb_config);
+ return grpc::Status(grpc::StatusCode::INTERNAL,
+ "Failed to dump data");
+ }
+
+ nb_config_free(nb_config);
+
+ return grpc::Status::OK;
+ }
+
+ grpc::Status LockConfig(grpc::ServerContext *context,
+ frr::LockConfigRequest const *request,
+ frr::LockConfigResponse *response) override
+ {
+ if (nb_dbg_client_grpc)
+ zlog_debug("received RPC LockConfig()");
+
+ if (nb_running_lock(NB_CLIENT_GRPC, NULL))
+ return grpc::Status(
+ grpc::StatusCode::FAILED_PRECONDITION,
+ "running configuration is locked already");
+
+ return grpc::Status::OK;
+ }
+
+ grpc::Status UnlockConfig(grpc::ServerContext *context,
+ frr::UnlockConfigRequest const *request,
+ frr::UnlockConfigResponse *response) override
+ {
+ if (nb_dbg_client_grpc)
+ zlog_debug("received RPC UnlockConfig()");
+
+ if (nb_running_unlock(NB_CLIENT_GRPC, NULL))
+ return grpc::Status(
+ grpc::StatusCode::FAILED_PRECONDITION,
+ "failed to unlock the running configuration");
+
+ return grpc::Status::OK;
+ }
+
+ grpc::Status Execute(grpc::ServerContext *context,
+ frr::ExecuteRequest const *request,
+ frr::ExecuteResponse *response) override
+ {
+ struct nb_node *nb_node;
+ struct list *input_list;
+ struct list *output_list;
+ struct listnode *node;
+ struct yang_data *data;
+ const char *xpath;
+
+ // Request: string path = 1;
+ xpath = request->path().c_str();
+
+ if (nb_dbg_client_grpc)
+ zlog_debug("received RPC Execute(path: \"%s\")", xpath);
+
+ if (request->path().empty())
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "Data path is empty");
+
+ nb_node = nb_node_find(xpath);
+ if (!nb_node)
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "Unknown data path");
+
+ input_list = yang_data_list_new();
+ output_list = yang_data_list_new();
+
+ // Read input parameters.
+ auto input = request->input();
+ for (const frr::PathValue &pv : input) {
+ // Request: repeated PathValue input = 2;
+ data = yang_data_new(pv.path().c_str(),
+ pv.value().c_str());
+ listnode_add(input_list, data);
+ }
+
+ // Execute callback registered for this XPath.
+ if (nb_node->cbs.rpc(xpath, input_list, output_list) != NB_OK) {
+ flog_warn(EC_LIB_NB_CB_RPC,
+ "%s: rpc callback failed: %s", __func__,
+ xpath);
+ list_delete(&input_list);
+ list_delete(&output_list);
+ return grpc::Status(grpc::StatusCode::INTERNAL,
+ "RPC failed");
+ }
+
+ // Process output parameters.
+ for (ALL_LIST_ELEMENTS_RO(output_list, node, data)) {
+ // Response: repeated PathValue output = 1;
+ frr::PathValue *pv = response->add_output();
+ pv->set_path(data->xpath);
+ pv->set_value(data->value);
+ }
+
+ // Release memory.
+ list_delete(&input_list);
+ list_delete(&output_list);
+
+ return grpc::Status::OK;
+ }
+
+ private:
+ struct candidate {
+ uint32_t id;
+ struct nb_config *config;
+ struct nb_transaction *transaction;
+ };
+ std::map<uint32_t, struct candidate> _candidates;
+ uint32_t _nextCandidateId;
+
+ static int yang_dnode_edit(struct lyd_node *dnode,
+ const std::string &path,
+ const std::string &value)
+ {
+ ly_errno = LY_SUCCESS;
+ dnode = lyd_new_path(dnode, ly_native_ctx, path.c_str(),
+ (void *)value.c_str(),
+ (LYD_ANYDATA_VALUETYPE)0,
+ LYD_PATH_OPT_UPDATE);
+ if (!dnode && ly_errno != LY_SUCCESS) {
+ flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
+ __func__);
+ return -1;
+ }
+
+ return 0;
+ }
+
+ static int yang_dnode_delete(struct lyd_node *dnode,
+ const std::string &path)
+ {
+ dnode = yang_dnode_get(dnode, path.c_str());
+ if (!dnode)
+ return -1;
+
+ lyd_free(dnode);
+
+ return 0;
+ }
+
+ static LYD_FORMAT encoding2lyd_format(enum frr::Encoding encoding)
+ {
+		switch (encoding) {
+		case frr::JSON:
+			return LYD_JSON;
+		case frr::XML:
+			return LYD_XML;
+		default:
+			// Unreachable with a well-formed request; keeps
+			// -Wreturn-type quiet.
+			return LYD_JSON;
+		}
+	}
+
+ static int get_oper_data_cb(const struct lys_node *snode,
+ struct yang_translator *translator,
+ struct yang_data *data, void *arg)
+ {
+ struct lyd_node *dnode = static_cast<struct lyd_node *>(arg);
+ int ret = yang_dnode_edit(dnode, data->xpath, data->value);
+ yang_data_free(data);
+
+ return (ret == 0) ? NB_OK : NB_ERR;
+ }
+
+ static void list_transactions_cb(void *arg, int transaction_id,
+ const char *client_name,
+ const char *date, const char *comment)
+ {
+ grpc::ServerWriter<frr::ListTransactionsResponse> *writer =
+ static_cast<grpc::ServerWriter<
+ frr::ListTransactionsResponse> *>(arg);
+ frr::ListTransactionsResponse response;
+
+ // Response: uint32 id = 1;
+ response.set_id(transaction_id);
+
+ // Response: string client = 2;
+ response.set_client(client_name);
+
+ // Response: string date = 3;
+ response.set_date(date);
+
+ // Response: string comment = 4;
+ response.set_comment(comment);
+
+ writer->Write(response);
+ }
+
+ static int data_tree_from_dnode(frr::DataTree *dt,
+ const struct lyd_node *dnode,
+ LYD_FORMAT lyd_format,
+ bool with_defaults)
+ {
+ char *strp;
+ int options = 0;
+
+ SET_FLAG(options, LYP_FORMAT | LYP_WITHSIBLINGS);
+ if (with_defaults)
+ SET_FLAG(options, LYP_WD_ALL);
+ else
+ SET_FLAG(options, LYP_WD_TRIM);
+
+ if (lyd_print_mem(&strp, dnode, lyd_format, options) == 0) {
+ if (strp) {
+ dt->set_data(strp);
+ free(strp);
+ }
+ return 0;
+ }
+
+ return -1;
+ }
+
+ static struct lyd_node *dnode_from_data_tree(const frr::DataTree *dt,
+ bool config_only)
+ {
+ struct lyd_node *dnode;
+ int options;
+
+ if (config_only)
+ options = LYD_OPT_CONFIG;
+ else
+ options = LYD_OPT_DATA | LYD_OPT_DATA_NO_YANGLIB;
+
+ dnode = lyd_parse_mem(ly_native_ctx, dt->data().c_str(),
+ encoding2lyd_format(dt->encoding()),
+ options);
+
+ return dnode;
+ }
+
+ static struct lyd_node *get_dnode_config(const std::string &path)
+ {
+ struct lyd_node *dnode;
+
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ dnode = yang_dnode_get(running_config->dnode,
+ path.empty() ? NULL
+ : path.c_str());
+ if (dnode)
+ dnode = yang_dnode_dup(dnode);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
+
+ return dnode;
+ }
+
+ static struct lyd_node *get_dnode_state(const std::string &path)
+ {
+ struct lyd_node *dnode;
+
+ dnode = yang_dnode_new(ly_native_ctx, false);
+ if (nb_oper_data_iterate(path.c_str(), NULL, 0,
+ get_oper_data_cb, dnode)
+ != NB_OK) {
+ yang_dnode_free(dnode);
+ return NULL;
+ }
+
+ return dnode;
+ }
+
+ static grpc::Status get_path(frr::DataTree *dt, const std::string &path,
+ int type, LYD_FORMAT lyd_format,
+ bool with_defaults)
+ {
+ struct lyd_node *dnode_config = NULL;
+ struct lyd_node *dnode_state = NULL;
+ struct lyd_node *dnode_final;
+
+ // Configuration data.
+ if (type == frr::GetRequest_DataType_ALL
+ || type == frr::GetRequest_DataType_CONFIG) {
+ dnode_config = get_dnode_config(path);
+ if (!dnode_config)
+ return grpc::Status(
+ grpc::StatusCode::INVALID_ARGUMENT,
+ "Data path not found");
+ }
+
+ // Operational data.
+ if (type == frr::GetRequest_DataType_ALL
+ || type == frr::GetRequest_DataType_STATE) {
+ dnode_state = get_dnode_state(path);
+ if (!dnode_state) {
+ if (dnode_config)
+ yang_dnode_free(dnode_config);
+ return grpc::Status(
+ grpc::StatusCode::INVALID_ARGUMENT,
+ "Failed to fetch operational data");
+ }
+ }
+
+ switch (type) {
+ case frr::GetRequest_DataType_ALL:
+ //
+ // Combine configuration and state data into a single
+ // dnode.
+ //
+ if (lyd_merge(dnode_state, dnode_config,
+ LYD_OPT_EXPLICIT)
+ != 0) {
+ yang_dnode_free(dnode_state);
+ yang_dnode_free(dnode_config);
+ return grpc::Status(
+ grpc::StatusCode::INTERNAL,
+ "Failed to merge configuration and state data");
+ }
+
+ dnode_final = dnode_state;
+ break;
+ case frr::GetRequest_DataType_CONFIG:
+ dnode_final = dnode_config;
+ break;
+ case frr::GetRequest_DataType_STATE:
+ dnode_final = dnode_state;
+ break;
+ }
+
+ // Validate data to create implicit default nodes if necessary.
+ int validate_opts = 0;
+ if (type == frr::GetRequest_DataType_CONFIG)
+ validate_opts = LYD_OPT_CONFIG;
+ else
+ validate_opts = LYD_OPT_DATA | LYD_OPT_DATA_NO_YANGLIB;
+ lyd_validate(&dnode_final, validate_opts, ly_native_ctx);
+
+ // Dump data using the requested format.
+ int ret = data_tree_from_dnode(dt, dnode_final, lyd_format,
+ with_defaults);
+ yang_dnode_free(dnode_final);
+ if (ret != 0)
+ return grpc::Status(grpc::StatusCode::INTERNAL,
+ "Failed to dump data");
+
+ return grpc::Status::OK;
+ }
+
+ struct candidate *create_candidate(void)
+ {
+ uint32_t candidate_id = ++_nextCandidateId;
+
+ // Check for overflow.
+ // TODO: implement an algorithm for unique reusable IDs.
+ if (candidate_id == 0)
+ return NULL;
+
+ struct candidate *candidate = &_candidates[candidate_id];
+ candidate->id = candidate_id;
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ candidate->config = nb_config_dup(running_config);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
+ candidate->transaction = NULL;
+
+ return candidate;
+ }
+
+	void delete_candidate(struct candidate *candidate)
+	{
+		// Abort any pending transaction before freeing the
+		// configuration it still references.
+		if (candidate->transaction)
+			nb_candidate_commit_abort(candidate->transaction);
+		nb_config_free(candidate->config);
+		// Erase last: "candidate" points into the map, so erasing
+		// earlier would invalidate it.
+		_candidates.erase(candidate->id);
+	}
+
+	struct candidate *get_candidate(uint32_t candidate_id)
+	{
+		// Single map lookup instead of count() + operator[].
+		auto it = _candidates.find(candidate_id);
+
+		return it == _candidates.end() ? NULL : &it->second;
+	}
+};
+
+static void *grpc_pthread_start(void *arg)
+{
+ unsigned long *port = static_cast<unsigned long *>(arg);
+ NorthboundImpl service;
+ std::stringstream server_address;
+
+ server_address << "0.0.0.0:" << *port;
+
+ grpc::ServerBuilder builder;
+ builder.AddListeningPort(server_address.str(),
+ grpc::InsecureServerCredentials());
+ builder.RegisterService(&service);
+
+ std::unique_ptr<grpc::Server> server(builder.BuildAndStart());
+
+ zlog_notice("gRPC server listening on %s",
+ server_address.str().c_str());
+
+ server->Wait();
+
+ return NULL;
+}
+
+static int frr_grpc_init(unsigned long *port)
+{
+ /* Create a pthread for gRPC since it runs its own event loop. */
+ if (pthread_create(&grpc_pthread, NULL, grpc_pthread_start, port)) {
+ flog_err(EC_LIB_SYSTEM_CALL, "%s: error creating pthread: %s",
+ __func__, safe_strerror(errno));
+ return -1;
+ }
+ pthread_detach(grpc_pthread);
+
+ return 0;
+}
+
+static int frr_grpc_finish(void)
+{
+ // TODO: cancel the gRPC pthreads gracefully.
+
+ return 0;
+}
+
+static int frr_grpc_module_late_init(struct thread_master *tm)
+{
+ static unsigned long port = GRPC_DEFAULT_PORT;
+ const char *args = THIS_MODULE->load_args;
+
+ // Parse port number.
+ if (args) {
+ try {
+ port = std::stoul(args);
+ if (port < 1024)
+ throw std::invalid_argument(
+ "can't use privileged port");
+ if (port > UINT16_MAX)
+ throw std::invalid_argument(
+ "port number is too big");
+ } catch (std::exception &e) {
+ flog_err(EC_LIB_GRPC_INIT,
+ "%s: failed to parse port number: %s",
+ __func__, e.what());
+ goto error;
+ }
+ }
+
+ if (frr_grpc_init(&port) < 0)
+ goto error;
+
+ hook_register(frr_fini, frr_grpc_finish);
+
+ return 0;
+
+error:
+ flog_err(EC_LIB_GRPC_INIT, "failed to initialize the gRPC module");
+ return -1;
+}
+
+static int frr_grpc_module_init(void)
+{
+ hook_register(frr_late_init, frr_grpc_module_late_init);
+
+ return 0;
+}
+
+FRR_MODULE_SETUP(.name = "frr_grpc", .version = FRR_VERSION,
+ .description = "FRR gRPC northbound module",
+ .init = frr_grpc_module_init, )
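
The listen port is taken from the module load arguments: starting a daemon with something like -M grpc:50052 (assuming the usual -M <module>:<args> loader syntax) binds the server to 0.0.0.0:50052, while a bare -M grpc falls back to GRPC_DEFAULT_PORT (50051).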
diff --git a/lib/northbound_sysrepo.c b/lib/northbound_sysrepo.c
index 33b6c24782..44a55137f8 100644
--- a/lib/northbound_sysrepo.c
+++ b/lib/northbound_sysrepo.c
@@ -256,7 +256,11 @@ static int frr_sr_config_change_cb_verify(sr_session_ctx_t *session,
return ret;
}
- candidate = nb_config_dup(running_config);
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ candidate = nb_config_dup(running_config);
+ }
+ pthread_rwlock_unlock(&running_config->lock);
while ((ret = sr_get_change_next(session, it, &sr_op, &sr_old_val,
&sr_new_val))
@@ -282,15 +286,15 @@ static int frr_sr_config_change_cb_verify(sr_session_ctx_t *session,
* single event (SR_EV_ENABLED). This means we need to perform
* the full two-phase commit protocol in one go here.
*/
- ret = nb_candidate_commit(candidate, NB_CLIENT_SYSREPO, true,
- NULL, NULL);
+ ret = nb_candidate_commit(candidate, NB_CLIENT_SYSREPO, NULL,
+ true, NULL, NULL);
} else {
/*
* Validate the configuration changes and allocate all resources
* required to apply them.
*/
ret = nb_candidate_commit_prepare(candidate, NB_CLIENT_SYSREPO,
- NULL, &transaction);
+ NULL, NULL, &transaction);
}
/* Map northbound return code to sysrepo return code. */
diff --git a/lib/openbsd-tree.c b/lib/openbsd-tree.c
index eadef9902b..ddcc59fa8f 100644
--- a/lib/openbsd-tree.c
+++ b/lib/openbsd-tree.c
@@ -435,7 +435,8 @@ void *_rb_insert(const struct rb_type *t, struct rbt_tree *rbt, void *elm)
}
/* Finds the node with the same key as elm */
-void *_rb_find(const struct rb_type *t, struct rbt_tree *rbt, const void *key)
+void *_rb_find(const struct rb_type *t, const struct rbt_tree *rbt,
+ const void *key)
{
struct rb_entry *tmp = RBH_ROOT(rbt);
void *node;
@@ -456,7 +457,8 @@ void *_rb_find(const struct rb_type *t, struct rbt_tree *rbt, const void *key)
}
/* Finds the first node greater than or equal to the search key */
-void *_rb_nfind(const struct rb_type *t, struct rbt_tree *rbt, const void *key)
+void *_rb_nfind(const struct rb_type *t, const struct rbt_tree *rbt,
+ const void *key)
{
struct rb_entry *tmp = RBH_ROOT(rbt);
void *node;
@@ -522,14 +524,14 @@ void *_rb_prev(const struct rb_type *t, void *elm)
return (rbe == NULL ? NULL : rb_e2n(t, rbe));
}
-void *_rb_root(const struct rb_type *t, struct rbt_tree *rbt)
+void *_rb_root(const struct rb_type *t, const struct rbt_tree *rbt)
{
struct rb_entry *rbe = RBH_ROOT(rbt);
return (rbe == NULL ? rbe : rb_e2n(t, rbe));
}
-void *_rb_min(const struct rb_type *t, struct rbt_tree *rbt)
+void *_rb_min(const struct rb_type *t, const struct rbt_tree *rbt)
{
struct rb_entry *rbe = RBH_ROOT(rbt);
struct rb_entry *parent = NULL;
@@ -542,7 +544,7 @@ void *_rb_min(const struct rb_type *t, struct rbt_tree *rbt)
return (parent == NULL ? NULL : rb_e2n(t, parent));
}
-void *_rb_max(const struct rb_type *t, struct rbt_tree *rbt)
+void *_rb_max(const struct rb_type *t, const struct rbt_tree *rbt)
{
struct rb_entry *rbe = RBH_ROOT(rbt);
struct rb_entry *parent = NULL;
diff --git a/lib/openbsd-tree.h b/lib/openbsd-tree.h
index d2f0781333..832a10141e 100644
--- a/lib/openbsd-tree.h
+++ b/lib/openbsd-tree.h
@@ -364,18 +364,18 @@ static inline void _rb_init(struct rbt_tree *rbt)
rbt->rbt_root = NULL;
}
-static inline int _rb_empty(struct rbt_tree *rbt)
+static inline int _rb_empty(const struct rbt_tree *rbt)
{
return (rbt->rbt_root == NULL);
}
void *_rb_insert(const struct rb_type *, struct rbt_tree *, void *);
void *_rb_remove(const struct rb_type *, struct rbt_tree *, void *);
-void *_rb_find(const struct rb_type *, struct rbt_tree *, const void *);
-void *_rb_nfind(const struct rb_type *, struct rbt_tree *, const void *);
-void *_rb_root(const struct rb_type *, struct rbt_tree *);
-void *_rb_min(const struct rb_type *, struct rbt_tree *);
-void *_rb_max(const struct rb_type *, struct rbt_tree *);
+void *_rb_find(const struct rb_type *, const struct rbt_tree *, const void *);
+void *_rb_nfind(const struct rb_type *, const struct rbt_tree *, const void *);
+void *_rb_root(const struct rb_type *, const struct rbt_tree *);
+void *_rb_min(const struct rb_type *, const struct rbt_tree *);
+void *_rb_max(const struct rb_type *, const struct rbt_tree *);
void *_rb_next(const struct rb_type *, void *);
void *_rb_prev(const struct rb_type *, void *);
void *_rb_left(const struct rb_type *, void *);
@@ -401,56 +401,58 @@ int _rb_check(const struct rb_type *, void *, unsigned long);
__attribute__((__unused__)) static inline struct _type \
*_name##_RB_INSERT(struct _name *head, struct _type *elm) \
{ \
- return (struct _type *)_rb_insert( \
- _name##_RB_TYPE, &head->rbh_root, elm); \
+ return (struct _type *)_rb_insert(_name##_RB_TYPE, \
+ &head->rbh_root, elm); \
} \
\
__attribute__((__unused__)) static inline struct _type \
*_name##_RB_REMOVE(struct _name *head, struct _type *elm) \
{ \
- return (struct _type *)_rb_remove( \
- _name##_RB_TYPE, &head->rbh_root, elm); \
+ return (struct _type *)_rb_remove(_name##_RB_TYPE, \
+ &head->rbh_root, elm); \
} \
\
__attribute__((__unused__)) static inline struct _type \
- *_name##_RB_FIND(struct _name *head, const struct _type *key) \
+ *_name##_RB_FIND(const struct _name *head, \
+ const struct _type *key) \
{ \
- return (struct _type *)_rb_find( \
- _name##_RB_TYPE, &head->rbh_root, key); \
+ return (struct _type *)_rb_find(_name##_RB_TYPE, \
+ &head->rbh_root, key); \
} \
\
__attribute__((__unused__)) static inline struct _type \
- *_name##_RB_NFIND(struct _name *head, const struct _type *key) \
+ *_name##_RB_NFIND(const struct _name *head, \
+ const struct _type *key) \
{ \
- return (struct _type *)_rb_nfind( \
- _name##_RB_TYPE, &head->rbh_root, key); \
+ return (struct _type *)_rb_nfind(_name##_RB_TYPE, \
+ &head->rbh_root, key); \
} \
\
__attribute__((__unused__)) static inline struct _type \
- *_name##_RB_ROOT(struct _name *head) \
+ *_name##_RB_ROOT(const struct _name *head) \
{ \
- return (struct _type *)_rb_root( \
- _name##_RB_TYPE, &head->rbh_root); \
+ return (struct _type *)_rb_root(_name##_RB_TYPE, \
+ &head->rbh_root); \
} \
\
__attribute__((__unused__)) static inline int _name##_RB_EMPTY( \
- struct _name *head) \
+ const struct _name *head) \
{ \
return _rb_empty(&head->rbh_root); \
} \
\
__attribute__((__unused__)) static inline struct _type \
- *_name##_RB_MIN(struct _name *head) \
+ *_name##_RB_MIN(const struct _name *head) \
{ \
- return (struct _type *)_rb_min( \
- _name##_RB_TYPE, &head->rbh_root); \
+ return (struct _type *)_rb_min(_name##_RB_TYPE, \
+ &head->rbh_root); \
} \
\
__attribute__((__unused__)) static inline struct _type \
- *_name##_RB_MAX(struct _name *head) \
+ *_name##_RB_MAX(const struct _name *head) \
{ \
- return (struct _type *)_rb_max( \
- _name##_RB_TYPE, &head->rbh_root); \
+ return (struct _type *)_rb_max(_name##_RB_TYPE, \
+ &head->rbh_root); \
} \
\
__attribute__((__unused__)) static inline struct _type \
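
With the const qualifiers in place, read-only helpers can take a const tree head. A sketch against a hypothetical itree of struct item, assuming the usual RB_HEAD()/RB_GENERATE() boilerplate elsewhere:

    static struct item *item_lookup(const struct itree *head,
    				    struct item *key)
    {
    	/* itree_RB_FIND() now accepts a const head. */
    	return RB_FIND(itree, head, key);
    }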
diff --git a/lib/prefix.c b/lib/prefix.c
index 6b91969218..d2a4c3a432 100644
--- a/lib/prefix.c
+++ b/lib/prefix.c
@@ -1543,7 +1543,7 @@ char *prefix_mac2str(const struct ethaddr *mac, char *buf, int size)
return ptr;
}
-unsigned prefix_hash_key(void *pp)
+unsigned prefix_hash_key(const void *pp)
{
struct prefix copy;
diff --git a/lib/prefix.h b/lib/prefix.h
index d3c387e102..d57b43dac6 100644
--- a/lib/prefix.h
+++ b/lib/prefix.h
@@ -466,7 +466,7 @@ extern int is_zero_mac(struct ethaddr *mac);
extern int prefix_str2mac(const char *str, struct ethaddr *mac);
extern char *prefix_mac2str(const struct ethaddr *mac, char *buf, int size);
-extern unsigned prefix_hash_key(void *pp);
+extern unsigned prefix_hash_key(const void *pp);
extern int str_to_esi(const char *str, esi_t *esi);
extern char *esi_to_str(const esi_t *esi, char *buf, int size);
diff --git a/lib/privs.c b/lib/privs.c
index a19707b1c9..a3314c6c3c 100644
--- a/lib/privs.c
+++ b/lib/privs.c
@@ -917,7 +917,7 @@ void zprivs_init(struct zebra_privs_t *zprivs)
zprivs->user, zprivs->vty_group);
exit(1);
}
- if (i >= ngroups && ngroups < (int)ZEBRA_NUM_OF(groups)) {
+ if (i >= ngroups && ngroups < (int)array_size(groups)) {
groups[i] = zprivs_state.vtygrp;
}
}
diff --git a/lib/qobj.c b/lib/qobj.c
index 811645f3c3..3e3860a96a 100644
--- a/lib/qobj.c
+++ b/lib/qobj.c
@@ -27,21 +27,27 @@
#include "qobj.h"
#include "jhash.h"
-static pthread_rwlock_t nodes_lock;
-static struct hash *nodes = NULL;
-
-static unsigned int qobj_key(void *data)
+static uint32_t qobj_hash(const struct qobj_node *node)
{
- struct qobj_node *node = data;
- return (unsigned int)node->nid;
+ return (uint32_t)node->nid;
}
-static bool qobj_cmp(const void *a, const void *b)
+static int qobj_cmp(const struct qobj_node *na, const struct qobj_node *nb)
{
- const struct qobj_node *na = a, *nb = b;
- return na->nid == nb->nid;
+ if (na->nid < nb->nid)
+ return -1;
+ if (na->nid > nb->nid)
+ return 1;
+ return 0;
}
+DECLARE_HASH(qobj_nodes, struct qobj_node, nodehash,
+ qobj_cmp, qobj_hash)
+
+static pthread_rwlock_t nodes_lock;
+static struct qobj_nodes_head nodes = { };
+
+
void qobj_reg(struct qobj_node *node, struct qobj_nodetype *type)
{
node->type = type;
@@ -49,15 +55,15 @@ void qobj_reg(struct qobj_node *node, struct qobj_nodetype *type)
do {
node->nid = (uint64_t)random();
node->nid ^= (uint64_t)random() << 32;
- } while (!node->nid
- || hash_get(nodes, node, hash_alloc_intern) != node);
+ } while (!node->nid || qobj_nodes_find(&nodes, node));
+ qobj_nodes_add(&nodes, node);
pthread_rwlock_unlock(&nodes_lock);
}
void qobj_unreg(struct qobj_node *node)
{
pthread_rwlock_wrlock(&nodes_lock);
- hash_release(nodes, node);
+ qobj_nodes_del(&nodes, node);
pthread_rwlock_unlock(&nodes_lock);
}
@@ -65,7 +71,7 @@ struct qobj_node *qobj_get(uint64_t id)
{
struct qobj_node dummy = {.nid = id}, *rv;
pthread_rwlock_rdlock(&nodes_lock);
- rv = hash_lookup(nodes, &dummy);
+ rv = qobj_nodes_find(&nodes, &dummy);
pthread_rwlock_unlock(&nodes_lock);
return rv;
}
@@ -77,7 +83,7 @@ void *qobj_get_typed(uint64_t id, struct qobj_nodetype *type)
void *rv;
pthread_rwlock_rdlock(&nodes_lock);
- node = hash_lookup(nodes, &dummy);
+ node = qobj_nodes_find(&nodes, &dummy);
/* note: we explicitly hold the lock until after we have checked the
* type.
@@ -96,16 +102,14 @@ void *qobj_get_typed(uint64_t id, struct qobj_nodetype *type)
void qobj_init(void)
{
- if (!nodes) {
- pthread_rwlock_init(&nodes_lock, NULL);
- nodes = hash_create_size(16, qobj_key, qobj_cmp, "QOBJ Hash");
- }
+ pthread_rwlock_init(&nodes_lock, NULL);
+ qobj_nodes_init(&nodes);
}
void qobj_finish(void)
{
- hash_clean(nodes, NULL);
- hash_free(nodes);
- nodes = NULL;
+ struct qobj_node *node;
+ while ((node = qobj_nodes_pop(&nodes)))
+ qobj_nodes_del(&nodes, node);
pthread_rwlock_destroy(&nodes_lock);
}
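
The qobj changes above are a template for converting other lib/hash.c users to the typesafe hash; a condensed sketch of the same pattern with hypothetical names:

	PREDECL_HASH(widgets)

	struct widget {
		uint32_t id;
		struct widgets_item witem;	/* embedded linkage, like nodehash */
	};

	static int widget_cmp(const struct widget *a, const struct widget *b)
	{
		if (a->id < b->id)
			return -1;
		if (a->id > b->id)
			return 1;
		return 0;
	}

	static uint32_t widget_hash(const struct widget *w)
	{
		return w->id;
	}

	DECLARE_HASH(widgets, struct widget, witem, widget_cmp, widget_hash)

	static struct widgets_head widgets_all;

	/* lookups take a stack "dummy" carrying just the key, as in qobj_get() */
	static struct widget *widget_find(uint32_t id)
	{
		struct widget ref = { .id = id };

		return widgets_find(&widgets_all, &ref);
	}
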
diff --git a/lib/qobj.h b/lib/qobj.h
index d63988cbab..415eae02ef 100644
--- a/lib/qobj.h
+++ b/lib/qobj.h
@@ -21,6 +21,8 @@
#include <stdlib.h>
#include <stddef.h>
+#include "typesafe.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -69,6 +71,8 @@ struct qobj_nodetype_capnp {
};
#endif
+#include "typesafe.h"
+
/* each different kind of object will have a global variable of this type,
* which can be used by various other pieces to store type-related bits.
* type equality can be tested as pointer equality. (cf. QOBJ_GET_TYPESAFE)
@@ -79,9 +83,12 @@ struct qobj_nodetype {
RESERVED_SPACE_STRUCT(qobj_nodetype_capnp, capnp, 256)
};
+PREDECL_HASH(qobj_nodes)
+
/* anchor to be embedded somewhere in the object's struct */
struct qobj_node {
uint64_t nid;
+ struct qobj_nodes_item nodehash;
struct qobj_nodetype *type;
};
diff --git a/lib/route_types.txt b/lib/route_types.txt
index c5eff44ca7..59f3a91cf0 100644
--- a/lib/route_types.txt
+++ b/lib/route_types.txt
@@ -83,6 +83,7 @@ ZEBRA_ROUTE_SHARP, sharp, sharpd, 'D', 1, 1, 1, "SHARP"
ZEBRA_ROUTE_PBR, pbr, pbrd, 'F', 1, 1, 0, "PBR"
ZEBRA_ROUTE_BFD, bfd, bfdd, '-', 0, 0, 0, "BFD"
ZEBRA_ROUTE_OPENFABRIC, openfabric, fabricd, 'f', 1, 1, 1, "OpenFabric"
+ZEBRA_ROUTE_VRRP, vrrp, vrrpd, '-', 0, 0, 0, "VRRP"
ZEBRA_ROUTE_ALL, wildcard, none, '-', 0, 0, 0, "-"
@@ -110,4 +111,5 @@ ZEBRA_ROUTE_BABEL, "Babel routing protocol (Babel)"
ZEBRA_ROUTE_SHARP, "Super Happy Advanced Routing Protocol (sharpd)"
ZEBRA_ROUTE_PBR, "Policy Based Routing (PBR)"
 ZEBRA_ROUTE_BFD, "Bidirectional Forwarding Detection (BFD)"
+ZEBRA_ROUTE_VRRP, "Virtual Router Redundancy Protocol (VRRP)"
ZEBRA_ROUTE_OPENFABRIC, "OpenFabric Routing Protocol"
diff --git a/lib/routemap.c b/lib/routemap.c
index 4898a8d0fa..3542994e65 100644
--- a/lib/routemap.c
+++ b/lib/routemap.c
@@ -616,14 +616,14 @@ struct route_map_list {
void (*add_hook)(const char *);
void (*delete_hook)(const char *);
- void (*event_hook)(route_map_event_t, const char *);
+ void (*event_hook)(const char *);
};
/* Master list of route map. */
static struct route_map_list route_map_master = {NULL, NULL, NULL, NULL, NULL};
struct hash *route_map_master_hash = NULL;
-static unsigned int route_map_hash_key_make(void *p)
+static unsigned int route_map_hash_key_make(const void *p)
{
const struct route_map *map = p;
return string_hash_make(map->name);
@@ -673,7 +673,7 @@ struct route_map_dep {
/* Hashes maintaining dependency between various sublists used by route maps */
struct hash *route_map_dep_hash[ROUTE_MAP_DEP_MAX];
-static unsigned int route_map_dep_hash_make_key(void *p);
+static unsigned int route_map_dep_hash_make_key(const void *p);
static void route_map_clear_all_references(char *rmap_name);
static void route_map_rule_delete(struct route_map_rule_list *,
struct route_map_rule *);
@@ -902,10 +902,12 @@ static const char *route_map_type_str(enum route_map_type type)
case RMAP_DENY:
return "deny";
break;
- default:
+ case RMAP_ANY:
return "";
break;
}
+
+ return "";
}
static int route_map_empty(struct route_map *map)
@@ -1077,8 +1079,7 @@ static void route_map_index_delete(struct route_map_index *index, int notify)
/* Execute event hook. */
if (route_map_master.event_hook && notify) {
- (*route_map_master.event_hook)(RMAP_EVENT_INDEX_DELETED,
- index->map->name);
+ (*route_map_master.event_hook)(index->map->name);
route_map_notify_dependencies(index->map->name,
RMAP_EVENT_CALL_ADDED);
}
@@ -1137,8 +1138,7 @@ route_map_index_add(struct route_map *map, enum route_map_type type, int pref)
/* Execute event hook. */
if (route_map_master.event_hook) {
- (*route_map_master.event_hook)(RMAP_EVENT_INDEX_ADDED,
- map->name);
+ (*route_map_master.event_hook)(map->name);
route_map_notify_dependencies(map->name, RMAP_EVENT_CALL_ADDED);
}
return index;
@@ -1289,7 +1289,6 @@ int route_map_add_match(struct route_map_index *index, const char *match_name,
struct route_map_rule *next;
struct route_map_rule_cmd *cmd;
void *compile;
- int replaced = 0;
/* First lookup rule for add match statement. */
cmd = route_map_lookup_match(match_name);
@@ -1308,8 +1307,17 @@ int route_map_add_match(struct route_map_index *index, const char *match_name,
for (rule = index->match_list.head; rule; rule = next) {
next = rule->next;
if (rule->cmd == cmd) {
+			/* If the configured route-map match rule is exactly
+			 * the same as the existing configuration, then
+			 * ignore the duplicate configuration.
+			 */
+ if (strcmp(match_arg, rule->rule_str) == 0) {
+ if (cmd->func_free)
+ (*cmd->func_free)(compile);
+ return RMAP_COMPILE_SUCCESS;
+ }
+
route_map_rule_delete(&index->match_list, rule);
- replaced = 1;
}
}
@@ -1327,10 +1335,7 @@ int route_map_add_match(struct route_map_index *index, const char *match_name,
/* Execute event hook. */
if (route_map_master.event_hook) {
- (*route_map_master.event_hook)(
- replaced ? RMAP_EVENT_MATCH_REPLACED
- : RMAP_EVENT_MATCH_ADDED,
- index->map->name);
+ (*route_map_master.event_hook)(index->map->name);
route_map_notify_dependencies(index->map->name,
RMAP_EVENT_CALL_ADDED);
}
@@ -1355,9 +1360,7 @@ int route_map_delete_match(struct route_map_index *index,
route_map_rule_delete(&index->match_list, rule);
/* Execute event hook. */
if (route_map_master.event_hook) {
- (*route_map_master.event_hook)(
- RMAP_EVENT_MATCH_DELETED,
- index->map->name);
+ (*route_map_master.event_hook)(index->map->name);
route_map_notify_dependencies(
index->map->name,
RMAP_EVENT_CALL_ADDED);
@@ -1376,7 +1379,6 @@ int route_map_add_set(struct route_map_index *index, const char *set_name,
struct route_map_rule *next;
struct route_map_rule_cmd *cmd;
void *compile;
- int replaced = 0;
cmd = route_map_lookup_set(set_name);
if (cmd == NULL)
@@ -1395,10 +1397,8 @@ int route_map_add_set(struct route_map_index *index, const char *set_name,
route_map_index. */
for (rule = index->set_list.head; rule; rule = next) {
next = rule->next;
- if (rule->cmd == cmd) {
+ if (rule->cmd == cmd)
route_map_rule_delete(&index->set_list, rule);
- replaced = 1;
- }
}
/* Add new route map match rule. */
@@ -1415,10 +1415,7 @@ int route_map_add_set(struct route_map_index *index, const char *set_name,
/* Execute event hook. */
if (route_map_master.event_hook) {
- (*route_map_master.event_hook)(replaced
- ? RMAP_EVENT_SET_REPLACED
- : RMAP_EVENT_SET_ADDED,
- index->map->name);
+ (*route_map_master.event_hook)(index->map->name);
route_map_notify_dependencies(index->map->name,
RMAP_EVENT_CALL_ADDED);
}
@@ -1442,9 +1439,7 @@ int route_map_delete_set(struct route_map_index *index, const char *set_name,
route_map_rule_delete(&index->set_list, rule);
/* Execute event hook. */
if (route_map_master.event_hook) {
- (*route_map_master.event_hook)(
- RMAP_EVENT_SET_DELETED,
- index->map->name);
+ (*route_map_master.event_hook)(index->map->name);
route_map_notify_dependencies(
index->map->name,
RMAP_EVENT_CALL_ADDED);
@@ -1641,7 +1636,7 @@ void route_map_delete_hook(void (*func)(const char *))
route_map_master.delete_hook = func;
}
-void route_map_event_hook(void (*func)(route_map_event_t, const char *))
+void route_map_event_hook(void (*func)(const char *name))
{
route_map_master.event_hook = func;
}
@@ -1711,7 +1706,7 @@ static void *route_map_name_hash_alloc(void *p)
return ((void *)XSTRDUP(MTYPE_ROUTE_MAP_NAME, (const char *)p));
}
-static unsigned int route_map_dep_hash_make_key(void *p)
+static unsigned int route_map_dep_hash_make_key(const void *p)
{
return (string_hash_make((char *)p));
}
@@ -1785,7 +1780,14 @@ static int route_map_dep_update(struct hash *dephash, const char *dep_name,
dep = NULL;
}
break;
- default:
+ case RMAP_EVENT_SET_ADDED:
+ case RMAP_EVENT_SET_DELETED:
+ case RMAP_EVENT_SET_REPLACED:
+ case RMAP_EVENT_MATCH_ADDED:
+ case RMAP_EVENT_MATCH_DELETED:
+ case RMAP_EVENT_MATCH_REPLACED:
+ case RMAP_EVENT_INDEX_ADDED:
+ case RMAP_EVENT_INDEX_DELETED:
break;
}
@@ -1828,13 +1830,26 @@ static struct hash *route_map_get_dep_hash(route_map_event_t event)
break;
case RMAP_EVENT_CALL_ADDED:
case RMAP_EVENT_CALL_DELETED:
+ case RMAP_EVENT_MATCH_ADDED:
+ case RMAP_EVENT_MATCH_DELETED:
upd8_hash = route_map_dep_hash[ROUTE_MAP_DEP_RMAP];
break;
case RMAP_EVENT_FILTER_ADDED:
case RMAP_EVENT_FILTER_DELETED:
upd8_hash = route_map_dep_hash[ROUTE_MAP_DEP_FILTER];
break;
- default:
+ /*
+ * Should we actually be ignoring these?
+	 * I am not sure, but at this point in time let
+	 * us get them into this switch; we can peel
+	 * them into the appropriate place in the future.
+	 */
+ case RMAP_EVENT_SET_ADDED:
+ case RMAP_EVENT_SET_DELETED:
+ case RMAP_EVENT_SET_REPLACED:
+ case RMAP_EVENT_MATCH_REPLACED:
+ case RMAP_EVENT_INDEX_ADDED:
+ case RMAP_EVENT_INDEX_DELETED:
upd8_hash = NULL;
break;
}
@@ -1844,13 +1859,12 @@ static struct hash *route_map_get_dep_hash(route_map_event_t event)
static void route_map_process_dependency(struct hash_bucket *bucket, void *data)
{
char *rmap_name = (char *)bucket->data;
- route_map_event_t type = (route_map_event_t)(ptrdiff_t)data;
if (rmap_debug)
zlog_debug("%s: Notifying %s of dependency",
__FUNCTION__, rmap_name);
if (route_map_master.event_hook)
- (*route_map_master.event_hook)(type, rmap_name);
+ (*route_map_master.event_hook)(rmap_name);
}
void route_map_upd8_dependency(route_map_event_t type, const char *arg,
@@ -2803,6 +2817,13 @@ DEFUN (rmap_call,
assert(index);
+	/* If "call" is invoked with the same route-map name as
+	 * the one previously configured, then ignore the duplicate
+	 * configuration.
+	 */
+ if (index->nextrm && (strcmp(index->nextrm, rmap) == 0))
+ return CMD_SUCCESS;
+
if (index->nextrm) {
route_map_upd8_dependency(RMAP_EVENT_CALL_DELETED,
index->nextrm, index->map->name);
diff --git a/lib/routemap.h b/lib/routemap.h
index e43e74a633..9969936a6b 100644
--- a/lib/routemap.h
+++ b/lib/routemap.h
@@ -238,7 +238,15 @@ extern route_map_result_t route_map_apply(struct route_map *map,
extern void route_map_add_hook(void (*func)(const char *));
extern void route_map_delete_hook(void (*func)(const char *));
-extern void route_map_event_hook(void (*func)(route_map_event_t, const char *));
+
+/*
+ * This is the callback for when something has changed about a
+ * route-map. The interested parties can register to receive
+ * this data.
+ *
+ * name - the name of the changed route-map
+ */
+extern void route_map_event_hook(void (*func)(const char *name));
extern int route_map_mark_updated(const char *name);
extern void route_map_walk_update_list(void (*update_fn)(char *name));
extern void route_map_upd8_dependency(route_map_event_t type, const char *arg,
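
For callers of the simplified hook, registration now looks roughly like this (daemon-side function names are hypothetical):

	/* previously: void my_rmap_event(route_map_event_t event, const char *name) */
	static void my_rmap_event(const char *name)
	{
		/* the event type is gone; consumers only learn which
		 * route-map changed and reprocess it wholesale
		 */
		my_daemon_mark_rmap_dirty(name);
	}

	void my_daemon_route_map_init(void)
	{
		route_map_event_hook(my_rmap_event);
	}
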
diff --git a/lib/seqlock.c b/lib/seqlock.c
new file mode 100644
index 0000000000..223d14952c
--- /dev/null
+++ b/lib/seqlock.c
@@ -0,0 +1,167 @@
+/*
+ * "Sequence" lock primitive
+ *
+ * Copyright (C) 2015 David Lamparter <equinox@diac24.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301 USA
+ */
+
+#define _GNU_SOURCE
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <unistd.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <pthread.h>
+#include <assert.h>
+
+#include "seqlock.h"
+
+#ifdef HAVE_SYNC_LINUX_FUTEX
+/* Linux-specific - sys_futex() */
+#include <sys/syscall.h>
+#include <linux/futex.h>
+
+static long sys_futex(void *addr1, int op, int val1, struct timespec *timeout,
+ void *addr2, int val3)
+{
+ return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
+}
+
+#define wait_once(sqlo, val) \
+ sys_futex((int *)&sqlo->pos, FUTEX_WAIT, (int)val, NULL, NULL, 0)
+#define wait_poke(sqlo) \
+ sys_futex((int *)&sqlo->pos, FUTEX_WAKE, INT_MAX, NULL, NULL, 0)
+
+#elif defined(HAVE_SYNC_OPENBSD_FUTEX)
+/* OpenBSD variant of the above; untested, and not yet upstream in OpenBSD. */
+#include <sys/syscall.h>
+#include <sys/futex.h>
+
+#define wait_once(sqlo, val) \
+ futex((int *)&sqlo->pos, FUTEX_WAIT, (int)val, NULL, NULL, 0)
+#define wait_poke(sqlo) \
+ futex((int *)&sqlo->pos, FUTEX_WAKE, INT_MAX, NULL, NULL, 0)
+
+#elif defined(HAVE_SYNC_UMTX_OP)
+/* FreeBSD-specific: umtx_op() */
+#include <sys/umtx.h>
+
+#define wait_once(sqlo, val) \
+ _umtx_op((void *)&sqlo->pos, UMTX_OP_WAIT_UINT, val, NULL, NULL)
+#define wait_poke(sqlo) \
+ _umtx_op((void *)&sqlo->pos, UMTX_OP_WAKE, INT_MAX, NULL, NULL)
+
+#else
+/* Generic version, used on *BSD, Solaris and OS X. */
+
+#define wait_init(sqlo) do { \
+ pthread_mutex_init(&sqlo->lock, NULL); \
+ pthread_cond_init(&sqlo->wake, NULL); \
+ } while (0)
+#define wait_prep(sqlo) pthread_mutex_lock(&sqlo->lock)
+#define wait_once(sqlo, val) pthread_cond_wait(&sqlo->wake, &sqlo->lock)
+#define wait_done(sqlo) pthread_mutex_unlock(&sqlo->lock)
+#define wait_poke(sqlo) do { \
+ pthread_mutex_lock(&sqlo->lock); \
+ pthread_cond_broadcast(&sqlo->wake); \
+ pthread_mutex_unlock(&sqlo->lock); \
+ } while (0)
+
+#endif
+
+#ifndef wait_init
+#define wait_init(sqlo) /**/
+#define wait_prep(sqlo) /**/
+#define wait_done(sqlo) /**/
+#endif /* wait_init */
+
+
+void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val)
+{
+ seqlock_val_t cur, cal;
+
+ seqlock_assert_valid(val);
+
+ wait_prep(sqlo);
+ while (1) {
+ cur = atomic_load_explicit(&sqlo->pos, memory_order_acquire);
+ if (!(cur & 1))
+ break;
+ cal = cur - val - 1;
+ assert(cal < 0x40000000 || cal > 0xc0000000);
+ if (cal < 0x80000000)
+ break;
+
+ wait_once(sqlo, cur);
+ }
+ wait_done(sqlo);
+}
+
+bool seqlock_check(struct seqlock *sqlo, seqlock_val_t val)
+{
+ seqlock_val_t cur;
+
+ seqlock_assert_valid(val);
+
+ cur = atomic_load_explicit(&sqlo->pos, memory_order_acquire);
+ if (!(cur & 1))
+		return true;
+ cur -= val;
+ assert(cur < 0x40000000 || cur > 0xc0000000);
+ return cur < 0x80000000;
+}
+
+void seqlock_acquire_val(struct seqlock *sqlo, seqlock_val_t val)
+{
+ seqlock_assert_valid(val);
+
+ atomic_store_explicit(&sqlo->pos, val, memory_order_release);
+ wait_poke(sqlo);
+}
+
+void seqlock_release(struct seqlock *sqlo)
+{
+ atomic_store_explicit(&sqlo->pos, 0, memory_order_release);
+ wait_poke(sqlo);
+}
+
+void seqlock_init(struct seqlock *sqlo)
+{
+ sqlo->pos = 0;
+ wait_init(sqlo);
+}
+
+
+seqlock_val_t seqlock_cur(struct seqlock *sqlo)
+{
+ return atomic_load_explicit(&sqlo->pos, memory_order_acquire);
+}
+
+seqlock_val_t seqlock_bump(struct seqlock *sqlo)
+{
+ seqlock_val_t val;
+
+ val = atomic_fetch_add_explicit(&sqlo->pos, 2, memory_order_release);
+ wait_poke(sqlo);
+ return val;
+}
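
The 0x80000000/0x40000000 comparisons above are serial-number arithmetic on the odd ticket values; a small worked example of the check in seqlock_check():

	/* illustrative only: uint32_t subtraction wraps, so the difference
	 * lands in the "positive" half (< 0x80000000) iff cur has reached
	 * or passed val, even across counter wraparound
	 */
	uint32_t cur = 9, val = 5;
	assert(cur - val < 0x80000000);		/* cur at/past val: no wait */

	cur = 5;
	val = 9;
	assert(cur - val >= 0x80000000);	/* cur behind val: would wait */
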
diff --git a/lib/seqlock.h b/lib/seqlock.h
new file mode 100644
index 0000000000..eef05a4307
--- /dev/null
+++ b/lib/seqlock.h
@@ -0,0 +1,106 @@
+/*
+ * "Sequence" lock primitive
+ *
+ * Copyright (C) 2015 David Lamparter <equinox@diac24.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301 USA
+ */
+
+#ifndef _SEQLOCK_H
+#define _SEQLOCK_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <pthread.h>
+#include "frratomic.h"
+
+/*
+ * This locking primitive is intended for use in a 1:N setup.
+ *
+ * - one "counter" seqlock issuing increasing numbers
+ * - multiple seqlock users hold references on these numbers
+ *
+ * This is intended for implementing RCU reference-holding. There is one
+ * global counter, with threads locking a seqlock whenever they take a
+ * reference. A seqlock can also be idle/unlocked.
+ *
+ * The "counter" seqlock will always stay locked; the RCU cleanup thread
+ * continuously counts it up, waiting for threads to release or progress to a
+ * sequence number further ahead. If all threads are > N, references dropped
+ * in N can be freed.
+ *
+ * Generally, the locking pattern is:
+ *
+ * Thread-A Thread-B
+ *
+ * seqlock_acquire(a)
+ * | running seqlock_wait(b) -- a <= b
+ * seqlock_release() | blocked
+ * OR: seqlock_acquire(a') | -- a' > b
+ * (resumes)
+ */
+
+/* Use sequentially increasing "ticket numbers". The lowest bit will always
+ * be 1, so that 0 can serve as the 'cleared'/unheld indication (i.e., the
+ * counter runs 1, 3, 5, 7, etc.)
+ */
+typedef _Atomic uint32_t seqlock_ctr_t;
+typedef uint32_t seqlock_val_t;
+#define seqlock_assert_valid(val) assert(val & 1)
+
+
+struct seqlock {
+/* always used */
+ seqlock_ctr_t pos;
+/* used when futexes not available: (i.e. non-linux) */
+ pthread_mutex_t lock;
+ pthread_cond_t wake;
+};
+
+
+/* sqlo = 0 - init state: not held */
+extern void seqlock_init(struct seqlock *sqlo);
+
+
+/* while (sqlo <= val) - wait until seqlock->pos > val, or seqlock unheld */
+extern void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val);
+extern bool seqlock_check(struct seqlock *sqlo, seqlock_val_t val);
+
+static inline bool seqlock_held(struct seqlock *sqlo)
+{
+ return !!atomic_load_explicit(&sqlo->pos, memory_order_relaxed);
+}
+
+/* sqlo - get seqlock position -- for the "counter" seqlock */
+extern seqlock_val_t seqlock_cur(struct seqlock *sqlo);
+/* sqlo++ - note: like x++, returns previous value, before bumping */
+extern seqlock_val_t seqlock_bump(struct seqlock *sqlo);
+
+
+/* sqlo = val - can be used on held seqlock. */
+extern void seqlock_acquire_val(struct seqlock *sqlo, seqlock_val_t val);
+/* sqlo = ref - standard pattern: acquire relative to other seqlock */
+static inline void seqlock_acquire(struct seqlock *sqlo, struct seqlock *ref)
+{
+ seqlock_acquire_val(sqlo, seqlock_cur(ref));
+}
+
+/* sqlo = 0 - set seqlock position to 0, marking as non-held */
+extern void seqlock_release(struct seqlock *sqlo);
+/* release should normally be followed by a bump on the "counter", if
+ * anything other than reading RCU items was done
+ */
+
+#endif /* _SEQLOCK_H */
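
To make the Thread-A/Thread-B diagram above concrete, a rough usage sketch (the RCU bookkeeping around it is elided; this is not a drop-in recipe):

	static struct seqlock counter;	/* the one global "counter" seqlock */
	static struct seqlock reader;	/* one per participating thread */

	void setup(void)
	{
		seqlock_init(&counter);
		seqlock_init(&reader);
		seqlock_acquire_val(&counter, 1);	/* counter stays held */
	}

	void reader_side(void)
	{
		seqlock_acquire(&reader, &counter);	/* pin current sequence */
		/* ... access shared items ... */
		seqlock_release(&reader);
	}

	void cleanup_side(void)
	{
		seqlock_val_t n = seqlock_cur(&counter);

		seqlock_bump(&counter);
		seqlock_wait(&reader, n);	/* returns once the reader is
						 * past n or idle; items
						 * dropped at n can be freed */
	}
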
diff --git a/lib/sockunion.c b/lib/sockunion.c
index af4f41f37c..8fa9a3fad9 100644
--- a/lib/sockunion.c
+++ b/lib/sockunion.c
@@ -163,7 +163,7 @@ int sockunion_accept(int sock, union sockunion *su)
}
/* Return sizeof union sockunion. */
-static int sockunion_sizeof(const union sockunion *su)
+int sockunion_sizeof(const union sockunion *su)
{
int ret;
@@ -366,21 +366,6 @@ int sockopt_cork(int sock, int onoff)
return 0;
}
-int sockopt_mark_default(int sock, int mark, struct zebra_privs_t *cap)
-{
-#ifdef SO_MARK
- int ret;
-
- frr_elevate_privs(cap) {
- ret = setsockopt(sock, SOL_SOCKET, SO_MARK, &mark,
- sizeof(mark));
- }
- return ret;
-#else
- return 0;
-#endif
-}
-
int sockopt_minttl(int family, int sock, int minttl)
{
#ifdef IP_MINTTL
@@ -472,7 +457,7 @@ unsigned int sockunion_hash(const union sockunion *su)
return jhash_1word(su->sin.sin_addr.s_addr, 0);
case AF_INET6:
return jhash2(su->sin6.sin6_addr.s6_addr32,
- ZEBRA_NUM_OF(su->sin6.sin6_addr.s6_addr32), 0);
+ array_size(su->sin6.sin6_addr.s6_addr32), 0);
}
return 0;
}
diff --git a/lib/sockunion.h b/lib/sockunion.h
index d7d26ba85a..7091c1b5e7 100644
--- a/lib/sockunion.h
+++ b/lib/sockunion.h
@@ -83,6 +83,7 @@ extern void sockunion_set(union sockunion *, int family, const uint8_t *addr,
extern union sockunion *sockunion_str2su(const char *str);
extern int sockunion_accept(int sock, union sockunion *);
+extern int sockunion_sizeof(const union sockunion *su);
extern int sockunion_stream_socket(union sockunion *);
extern int sockopt_reuseaddr(int);
extern int sockopt_reuseport(int);
@@ -92,7 +93,6 @@ extern int sockunion_bind(int sock, union sockunion *, unsigned short,
extern int sockopt_ttl(int family, int sock, int ttl);
extern int sockopt_minttl(int family, int sock, int minttl);
extern int sockopt_cork(int sock, int onoff);
-extern int sockopt_mark_default(int sock, int mark, struct zebra_privs_t *);
extern int sockunion_socket(const union sockunion *su);
extern const char *inet_sutop(const union sockunion *su, char *str);
extern enum connect_result sockunion_connect(int fd, const union sockunion *su,
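
With sockunion_sizeof() now exported, out-of-lib callers can pass the family-correct sockaddr length directly, e.g. (hypothetical caller):

	union sockunion su;
	int fd;

	/* sockunion_sizeof() picks sizeof sin/sin6 based on the family */
	fd = sockunion_socket(&su);
	if (fd >= 0)
		connect(fd, (struct sockaddr *)&su, sockunion_sizeof(&su));
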
diff --git a/lib/subdir.am b/lib/subdir.am
index 3b14be4676..4897f5e8e5 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -3,10 +3,11 @@
#
lib_LTLIBRARIES += lib/libfrr.la
lib_libfrr_la_LDFLAGS = -version-info 0:0:0 -Xlinker -e_libfrr_version
-lib_libfrr_la_LIBADD = $(LIBCAP) $(UNWIND_LIBS) $(LIBYANG_LIBS)
+lib_libfrr_la_LIBADD = $(LIBCAP) $(UNWIND_LIBS) $(LIBYANG_LIBS) $(LUA_LIB)
lib_libfrr_la_SOURCES = \
lib/agg_table.c \
+ lib/atomlist.c \
lib/bfd.c \
lib/buffer.c \
lib/checksum.c \
@@ -20,6 +21,7 @@ lib_libfrr_la_SOURCES = \
lib/distribute.c \
lib/ferr.c \
lib/filter.c \
+ lib/frrlua.c \
lib/frr_pthread.c \
lib/frrstr.c \
lib/getopt.c \
@@ -65,6 +67,7 @@ lib_libfrr_la_SOURCES = \
lib/ringbuf.c \
lib/routemap.c \
lib/sbuf.c \
+ lib/seqlock.c \
lib/sha256.c \
lib/sigevent.c \
lib/skiplist.c \
@@ -79,6 +82,8 @@ lib_libfrr_la_SOURCES = \
lib/table.c \
lib/termtable.c \
lib/thread.c \
+ lib/typerb.c \
+ lib/typesafe.c \
lib/vector.c \
lib/vrf.c \
lib/vty.c \
@@ -89,7 +94,6 @@ lib_libfrr_la_SOURCES = \
lib/yang_wrappers.c \
lib/zclient.c \
lib/logicalrouter.c \
- lib/lua.c \
# end
nodist_lib_libfrr_la_SOURCES = \
@@ -130,6 +134,7 @@ lib/northbound_cli.lo: lib/northbound_cli_clippy.c
pkginclude_HEADERS += \
lib/agg_table.h \
+ lib/atomlist.h \
lib/bfd.h \
lib/bitfield.h \
lib/buffer.h \
@@ -144,9 +149,9 @@ pkginclude_HEADERS += \
lib/debug.h \
lib/distribute.h \
lib/ferr.h \
- lib/fifo.h \
lib/filter.h \
lib/freebsd-queue.h \
+ lib/frrlua.h \
lib/frr_pthread.h \
lib/frratomic.h \
lib/frrstr.h \
@@ -193,6 +198,7 @@ pkginclude_HEADERS += \
lib/ringbuf.h \
lib/routemap.h \
lib/sbuf.h \
+ lib/seqlock.h \
lib/sha256.h \
lib/sigevent.h \
lib/skiplist.h \
@@ -206,6 +212,8 @@ pkginclude_HEADERS += \
lib/table.h \
lib/termtable.h \
lib/thread.h \
+ lib/typerb.h \
+ lib/typesafe.h \
lib/vector.h \
lib/vlan.h \
lib/vrf.h \
@@ -221,7 +229,6 @@ pkginclude_HEADERS += \
lib/zclient.h \
lib/zebra.h \
lib/logicalrouter.h \
- lib/lua.h \
lib/pbr.h \
# end
@@ -303,6 +310,18 @@ lib_sysrepo_la_LIBADD = lib/libfrr.la $(SYSREPO_LIBS)
lib_sysrepo_la_SOURCES = lib/northbound_sysrepo.c
#
+# gRPC northbound plugin
+#
+if GRPC
+module_LTLIBRARIES += lib/grpc.la
+endif
+
+lib_grpc_la_CXXFLAGS = $(WERROR) $(GRPC_CFLAGS)
+lib_grpc_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+lib_grpc_la_LIBADD = lib/libfrr.la grpc/libfrrgrpc_pb.la $(GRPC_LIBS)
+lib_grpc_la_SOURCES = lib/northbound_grpc.cpp
+
+#
# CLI utilities
#
noinst_PROGRAMS += \
@@ -346,7 +365,7 @@ am__v_CLIPPY_1 =
CLIPPY_DEPS = $(HOSTTOOLS)lib/clippy $(top_srcdir)/python/clidef.py
-SUFFIXES = _clippy.c .proto .pb-c.c .pb-c.h .pb.h
+SUFFIXES = _clippy.c .proto .pb-c.c .pb-c.h .pb.h .pb.cc .grpc.pb.cc
.c_clippy.c:
@{ test -x $(top_builddir)/$(HOSTTOOLS)lib/clippy || \
$(MAKE) -C $(top_builddir)/$(HOSTTOOLS) lib/clippy; }
diff --git a/lib/table.c b/lib/table.c
index edba7f1932..728615c776 100644
--- a/lib/table.c
+++ b/lib/table.c
@@ -33,12 +33,14 @@ DEFINE_MTYPE(LIB, ROUTE_NODE, "Route node")
static void route_table_free(struct route_table *);
-static bool route_table_hash_cmp(const void *a, const void *b)
+static int route_table_hash_cmp(const struct route_node *a,
+ const struct route_node *b)
{
- const struct prefix *pa = a, *pb = b;
- return prefix_cmp(pa, pb) == 0;
+ return prefix_cmp(&a->p, &b->p);
}
+DECLARE_HASH(rn_hash_node, struct route_node, nodehash, route_table_hash_cmp,
+ prefix_hash_key)
/*
* route_table_init_with_delegate
*/
@@ -49,8 +51,7 @@ route_table_init_with_delegate(route_table_delegate_t *delegate)
rt = XCALLOC(MTYPE_ROUTE_TABLE, sizeof(struct route_table));
rt->delegate = delegate;
- rt->hash = hash_create(prefix_hash_key, route_table_hash_cmp,
- "route table hash");
+ rn_hash_node_init(&rt->hash);
return rt;
}
@@ -69,15 +70,14 @@ static struct route_node *route_node_new(struct route_table *table)
static struct route_node *route_node_set(struct route_table *table,
const struct prefix *prefix)
{
- struct route_node *node, *inserted;
+ struct route_node *node;
node = route_node_new(table);
prefix_copy(&node->p, prefix);
node->table = table;
- inserted = hash_get(node->table->hash, node, hash_alloc_intern);
- assert(inserted == node);
+ rn_hash_node_add(&node->table->hash, node);
return node;
}
@@ -99,9 +99,6 @@ static void route_table_free(struct route_table *rt)
if (rt == NULL)
return;
- hash_clean(rt->hash, NULL);
- hash_free(rt->hash);
-
node = rt->top;
/* Bulk deletion of nodes remaining in this table. This function is not
@@ -123,6 +120,7 @@ static void route_table_free(struct route_table *rt)
tmp_node->table->count--;
tmp_node->lock = 0; /* to cause assert if unlocked after this */
+ rn_hash_node_del(&rt->hash, tmp_node);
route_node_free(rt, tmp_node);
if (node != NULL) {
@@ -137,6 +135,7 @@ static void route_table_free(struct route_table *rt)
assert(rt->count == 0);
+ rn_hash_node_fini(&rt->hash);
XFREE(MTYPE_ROUTE_TABLE, rt);
return;
}
@@ -192,7 +191,7 @@ static void set_link(struct route_node *node, struct route_node *new)
}
/* Find matched prefix. */
-struct route_node *route_node_match(const struct route_table *table,
+struct route_node *route_node_match(struct route_table *table,
union prefixconstptr pu)
{
const struct prefix *p = pu.p;
@@ -222,7 +221,7 @@ struct route_node *route_node_match(const struct route_table *table,
return NULL;
}
-struct route_node *route_node_match_ipv4(const struct route_table *table,
+struct route_node *route_node_match_ipv4(struct route_table *table,
const struct in_addr *addr)
{
struct prefix_ipv4 p;
@@ -235,7 +234,7 @@ struct route_node *route_node_match_ipv4(const struct route_table *table,
return route_node_match(table, (struct prefix *)&p);
}
-struct route_node *route_node_match_ipv6(const struct route_table *table,
+struct route_node *route_node_match_ipv6(struct route_table *table,
const struct in6_addr *addr)
{
struct prefix_ipv6 p;
@@ -245,49 +244,50 @@ struct route_node *route_node_match_ipv6(const struct route_table *table,
p.prefixlen = IPV6_MAX_PREFIXLEN;
p.prefix = *addr;
- return route_node_match(table, (struct prefix *)&p);
+ return route_node_match(table, &p);
}
/* Lookup same prefix node. Return NULL when we can't find route. */
-struct route_node *route_node_lookup(const struct route_table *table,
+struct route_node *route_node_lookup(struct route_table *table,
union prefixconstptr pu)
{
- struct prefix p;
- struct route_node *node;
- prefix_copy(&p, pu.p);
- apply_mask(&p);
+ struct route_node rn, *node;
+ prefix_copy(&rn.p, pu.p);
+ apply_mask(&rn.p);
- node = hash_get(table->hash, (void *)&p, NULL);
+ node = rn_hash_node_find(&table->hash, &rn);
return (node && node->info) ? route_lock_node(node) : NULL;
}
/* Lookup same prefix node. Return NULL when we can't find route. */
-struct route_node *route_node_lookup_maynull(const struct route_table *table,
+struct route_node *route_node_lookup_maynull(struct route_table *table,
union prefixconstptr pu)
{
- struct prefix p;
- struct route_node *node;
- prefix_copy(&p, pu.p);
- apply_mask(&p);
+ struct route_node rn, *node;
+ prefix_copy(&rn.p, pu.p);
+ apply_mask(&rn.p);
- node = hash_get(table->hash, (void *)&p, NULL);
+ node = rn_hash_node_find(&table->hash, &rn);
return node ? route_lock_node(node) : NULL;
}
/* Add node to routing table. */
-struct route_node *route_node_get(struct route_table *const table,
+struct route_node *route_node_get(struct route_table *table,
union prefixconstptr pu)
{
- const struct prefix *p = pu.p;
+ struct route_node search;
+ struct prefix *p = &search.p;
+
+ prefix_copy(p, pu.p);
+ apply_mask(p);
+
struct route_node *new;
struct route_node *node;
struct route_node *match;
- struct route_node *inserted;
uint16_t prefixlen = p->prefixlen;
const uint8_t *prefix = &p->u.prefix;
- apply_mask((struct prefix *)p);
- node = hash_get(table->hash, (void *)p, NULL);
+ node = rn_hash_node_find(&table->hash, &search);
if (node && node->info)
return route_lock_node(node);
@@ -314,8 +314,7 @@ struct route_node *route_node_get(struct route_table *const table,
new->p.family = p->family;
new->table = table;
set_link(new, node);
- inserted = hash_get(node->table->hash, new, hash_alloc_intern);
- assert(inserted == new);
+ rn_hash_node_add(&table->hash, new);
if (match)
set_link(match, new);
@@ -367,7 +366,7 @@ void route_node_delete(struct route_node *node)
node->table->count--;
- hash_release(node->table->hash, node);
+ rn_hash_node_del(&node->table->hash, node);
/* WARNING: FRAGILE CODE!
* route_node_free may have the side effect of free'ing the entire
@@ -472,7 +471,7 @@ struct route_node *route_next_until(struct route_node *node,
return NULL;
}
-unsigned long route_table_count(const struct route_table *table)
+unsigned long route_table_count(struct route_table *table)
{
return table->count;
}
@@ -607,7 +606,7 @@ static struct route_node *route_get_subtree_next(struct route_node *node)
* @see route_table_get_next
*/
static struct route_node *
-route_table_get_next_internal(const struct route_table *table,
+route_table_get_next_internal(struct route_table *table,
const struct prefix *p)
{
struct route_node *node, *tmp_node;
@@ -708,7 +707,7 @@ route_table_get_next_internal(const struct route_table *table,
* Find the node that occurs after the given prefix in order of
* iteration.
*/
-struct route_node *route_table_get_next(const struct route_table *table,
+struct route_node *route_table_get_next(struct route_table *table,
union prefixconstptr pu)
{
const struct prefix *p = pu.p;
diff --git a/lib/table.h b/lib/table.h
index ce578e795c..14be7ab656 100644
--- a/lib/table.h
+++ b/lib/table.h
@@ -25,6 +25,7 @@
#include "memory.h"
#include "hash.h"
#include "prefix.h"
+#include "typesafe.h"
#ifdef __cplusplus
extern "C" {
@@ -59,10 +60,12 @@ struct route_table_delegate_t_ {
route_table_destroy_node_func_t destroy_node;
};
+PREDECL_HASH(rn_hash_node)
+
/* Routing table top structure. */
struct route_table {
struct route_node *top;
- struct hash *hash;
+ struct rn_hash_node_head hash;
/*
* Delegate that performs certain functions for this table.
@@ -129,6 +132,7 @@ struct route_table {
/* Lock of this radix */ \
unsigned int table_rdonly(lock); \
\
+ struct rn_hash_node_item nodehash; \
/* Each node of route. */ \
void *info; \
@@ -194,26 +198,29 @@ static inline void route_table_set_info(struct route_table *table, void *d)
table->info = d;
}
+/* ext_pure => extern __attribute__((pure))
+ * does not modify memory (but may depend on its contents); allows the
+ * compiler to optimize away or merge repeated calls
+ */
+
extern void route_table_finish(struct route_table *table);
-extern struct route_node *route_top(struct route_table *table);
-extern struct route_node *route_next(struct route_node *node);
-extern struct route_node *route_next_until(struct route_node *node,
- const struct route_node *limit);
-extern struct route_node *route_node_get(struct route_table *const table,
+ext_pure struct route_node *route_top(struct route_table *table);
+ext_pure struct route_node *route_next(struct route_node *node);
+ext_pure struct route_node *route_next_until(struct route_node *node,
+ const struct route_node *limit);
+extern struct route_node *route_node_get(struct route_table *table,
union prefixconstptr pu);
-extern struct route_node *route_node_lookup(const struct route_table *table,
- union prefixconstptr pu);
-extern struct route_node *
-route_node_lookup_maynull(const struct route_table *table,
- union prefixconstptr pu);
-extern struct route_node *route_node_match(const struct route_table *table,
- union prefixconstptr pu);
-extern struct route_node *route_node_match_ipv4(const struct route_table *table,
- const struct in_addr *addr);
-extern struct route_node *route_node_match_ipv6(const struct route_table *table,
- const struct in6_addr *addr);
-
-extern unsigned long route_table_count(const struct route_table *table);
+ext_pure struct route_node *route_node_lookup(struct route_table *table,
+ union prefixconstptr pu);
+ext_pure struct route_node *route_node_lookup_maynull(struct route_table *table,
+ union prefixconstptr pu);
+ext_pure struct route_node *route_node_match(struct route_table *table,
+ union prefixconstptr pu);
+ext_pure struct route_node *route_node_match_ipv4(struct route_table *table,
+ const struct in_addr *addr);
+ext_pure struct route_node *route_node_match_ipv6(struct route_table *table,
+ const struct in6_addr *addr);
+
+ext_pure unsigned long route_table_count(struct route_table *table);
extern struct route_node *route_node_create(route_table_delegate_t *delegate,
struct route_table *table);
@@ -222,10 +229,10 @@ extern void route_node_destroy(route_table_delegate_t *delegate,
struct route_table *table,
struct route_node *node);
-extern struct route_node *route_table_get_next(const struct route_table *table,
- union prefixconstptr pu);
-extern int route_table_prefix_iter_cmp(const struct prefix *p1,
- const struct prefix *p2);
+ext_pure struct route_node *route_table_get_next(struct route_table *table,
+ union prefixconstptr pu);
+ext_pure int route_table_prefix_iter_cmp(const struct prefix *p1,
+ const struct prefix *p2);
/*
* Iterator functions.
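
The table.c rewrite above leans on one pattern worth noting: with the typesafe hash, the lookup key is a stack "dummy" route_node whose prefix alone is filled in. Schematically:

	/* schematic of route_node_lookup() above: only the prefix (the hash
	 * key used by prefix_hash_key/route_table_hash_cmp) needs to be
	 * valid in the dummy node
	 */
	struct route_node dummy, *node;

	prefix_copy(&dummy.p, pu.p);
	apply_mask(&dummy.p);
	node = rn_hash_node_find(&table->hash, &dummy);
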
diff --git a/lib/thread.c b/lib/thread.c
index 2760b83fb3..d3fb2cdf36 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -40,6 +40,8 @@ DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master")
DEFINE_MTYPE_STATIC(LIB, THREAD_POLL, "Thread Poll Info")
DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")
+DECLARE_LIST(thread_list, struct thread, threaditem)
+
#if defined(__APPLE__)
#include <mach/mach.h>
#include <mach/mach_time.h>
@@ -61,7 +63,7 @@ static struct list *masters;
static void thread_free(struct thread_master *master, struct thread *thread);
/* CLI start ---------------------------------------------------------------- */
-static unsigned int cpu_record_hash_key(struct cpu_thread_history *a)
+static unsigned int cpu_record_hash_key(const struct cpu_thread_history *a)
{
int size = sizeof(a->func);
@@ -279,7 +281,7 @@ DEFUN (show_thread_cpu,
SHOW_STR
"Thread information\n"
"Thread CPU usage\n"
- "Display filter (rwtexb)\n")
+ "Display filter (rwtex)\n")
{
uint8_t filter = (uint8_t)-1U;
int idx = 0;
@@ -310,7 +312,8 @@ static void show_thread_poll_helper(struct vty *vty, struct thread_master *m)
vty_out(vty, "\nShowing poll FD's for %s\n", name);
vty_out(vty, "----------------------%s\n", underline);
- vty_out(vty, "Count: %u\n", (uint32_t)m->handler.pfdcount);
+ vty_out(vty, "Count: %u/%d\n", (uint32_t)m->handler.pfdcount,
+ m->fd_limit);
for (i = 0; i < m->handler.pfdcount; i++)
vty_out(vty, "\t%6d fd:%6d events:%2d revents:%2d\n", i,
m->handler.pfds[i].fd,
@@ -431,10 +434,13 @@ struct thread_master *thread_master_create(const char *name)
sizeof(struct thread *) * rv->fd_limit);
rv->cpu_record = hash_create_size(
- 8, (unsigned int (*)(void *))cpu_record_hash_key,
+ 8, (unsigned int (*)(const void *))cpu_record_hash_key,
(bool (*)(const void *, const void *))cpu_record_hash_cmp,
"Thread Hash");
+ thread_list_init(&rv->event);
+ thread_list_init(&rv->ready);
+ thread_list_init(&rv->unuse);
/* Initialize the timer queues */
rv->timer = pqueue_create();
@@ -487,50 +493,6 @@ void thread_master_set_name(struct thread_master *master, const char *name)
pthread_mutex_unlock(&master->mtx);
}
-/* Add a new thread to the list. */
-static void thread_list_add(struct thread_list *list, struct thread *thread)
-{
- thread->next = NULL;
- thread->prev = list->tail;
- if (list->tail)
- list->tail->next = thread;
- else
- list->head = thread;
- list->tail = thread;
- list->count++;
-}
-
-/* Delete a thread from the list. */
-static struct thread *thread_list_delete(struct thread_list *list,
- struct thread *thread)
-{
- if (thread->next)
- thread->next->prev = thread->prev;
- else
- list->tail = thread->prev;
- if (thread->prev)
- thread->prev->next = thread->next;
- else
- list->head = thread->next;
- thread->next = thread->prev = NULL;
- list->count--;
- return thread;
-}
-
-/* Thread list is empty or not. */
-static int thread_empty(struct thread_list *list)
-{
- return list->head ? 0 : 1;
-}
-
-/* Delete top of the list and return it. */
-static struct thread *thread_trim_head(struct thread_list *list)
-{
- if (!thread_empty(list))
- return thread_list_delete(list, list->head);
- return NULL;
-}
-
#define THREAD_UNUSED_DEPTH 10
/* Move thread to unuse list. */
@@ -539,8 +501,6 @@ static void thread_add_unuse(struct thread_master *m, struct thread *thread)
pthread_mutex_t mtxc = thread->mtx;
assert(m != NULL && thread != NULL);
- assert(thread->next == NULL);
- assert(thread->prev == NULL);
thread->hist->total_active--;
memset(thread, 0, sizeof(struct thread));
@@ -549,8 +509,8 @@ static void thread_add_unuse(struct thread_master *m, struct thread *thread)
/* Restore the thread mutex context. */
thread->mtx = mtxc;
- if (m->unuse.count < THREAD_UNUSED_DEPTH) {
- thread_list_add(&m->unuse, thread);
+ if (thread_list_count(&m->unuse) < THREAD_UNUSED_DEPTH) {
+ thread_list_add_tail(&m->unuse, thread);
return;
}
@@ -558,16 +518,13 @@ static void thread_add_unuse(struct thread_master *m, struct thread *thread)
}
/* Free all unused thread. */
-static void thread_list_free(struct thread_master *m, struct thread_list *list)
+static void thread_list_free(struct thread_master *m,
+ struct thread_list_head *list)
{
struct thread *t;
- struct thread *next;
- for (t = list->head; t; t = next) {
- next = t->next;
+ while ((t = thread_list_pop(list)))
thread_free(m, t);
- list->count--;
- }
}
static void thread_array_free(struct thread_master *m,
@@ -609,9 +566,8 @@ void thread_master_free_unused(struct thread_master *m)
pthread_mutex_lock(&m->mtx);
{
struct thread *t;
- while ((t = thread_trim_head(&m->unuse)) != NULL) {
+ while ((t = thread_list_pop(&m->unuse)))
thread_free(m, t);
- }
}
pthread_mutex_unlock(&m->mtx);
}
@@ -690,7 +646,7 @@ static struct thread *thread_get(struct thread_master *m, uint8_t type,
int (*func)(struct thread *), void *arg,
debugargdef)
{
- struct thread *thread = thread_trim_head(&m->unuse);
+ struct thread *thread = thread_list_pop(&m->unuse);
struct cpu_thread_history tmp;
if (!thread) {
@@ -971,7 +927,7 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
pthread_mutex_lock(&thread->mtx);
{
thread->u.val = val;
- thread_list_add(&m->event, thread);
+ thread_list_add_tail(&m->event, thread);
}
pthread_mutex_unlock(&thread->mtx);
@@ -1063,7 +1019,7 @@ static void thread_cancel_rw(struct thread_master *master, int fd, short state)
*/
static void do_thread_cancel(struct thread_master *master)
{
- struct thread_list *list = NULL;
+ struct thread_list_head *list = NULL;
struct pqueue *queue = NULL;
struct thread **thread_array = NULL;
struct thread *thread;
@@ -1078,31 +1034,23 @@ static void do_thread_cancel(struct thread_master *master)
* need to check every thread in the ready queue. */
if (cr->eventobj) {
struct thread *t;
- thread = master->event.head;
-
- while (thread) {
- t = thread;
- thread = t->next;
-
- if (t->arg == cr->eventobj) {
- thread_list_delete(&master->event, t);
- if (t->ref)
- *t->ref = NULL;
- thread_add_unuse(master, t);
- }
+
+ frr_each_safe(thread_list, &master->event, t) {
+ if (t->arg != cr->eventobj)
+ continue;
+ thread_list_del(&master->event, t);
+ if (t->ref)
+ *t->ref = NULL;
+ thread_add_unuse(master, t);
}
- thread = master->ready.head;
- while (thread) {
- t = thread;
- thread = t->next;
-
- if (t->arg == cr->eventobj) {
- thread_list_delete(&master->ready, t);
- if (t->ref)
- *t->ref = NULL;
- thread_add_unuse(master, t);
- }
+ frr_each_safe(thread_list, &master->ready, t) {
+ if (t->arg != cr->eventobj)
+ continue;
+ thread_list_del(&master->ready, t);
+ if (t->ref)
+ *t->ref = NULL;
+ thread_add_unuse(master, t);
}
continue;
}
@@ -1146,7 +1094,7 @@ static void do_thread_cancel(struct thread_master *master)
assert(thread == queue->array[thread->index]);
pqueue_remove_at(thread->index, queue);
} else if (list) {
- thread_list_delete(list, thread);
+ thread_list_del(list, thread);
} else if (thread_array) {
thread_array[thread->u.fd] = NULL;
} else {
@@ -1301,7 +1249,7 @@ static int thread_process_io_helper(struct thread_master *m,
thread_array = m->write;
thread_array[thread->u.fd] = NULL;
- thread_list_add(&m->ready, thread);
+ thread_list_add_tail(&m->ready, thread);
thread->type = THREAD_READY;
/* if another pthread scheduled this file descriptor for the event we're
* responding to, no problem; we're getting to it now */
@@ -1380,24 +1328,21 @@ static unsigned int thread_process_timers(struct pqueue *queue,
return ready;
pqueue_dequeue(queue);
thread->type = THREAD_READY;
- thread_list_add(&thread->master->ready, thread);
+ thread_list_add_tail(&thread->master->ready, thread);
ready++;
}
return ready;
}
/* process a list en masse, e.g. for event thread lists */
-static unsigned int thread_process(struct thread_list *list)
+static unsigned int thread_process(struct thread_list_head *list)
{
struct thread *thread;
- struct thread *next;
unsigned int ready = 0;
- for (thread = list->head; thread; thread = next) {
- next = thread->next;
- thread_list_delete(list, thread);
+ while ((thread = thread_list_pop(list))) {
thread->type = THREAD_READY;
- thread_list_add(&thread->master->ready, thread);
+ thread_list_add_tail(&thread->master->ready, thread);
ready++;
}
return ready;
@@ -1429,7 +1374,7 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
* Attempt to flush ready queue before going into poll().
* This is performance-critical. Think twice before modifying.
*/
- if ((thread = thread_trim_head(&m->ready))) {
+ if ((thread = thread_list_pop(&m->ready))) {
fetch = thread_run(m, thread, fetch);
if (fetch->ref)
*fetch->ref = NULL;
@@ -1462,10 +1407,11 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
* In every case except the last, we need to hit poll() at least
* once per loop to avoid starvation by events
*/
- if (m->ready.count == 0)
+ if (!thread_list_count(&m->ready))
tw = thread_timer_wait(m->timer, &tv);
- if (m->ready.count != 0 || (tw && !timercmp(tw, &zerotime, >)))
+ if (thread_list_count(&m->ready) ||
+ (tw && !timercmp(tw, &zerotime, >)))
tw = &zerotime;
if (!tw && m->handler.pfdcount == 0) { /* die */
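
frr_each_safe, as used in do_thread_cancel() above, is the deletion-safe iteration macro for the typesafe containers; schematically:

	struct thread *t;

	/* the "safe" variant prefetches the next element before running
	 * the body, so t itself may be deleted from the list inside it
	 */
	frr_each_safe(thread_list, &master->event, t) {
		if (t->arg == eventobj)	/* eventobj: whatever is cancelled */
			thread_list_del(&master->event, t);
	}
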
diff --git a/lib/thread.h b/lib/thread.h
index ec774a6543..7897265120 100644
--- a/lib/thread.h
+++ b/lib/thread.h
@@ -26,6 +26,7 @@
#include <poll.h>
#include "monotime.h"
#include "frratomic.h"
+#include "typesafe.h"
#ifdef __cplusplus
extern "C" {
@@ -39,12 +40,7 @@ struct rusage_t {
#define GETRUSAGE(X) thread_getrusage(X)
-/* Linked list of thread. */
-struct thread_list {
- struct thread *head;
- struct thread *tail;
- int count;
-};
+PREDECL_LIST(thread_list)
struct pqueue;
@@ -78,9 +74,7 @@ struct thread_master {
struct thread **read;
struct thread **write;
struct pqueue *timer;
- struct thread_list event;
- struct thread_list ready;
- struct thread_list unuse;
+ struct thread_list_head event, ready, unuse;
struct list *cancel_req;
bool canceled;
pthread_cond_t cancel_cond;
@@ -100,8 +94,7 @@ struct thread_master {
struct thread {
uint8_t type; /* thread type */
uint8_t add_type; /* thread type */
- struct thread *next; /* next pointer of the thread */
- struct thread *prev; /* previous pointer of the thread */
+ struct thread_list_item threaditem;
struct thread **ref; /* external reference (if given) */
struct thread_master *master; /* pointer to the struct thread_master */
int (*func)(struct thread *); /* event function */
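
The PREDECL_LIST/DECLARE_LIST split shown here is the intended layout for all the typesafe containers: the header predeclares the head/item types, and exactly one .c file (thread.c, above) expands the accessors. In general (hypothetical names):

	/* foo.h */
	PREDECL_LIST(foo_list)

	struct foo {
		struct foo_list_item fitem;	/* embedded linkage, like threaditem */
		int value;
	};

	/* foo.c */
	DECLARE_LIST(foo_list, struct foo, fitem)
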
diff --git a/lib/typerb.c b/lib/typerb.c
new file mode 100644
index 0000000000..d361e7651e
--- /dev/null
+++ b/lib/typerb.c
@@ -0,0 +1,472 @@
+/* RB-tree */
+
+/*
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "typerb.h"
+
+#define RB_BLACK 0
+#define RB_RED 1
+
+#define rb_entry typed_rb_entry
+#define rbt_tree typed_rb_root
+
+#define RBE_LEFT(_rbe) (_rbe)->rbt_left
+#define RBE_RIGHT(_rbe) (_rbe)->rbt_right
+#define RBE_PARENT(_rbe) (_rbe)->rbt_parent
+#define RBE_COLOR(_rbe) (_rbe)->rbt_color
+
+#define RBH_ROOT(_rbt) (_rbt)->rbt_root
+
+static inline void rbe_set(struct rb_entry *rbe, struct rb_entry *parent)
+{
+ RBE_PARENT(rbe) = parent;
+ RBE_LEFT(rbe) = RBE_RIGHT(rbe) = NULL;
+ RBE_COLOR(rbe) = RB_RED;
+}
+
+static inline void rbe_set_blackred(struct rb_entry *black,
+ struct rb_entry *red)
+{
+ RBE_COLOR(black) = RB_BLACK;
+ RBE_COLOR(red) = RB_RED;
+}
+
+static inline void rbe_rotate_left(struct rbt_tree *rbt, struct rb_entry *rbe)
+{
+ struct rb_entry *parent;
+ struct rb_entry *tmp;
+
+ tmp = RBE_RIGHT(rbe);
+ RBE_RIGHT(rbe) = RBE_LEFT(tmp);
+ if (RBE_RIGHT(rbe) != NULL)
+ RBE_PARENT(RBE_LEFT(tmp)) = rbe;
+
+ parent = RBE_PARENT(rbe);
+ RBE_PARENT(tmp) = parent;
+ if (parent != NULL) {
+ if (rbe == RBE_LEFT(parent))
+ RBE_LEFT(parent) = tmp;
+ else
+ RBE_RIGHT(parent) = tmp;
+ } else
+ RBH_ROOT(rbt) = tmp;
+
+ RBE_LEFT(tmp) = rbe;
+ RBE_PARENT(rbe) = tmp;
+}
+
+static inline void rbe_rotate_right(struct rbt_tree *rbt, struct rb_entry *rbe)
+{
+ struct rb_entry *parent;
+ struct rb_entry *tmp;
+
+ tmp = RBE_LEFT(rbe);
+ RBE_LEFT(rbe) = RBE_RIGHT(tmp);
+ if (RBE_LEFT(rbe) != NULL)
+ RBE_PARENT(RBE_RIGHT(tmp)) = rbe;
+
+ parent = RBE_PARENT(rbe);
+ RBE_PARENT(tmp) = parent;
+ if (parent != NULL) {
+ if (rbe == RBE_LEFT(parent))
+ RBE_LEFT(parent) = tmp;
+ else
+ RBE_RIGHT(parent) = tmp;
+ } else
+ RBH_ROOT(rbt) = tmp;
+
+ RBE_RIGHT(tmp) = rbe;
+ RBE_PARENT(rbe) = tmp;
+}
+
+static inline void rbe_insert_color(struct rbt_tree *rbt, struct rb_entry *rbe)
+{
+ struct rb_entry *parent, *gparent, *tmp;
+
+ rbt->count++;
+
+ while ((parent = RBE_PARENT(rbe)) != NULL
+ && RBE_COLOR(parent) == RB_RED) {
+ gparent = RBE_PARENT(parent);
+
+ if (parent == RBE_LEFT(gparent)) {
+ tmp = RBE_RIGHT(gparent);
+ if (tmp != NULL && RBE_COLOR(tmp) == RB_RED) {
+ RBE_COLOR(tmp) = RB_BLACK;
+ rbe_set_blackred(parent, gparent);
+ rbe = gparent;
+ continue;
+ }
+
+ if (RBE_RIGHT(parent) == rbe) {
+ rbe_rotate_left(rbt, parent);
+ tmp = parent;
+ parent = rbe;
+ rbe = tmp;
+ }
+
+ rbe_set_blackred(parent, gparent);
+ rbe_rotate_right(rbt, gparent);
+ } else {
+ tmp = RBE_LEFT(gparent);
+ if (tmp != NULL && RBE_COLOR(tmp) == RB_RED) {
+ RBE_COLOR(tmp) = RB_BLACK;
+ rbe_set_blackred(parent, gparent);
+ rbe = gparent;
+ continue;
+ }
+
+ if (RBE_LEFT(parent) == rbe) {
+ rbe_rotate_right(rbt, parent);
+ tmp = parent;
+ parent = rbe;
+ rbe = tmp;
+ }
+
+ rbe_set_blackred(parent, gparent);
+ rbe_rotate_left(rbt, gparent);
+ }
+ }
+
+ RBE_COLOR(RBH_ROOT(rbt)) = RB_BLACK;
+}
+
+static inline void rbe_remove_color(struct rbt_tree *rbt,
+ struct rb_entry *parent,
+ struct rb_entry *rbe)
+{
+ struct rb_entry *tmp;
+
+ while ((rbe == NULL || RBE_COLOR(rbe) == RB_BLACK)
+ && rbe != RBH_ROOT(rbt) && parent) {
+ if (RBE_LEFT(parent) == rbe) {
+ tmp = RBE_RIGHT(parent);
+ if (RBE_COLOR(tmp) == RB_RED) {
+ rbe_set_blackred(tmp, parent);
+ rbe_rotate_left(rbt, parent);
+ tmp = RBE_RIGHT(parent);
+ }
+ if ((RBE_LEFT(tmp) == NULL
+ || RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK)
+ && (RBE_RIGHT(tmp) == NULL
+ || RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK)) {
+ RBE_COLOR(tmp) = RB_RED;
+ rbe = parent;
+ parent = RBE_PARENT(rbe);
+ } else {
+ if (RBE_RIGHT(tmp) == NULL
+ || RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK) {
+ struct rb_entry *oleft;
+
+ oleft = RBE_LEFT(tmp);
+ if (oleft != NULL)
+ RBE_COLOR(oleft) = RB_BLACK;
+
+ RBE_COLOR(tmp) = RB_RED;
+ rbe_rotate_right(rbt, tmp);
+ tmp = RBE_RIGHT(parent);
+ }
+
+ RBE_COLOR(tmp) = RBE_COLOR(parent);
+ RBE_COLOR(parent) = RB_BLACK;
+ if (RBE_RIGHT(tmp))
+ RBE_COLOR(RBE_RIGHT(tmp)) = RB_BLACK;
+
+ rbe_rotate_left(rbt, parent);
+ rbe = RBH_ROOT(rbt);
+ break;
+ }
+ } else {
+ tmp = RBE_LEFT(parent);
+ if (RBE_COLOR(tmp) == RB_RED) {
+ rbe_set_blackred(tmp, parent);
+ rbe_rotate_right(rbt, parent);
+ tmp = RBE_LEFT(parent);
+ }
+
+ if ((RBE_LEFT(tmp) == NULL
+ || RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK)
+ && (RBE_RIGHT(tmp) == NULL
+ || RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK)) {
+ RBE_COLOR(tmp) = RB_RED;
+ rbe = parent;
+ parent = RBE_PARENT(rbe);
+ } else {
+ if (RBE_LEFT(tmp) == NULL
+ || RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK) {
+ struct rb_entry *oright;
+
+ oright = RBE_RIGHT(tmp);
+ if (oright != NULL)
+ RBE_COLOR(oright) = RB_BLACK;
+
+ RBE_COLOR(tmp) = RB_RED;
+ rbe_rotate_left(rbt, tmp);
+ tmp = RBE_LEFT(parent);
+ }
+
+ RBE_COLOR(tmp) = RBE_COLOR(parent);
+ RBE_COLOR(parent) = RB_BLACK;
+ if (RBE_LEFT(tmp) != NULL)
+ RBE_COLOR(RBE_LEFT(tmp)) = RB_BLACK;
+
+ rbe_rotate_right(rbt, parent);
+ rbe = RBH_ROOT(rbt);
+ break;
+ }
+ }
+ }
+
+ if (rbe != NULL)
+ RBE_COLOR(rbe) = RB_BLACK;
+}
+
+static inline struct rb_entry *
+rbe_remove(struct rbt_tree *rbt, struct rb_entry *rbe)
+{
+ struct rb_entry *child, *parent, *old = rbe;
+ unsigned int color;
+
+ if (RBE_LEFT(rbe) == NULL)
+ child = RBE_RIGHT(rbe);
+ else if (RBE_RIGHT(rbe) == NULL)
+ child = RBE_LEFT(rbe);
+ else {
+ struct rb_entry *tmp;
+
+ rbe = RBE_RIGHT(rbe);
+ while ((tmp = RBE_LEFT(rbe)) != NULL)
+ rbe = tmp;
+
+ child = RBE_RIGHT(rbe);
+ parent = RBE_PARENT(rbe);
+ color = RBE_COLOR(rbe);
+ if (child != NULL)
+ RBE_PARENT(child) = parent;
+ if (parent != NULL) {
+ if (RBE_LEFT(parent) == rbe)
+ RBE_LEFT(parent) = child;
+ else
+ RBE_RIGHT(parent) = child;
+ } else
+ RBH_ROOT(rbt) = child;
+ if (RBE_PARENT(rbe) == old)
+ parent = rbe;
+ *rbe = *old;
+
+ tmp = RBE_PARENT(old);
+ if (tmp != NULL) {
+ if (RBE_LEFT(tmp) == old)
+ RBE_LEFT(tmp) = rbe;
+ else
+ RBE_RIGHT(tmp) = rbe;
+ } else
+ RBH_ROOT(rbt) = rbe;
+
+ RBE_PARENT(RBE_LEFT(old)) = rbe;
+ if (RBE_RIGHT(old))
+ RBE_PARENT(RBE_RIGHT(old)) = rbe;
+
+ goto color;
+ }
+
+ parent = RBE_PARENT(rbe);
+ color = RBE_COLOR(rbe);
+
+ if (child != NULL)
+ RBE_PARENT(child) = parent;
+ if (parent != NULL) {
+ if (RBE_LEFT(parent) == rbe)
+ RBE_LEFT(parent) = child;
+ else
+ RBE_RIGHT(parent) = child;
+ } else
+ RBH_ROOT(rbt) = child;
+color:
+ if (color == RB_BLACK)
+ rbe_remove_color(rbt, parent, child);
+
+ rbt->count--;
+ return (old);
+}
+
+void typed_rb_remove(struct rbt_tree *rbt, struct rb_entry *rbe)
+{
+ rbe_remove(rbt, rbe);
+}
+
+struct typed_rb_entry *typed_rb_insert(struct rbt_tree *rbt,
+ struct rb_entry *rbe, int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b))
+{
+ struct rb_entry *tmp;
+ struct rb_entry *parent = NULL;
+ int comp = 0;
+
+ tmp = RBH_ROOT(rbt);
+ while (tmp != NULL) {
+ parent = tmp;
+
+ comp = cmpfn(rbe, tmp);
+ if (comp < 0)
+ tmp = RBE_LEFT(tmp);
+ else if (comp > 0)
+ tmp = RBE_RIGHT(tmp);
+ else
+ return tmp;
+ }
+
+ rbe_set(rbe, parent);
+
+ if (parent != NULL) {
+ if (comp < 0)
+ RBE_LEFT(parent) = rbe;
+ else
+ RBE_RIGHT(parent) = rbe;
+ } else
+ RBH_ROOT(rbt) = rbe;
+
+ rbe_insert_color(rbt, rbe);
+
+ return NULL;
+}
+
+/* Finds the node whose key matches the given lookup key */
+struct rb_entry *typed_rb_find(struct rbt_tree *rbt, const struct rb_entry *key,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b))
+{
+ struct rb_entry *tmp = RBH_ROOT(rbt);
+ int comp;
+
+ while (tmp != NULL) {
+ comp = cmpfn(key, tmp);
+ if (comp < 0)
+ tmp = RBE_LEFT(tmp);
+ else if (comp > 0)
+ tmp = RBE_RIGHT(tmp);
+ else
+ return tmp;
+ }
+
+ return (NULL);
+}
+
+struct rb_entry *typed_rb_find_gteq(struct rbt_tree *rbt,
+ const struct rb_entry *key,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b))
+{
+ struct rb_entry *tmp = RBH_ROOT(rbt), *best = NULL;
+ int comp;
+
+ while (tmp != NULL) {
+ comp = cmpfn(key, tmp);
+ if (comp < 0) {
+ best = tmp;
+ tmp = RBE_LEFT(tmp);
+ } else if (comp > 0)
+ tmp = RBE_RIGHT(tmp);
+ else
+ return tmp;
+ }
+
+ return best;
+}
+
+struct rb_entry *typed_rb_find_lt(struct rbt_tree *rbt,
+ const struct rb_entry *key,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b))
+{
+ struct rb_entry *tmp = RBH_ROOT(rbt), *best = NULL;
+ int comp;
+
+ while (tmp != NULL) {
+ comp = cmpfn(key, tmp);
+ if (comp <= 0)
+ tmp = RBE_LEFT(tmp);
+ else {
+ best = tmp;
+ tmp = RBE_RIGHT(tmp);
+ }
+ }
+
+ return best;
+}
+
+struct rb_entry *typed_rb_next(struct rb_entry *rbe)
+{
+ if (RBE_RIGHT(rbe) != NULL) {
+ rbe = RBE_RIGHT(rbe);
+ while (RBE_LEFT(rbe) != NULL)
+ rbe = RBE_LEFT(rbe);
+ } else {
+ if (RBE_PARENT(rbe) && (rbe == RBE_LEFT(RBE_PARENT(rbe))))
+ rbe = RBE_PARENT(rbe);
+ else {
+ while (RBE_PARENT(rbe)
+ && (rbe == RBE_RIGHT(RBE_PARENT(rbe))))
+ rbe = RBE_PARENT(rbe);
+ rbe = RBE_PARENT(rbe);
+ }
+ }
+
+ return rbe;
+}
+
+struct rb_entry *typed_rb_min(struct rbt_tree *rbt)
+{
+ struct rb_entry *rbe = RBH_ROOT(rbt);
+ struct rb_entry *parent = NULL;
+
+ while (rbe != NULL) {
+ parent = rbe;
+ rbe = RBE_LEFT(rbe);
+ }
+
+ return parent;
+}
diff --git a/lib/typerb.h b/lib/typerb.h
new file mode 100644
index 0000000000..ce8446f853
--- /dev/null
+++ b/lib/typerb.h
@@ -0,0 +1,190 @@
+/*
+ * The following Red-Black tree implementation is based off code with
+ * original copyright:
+ *
+ * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _FRR_TYPERB_H
+#define _FRR_TYPERB_H
+
+#include "typesafe.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct typed_rb_entry {
+ struct typed_rb_entry *rbt_parent;
+ struct typed_rb_entry *rbt_left;
+ struct typed_rb_entry *rbt_right;
+ unsigned int rbt_color;
+};
+
+struct typed_rb_root {
+ struct typed_rb_entry *rbt_root;
+ size_t count;
+};
+
+struct typed_rb_entry *typed_rb_insert(struct typed_rb_root *,
+ struct typed_rb_entry *rbe,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b));
+void typed_rb_remove(struct typed_rb_root *, struct typed_rb_entry *rbe);
+struct typed_rb_entry *typed_rb_find(struct typed_rb_root *,
+ const struct typed_rb_entry *rbe,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b));
+struct typed_rb_entry *typed_rb_find_gteq(struct typed_rb_root *,
+ const struct typed_rb_entry *rbe,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b));
+struct typed_rb_entry *typed_rb_find_lt(struct typed_rb_root *,
+ const struct typed_rb_entry *rbe,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b));
+struct typed_rb_entry *typed_rb_min(struct typed_rb_root *);
+struct typed_rb_entry *typed_rb_next(struct typed_rb_entry *);
+
+#define _PREDECL_RBTREE(prefix) \
+struct prefix ## _head { struct typed_rb_root rr; }; \
+struct prefix ## _item { struct typed_rb_entry re; };
+
+#define INIT_RBTREE_UNIQ(var) { }
+#define INIT_RBTREE_NONUNIQ(var) { }
+
+#define _DECLARE_RBTREE(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_insert(&h->rr, &item->field.re, cmpfn_uq); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_find_gteq(&h->rr, &item->field.re, cmpfn_nuq); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_find_lt(&h->rr, &item->field.re, cmpfn_nuq); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ typed_rb_remove(&h->rr, &item->field.re); \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_min(&h->rr); \
+ if (!re) \
+ return NULL; \
+ typed_rb_remove(&h->rr, re); \
+ return container_of(re, type, field.re); \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_min(&h->rr); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_next(&item->field.re); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = item ? typed_rb_next(&item->field.re) : NULL; \
+ return container_of_null(re, type, field.re); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->rr.count; \
+} \
+/* ... */
+
+#define PREDECL_RBTREE_UNIQ(prefix) \
+ _PREDECL_RBTREE(prefix)
+#define DECLARE_RBTREE_UNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct typed_rb_entry *a, \
+ const struct typed_rb_entry *b) \
+{ \
+ return cmpfn(container_of(a, type, field.re), \
+ container_of(b, type, field.re)); \
+} \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_find(&h->rr, &item->field.re, &prefix ## __cmp); \
+ return container_of_null(re, type, field.re); \
+} \
+ \
+_DECLARE_RBTREE(prefix, type, field, prefix ## __cmp, prefix ## __cmp) \
+/* ... */
+
+#define PREDECL_RBTREE_NONUNIQ(prefix) \
+ _PREDECL_RBTREE(prefix)
+#define DECLARE_RBTREE_NONUNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct typed_rb_entry *a, \
+ const struct typed_rb_entry *b) \
+{ \
+ return cmpfn(container_of(a, type, field.re), \
+ container_of(b, type, field.re)); \
+} \
+macro_inline int prefix ## __cmp_uq(const struct typed_rb_entry *a, \
+ const struct typed_rb_entry *b) \
+{ \
+ int cmpval = cmpfn(container_of(a, type, field.re), \
+ container_of(b, type, field.re)); \
+ if (cmpval) \
+ return cmpval; \
+ if (a < b) \
+ return -1; \
+ if (a > b) \
+ return 1; \
+ return 0; \
+} \
+ \
+_DECLARE_RBTREE(prefix, type, field, prefix ## __cmp, prefix ## __cmp_uq) \
+/* ... */
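+
+/* usage sketch (the names "itemlist", "struct item" and item_cmp are
+ * illustrative only, not part of this header):
+ *
+ * PREDECL_RBTREE_UNIQ(itemlist)
+ * struct item {
+ *	int val;
+ *	struct itemlist_item field;
+ * };
+ * static int item_cmp(const struct item *a, const struct item *b)
+ * {
+ *	return a->val - b->val;
+ * }
+ * DECLARE_RBTREE_UNIQ(itemlist, struct item, field, item_cmp)
+ *
+ * for the UNIQ variant, itemlist_add() returns NULL on success or the
+ * already-present duplicate item.
+ */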
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FRR_TYPERB_H */
diff --git a/lib/typesafe.c b/lib/typesafe.c
new file mode 100644
index 0000000000..47a6d0af48
--- /dev/null
+++ b/lib/typesafe.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2019 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "typesafe.h"
+#include "memory.h"
+
+DEFINE_MTYPE_STATIC(LIB, TYPEDHASH_BUCKET, "Typed-hash bucket")
+DEFINE_MTYPE_STATIC(LIB, SKIPLIST_OFLOW, "Skiplist overflow")
+DEFINE_MTYPE_STATIC(LIB, HEAP_ARRAY, "Typed-heap array")
+
+#if 0
+static void hash_consistency_check(struct thash_head *head)
+{
+ uint32_t i;
+ struct thash_item *item, *prev;
+
+ for (i = 0; i < HASH_SIZE(*head); i++) {
+ item = head->entries[i];
+ prev = NULL;
+ while (item) {
+ assert(HASH_KEY(*head, item->hashval) == i);
+ assert(!prev || item->hashval >= prev->hashval);
+ prev = item;
+ item = item->next;
+ }
+ }
+}
+#else
+#define hash_consistency_check(x)
+#endif
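+
+/* hash bucket chains are kept sorted by 32-bit hash value;  grow and
+ * shrink below rely on this to split and merge chains with simple
+ * pointer splicing, without re-hashing any item.
+ */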
+
+void typesafe_hash_grow(struct thash_head *head)
+{
+ uint32_t newsize = head->count, i, j;
+ uint8_t newshift, delta;
+
+ hash_consistency_check(head);
+
+ newsize |= newsize >> 1;
+ newsize |= newsize >> 2;
+ newsize |= newsize >> 4;
+ newsize |= newsize >> 8;
+ newsize |= newsize >> 16;
+ newsize++;
+ newshift = __builtin_ctz(newsize) + 1;
+
+ if (head->maxshift && newshift > head->maxshift)
+ newshift = head->maxshift;
+ if (newshift == head->tabshift)
+ return;
+ newsize = _HASH_SIZE(newshift);
+
+ head->entries = XREALLOC(MTYPE_TYPEDHASH_BUCKET, head->entries,
+ sizeof(head->entries[0]) * newsize);
+ memset(head->entries + HASH_SIZE(*head), 0,
+ sizeof(head->entries[0]) *
+ (newsize - HASH_SIZE(*head)));
+
+ delta = newshift - head->tabshift;
+
+ i = HASH_SIZE(*head);
+ if (i == 0)
+ goto out;
+ do {
+ struct thash_item **apos, *item;
+
+ i--;
+ apos = &head->entries[i];
+
+ for (j = 0; j < (1U << delta); j++) {
+ item = *apos;
+ *apos = NULL;
+
+ head->entries[(i << delta) + j] = item;
+ apos = &head->entries[(i << delta) + j];
+
+ while ((item = *apos)) {
+ uint32_t midbits;
+ midbits = _HASH_KEY(newshift, item->hashval);
+ midbits &= (1 << delta) - 1;
+ if (midbits > j)
+ break;
+ apos = &item->next;
+ }
+ }
+ } while (i > 0);
+
+out:
+ head->tabshift = newshift;
+ hash_consistency_check(head);
+}
+
+void typesafe_hash_shrink(struct thash_head *head)
+{
+ uint32_t newsize = head->count, i, j;
+ uint8_t newshift, delta;
+
+ hash_consistency_check(head);
+
+ if (!head->count) {
+ XFREE(MTYPE_TYPEDHASH_BUCKET, head->entries);
+ head->tabshift = 0;
+ return;
+ }
+
+ newsize |= newsize >> 1;
+ newsize |= newsize >> 2;
+ newsize |= newsize >> 4;
+ newsize |= newsize >> 8;
+ newsize |= newsize >> 16;
+ newsize++;
+ newshift = __builtin_ctz(newsize) + 1;
+
+ if (head->minshift && newshift < head->minshift)
+ newshift = head->minshift;
+ if (newshift == head->tabshift)
+ return;
+ newsize = _HASH_SIZE(newshift);
+
+ delta = head->tabshift - newshift;
+
+ for (i = 0; i < newsize; i++) {
+ struct thash_item **apos = &head->entries[i];
+
+ for (j = 0; j < (1U << delta); j++) {
+ *apos = head->entries[(i << delta) + j];
+ while (*apos)
+ apos = &(*apos)->next;
+ }
+ }
+ head->entries = XREALLOC(MTYPE_TYPEDHASH_BUCKET, head->entries,
+ sizeof(head->entries[0]) * newsize);
+ head->tabshift = newshift;
+
+ hash_consistency_check(head);
+}
+
+/* skiplist */
+
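+/* levels 0..SKIPLIST_OVERFLOW-1 live in the item itself.  If an item
+ * has more levels than fit inline, next[SKIPLIST_OVERFLOW] instead
+ * points at a separately allocated sskip_overflow, marked by setting
+ * bit 0 of the pointer (items hold pointers and are thus sufficiently
+ * aligned that the low bits are otherwise zero.)
+ */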
+static inline struct sskip_item *sl_level_get(struct sskip_item *item,
+ size_t level)
+{
+ if (level < SKIPLIST_OVERFLOW)
+ return item->next[level];
+ if (level == SKIPLIST_OVERFLOW && !((uintptr_t)item->next[level] & 1))
+ return item->next[level];
+
+ uintptr_t ptrval = (uintptr_t)item->next[SKIPLIST_OVERFLOW];
+ ptrval &= UINTPTR_MAX - 3;
+ struct sskip_overflow *oflow = (struct sskip_overflow *)ptrval;
+ return oflow->next[level - SKIPLIST_OVERFLOW];
+}
+
+static inline void sl_level_set(struct sskip_item *item, size_t level,
+ struct sskip_item *value)
+{
+ if (level < SKIPLIST_OVERFLOW)
+ item->next[level] = value;
+ else if (level == SKIPLIST_OVERFLOW && !((uintptr_t)item->next[level] & 1))
+ item->next[level] = value;
+ else {
+ uintptr_t ptrval = (uintptr_t)item->next[SKIPLIST_OVERFLOW];
+ ptrval &= UINTPTR_MAX - 3;
+ struct sskip_overflow *oflow = (struct sskip_overflow *)ptrval;
+ oflow->next[level - SKIPLIST_OVERFLOW] = value;
+ }
+}
+
+struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
+ struct sskip_item *item,
+ int (*cmpfn)(const struct sskip_item *a,
+ const struct sskip_item *b))
+{
+ size_t level = SKIPLIST_MAXDEPTH, newlevel, auxlevel;
+ struct sskip_item *prev = &head->hitem, *next, *auxprev, *auxnext;
+ int cmpval;
+
+ /* level / newlevel are 1-counted here */
+ newlevel = __builtin_ctz(random()) + 1;
+ if (newlevel > SKIPLIST_MAXDEPTH)
+ newlevel = SKIPLIST_MAXDEPTH;
+
+ next = NULL;
+ while (level >= newlevel) {
+ next = sl_level_get(prev, level - 1);
+ if (!next) {
+ level--;
+ continue;
+ }
+ cmpval = cmpfn(next, item);
+ if (cmpval < 0) {
+ prev = next;
+ continue;
+ } else if (cmpval == 0) {
+ return next;
+ }
+ level--;
+ }
+
+	/* check for duplicate item - could be removed if code doesn't rely
+	 * on it, but it's not really worth the complication. */
+ auxlevel = level;
+ auxprev = prev;
+ while (auxlevel) {
+ auxlevel--;
+ auxnext = sl_level_get(auxprev, auxlevel);
+ cmpval = 1;
+ while (auxnext && (cmpval = cmpfn(auxnext, item)) < 0) {
+ auxprev = auxnext;
+ auxnext = sl_level_get(auxprev, auxlevel);
+ }
+ if (cmpval == 0)
+ return auxnext;
+ };
+
+ head->count++;
+ memset(item, 0, sizeof(*item));
+ if (newlevel > SKIPLIST_EMBED) {
+ struct sskip_overflow *oflow;
+ oflow = XMALLOC(MTYPE_SKIPLIST_OFLOW, sizeof(void *)
+ * (newlevel - SKIPLIST_OVERFLOW));
+ item->next[SKIPLIST_OVERFLOW] = (struct sskip_item *)
+ ((uintptr_t)oflow | 1);
+ }
+
+ sl_level_set(item, level, next);
+ sl_level_set(prev, level, item);
+	/* level is now 0-counted and < newlevel */
+ while (level) {
+ level--;
+ next = sl_level_get(prev, level);
+ while (next && cmpfn(next, item) < 0) {
+ prev = next;
+ next = sl_level_get(prev, level);
+ }
+
+ sl_level_set(item, level, next);
+ sl_level_set(prev, level, item);
+ };
+ return NULL;
+}
+
+/* NOTE: level counting below is 1-based since that makes the code simpler! */
+
+struct sskip_item *typesafe_skiplist_find(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b))
+{
+ size_t level = SKIPLIST_MAXDEPTH;
+ struct sskip_item *prev = &head->hitem, *next;
+ int cmpval;
+
+ while (level) {
+ next = sl_level_get(prev, level - 1);
+ if (!next) {
+ level--;
+ continue;
+ }
+ cmpval = cmpfn(next, item);
+ if (cmpval < 0) {
+ prev = next;
+ continue;
+ }
+ if (cmpval == 0)
+ return next;
+ level--;
+ }
+ return NULL;
+}
+
+struct sskip_item *typesafe_skiplist_find_gteq(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b))
+{
+ size_t level = SKIPLIST_MAXDEPTH;
+ struct sskip_item *prev = &head->hitem, *next;
+ int cmpval;
+
+ while (level) {
+ next = sl_level_get(prev, level - 1);
+ if (!next) {
+ level--;
+ continue;
+ }
+ cmpval = cmpfn(next, item);
+ if (cmpval < 0) {
+ prev = next;
+ continue;
+ }
+ if (cmpval == 0)
+ return next;
+ level--;
+ }
+ return next;
+}
+
+struct sskip_item *typesafe_skiplist_find_lt(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b))
+{
+ size_t level = SKIPLIST_MAXDEPTH;
+ struct sskip_item *prev = &head->hitem, *next, *best = NULL;
+ int cmpval;
+
+ while (level) {
+ next = sl_level_get(prev, level - 1);
+ if (!next) {
+ level--;
+ continue;
+ }
+ cmpval = cmpfn(next, item);
+ if (cmpval < 0) {
+ best = prev = next;
+ continue;
+ }
+ level--;
+ }
+ return best;
+}
+
+void typesafe_skiplist_del(struct sskip_head *head, struct sskip_item *item,
+ int (*cmpfn)(const struct sskip_item *a,
+ const struct sskip_item *b))
+{
+ size_t level = SKIPLIST_MAXDEPTH;
+ struct sskip_item *prev = &head->hitem, *next;
+ int cmpval;
+
+ while (level) {
+ next = sl_level_get(prev, level - 1);
+ if (!next) {
+ level--;
+ continue;
+ }
+ if (next == item) {
+ sl_level_set(prev, level - 1,
+ sl_level_get(item, level - 1));
+ level--;
+ continue;
+ }
+ cmpval = cmpfn(next, item);
+ if (cmpval < 0) {
+ prev = next;
+ continue;
+ }
+ level--;
+ }
+
+ /* TBD: assert when trying to remove non-existing item? */
+ head->count--;
+
+ if ((uintptr_t)item->next[SKIPLIST_OVERFLOW] & 1) {
+ uintptr_t ptrval = (uintptr_t)item->next[SKIPLIST_OVERFLOW];
+ ptrval &= UINTPTR_MAX - 3;
+ struct sskip_overflow *oflow = (struct sskip_overflow *)ptrval;
+ XFREE(MTYPE_SKIPLIST_OFLOW, oflow);
+ }
+ memset(item, 0, sizeof(*item));
+}
+
+struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head)
+{
+ size_t level = SKIPLIST_MAXDEPTH;
+ struct sskip_item *prev = &head->hitem, *next, *item;
+
+ item = sl_level_get(prev, 0);
+ if (!item)
+ return NULL;
+
+ do {
+ level--;
+
+ next = sl_level_get(prev, level);
+ if (next != item)
+ continue;
+
+ sl_level_set(prev, level, sl_level_get(item, level));
+ } while (level);
+
+ head->count--;
+
+ if ((uintptr_t)item->next[SKIPLIST_OVERFLOW] & 1) {
+ uintptr_t ptrval = (uintptr_t)item->next[SKIPLIST_OVERFLOW];
+ ptrval &= UINTPTR_MAX - 3;
+ struct sskip_overflow *oflow = (struct sskip_overflow *)ptrval;
+ XFREE(MTYPE_SKIPLIST_OFLOW, oflow);
+ }
+ memset(item, 0, sizeof(*item));
+
+ return item;
+}
+
+/* heap */
+
+#if 0
+static void heap_consistency_check(struct heap_head *head,
+ int (*cmpfn)(const struct heap_item *a,
+ const struct heap_item *b),
+ uint32_t pos)
+{
+ uint32_t rghtpos = pos + 1;
+ uint32_t downpos = HEAP_NARY * (pos + 1);
+
+ if (pos + 1 > ~0U / HEAP_NARY)
+ downpos = ~0U;
+
+ if ((pos & (HEAP_NARY - 1)) != HEAP_NARY - 1 && rghtpos < head->count) {
+ assert(cmpfn(head->array[rghtpos], head->array[pos]) >= 0);
+ heap_consistency_check(head, cmpfn, rghtpos);
+ }
+ if (downpos < head->count) {
+ assert(cmpfn(head->array[downpos], head->array[pos]) >= 0);
+ heap_consistency_check(head, cmpfn, downpos);
+ }
+}
+#else
+#define heap_consistency_check(head, cmpfn, pos)
+#endif
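+
+/* not a plain N-ary heap:  the items inside each block of HEAP_NARY
+ * siblings are themselves kept sorted, and an item's children are the
+ * entire block starting at HEAP_NARY * (pos + 1).  Both invariants
+ * together still put the minimum at array[0] (cf. the consistency
+ * check above.)
+ */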
+
+void typesafe_heap_resize(struct heap_head *head, bool grow)
+{
+ uint32_t newsize;
+
+ if (grow) {
+ newsize = head->arraysz;
+ if (newsize <= 36)
+ newsize = 72;
+ else if (newsize < 262144)
+ newsize += newsize / 2;
+ else if (newsize < 0xaaaa0000)
+ newsize += newsize / 3;
+ else
+ assert(!newsize);
+ } else if (head->count > 0) {
+ newsize = head->count;
+ } else {
+ XFREE(MTYPE_HEAP_ARRAY, head->array);
+ head->arraysz = 0;
+ return;
+ }
+
+ newsize += HEAP_NARY - 1;
+ newsize &= ~(HEAP_NARY - 1);
+ if (newsize == head->arraysz)
+ return;
+
+ head->array = XREALLOC(MTYPE_HEAP_ARRAY, head->array,
+ newsize * sizeof(struct heap_item *));
+ head->arraysz = newsize;
+}
+
+void typesafe_heap_pushdown(struct heap_head *head, uint32_t pos,
+ struct heap_item *item,
+ int (*cmpfn)(const struct heap_item *a,
+ const struct heap_item *b))
+{
+ uint32_t rghtpos, downpos, moveto;
+
+ while (1) {
+ /* rghtpos: neighbor to the "right", inside block of NARY.
+ * may be invalid if last in block, check nary_last()
+ * downpos: first neighbor in the "downwards" block further
+ * away from the root
+ */
+ rghtpos = pos + 1;
+
+ /* make sure we can use the full 4G items */
+ downpos = HEAP_NARY * (pos + 1);
+ if (pos + 1 > ~0U / HEAP_NARY)
+ /* multiplication overflowed. ~0U is guaranteed
+ * to be an invalid index; size limit is enforced in
+ * resize()
+ */
+ downpos = ~0U;
+
+ /* only used on break */
+ moveto = pos;
+
+#define nary_last(x) (((x) & (HEAP_NARY - 1)) == HEAP_NARY - 1)
+ if (downpos >= head->count
+ || cmpfn(head->array[downpos], item) >= 0) {
+ /* not moving down; either at end or down is >= item */
+ if (nary_last(pos) || rghtpos >= head->count
+ || cmpfn(head->array[rghtpos], item) >= 0)
+ /* not moving right either - got our spot */
+ break;
+
+ moveto = rghtpos;
+
+ /* else: downpos is valid and < item. choose between down
+ * or right (if the latter is an option) */
+ } else if (nary_last(pos) || cmpfn(head->array[rghtpos],
+ head->array[downpos]) >= 0)
+ moveto = downpos;
+ else
+ moveto = rghtpos;
+#undef nary_last
+
+ head->array[pos] = head->array[moveto];
+ head->array[pos]->index = pos;
+ pos = moveto;
+ }
+
+ head->array[moveto] = item;
+ item->index = moveto;
+
+ heap_consistency_check(head, cmpfn, 0);
+}
+
+void typesafe_heap_pullup(struct heap_head *head, uint32_t pos,
+ struct heap_item *item,
+ int (*cmpfn)(const struct heap_item *a,
+ const struct heap_item *b))
+{
+ uint32_t moveto;
+
+ while (pos != 0) {
+ if ((pos & (HEAP_NARY - 1)) == 0)
+ moveto = pos / HEAP_NARY - 1;
+ else
+ moveto = pos - 1;
+
+ if (cmpfn(head->array[moveto], item) <= 0)
+ break;
+
+ head->array[pos] = head->array[moveto];
+ head->array[pos]->index = pos;
+
+ pos = moveto;
+ }
+
+ head->array[pos] = item;
+ item->index = pos;
+
+ heap_consistency_check(head, cmpfn, 0);
+}
diff --git a/lib/typesafe.h b/lib/typesafe.h
new file mode 100644
index 0000000000..0a4ed69e4e
--- /dev/null
+++ b/lib/typesafe.h
@@ -0,0 +1,866 @@
+/*
+ * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _FRR_TYPESAFE_H
+#define _FRR_TYPESAFE_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <assert.h>
+#include "compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* generic macros for all list-like types */
+
+#define frr_each(prefix, head, item) \
+ for (item = prefix##_first(head); item; \
+ item = prefix##_next(head, item))
+#define frr_each_safe(prefix, head, item) \
+ for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
+ prefix##_next_safe(head, \
+ (item = prefix##_first(head))); \
+ item; \
+ item = prefix##_safe, \
+ prefix##_safe = prefix##_next_safe(head, prefix##_safe))
+#define frr_each_from(prefix, head, item, from) \
+ for (item = from, from = prefix##_next_safe(head, item); \
+ item; \
+ item = from, from = prefix##_next_safe(head, from))
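+
+/* iteration sketch ("itemlist", "head" and process() are illustrative
+ * names):
+ *
+ *   struct item *i;
+ *
+ *   frr_each (itemlist, &head, i)
+ *           process(i);
+ *
+ * frr_each_safe fetches the next pointer before the loop body runs, so
+ * the current item may be deleted from inside the loop.
+ */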
+
+/* single-linked list, unsorted/arbitrary.
+ * can be used as queue with add_tail / pop
+ */
+
+/* don't use these structs directly */
+struct slist_item {
+ struct slist_item *next;
+};
+
+struct slist_head {
+ struct slist_item *first, **last_next;
+ size_t count;
+};
+
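+/* internal helper:  pos is the location to splice into, i.e. either
+ * &head->first or some item's &->next.  last_next always points at the
+ * tail's ->next pointer (or at &head->first while empty), which is
+ * what makes add_tail O(1).
+ */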
+static inline void typesafe_list_add(struct slist_head *head,
+ struct slist_item **pos, struct slist_item *item)
+{
+ item->next = *pos;
+ *pos = item;
+ if (pos == head->last_next)
+ head->last_next = &item->next;
+ head->count++;
+}
+
+/* use as:
+ *
+ * PREDECL_LIST(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_LIST(namelist, struct name, nlitem)
+ */
+#define PREDECL_LIST(prefix) \
+struct prefix ## _head { struct slist_head sh; }; \
+struct prefix ## _item { struct slist_item si; };
+
+#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
+
+#define DECLARE_LIST(prefix, type, field) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+ h->sh.last_next = &h->sh.first; \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
+{ \
+ typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
+} \
+macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
+{ \
+ typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
+} \
+macro_inline void prefix ## _add_after(struct prefix##_head *h, \
+ type *after, type *item) \
+{ \
+ struct slist_item **nextp; \
+ nextp = after ? &after->field.si.next : &h->sh.first; \
+ typesafe_list_add(&h->sh, nextp, &item->field.si); \
+} \
+/* TODO: del_hint */ \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ struct slist_item **iter = &h->sh.first; \
+ while (*iter && *iter != &item->field.si) \
+ iter = &(*iter)->next; \
+ if (!*iter) \
+ return; \
+ h->sh.count--; \
+ *iter = item->field.si.next; \
+ if (!item->field.si.next) \
+ h->sh.last_next = iter; \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct slist_item *sitem = h->sh.first; \
+ if (!sitem) \
+ return NULL; \
+ h->sh.count--; \
+ h->sh.first = sitem->next; \
+ if (h->sh.first == NULL) \
+ h->sh.last_next = &h->sh.first; \
+ return container_of(sitem, type, field.si); \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ return container_of_null(h->sh.first, type, field.si); \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head * h, type *item) \
+{ \
+ struct slist_item *sitem = &item->field.si; \
+ return container_of_null(sitem->next, type, field.si); \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ struct slist_item *sitem; \
+ if (!item) \
+ return NULL; \
+ sitem = &item->field.si; \
+ return container_of_null(sitem->next, type, field.si); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->sh.count; \
+} \
+/* ... */
+
+/* don't use these structs directly */
+struct dlist_item {
+ struct dlist_item *next;
+ struct dlist_item *prev;
+};
+
+struct dlist_head {
+ struct dlist_item hitem;
+ size_t count;
+};
+
+static inline void typesafe_dlist_add(struct dlist_head *head,
+ struct dlist_item *prev, struct dlist_item *item)
+{
+ item->next = prev->next;
+ item->next->prev = item;
+ item->prev = prev;
+ prev->next = item;
+ head->count++;
+}
+
+/* double-linked list, for fast item deletion
+ */
+#define PREDECL_DLIST(prefix) \
+struct prefix ## _head { struct dlist_head dh; }; \
+struct prefix ## _item { struct dlist_item di; };
+
+#define INIT_DLIST(var) { .dh = { \
+ .hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
+
+#define DECLARE_DLIST(prefix, type, field) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+ h->dh.hitem.prev = &h->dh.hitem; \
+ h->dh.hitem.next = &h->dh.hitem; \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
+{ \
+ typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
+} \
+macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
+{ \
+ typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
+} \
+macro_inline void prefix ## _add_after(struct prefix##_head *h, \
+ type *after, type *item) \
+{ \
+ struct dlist_item *prev; \
+ prev = after ? &after->field.di : &h->dh.hitem; \
+ typesafe_dlist_add(&h->dh, prev, &item->field.di); \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ struct dlist_item *ditem = &item->field.di; \
+ ditem->prev->next = ditem->next; \
+ ditem->next->prev = ditem->prev; \
+ h->dh.count--; \
+ ditem->prev = ditem->next = NULL; \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct dlist_item *ditem = h->dh.hitem.next; \
+ if (ditem == &h->dh.hitem) \
+ return NULL; \
+ ditem->prev->next = ditem->next; \
+ ditem->next->prev = ditem->prev; \
+ h->dh.count--; \
+ return container_of(ditem, type, field.di); \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ struct dlist_item *ditem = h->dh.hitem.next; \
+ if (ditem == &h->dh.hitem) \
+ return NULL; \
+ return container_of(ditem, type, field.di); \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head * h, type *item) \
+{ \
+ struct dlist_item *ditem = &item->field.di; \
+ if (ditem->next == &h->dh.hitem) \
+ return NULL; \
+ return container_of(ditem->next, type, field.di); \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ if (!item) \
+ return NULL; \
+ return prefix ## _next(h, item); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->dh.count; \
+} \
+/* ... */
+
+/* note: heap currently caps out at 4G items */
+
+#define HEAP_NARY 8U
+typedef uint32_t heap_index_i;
+
+struct heap_item {
+ uint32_t index;
+};
+
+struct heap_head {
+ struct heap_item **array;
+ uint32_t arraysz, count;
+};
+
+#define HEAP_RESIZE_TRESH_UP(h) \
+ (h->hh.count + 1 >= h->hh.arraysz)
+#define HEAP_RESIZE_TRESH_DN(h) \
+ (h->hh.count == 0 || \
+ h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)
+
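+/* use as (sketch; "tq", struct task and task_cmp are made-up names):
+ *
+ * PREDECL_HEAP(tq)
+ * struct task {
+ *	struct tq_item qi;
+ * };
+ * DECLARE_HEAP(tq, struct task, qi, task_cmp)
+ *
+ * _add / _pop give a priority queue;  _del works on arbitrary items
+ * because each item records its current array index.
+ */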
+#define PREDECL_HEAP(prefix) \
+struct prefix ## _head { struct heap_head hh; }; \
+struct prefix ## _item { struct heap_item hi; };
+
+#define INIT_HEAP(var) { }
+
+#define DECLARE_HEAP(prefix, type, field, cmpfn) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ assert(h->hh.count == 0); \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline int prefix ## __cmp(const struct heap_item *a, \
+ const struct heap_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.hi), \
+ container_of(b, type, field.hi)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ if (HEAP_RESIZE_TRESH_UP(h)) \
+ typesafe_heap_resize(&h->hh, true); \
+ typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
+ prefix ## __cmp); \
+ h->hh.count++; \
+ return NULL; \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ struct heap_item *other; \
+ uint32_t index = item->field.hi.index; \
+ assert(h->hh.array[index] == &item->field.hi); \
+ h->hh.count--; \
+ other = h->hh.array[h->hh.count]; \
+ if (cmpfn(container_of(other, type, field.hi), item) < 0) \
+ typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
+ else \
+ typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
+ if (HEAP_RESIZE_TRESH_DN(h)) \
+ typesafe_heap_resize(&h->hh, false); \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct heap_item *hitem, *other; \
+ if (h->hh.count == 0) \
+ return NULL; \
+ hitem = h->hh.array[0]; \
+ h->hh.count--; \
+ other = h->hh.array[h->hh.count]; \
+ typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
+ if (HEAP_RESIZE_TRESH_DN(h)) \
+ typesafe_heap_resize(&h->hh, false); \
+ return container_of(hitem, type, field.hi); \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ if (h->hh.count == 0) \
+ return NULL; \
+ return container_of(h->hh.array[0], type, field.hi); \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ uint32_t idx = item->field.hi.index + 1; \
+ if (idx >= h->hh.count) \
+ return NULL; \
+ return container_of(h->hh.array[idx], type, field.hi); \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ if (!item) \
+ return NULL; \
+ return prefix ## _next(h, item); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->hh.count; \
+} \
+/* ... */
+
+extern void typesafe_heap_resize(struct heap_head *head, bool grow);
+extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
+ struct heap_item *item,
+ int (*cmpfn)(const struct heap_item *a,
+ const struct heap_item *b));
+extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
+ struct heap_item *item,
+ int (*cmpfn)(const struct heap_item *a,
+ const struct heap_item *b));
+
+/* single-linked list, sorted.
+ * can be used as priority queue with add / pop
+ */
+
+/* don't use these structs directly */
+struct ssort_item {
+ struct ssort_item *next;
+};
+
+struct ssort_head {
+ struct ssort_item *first;
+ size_t count;
+};
+
+/* use as:
+ *
+ * PREDECL_SORTLIST(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_SORTLIST(namelist, struct name, nlitem)
+ */
+#define _PREDECL_SORTLIST(prefix) \
+struct prefix ## _head { struct ssort_head sh; }; \
+struct prefix ## _item { struct ssort_item si; };
+
+#define INIT_SORTLIST_UNIQ(var) { }
+#define INIT_SORTLIST_NONUNIQ(var) { }
+
+#define PREDECL_SORTLIST_UNIQ(prefix) \
+ _PREDECL_SORTLIST(prefix)
+#define PREDECL_SORTLIST_NONUNIQ(prefix) \
+ _PREDECL_SORTLIST(prefix)
+
+#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ struct ssort_item **np = &h->sh.first; \
+ int c = 1; \
+ while (*np && (c = cmpfn_uq( \
+ container_of(*np, type, field.si), item)) < 0) \
+ np = &(*np)->next; \
+ if (c == 0) \
+ return container_of(*np, type, field.si); \
+ item->field.si.next = *np; \
+ *np = &item->field.si; \
+ h->sh.count++; \
+ return NULL; \
+} \
+macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct ssort_item *sitem = h->sh.first; \
+ int cmpval = 0; \
+	while (sitem && (cmpval = cmpfn_nuq( \
+			container_of(sitem, type, field.si), item)) < 0) \
+ sitem = sitem->next; \
+ return container_of_null(sitem, type, field.si); \
+} \
+macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct ssort_item *prev = NULL, *sitem = h->sh.first; \
+ int cmpval = 0; \
+	while (sitem && (cmpval = cmpfn_nuq( \
+			container_of(sitem, type, field.si), item)) < 0) \
+ sitem = (prev = sitem)->next; \
+ return container_of_null(prev, type, field.si); \
+} \
+/* TODO: del_hint */ \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ struct ssort_item **iter = &h->sh.first; \
+ while (*iter && *iter != &item->field.si) \
+ iter = &(*iter)->next; \
+ if (!*iter) \
+ return; \
+ h->sh.count--; \
+ *iter = item->field.si.next; \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct ssort_item *sitem = h->sh.first; \
+ if (!sitem) \
+ return NULL; \
+ h->sh.count--; \
+ h->sh.first = sitem->next; \
+ return container_of(sitem, type, field.si); \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ return container_of_null(h->sh.first, type, field.si); \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct ssort_item *sitem = &item->field.si; \
+ return container_of_null(sitem->next, type, field.si); \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ struct ssort_item *sitem; \
+ if (!item) \
+ return NULL; \
+ sitem = &item->field.si; \
+ return container_of_null(sitem->next, type, field.si); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->sh.count; \
+} \
+/* ... */
+
+#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
+ _DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn) \
+ \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ struct ssort_item *sitem = h->sh.first; \
+ int cmpval = 0; \
+	while (sitem && (cmpval = cmpfn( \
+			container_of(sitem, type, field.si), item)) < 0) \
+ sitem = sitem->next; \
+ if (!sitem || cmpval > 0) \
+ return NULL; \
+ return container_of(sitem, type, field.si); \
+} \
+/* ... */
+
+#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
+macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
+{ \
+ int cmpval = cmpfn(a, b); \
+ if (cmpval) \
+ return cmpval; \
+ if (a < b) \
+ return -1; \
+ if (a > b) \
+ return 1; \
+ return 0; \
+} \
+ _DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp) \
+/* ... */
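+
+/* the NONUNIQ variants break ties on the items' addresses so that
+ * equal-keyed items still get a stable total order, keeping _del and
+ * _next well-defined.  The skiplist and rbtree NONUNIQ variants use
+ * the same trick.
+ */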
+
+
+/* hash, "sorted" by hash value
+ */
+
+/* don't use these structs directly */
+struct thash_item {
+ struct thash_item *next;
+ uint32_t hashval;
+};
+
+struct thash_head {
+ struct thash_item **entries;
+ uint32_t count;
+
+ uint8_t tabshift;
+ uint8_t minshift, maxshift;
+};
+
+#define _HASH_SIZE(tabshift) \
+ ((1U << (tabshift)) >> 1)
+#define HASH_SIZE(head) \
+ _HASH_SIZE((head).tabshift)
+#define _HASH_KEY(tabshift, val) \
+ ((val) >> (33 - (tabshift)))
+#define HASH_KEY(head, val) \
+ _HASH_KEY((head).tabshift, val)
+#define HASH_GROW_THRESHOLD(head) \
+ ((head).count >= HASH_SIZE(head))
+#define HASH_SHRINK_THRESHOLD(head) \
+ ((head).count <= (HASH_SIZE(head) - 1) / 2)
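+
+/* tabshift is 1-based:  tabshift == n means 2^(n-1) buckets, indexed
+ * by the top n-1 bits of the 32-bit hash value (hence the "33 -" in
+ * _HASH_KEY.)  tabshift == 0 means no table is allocated.
+ */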
+
+extern void typesafe_hash_grow(struct thash_head *head);
+extern void typesafe_hash_shrink(struct thash_head *head);
+
+/* use as:
+ *
+ * PREDECL_HASH(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
+ */
+#define PREDECL_HASH(prefix) \
+struct prefix ## _head { struct thash_head hh; }; \
+struct prefix ## _item { struct thash_item hi; };
+
+#define INIT_HASH(var) { }
+
+#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ assert(h->hh.count == 0); \
+ h->hh.minshift = 0; \
+ typesafe_hash_shrink(&h->hh); \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ h->hh.count++; \
+ if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
+ typesafe_hash_grow(&h->hh); \
+ \
+ uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
+ item->field.hi.hashval = hval; \
+ struct thash_item **np = &h->hh.entries[hbits]; \
+ while (*np && (*np)->hashval < hval) \
+ np = &(*np)->next; \
+ if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) { \
+ h->hh.count--; \
+ return container_of(*np, type, field.hi); \
+ } \
+ item->field.hi.next = *np; \
+ *np = &item->field.hi; \
+ return NULL; \
+} \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ if (!h->hh.tabshift) \
+ return NULL; \
+ uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
+ struct thash_item *hitem = h->hh.entries[hbits]; \
+ while (hitem && hitem->hashval < hval) \
+ hitem = hitem->next; \
+ while (hitem && hitem->hashval == hval) { \
+ if (!cmpfn(container_of(hitem, type, field.hi), item)) \
+ return container_of(hitem, type, field.hi); \
+ hitem = hitem->next; \
+ } \
+ return NULL; \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ if (!h->hh.tabshift) \
+ return; \
+ uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
+ struct thash_item **np = &h->hh.entries[hbits]; \
+ while (*np && (*np)->hashval < hval) \
+ np = &(*np)->next; \
+ while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
+ np = &(*np)->next; \
+ if (*np != &item->field.hi) \
+ return; \
+ *np = item->field.hi.next; \
+ item->field.hi.next = NULL; \
+ h->hh.count--; \
+ if (HASH_SHRINK_THRESHOLD(h->hh)) \
+ typesafe_hash_shrink(&h->hh); \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ uint32_t i; \
+ for (i = 0; i < HASH_SIZE(h->hh); i++) \
+ if (h->hh.entries[i]) { \
+ struct thash_item *hitem = h->hh.entries[i]; \
+ h->hh.entries[i] = hitem->next; \
+ h->hh.count--; \
+ hitem->next = NULL; \
+ if (HASH_SHRINK_THRESHOLD(h->hh)) \
+ typesafe_hash_shrink(&h->hh); \
+ return container_of(hitem, type, field.hi); \
+ } \
+ return NULL; \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ uint32_t i; \
+ for (i = 0; i < HASH_SIZE(h->hh); i++) \
+ if (h->hh.entries[i]) \
+ return container_of(h->hh.entries[i], type, field.hi); \
+ return NULL; \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct thash_item *hitem = &item->field.hi; \
+ if (hitem->next) \
+ return container_of(hitem->next, type, field.hi); \
+ uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
+ for (; i < HASH_SIZE(h->hh); i++) \
+ if (h->hh.entries[i]) \
+ return container_of(h->hh.entries[i], type, field.hi); \
+ return NULL; \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ if (!item) \
+ return NULL; \
+ return prefix ## _next(h, item); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->hh.count; \
+} \
+/* ... */
+
+/* skiplist, sorted.
+ * can be used as priority queue with add / pop
+ */
+
+/* don't use these structs directly */
+#define SKIPLIST_MAXDEPTH 16
+#define SKIPLIST_EMBED 4
+#define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)
+
+struct sskip_item {
+ struct sskip_item *next[SKIPLIST_EMBED];
+};
+
+struct sskip_overflow {
+ struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
+};
+
+struct sskip_head {
+ struct sskip_item hitem;
+ struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
+ size_t count;
+};
+
+/* use as:
+ *
+ * PREDECL_SKIPLIST(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
+ */
+#define _PREDECL_SKIPLIST(prefix) \
+struct prefix ## _head { struct sskip_head sh; }; \
+struct prefix ## _item { struct sskip_item si; };
+
+#define INIT_SKIPLIST_UNIQ(var) { }
+#define INIT_SKIPLIST_NONUNIQ(var) { }
+
+#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+ h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
+ ((uintptr_t)h->sh.overflow | 1); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ struct sskip_item *si; \
+ si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
+ return container_of_null(si, type, field.si); \
+} \
+macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
+ &item->field.si, cmpfn_nuq); \
+ return container_of_null(sitem, type, field.si); \
+} \
+macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
+ &item->field.si, cmpfn_nuq); \
+ return container_of_null(sitem, type, field.si); \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ typesafe_skiplist_del(&h->sh, &item->field.si, cmpfn_uq); \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
+ return container_of_null(sitem, type, field.si); \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ struct sskip_item *first = h->sh.hitem.next[0]; \
+ return container_of_null(first, type, field.si); \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct sskip_item *next = item->field.si.next[0]; \
+ return container_of_null(next, type, field.si); \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ struct sskip_item *next; \
+ next = item ? item->field.si.next[0] : NULL; \
+ return container_of_null(next, type, field.si); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->sh.count; \
+} \
+/* ... */
+
+#define PREDECL_SKIPLIST_UNIQ(prefix) \
+ _PREDECL_SKIPLIST(prefix)
+#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct sskip_item *a, \
+ const struct sskip_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.si), \
+ container_of(b, type, field.si)); \
+} \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
+ &item->field.si, &prefix ## __cmp); \
+ return container_of_null(sitem, type, field.si); \
+} \
+ \
+_DECLARE_SKIPLIST(prefix, type, field, \
+ prefix ## __cmp, prefix ## __cmp) \
+/* ... */
+
+#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
+ _PREDECL_SKIPLIST(prefix)
+#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct sskip_item *a, \
+ const struct sskip_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.si), \
+ container_of(b, type, field.si)); \
+} \
+macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
+ const struct sskip_item *b) \
+{ \
+ int cmpval = cmpfn(container_of(a, type, field.si), \
+ container_of(b, type, field.si)); \
+ if (cmpval) \
+ return cmpval; \
+ if (a < b) \
+ return -1; \
+ if (a > b) \
+ return 1; \
+ return 0; \
+} \
+ \
+_DECLARE_SKIPLIST(prefix, type, field, \
+ prefix ## __cmp, prefix ## __cmp_uq) \
+/* ... */
+
+
+extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
+ struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b));
+extern struct sskip_item *typesafe_skiplist_find(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b));
+extern struct sskip_item *typesafe_skiplist_find_gteq(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b));
+extern struct sskip_item *typesafe_skiplist_find_lt(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b));
+extern void typesafe_skiplist_del(struct sskip_head *head,
+ struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b));
+extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* this needs to stay at the end because both files include each other.
+ * the resolved order is typesafe.h before typerb.h
+ */
+#include "typerb.h"
+
+#endif /* _FRR_TYPESAFE_H */
diff --git a/lib/vrf.c b/lib/vrf.c
index de50e6a517..229f19f29a 100644
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -362,9 +362,9 @@ struct vrf_bit_set {
bool set;
};
-static unsigned int vrf_hash_bitmap_key(void *data)
+static unsigned int vrf_hash_bitmap_key(const void *data)
{
- struct vrf_bit_set *bit = data;
+ const struct vrf_bit_set *bit = data;
return bit->vrf_id;
}
@@ -860,7 +860,6 @@ void vrf_cmd_init(int (*writefunc)(struct vty *vty),
void vrf_set_default_name(const char *default_name, bool force)
{
struct vrf *def_vrf;
- struct vrf *vrf_with_default_name = NULL;
static bool def_vrf_forced;
def_vrf = vrf_lookup_by_id(VRF_DEFAULT);
@@ -871,13 +870,7 @@ void vrf_set_default_name(const char *default_name, bool force)
def_vrf->vrf_id);
return;
}
- if (vrf_with_default_name && vrf_with_default_name != def_vrf) {
- /* vrf name already used by an other VRF */
- zlog_debug("VRF: %s, avoid changing name to %s, same name exists (%u)",
- vrf_with_default_name->name, default_name,
- vrf_with_default_name->vrf_id);
- return;
- }
+
snprintf(vrf_default_name, VRF_NAMSIZ, "%s", default_name);
if (def_vrf) {
if (force)
@@ -911,10 +904,16 @@ vrf_id_t vrf_get_default_id(void)
int vrf_bind(vrf_id_t vrf_id, int fd, const char *name)
{
int ret = 0;
+ struct interface *ifp;
if (fd < 0 || name == NULL)
return fd;
- if (vrf_is_backend_netns())
+	/* the device must exist, otherwise bail out.  This also covers
+	 * the case where ifname is a VRF name in netns mode.
+	 */
+ ifp = if_lookup_by_name(name, vrf_id);
+ if (!ifp)
return fd;
#ifdef SO_BINDTODEVICE
ret = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name, strlen(name)+1);
diff --git a/lib/vty.c b/lib/vty.c
index dae8e82599..0ee9b78b91 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -86,9 +86,6 @@ static vector Vvty_serv_thread;
/* Current directory. */
char *vty_cwd = NULL;
-/* Exclusive configuration lock. */
-struct vty *vty_exclusive_lock;
-
/* Login password check. */
static int no_password_check = 0;
@@ -2369,7 +2366,7 @@ static void vty_read_file(struct nb_config *config, FILE *confp)
if (config == NULL && vty->candidate_config
&& frr_get_cli_mode() == FRR_CLI_TRANSACTIONAL) {
ret = nb_candidate_commit(vty->candidate_config, NB_CLIENT_CLI,
- true, "Read configuration file",
+ vty, true, "Read configuration file",
NULL);
if (ret != NB_OK && ret != NB_ERR_NO_CHANGES)
zlog_err("%s: failed to read configuration file.",
@@ -2601,8 +2598,8 @@ void vty_log_fixed(char *buf, size_t len)
int vty_config_enter(struct vty *vty, bool private_config, bool exclusive)
{
- if (exclusive && !vty_config_exclusive_lock(vty)) {
- vty_out(vty, "VTY configuration is locked by other VTY\n");
+ if (exclusive && nb_running_lock(NB_CLIENT_CLI, vty)) {
+ vty_out(vty, "%% Configuration is locked by other client\n");
return CMD_WARNING;
}
@@ -2611,17 +2608,22 @@ int vty_config_enter(struct vty *vty, bool private_config, bool exclusive)
vty->private_config = private_config;
vty->xpath_index = 0;
- if (private_config) {
- vty->candidate_config = nb_config_dup(running_config);
- vty->candidate_config_base = nb_config_dup(running_config);
- vty_out(vty,
- "Warning: uncommitted changes will be discarded on exit.\n\n");
- } else {
- vty->candidate_config = vty_shared_candidate_config;
- if (frr_get_cli_mode() == FRR_CLI_TRANSACTIONAL)
+ pthread_rwlock_rdlock(&running_config->lock);
+ {
+ if (private_config) {
+ vty->candidate_config = nb_config_dup(running_config);
vty->candidate_config_base =
nb_config_dup(running_config);
+ vty_out(vty,
+ "Warning: uncommitted changes will be discarded on exit.\n\n");
+ } else {
+ vty->candidate_config = vty_shared_candidate_config;
+ if (frr_get_cli_mode() == FRR_CLI_TRANSACTIONAL)
+ vty->candidate_config_base =
+ nb_config_dup(running_config);
+ }
}
+ pthread_rwlock_unlock(&running_config->lock);
return CMD_SUCCESS;
}
@@ -2636,7 +2638,7 @@ void vty_config_exit(struct vty *vty)
nb_cli_confirmed_commit_clean(vty);
}
- vty_config_exclusive_unlock(vty);
+ (void)nb_running_unlock(NB_CLIENT_CLI, vty);
if (vty->candidate_config) {
if (vty->private_config)
@@ -2651,21 +2653,6 @@ void vty_config_exit(struct vty *vty)
vty->config = false;
}
-int vty_config_exclusive_lock(struct vty *vty)
-{
- if (vty_exclusive_lock == NULL) {
- vty_exclusive_lock = vty;
- return 1;
- }
- return 0;
-}
-
-void vty_config_exclusive_unlock(struct vty *vty)
-{
- if (vty_exclusive_lock == vty)
- vty_exclusive_lock = NULL;
-}
-
/* Master of the threads. */
static struct thread_master *vty_master;
diff --git a/lib/vty.h b/lib/vty.h
index 9c2653e1ac..98d75542fd 100644
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -289,9 +289,6 @@ struct vty_arg {
#define IS_DIRECTORY_SEP(c) ((c) == DIRECTORY_SEP)
#endif
-/* Exported variables */
-extern struct vty *vty_exclusive_lock;
-
/* Prototypes. */
extern void vty_init(struct thread_master *);
extern void vty_init_vtysh(void);
@@ -321,8 +318,6 @@ extern void vty_log(const char *level, const char *proto, const char *fmt,
extern int vty_config_enter(struct vty *vty, bool private_config,
bool exclusive);
extern void vty_config_exit(struct vty *);
-extern int vty_config_exclusive_lock(struct vty *vty);
-extern void vty_config_exclusive_unlock(struct vty *vty);
extern int vty_shell(struct vty *);
extern int vty_shell_serv(struct vty *);
extern void vty_hello(struct vty *);
diff --git a/lib/wheel.c b/lib/wheel.c
index 69d2fa48dc..8e479c931b 100644
--- a/lib/wheel.c
+++ b/lib/wheel.c
@@ -80,7 +80,7 @@ static int wheel_timer_thread(struct thread *t)
}
struct timer_wheel *wheel_init(struct thread_master *master, int period,
- size_t slots, unsigned int (*slot_key)(void *),
+ size_t slots, unsigned int (*slot_key)(const void *),
void (*slot_run)(void *),
const char *run_name)
{
diff --git a/lib/wheel.h b/lib/wheel.h
index e66751c1d0..401789e1a0 100644
--- a/lib/wheel.h
+++ b/lib/wheel.h
@@ -38,7 +38,7 @@ struct timer_wheel {
/*
* Key to determine what slot the item belongs in
*/
- unsigned int (*slot_key)(void *);
+ unsigned int (*slot_key)(const void *);
void (*slot_run)(void *);
};
@@ -80,9 +80,9 @@ struct timer_wheel {
* of running your code.
*/
struct timer_wheel *wheel_init(struct thread_master *master, int period,
- size_t slots, unsigned int (*slot_key)(void *),
- void (*slot_run)(void *),
- const char *run_name);
+ size_t slots,
+ unsigned int (*slot_key)(const void *),
+ void (*slot_run)(void *), const char *run_name);
/*
* Delete the specified timer wheel created
diff --git a/lib/workqueue.c b/lib/workqueue.c
index 066d81f350..54090d0d0f 100644
--- a/lib/workqueue.c
+++ b/lib/workqueue.c
@@ -280,7 +280,7 @@ int work_queue_run(struct thread *thread)
if (item->ran > wq->spec.max_retries) {
/* run error handler, if any */
if (wq->spec.errorfunc)
- wq->spec.errorfunc(wq, item->data);
+ wq->spec.errorfunc(wq, item);
work_queue_item_remove(wq, item);
continue;
}
diff --git a/lib/yang.c b/lib/yang.c
index 2a2c155dee..2f9a9aa5a3 100644
--- a/lib/yang.c
+++ b/lib/yang.c
@@ -617,14 +617,6 @@ static void ly_log_cb(LY_LOG_LEVEL level, const char *msg, const char *path)
zlog(priority, "libyang: %s", msg);
}
-#if CONFDATE > 20190401
-CPP_NOTICE("lib/yang: time to remove non-LIBYANG_EXT_BUILTIN support")
-#endif
-
-#ifdef LIBYANG_EXT_BUILTIN
-extern struct lytype_plugin_list frr_user_types[];
-#endif
-
struct ly_ctx *yang_ctx_new_setup(void)
{
struct ly_ctx *ctx;
@@ -650,31 +642,10 @@ struct ly_ctx *yang_ctx_new_setup(void)
void yang_init(void)
{
-#ifndef LIBYANG_EXT_BUILTIN
-CPP_NOTICE("lib/yang: deprecated libyang <0.16.74 extension loading in use!")
- static char ly_plugin_dir[PATH_MAX];
- const char *const *ly_loaded_plugins;
- const char *ly_plugin;
- bool found_ly_frr_types = false;
-
- /* Tell libyang where to find its plugins. */
- snprintf(ly_plugin_dir, sizeof(ly_plugin_dir), "%s=%s",
- "LIBYANG_USER_TYPES_PLUGINS_DIR", LIBYANG_PLUGINS_PATH);
- putenv(ly_plugin_dir);
-#endif
-
/* Initialize libyang global parameters that affect all containers. */
ly_set_log_clb(ly_log_cb, 1);
ly_log_options(LY_LOLOG | LY_LOSTORE);
-#ifdef LIBYANG_EXT_BUILTIN
- if (ly_register_types(frr_user_types, "frr_user_types")) {
- flog_err(EC_LIB_LIBYANG_PLUGIN_LOAD,
- "ly_register_types() failed");
- exit(1);
- }
-#endif
-
/* Initialize libyang container for native models. */
ly_native_ctx = yang_ctx_new_setup();
if (!ly_native_ctx) {
@@ -682,22 +653,6 @@ CPP_NOTICE("lib/yang: deprecated libyang <0.16.74 extension loading in use!")
exit(1);
}
-#ifndef LIBYANG_EXT_BUILTIN
- /* Detect if the required libyang plugin(s) were loaded successfully. */
- ly_loaded_plugins = ly_get_loaded_plugins();
- for (size_t i = 0; (ly_plugin = ly_loaded_plugins[i]); i++) {
- if (strmatch(ly_plugin, "frr_user_types")) {
- found_ly_frr_types = true;
- break;
- }
- }
- if (!found_ly_frr_types) {
- flog_err(EC_LIB_LIBYANG_PLUGIN_LOAD,
- "%s: failed to load frr_user_types.so", __func__);
- exit(1);
- }
-#endif
-
yang_translator_init();
}
diff --git a/lib/yang_translator.c b/lib/yang_translator.c
index 76a6cc5fd1..341420eeda 100644
--- a/lib/yang_translator.c
+++ b/lib/yang_translator.c
@@ -24,6 +24,7 @@
#include "hash.h"
#include "yang.h"
#include "yang_translator.h"
+#include "frrstr.h"
DEFINE_MTYPE_STATIC(LIB, YANG_TRANSLATOR, "YANG Translator")
DEFINE_MTYPE_STATIC(LIB, YANG_TRANSLATOR_MODULE, "YANG Translator Module")
@@ -45,8 +46,6 @@ static struct ly_ctx *ly_translator_ctx;
static unsigned int
yang_translator_validate(struct yang_translator *translator);
static unsigned int yang_module_nodes_count(const struct lys_module *module);
-static void str_replace(char *o_string, const char *s_string,
- const char *r_string);
struct yang_mapping_node {
char xpath_from_canonical[XPATH_MAXLEN];
@@ -62,7 +61,7 @@ static bool yang_mapping_hash_cmp(const void *value1, const void *value2)
return strmatch(c1->xpath_from_canonical, c2->xpath_from_canonical);
}
-static unsigned int yang_mapping_hash_key(void *value)
+static unsigned int yang_mapping_hash_key(const void *value)
{
return string_hash_make(value);
}
@@ -108,14 +107,24 @@ static void yang_mapping_add(struct yang_translator *translator, int dir,
sizeof(mapping->xpath_from_fmt));
strlcpy(mapping->xpath_to_fmt, xpath_to_fmt,
sizeof(mapping->xpath_to_fmt));
- str_replace(mapping->xpath_from_fmt, "KEY1", "%[^']");
- str_replace(mapping->xpath_from_fmt, "KEY2", "%[^']");
- str_replace(mapping->xpath_from_fmt, "KEY3", "%[^']");
- str_replace(mapping->xpath_from_fmt, "KEY4", "%[^']");
- str_replace(mapping->xpath_to_fmt, "KEY1", "%s");
- str_replace(mapping->xpath_to_fmt, "KEY2", "%s");
- str_replace(mapping->xpath_to_fmt, "KEY3", "%s");
- str_replace(mapping->xpath_to_fmt, "KEY4", "%s");
+
+ const char *keys[] = {"KEY1", "KEY2", "KEY3", "KEY4"};
+ char *xpfmt;
+
+ for (unsigned int i = 0; i < array_size(keys); i++) {
+ xpfmt = frrstr_replace(mapping->xpath_from_fmt, keys[i],
+ "%[^']");
+ strlcpy(mapping->xpath_from_fmt, xpfmt,
+ sizeof(mapping->xpath_from_fmt));
+ XFREE(MTYPE_TMP, xpfmt);
+ }
+
+ for (unsigned int i = 0; i < array_size(keys); i++) {
+ xpfmt = frrstr_replace(mapping->xpath_to_fmt, keys[i], "%s");
+ strlcpy(mapping->xpath_to_fmt, xpfmt,
+ sizeof(mapping->xpath_to_fmt));
+ XFREE(MTYPE_TMP, xpfmt);
+ }
}
struct yang_translator *yang_translator_load(const char *path)
@@ -500,28 +509,6 @@ static unsigned int yang_module_nodes_count(const struct lys_module *module)
return total;
}
-/* TODO: rewrite this function. */
-static void str_replace(char *o_string, const char *s_string,
- const char *r_string)
-{
- char buffer[BUFSIZ];
- char *ch;
-
- ch = strstr(o_string, s_string);
- if (!ch)
- return;
-
- memcpy(buffer, o_string, ch - o_string);
- buffer[ch - o_string] = 0;
-
- sprintf(buffer + (ch - o_string), "%s%s", r_string,
- ch + strlen(s_string));
-
- o_string[0] = 0;
- strcpy(o_string, buffer);
- return str_replace(o_string, s_string, r_string);
-}
-
void yang_translator_init(void)
{
ly_translator_ctx = yang_ctx_new_setup();
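For reference, the frrstr_replace() contract the loops above rely on: it returns a newly allocated string (released as MTYPE_TMP, matching these call sites), so the result is copied back into the fixed-size buffer and freed. In isolation, with a made-up xpath:

	char fmt[XPATH_MAXLEN] = "/frr:list[name='KEY1']/value";
	char *tmp;

	tmp = frrstr_replace(fmt, "KEY1", "%[^']");
	strlcpy(fmt, tmp, sizeof(fmt));
	XFREE(MTYPE_TMP, tmp);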
diff --git a/lib/yang_wrappers.c b/lib/yang_wrappers.c
index 7ecea5f445..0558383823 100644
--- a/lib/yang_wrappers.c
+++ b/lib/yang_wrappers.c
@@ -817,7 +817,7 @@ void yang_dnode_get_ipv4(struct in_addr *addr, const struct lyd_node *dnode,
dleaf = (const struct lyd_node_leaf_list *)dnode;
assert(dleaf->value_type == LY_TYPE_STRING);
- memcpy(addr, dleaf->value.ptr, sizeof(*addr));
+ (void)inet_pton(AF_INET, dleaf->value_str, addr);
}
void yang_get_default_ipv4(struct in_addr *var, const char *xpath_fmt, ...)
@@ -874,7 +874,7 @@ void yang_dnode_get_ipv4p(union prefixptr prefix, const struct lyd_node *dnode,
dleaf = (const struct lyd_node_leaf_list *)dnode;
assert(dleaf->value_type == LY_TYPE_STRING);
- memcpy(prefix4, dleaf->value.ptr, sizeof(*prefix4));
+ (void)str2prefix_ipv4(dleaf->value_str, prefix4);
}
void yang_get_default_ipv4p(union prefixptr var, const char *xpath_fmt, ...)
@@ -927,7 +927,7 @@ void yang_dnode_get_ipv6(struct in6_addr *addr, const struct lyd_node *dnode,
dleaf = (const struct lyd_node_leaf_list *)dnode;
assert(dleaf->value_type == LY_TYPE_STRING);
- memcpy(addr, dleaf->value.ptr, sizeof(*addr));
+ (void)inet_pton(AF_INET6, dleaf->value_str, addr);
}
void yang_get_default_ipv6(struct in6_addr *var, const char *xpath_fmt, ...)
@@ -984,7 +984,7 @@ void yang_dnode_get_ipv6p(union prefixptr prefix, const struct lyd_node *dnode,
dleaf = (const struct lyd_node_leaf_list *)dnode;
assert(dleaf->value_type == LY_TYPE_STRING);
- memcpy(prefix6, dleaf->value.ptr, sizeof(*prefix6));
+ (void)str2prefix_ipv6(dleaf->value_str, prefix6);
}
void yang_get_default_ipv6p(union prefixptr var, const char *xpath_fmt, ...)
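All four wrappers follow the same correction: the asserts show these leafs are string-typed, so the old memcpy() copied raw string bytes into binary address structs instead of parsing them. A standalone sketch of the new pattern for the IPv4 case (the helper name is hypothetical):

	#include <arpa/inet.h>

	static void canonical_to_v4(const char *canonical,
				    struct in_addr *addr)
	{
		/* the (void) mirrors the wrappers above: libyang has
		 * already validated the canonical form, so failure is
		 * deliberately ignored */
		(void)inet_pton(AF_INET, canonical, addr);
	}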
diff --git a/lib/zclient.c b/lib/zclient.c
index 4901c92743..0972590ca6 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -555,6 +555,25 @@ void zclient_send_interface_radv_req(struct zclient *zclient, vrf_id_t vrf_id,
zclient_send_message(zclient);
}
+int zclient_send_interface_protodown(struct zclient *zclient, vrf_id_t vrf_id,
+ struct interface *ifp, bool down)
+{
+ struct stream *s;
+
+ if (zclient->sock < 0)
+ return -1;
+
+ s = zclient->obuf;
+ stream_reset(s);
+ zclient_create_header(s, ZEBRA_INTERFACE_SET_PROTODOWN, vrf_id);
+ stream_putl(s, ifp->ifindex);
+ stream_putc(s, !!down);
+ stream_putw_at(s, 0, stream_get_endp(s));
+ zclient_send_message(zclient);
+
+ return 0;
+}
+
/* Make connection to zebra daemon. */
int zclient_start(struct zclient *zclient)
{
@@ -1381,6 +1400,8 @@ stream_failure:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | bandwidth |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | parent ifindex |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Link Layer Type |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Hardware Address Length                  |
@@ -1561,6 +1582,7 @@ void zebra_interface_if_set_value(struct stream *s, struct interface *ifp)
ifp->mtu = stream_getl(s);
ifp->mtu6 = stream_getl(s);
ifp->bandwidth = stream_getl(s);
+ ifp->link_ifindex = stream_getl(s);
ifp->ll_type = stream_getl(s);
ifp->hw_addr_len = stream_getl(s);
if (ifp->hw_addr_len)
@@ -2371,9 +2393,7 @@ int zebra_send_pw(struct zclient *zclient, int command, struct zapi_pw *pw)
/*
* Receive PW status update from Zebra and send it to LDE process.
*/
-void zebra_read_pw_status_update(int command, struct zclient *zclient,
- zebra_size_t length, vrf_id_t vrf_id,
- struct zapi_pw_status *pw)
+void zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw)
{
struct stream *s;
@@ -2386,8 +2406,7 @@ void zebra_read_pw_status_update(int command, struct zclient *zclient,
pw->status = stream_getl(s);
}
-static void zclient_capability_decode(int command, struct zclient *zclient,
- zebra_size_t length, vrf_id_t vrf_id)
+static void zclient_capability_decode(ZAPI_CALLBACK_ARGS)
{
struct zclient_capabilities cap;
struct stream *s = zclient->ibuf;
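A hedged usage sketch for the new protodown request (the call site is illustrative, assuming a connected zclient and an interface that carries its vrf_id); per the function body above, -1 means only that there is no zebra session:

	if (zclient_send_interface_protodown(zclient, ifp->vrf_id, ifp,
					     true) < 0)
		zlog_warn("protodown request not sent: no zebra session");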
diff --git a/lib/zclient.h b/lib/zclient.h
index 0926281f2e..09f0acad84 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -76,6 +76,7 @@ typedef enum {
ZEBRA_INTERFACE_UP,
ZEBRA_INTERFACE_DOWN,
ZEBRA_INTERFACE_SET_MASTER,
+ ZEBRA_INTERFACE_SET_PROTODOWN,
ZEBRA_ROUTE_ADD,
ZEBRA_ROUTE_DELETE,
ZEBRA_ROUTE_NOTIFY_OWNER,
@@ -221,66 +222,49 @@ struct zclient {
/* Redistribute default. */
vrf_bitmap_t default_information[AFI_MAX];
+#define ZAPI_CALLBACK_ARGS \
+ int cmd, struct zclient *zclient, uint16_t length, vrf_id_t vrf_id
+
/* Pointer to the callback functions. */
void (*zebra_connected)(struct zclient *);
void (*zebra_capabilities)(struct zclient_capabilities *cap);
- int (*router_id_update)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*interface_add)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*interface_delete)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*interface_up)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*interface_down)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*interface_address_add)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*interface_address_delete)(int, struct zclient *, uint16_t,
- vrf_id_t);
- int (*interface_link_params)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*interface_bfd_dest_update)(int, struct zclient *, uint16_t,
- vrf_id_t);
- int (*interface_nbr_address_add)(int, struct zclient *, uint16_t,
- vrf_id_t);
- int (*interface_nbr_address_delete)(int, struct zclient *, uint16_t,
- vrf_id_t);
- int (*interface_vrf_update)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*nexthop_update)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*import_check_update)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*bfd_dest_replay)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*redistribute_route_add)(int, struct zclient *, uint16_t,
- vrf_id_t);
- int (*redistribute_route_del)(int, struct zclient *, uint16_t,
- vrf_id_t);
+ int (*router_id_update)(ZAPI_CALLBACK_ARGS);
+ int (*interface_add)(ZAPI_CALLBACK_ARGS);
+ int (*interface_delete)(ZAPI_CALLBACK_ARGS);
+ int (*interface_up)(ZAPI_CALLBACK_ARGS);
+ int (*interface_down)(ZAPI_CALLBACK_ARGS);
+ int (*interface_address_add)(ZAPI_CALLBACK_ARGS);
+ int (*interface_address_delete)(ZAPI_CALLBACK_ARGS);
+ int (*interface_link_params)(ZAPI_CALLBACK_ARGS);
+ int (*interface_bfd_dest_update)(ZAPI_CALLBACK_ARGS);
+ int (*interface_nbr_address_add)(ZAPI_CALLBACK_ARGS);
+ int (*interface_nbr_address_delete)(ZAPI_CALLBACK_ARGS);
+ int (*interface_vrf_update)(ZAPI_CALLBACK_ARGS);
+ int (*nexthop_update)(ZAPI_CALLBACK_ARGS);
+ int (*import_check_update)(ZAPI_CALLBACK_ARGS);
+ int (*bfd_dest_replay)(ZAPI_CALLBACK_ARGS);
+ int (*redistribute_route_add)(ZAPI_CALLBACK_ARGS);
+ int (*redistribute_route_del)(ZAPI_CALLBACK_ARGS);
int (*fec_update)(int, struct zclient *, uint16_t);
- int (*local_es_add)(int command, struct zclient *zclient,
- uint16_t length, vrf_id_t vrf_id);
- int (*local_es_del)(int command, struct zclient *zclient,
- uint16_t length, vrf_id_t vrf_id);
- int (*local_vni_add)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*local_vni_del)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*local_l3vni_add)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*local_l3vni_del)(int, struct zclient *, uint16_t, vrf_id_t);
- void (*local_ip_prefix_add)(int, struct zclient *, uint16_t, vrf_id_t);
- void (*local_ip_prefix_del)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*local_macip_add)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*local_macip_del)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*pw_status_update)(int, struct zclient *, uint16_t, vrf_id_t);
- int (*route_notify_owner)(int command, struct zclient *zclient,
- uint16_t length, vrf_id_t vrf_id);
- int (*rule_notify_owner)(int command, struct zclient *zclient,
- uint16_t length, vrf_id_t vrf_id);
- void (*label_chunk)(int command, struct zclient *zclient,
- uint16_t length, vrf_id_t vrf_id);
- int (*ipset_notify_owner)(int command, struct zclient *zclient,
- uint16_t length, vrf_id_t vrf_id);
- int (*ipset_entry_notify_owner)(int command,
- struct zclient *zclient,
- uint16_t length,
- vrf_id_t vrf_id);
- int (*iptable_notify_owner)(int command,
- struct zclient *zclient,
- uint16_t length,
- vrf_id_t vrf_id);
- int (*vxlan_sg_add)(int command, struct zclient *client,
- uint16_t length, vrf_id_t vrf_id);
- int (*vxlan_sg_del)(int command, struct zclient *client,
- uint16_t length, vrf_id_t vrf_id_t);
+ int (*local_es_add)(ZAPI_CALLBACK_ARGS);
+ int (*local_es_del)(ZAPI_CALLBACK_ARGS);
+ int (*local_vni_add)(ZAPI_CALLBACK_ARGS);
+ int (*local_vni_del)(ZAPI_CALLBACK_ARGS);
+ int (*local_l3vni_add)(ZAPI_CALLBACK_ARGS);
+ int (*local_l3vni_del)(ZAPI_CALLBACK_ARGS);
+ void (*local_ip_prefix_add)(ZAPI_CALLBACK_ARGS);
+ void (*local_ip_prefix_del)(ZAPI_CALLBACK_ARGS);
+ int (*local_macip_add)(ZAPI_CALLBACK_ARGS);
+ int (*local_macip_del)(ZAPI_CALLBACK_ARGS);
+ int (*pw_status_update)(ZAPI_CALLBACK_ARGS);
+ int (*route_notify_owner)(ZAPI_CALLBACK_ARGS);
+ int (*rule_notify_owner)(ZAPI_CALLBACK_ARGS);
+ void (*label_chunk)(ZAPI_CALLBACK_ARGS);
+ int (*ipset_notify_owner)(ZAPI_CALLBACK_ARGS);
+ int (*ipset_entry_notify_owner)(ZAPI_CALLBACK_ARGS);
+ int (*iptable_notify_owner)(ZAPI_CALLBACK_ARGS);
+ int (*vxlan_sg_add)(ZAPI_CALLBACK_ARGS);
+ int (*vxlan_sg_del)(ZAPI_CALLBACK_ARGS);
};
/* Zebra API message flag. */
@@ -483,6 +467,9 @@ extern void zclient_send_interface_radv_req(struct zclient *zclient,
vrf_id_t vrf_id,
struct interface *ifp, int enable,
int ra_interval);
+extern int zclient_send_interface_protodown(struct zclient *zclient,
+ vrf_id_t vrf_id,
+ struct interface *ifp, bool down);
/* Send redistribute command to zebra daemon. Do not update zclient state. */
extern int zebra_redistribute_send(int command, struct zclient *, afi_t,
@@ -597,9 +584,7 @@ extern int tm_release_table_chunk(struct zclient *zclient, uint32_t start,
extern int zebra_send_pw(struct zclient *zclient, int command,
struct zapi_pw *pw);
-extern void zebra_read_pw_status_update(int command, struct zclient *zclient,
- zebra_size_t length, vrf_id_t vrf_id,
- struct zapi_pw_status *pw);
+extern void zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw);
extern int zclient_route_send(uint8_t, struct zclient *, struct zapi_route *);
extern int zclient_send_rnh(struct zclient *zclient, int command,
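With the macro in place, a daemon-side handler is written once against the shared parameter list. A minimal hypothetical handler and its registration:

	static int my_interface_up(ZAPI_CALLBACK_ARGS)
	{
		/* cmd, zclient, length and vrf_id are all in scope,
		 * supplied by the ZAPI_CALLBACK_ARGS expansion */
		return 0;
	}

	/* at daemon init time: */
	zclient->interface_up = my_interface_up;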
diff --git a/lib/zebra.h b/lib/zebra.h
index b96fb5a206..3e1eefdb2e 100644
--- a/lib/zebra.h
+++ b/lib/zebra.h
@@ -335,45 +335,6 @@ struct in_pktinfo {
#endif /* ndef BYTE_ORDER */
-/* MAX / MIN are not commonly defined, but useful */
-/* note: glibc sys/param.h has #define MIN(a,b) (((a)<(b))?(a):(b)) */
-#ifdef MAX
-#undef MAX
-#endif
-#define MAX(a, b) \
- ({ \
- typeof(a) _max_a = (a); \
- typeof(b) _max_b = (b); \
- _max_a > _max_b ? _max_a : _max_b; \
- })
-#ifdef MIN
-#undef MIN
-#endif
-#define MIN(a, b) \
- ({ \
- typeof(a) _min_a = (a); \
- typeof(b) _min_b = (b); \
- _min_a < _min_b ? _min_a : _min_b; \
- })
-
-#ifndef offsetof
-#ifdef __compiler_offsetof
-#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
-#else
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-#endif
-#endif
-
-#ifndef container_of
-#define container_of(ptr, type, member) \
- ({ \
- const typeof(((type *)0)->member) *__mptr = (ptr); \
- (type *)((char *)__mptr - offsetof(type, member)); \
- })
-#endif
-
-#define ZEBRA_NUM_OF(x) (sizeof (x) / sizeof (x[0]))
-
/* For old definition. */
#ifndef IN6_ARE_ADDR_EQUAL
#define IN6_ARE_ADDR_EQUAL IN6_IS_ADDR_EQUAL
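The removed helpers are GNU statement-expression macros whose point is that each argument is evaluated exactly once; wherever they land (presumably a shared compiler header, not visible in this hunk), that property has to survive. For example:

	int x = 3;
	int big = MAX(x++, 0);	/* big == 3, x == 4; a naive
				 * ((a)>(b)?(a):(b)) macro would
				 * increment x twice here */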
@@ -461,7 +422,13 @@ extern const char *zserv_command_string(unsigned int command);
#endif
/* Address family numbers from RFC1700. */
-typedef enum { AFI_IP = 1, AFI_IP6 = 2, AFI_L2VPN = 3, AFI_MAX = 4 } afi_t;
+typedef enum {
+ AFI_UNSPEC = 0,
+ AFI_IP = 1,
+ AFI_IP6 = 2,
+ AFI_L2VPN = 3,
+ AFI_MAX = 4
+} afi_t;
/* Subsequent Address Family Identifier. */
typedef enum {