Diffstat (limited to 'lib')
-rw-r--r--  lib/atomlist.c  | 348
-rw-r--r--  lib/atomlist.h  | 347
-rw-r--r--  lib/compiler.h  |  85
-rw-r--r--  lib/fifo.h      |  66
-rw-r--r--  lib/frratomic.h |   7
-rw-r--r--  lib/memory.h    |   2
-rw-r--r--  lib/privs.c     |   2
-rw-r--r--  lib/qobj.c      |  46
-rw-r--r--  lib/qobj.h      |   7
-rw-r--r--  lib/seqlock.c   | 167
-rw-r--r--  lib/seqlock.h   | 106
-rw-r--r--  lib/sockunion.c |   2
-rw-r--r--  lib/subdir.am   |   9
-rw-r--r--  lib/thread.c    | 133
-rw-r--r--  lib/thread.h    |  15
-rw-r--r--  lib/typerb.c    | 472
-rw-r--r--  lib/typerb.h    | 182
-rw-r--r--  lib/typesafe.c  | 377
-rw-r--r--  lib/typesafe.h  | 645
-rw-r--r--  lib/zebra.h     |  39
20 files changed, 2820 insertions, 237 deletions
diff --git a/lib/atomlist.c b/lib/atomlist.c
new file mode 100644
index 0000000000..8169ba9eb4
--- /dev/null
+++ b/lib/atomlist.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2016-2018 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "atomlist.h"
+
+void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item)
+{
+ atomptr_t prevval;
+ atomptr_t i = atomptr_i(item);
+
+ atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* updating ->last is possible here, but makes the code considerably
+ * more complicated... let's not.
+ */
+ prevval = ATOMPTR_NULL;
+ item->next = ATOMPTR_NULL;
+
+ /* head-insert atomically
+ * release barrier: item + item->next writes must be completed
+ */
+ while (!atomic_compare_exchange_weak_explicit(&h->first, &prevval, i,
+ memory_order_release, memory_order_relaxed))
+ atomic_store_explicit(&item->next, prevval,
+ memory_order_relaxed);
+}
+
+void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item)
+{
+ atomptr_t prevval = ATOMPTR_NULL;
+ atomptr_t i = atomptr_i(item);
+ atomptr_t hint;
+ struct atomlist_item *prevptr;
+ _Atomic atomptr_t *prev;
+
+ item->next = ATOMPTR_NULL;
+
+ atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* place new item into ->last
+ * release: item writes completed; acquire: data-dependency barrier on hint
+ */
+ hint = atomic_exchange_explicit(&h->last, i, memory_order_acq_rel);
+
+ while (1) {
+ if (atomptr_p(hint) == NULL)
+ prev = &h->first;
+ else
+ prev = &atomlist_itemp(hint)->next;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_consume);
+ prevptr = atomlist_itemp(prevval);
+ if (prevptr == NULL)
+ break;
+
+ prev = &prevptr->next;
+ } while (prevptr);
+
+ /* last item is being deleted - start over */
+ if (atomptr_l(prevval)) {
+ hint = ATOMPTR_NULL;
+ continue;
+ }
+
+ /* no barrier - item->next is NULL and was so in xchg above */
+ if (!atomic_compare_exchange_strong_explicit(prev, &prevval, i,
+ memory_order_consume,
+ memory_order_consume)) {
+ hint = prevval;
+ continue;
+ }
+ break;
+ }
+}
+
+static void atomlist_del_core(struct atomlist_head *h,
+ struct atomlist_item *item,
+ _Atomic atomptr_t *hint,
+ atomptr_t next)
+{
+ _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
+ atomptr_t prevval, updval;
+ struct atomlist_item *prevptr;
+
+ /* drop us off "last" if needed. no r/w to barrier. */
+ prevval = atomptr_i(item);
+ atomic_compare_exchange_strong_explicit(&h->last, &prevval,
+ ATOMPTR_NULL,
+ memory_order_relaxed, memory_order_relaxed);
+
+ atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* the following code should be identical (except sort<>list) to
+ * atomsort_del_hint()
+ */
+ while (1) {
+ upd = NULL;
+ updval = ATOMPTR_LOCK;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_consume);
+
+ /* track the beginning of a chain of deleted items
+ * this is necessary to make this lock-free; we can
+ * complete deletions started by other threads.
+ */
+ if (!atomptr_l(prevval)) {
+ updval = prevval;
+ upd = prev;
+ }
+
+ prevptr = atomlist_itemp(prevval);
+ if (prevptr == item)
+ break;
+
+ prev = &prevptr->next;
+ } while (prevptr);
+
+ if (prevptr != item)
+ /* another thread completed our deletion */
+ return;
+
+ if (!upd || atomptr_l(updval)) {
+ /* failed to find non-deleted predecessor...
+ * have to try again
+ */
+ prev = &h->first;
+ continue;
+ }
+
+ if (!atomic_compare_exchange_strong_explicit(upd, &updval,
+ next, memory_order_consume,
+ memory_order_consume)) {
+ /* prev doesn't point to item anymore, something
+ * was inserted. continue at same position forward.
+ */
+ continue;
+ }
+ break;
+ }
+}
+
+void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
+ _Atomic atomptr_t *hint)
+{
+ atomptr_t next;
+
+ /* mark ourselves in-delete - full barrier */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_acquire);
+ assert(!atomptr_l(next)); /* delete race on same item */
+
+ atomlist_del_core(h, item, hint, next);
+}
+
+struct atomlist_item *atomlist_pop(struct atomlist_head *h)
+{
+ struct atomlist_item *item;
+ atomptr_t next;
+
+ /* grab head of the list - and remember it in "next" for the
+ * actual delete below. No matter what, the head of the list is
+ * where we start deleting because either it's our item, or it's
+ * some delete-marked items and then our item.
+ */
+ next = atomic_load_explicit(&h->first, memory_order_consume);
+
+ do {
+ item = atomlist_itemp(next);
+ if (!item)
+ return NULL;
+
+ /* try to mark deletion */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_acquire);
+
+ } while (atomptr_l(next));
+ /* if loop is taken: delete race on same item (another pop or del)
+ * => proceed to next item
+ * if loop exited here: we have our item selected and marked
+ */
+ atomlist_del_core(h, item, &h->first, next);
+ return item;
+}
+
+struct atomsort_item *atomsort_add(struct atomsort_head *h,
+ struct atomsort_item *item, int (*cmpfn)(
+ const struct atomsort_item *,
+ const struct atomsort_item *))
+{
+ _Atomic atomptr_t *prev;
+ atomptr_t prevval;
+ atomptr_t i = atomptr_i(item);
+ struct atomsort_item *previtem;
+ int cmpval;
+
+ do {
+ prev = &h->first;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_acquire);
+ previtem = atomptr_p(prevval);
+
+ if (!previtem || (cmpval = cmpfn(previtem, item)) > 0)
+ break;
+ if (cmpval == 0)
+ return previtem;
+
+ prev = &previtem->next;
+ } while (1);
+
+ if (atomptr_l(prevval))
+ continue;
+
+ item->next = prevval;
+ if (atomic_compare_exchange_strong_explicit(prev, &prevval, i,
+ memory_order_release, memory_order_relaxed))
+ break;
+ } while (1);
+
+ atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
+ return NULL;
+}
+
+static void atomsort_del_core(struct atomsort_head *h,
+ struct atomsort_item *item, _Atomic atomptr_t *hint,
+ atomptr_t next)
+{
+ _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
+ atomptr_t prevval, updval;
+ struct atomsort_item *prevptr;
+
+ atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);
+
+ /* the following code should be identical (except sort<>list) to
+ * atomlist_del_core()
+ */
+ while (1) {
+ upd = NULL;
+ updval = ATOMPTR_LOCK;
+
+ do {
+ prevval = atomic_load_explicit(prev,
+ memory_order_consume);
+
+ /* track the beginning of a chain of deleted items
+ * this is necessary to make this lock-free; we can
+ * complete deletions started by other threads.
+ */
+ if (!atomptr_l(prevval)) {
+ updval = prevval;
+ upd = prev;
+ }
+
+ prevptr = atomsort_itemp(prevval);
+ if (prevptr == item)
+ break;
+
+ prev = &prevptr->next;
+ } while (prevptr);
+
+ if (prevptr != item)
+ /* another thread completed our deletion */
+ return;
+
+ if (!upd || atomptr_l(updval)) {
+ /* failed to find non-deleted predecessor...
+ * have to try again
+ */
+ prev = &h->first;
+ continue;
+ }
+
+ if (!atomic_compare_exchange_strong_explicit(upd, &updval,
+ next, memory_order_relaxed,
+ memory_order_relaxed)) {
+ /* prev doesn't point to item anymore, something
+ * was inserted. continue at same position forward.
+ */
+ continue;
+ }
+ break;
+ }
+}
+
+void atomsort_del_hint(struct atomsort_head *h, struct atomsort_item *item,
+ _Atomic atomptr_t *hint)
+{
+ atomptr_t next;
+
+ /* mark ourselves in-delete - full barrier */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_seq_cst);
+ assert(!atomptr_l(next)); /* delete race on same item */
+
+ atomsort_del_core(h, item, hint, next);
+}
+
+struct atomsort_item *atomsort_pop(struct atomsort_head *h)
+{
+ struct atomsort_item *item;
+ atomptr_t next;
+
+ /* grab head of the list - and remember it in "next" for the
+ * actual delete below. No matter what, the head of the list is
+ * where we start deleting because either it's our item, or it's
+ * some delete-marked items and then our item.
+ */
+ next = atomic_load_explicit(&h->first, memory_order_consume);
+
+ do {
+ item = atomsort_itemp(next);
+ if (!item)
+ return NULL;
+
+ /* try to mark deletion */
+ next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
+ memory_order_acquire);
+
+ } while (atomptr_l(next));
+ /* if loop is taken: delete race on same item (another pop or del)
+ * => proceed to next item
+ * if loop exited here: we have our item selected and marked
+ */
+ atomsort_del_core(h, item, &h->first, next);
+ return item;
+}
diff --git a/lib/atomlist.h b/lib/atomlist.h
new file mode 100644
index 0000000000..373c481f2a
--- /dev/null
+++ b/lib/atomlist.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _FRR_ATOMLIST_H
+#define _FRR_ATOMLIST_H
+
+#include "typesafe.h"
+#include "frratomic.h"
+
+/* pointer with lock/deleted/invalid bit in lowest bit
+ *
+ * for atomlist/atomsort, "locked" means "this pointer can't be updated, the
+ * item is being deleted". it is permissible to assume the item will indeed
+ * be deleted (as there are no replace/etc. ops in this).
+ *
+ * in general, lowest 2/3 bits on 32/64bit architectures are available for
+ * uses like this; the only thing that will really break this is putting an
+ * atomlist_item in a struct with "packed" attribute. (it'll break
+ * immediately and consistently.) -- don't do that.
+ *
+ * ATOMPTR_USER is currently unused (and available for atomic hash or skiplist
+ * implementations.)
+ */
+typedef uintptr_t atomptr_t;
+#define ATOMPTR_MASK (UINTPTR_MAX - 3)
+#define ATOMPTR_LOCK (1)
+#define ATOMPTR_USER (2)
+#define ATOMPTR_NULL (0)
+
+static inline atomptr_t atomptr_i(void *val)
+{
+ atomptr_t atomval = (atomptr_t)val;
+
+ assert(!(atomval & ATOMPTR_LOCK));
+ return atomval;
+}
+static inline void *atomptr_p(atomptr_t val)
+{
+ return (void *)(val & ATOMPTR_MASK);
+}
+static inline bool atomptr_l(atomptr_t val)
+{
+ return (bool)(val & ATOMPTR_LOCK);
+}
+static inline bool atomptr_u(atomptr_t val)
+{
+ return (bool)(val & ATOMPTR_USER);
+}
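+
+/* to illustrate the tag-bit helpers above (sketch only; "struct foo" is
+ * a made-up example type):
+ *
+ * struct foo *f = ...;
+ * atomptr_t a = atomptr_i(f);   (tag bits are clear on aligned pointers)
+ * a |= ATOMPTR_LOCK;            (mark as being-deleted)
+ * assert(atomptr_l(a) && atomptr_p(a) == f);
+ */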
+
+
+/* the problem with find(), find_gteq() and find_lt() on atomic lists is that
+ * they're neither an "acquire" nor a "release" operation; the element that
+ * was found is still on the list and doesn't change ownership. Therefore,
+ * an atomic transition in ownership state can't be implemented.
+ *
+ * Contrast this with add() or pop(): both function calls atomically transfer
+ * ownership of an item to or from the list, which makes them "acquire" /
+ * "release" operations.
+ *
+ * What can be implemented atomically is a "find_pop()", i.e. try to locate an
+ * item and atomically try to remove it if found. It's not currently
+ * implemented but can be added when needed.
+ *
+ * Either way - generally speaking, if you need to use find() on a list,
+ * the whole thing probably isn't well-suited to an atomic implementation,
+ * and you'll need extra locks around it to make it work correctly.
+ */
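+
+/* to make the race concrete (sketch; "itemlist" would be declared via
+ * the ATOMSORT macros below):
+ *
+ * item = itemlist_find(&head, &ref);
+ * ...                  (another thread may del()/pop() item right here)
+ * use(item);           (item may already be off the list)
+ */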
+#ifdef WNO_ATOMLIST_UNSAFE_FIND
+# define atomic_find_warn
+#else
+# define atomic_find_warn __attribute__((_DEPRECATED( \
+ "WARNING: find() on atomic lists cannot be atomic by principle; " \
+ "check code to make sure usage pattern is OK and if it is, use " \
+ "#define WNO_ATOMLIST_UNSAFE_FIND")))
+#endif
+
+
+/* single-linked list, unsorted/arbitrary.
+ * can be used as queue with add_tail / pop
+ *
+ * all operations are lock-free, but not necessarily wait-free. this means
+ * that there is no state where the system as a whole stops making progress,
+ * but it *is* possible that a *particular* thread is delayed by some time.
+ *
+ * the only way for this to happen is for other threads to continuously make
+ * updates. an inactive / blocked / deadlocked other thread cannot cause such
+ * delays, and to cause such delays a thread must be heavily hitting the list -
+ * it's a rather theoretical concern.
+ */
+
+/* don't use these structs directly */
+struct atomlist_item {
+ _Atomic atomptr_t next;
+};
+#define atomlist_itemp(val) ((struct atomlist_item *)atomptr_p(val))
+
+struct atomlist_head {
+ _Atomic atomptr_t first, last;
+ _Atomic size_t count;
+};
+
+/* use as:
+ *
+ * PREDECL_ATOMLIST(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_ATOMLIST(namelist, struct name, nlitem)
+ */
+#define PREDECL_ATOMLIST(prefix) \
+struct prefix ## _head { struct atomlist_head ah; }; \
+struct prefix ## _item { struct atomlist_item ai; };
+
+#define INIT_ATOMLIST(var) { }
+
+#define DECLARE_ATOMLIST(prefix, type, field) \
+macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
+{ atomlist_add_head(&h->ah, &item->field.ai); } \
+macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
+{ atomlist_add_tail(&h->ah, &item->field.ai); } \
+macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
+ _Atomic atomptr_t *hint) \
+{ atomlist_del_hint(&h->ah, &item->field.ai, hint); } \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ atomlist_del_hint(&h->ah, &item->field.ai, NULL); } \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ char *p = (char *)atomlist_pop(&h->ah); \
+ return p ? (type *)(p - offsetof(type, field)) : NULL; } \
+macro_inline type *prefix ## _first(struct prefix##_head *h) \
+{ char *p = atomptr_p(atomic_load_explicit(&h->ah.first, \
+ memory_order_acquire)); \
+ return p ? (type *)(p - offsetof(type, field)) : NULL; } \
+macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ char *p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
+ memory_order_acquire)); \
+ return p ? (type *)(p - offsetof(type, field)) : NULL; } \
+macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ return item ? prefix##_next(h, item) : NULL; } \
+macro_inline size_t prefix ## _count(struct prefix##_head *h) \
+{ return atomic_load_explicit(&h->ah.count, memory_order_relaxed); } \
+/* ... */
+
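+/* e.g., with the "namelist" declarations sketched above, this gives a
+ * lock-free multi-producer/multi-consumer queue:
+ *
+ * struct namelist_head head;       (zeroed memory = valid empty list)
+ * struct name *n = ...;
+ *
+ * namelist_add_tail(&head, n);     (producer, any thread)
+ * n = namelist_pop(&head);         (consumer; returns NULL when empty)
+ */
+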
+/* add_head:
+ * - contention on ->first pointer
+ * - return implies completion
+ */
+void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item);
+
+/* add_tail:
+ * - concurrent add_tail can cause wait but has progress guarantee
+ * - return does NOT imply completion. completion is only guaranteed after
+ * all other add_tail operations that started before this add_tail have
+ * completed as well.
+ */
+void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item);
+
+/* del/del_hint:
+ *
+ * OWNER MUST HOLD REFERENCE ON ITEM TO BE DELETED, ENSURING NO OTHER THREAD
+ * WILL TRY TO DELETE THE SAME ITEM. DELETING INCLUDES pop().
+ *
+ * as with all deletions, threads that started reading earlier may still hold
+ * pointers to the deleted item. completion is however guaranteed for all
+ * reads starting later.
+ */
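+/* (hint, if non-NULL, is an _Atomic atomptr_t that was observed pointing
+ * at item, e.g. a predecessor's ->next; NULL starts the search at
+ * &h->first.)
+ */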
+void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item,
+ _Atomic atomptr_t *hint);
+
+/* pop:
+ *
+ * as with all deletions, threads that started reading earlier may still hold
+ * pointers to the deleted item. completion is however guaranteed for all
+ * reads starting later.
+ */
+struct atomlist_item *atomlist_pop(struct atomlist_head *h);
+
+
+
+struct atomsort_item {
+ _Atomic atomptr_t next;
+};
+#define atomsort_itemp(val) ((struct atomsort_item *)atomptr_p(val))
+
+struct atomsort_head {
+ _Atomic atomptr_t first;
+ _Atomic size_t count;
+};
+
+#define _PREDECL_ATOMSORT(prefix) \
+struct prefix ## _head { struct atomsort_head ah; }; \
+struct prefix ## _item { struct atomsort_item ai; };
+
+#define INIT_ATOMSORT_UNIQ(var) { }
+#define INIT_ATOMSORT_NONUNIQ(var) { }
+
+#define _DECLARE_ATOMSORT(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ assert(h->ah.count == 0); \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ struct atomsort_item *p; \
+ p = atomsort_add(&h->ah, &item->field.ai, cmpfn_uq); \
+ return container_of_null(p, type, field.ai); \
+} \
+macro_inline type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ struct atomsort_item *p; \
+ p = atomptr_p(atomic_load_explicit(&h->ah.first, \
+ memory_order_acquire)); \
+ return container_of_null(p, type, field.ai); \
+} \
+macro_inline type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct atomsort_item *p; \
+ p = atomptr_p(atomic_load_explicit(&item->field.ai.next, \
+ memory_order_acquire)); \
+ return container_of_null(p, type, field.ai); \
+} \
+macro_inline type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ return item ? prefix##_next(h, item) : NULL; \
+} \
+atomic_find_warn \
+macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ type *p = prefix ## _first(h); \
+ while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
+ p = prefix ## _next(h, p); \
+ return p; \
+} \
+atomic_find_warn \
+macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ type *p = prefix ## _first(h), *prev = NULL; \
+ while (p && cmpfn_nuq(&p->field.ai, &item->field.ai) < 0) \
+ p = prefix ## _next(h, (prev = p)); \
+ return prev; \
+} \
+macro_inline void prefix ## _del_hint(struct prefix##_head *h, type *item, \
+ _Atomic atomptr_t *hint) \
+{ \
+ atomsort_del_hint(&h->ah, &item->field.ai, hint); \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ atomsort_del_hint(&h->ah, &item->field.ai, NULL); \
+} \
+macro_inline size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return atomic_load_explicit(&h->ah.count, memory_order_relaxed); \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct atomsort_item *p = atomsort_pop(&h->ah); \
+ return p ? container_of(p, type, field.ai) : NULL; \
+} \
+/* ... */
+
+#define PREDECL_ATOMSORT_UNIQ(prefix) \
+ _PREDECL_ATOMSORT(prefix)
+#define DECLARE_ATOMSORT_UNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
+ const struct atomsort_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.ai), \
+ container_of(b, type, field.ai)); \
+} \
+ \
+_DECLARE_ATOMSORT(prefix, type, field, \
+ prefix ## __cmp, prefix ## __cmp) \
+ \
+atomic_find_warn \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ type *p = prefix ## _first(h); \
+ int cmpval = 0; \
+ while (p && (cmpval = cmpfn(p, item)) < 0) \
+ p = prefix ## _next(h, p); \
+ if (!p || cmpval > 0) \
+ return NULL; \
+ return p; \
+} \
+/* ... */
+
+#define PREDECL_ATOMSORT_NONUNIQ(prefix) \
+ _PREDECL_ATOMSORT(prefix)
+#define DECLARE_ATOMSORT_NONUNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct atomsort_item *a, \
+ const struct atomsort_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.ai), \
+ container_of(b, type, field.ai)); \
+} \
+macro_inline int prefix ## __cmp_uq(const struct atomsort_item *a, \
+ const struct atomsort_item *b) \
+{ \
+ int cmpval = cmpfn(container_of(a, type, field.ai), \
+ container_of(b, type, field.ai)); \
+ if (cmpval) \
+ return cmpval; \
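+ /* tie-break equal keys by address, so items that compare equal \
+ * can coexist on a NONUNIQ list \
+ */ \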
+ if (a < b) \
+ return -1; \
+ if (a > b) \
+ return 1; \
+ return 0; \
+} \
+ \
+_DECLARE_ATOMSORT(prefix, type, field, \
+ prefix ## __cmp, prefix ## __cmp_uq) \
+/* ... */
+
+struct atomsort_item *atomsort_add(struct atomsort_head *h,
+ struct atomsort_item *item, int (*cmpfn)(
+ const struct atomsort_item *,
+ const struct atomsort_item *));
+
+void atomsort_del_hint(struct atomsort_head *h,
+ struct atomsort_item *item, _Atomic atomptr_t *hint);
+
+struct atomsort_item *atomsort_pop(struct atomsort_head *h);
+
+#endif /* _FRR_ATOMLIST_H */
diff --git a/lib/compiler.h b/lib/compiler.h
index cb4f7531ec..474adc7c8b 100644
--- a/lib/compiler.h
+++ b/lib/compiler.h
@@ -32,6 +32,7 @@ extern "C" {
# define _FALLTHROUGH __attribute__((fallthrough));
#endif
# define _CONSTRUCTOR(x) constructor(x)
+# define _DEPRECATED(x) deprecated(x)
#elif defined(__GNUC__)
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
# define _RET_NONNULL , returns_nonnull
@@ -41,6 +42,9 @@ extern "C" {
# define _DESTRUCTOR(x) destructor(x)
# define _ALLOC_SIZE(x) alloc_size(x)
#endif
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+# define _DEPRECATED(x) deprecated(x)
+#endif
#if __GNUC__ >= 7
# define _FALLTHROUGH __attribute__((fallthrough));
#endif
@@ -68,6 +72,13 @@ extern "C" {
#ifndef _FALLTHROUGH
#define _FALLTHROUGH
#endif
+#ifndef _DEPRECATED
+#define _DEPRECATED(x) deprecated
+#endif
+
+/* for helper functions defined inside macros */
+#define macro_inline static inline __attribute__((unused))
+#define macro_pure static inline __attribute__((unused, pure))
/*
* for warnings on macros, put in the macro content like this:
@@ -92,6 +103,80 @@ extern "C" {
#define CPP_NOTICE(text)
#endif
+/* MAX / MIN are not commonly defined, but useful */
+/* note: glibc sys/param.h has #define MIN(a,b) (((a)<(b))?(a):(b)) */
+#ifdef MAX
+#undef MAX
+#endif
+#define MAX(a, b) \
+ ({ \
+ typeof(a) _max_a = (a); \
+ typeof(b) _max_b = (b); \
+ _max_a > _max_b ? _max_a : _max_b; \
+ })
+#ifdef MIN
+#undef MIN
+#endif
+#define MIN(a, b) \
+ ({ \
+ typeof(a) _min_a = (a); \
+ typeof(b) _min_b = (b); \
+ _min_a < _min_b ? _min_a : _min_b; \
+ })
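+
+/* unlike the naive #define MIN(a, b) (((a) < (b)) ? (a) : (b)), these
+ * statement-expression versions evaluate each argument exactly once;
+ * e.g. MIN(x++, y) increments x once, not twice.
+ */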
+
+#ifndef offsetof
+#ifdef __compiler_offsetof
+#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE,MEMBER)
+#else
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+#endif
+
+/* this variant of container_of() retains 'const' on pointers without needing
+ * to be told to do so. The following will all work without warning:
+ *
+ * struct member *p;
+ * const struct member *cp;
+ *
+ * const struct cont *x = container_of(cp, struct cont, member);
+ * const struct cont *x = container_of(cp, const struct cont, member);
+ * const struct cont *x = container_of(p, struct cont, member);
+ * const struct cont *x = container_of(p, const struct cont, member);
+ * struct cont *x = container_of(p, struct cont, member);
+ *
+ * but the following will generate warnings about stripping const:
+ *
+ * struct cont *x = container_of(cp, struct cont, member);
+ * struct cont *x = container_of(cp, const struct cont, member);
+ * struct cont *x = container_of(p, const struct cont, member);
+ */
+#ifdef container_of
+#undef container_of
+#endif
+#define container_of(ptr, type, member) \
+ (__builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(&((type *)0)->member), \
+ typeof(ptr)) \
+ || __builtin_types_compatible_p(void *, typeof(ptr)), \
+ ({ \
+ typeof(((type *)0)->member) *__mptr = (void *)(ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); \
+ }), \
+ ({ \
+ typeof(((const type *)0)->member) *__mptr = (ptr); \
+ (const type *)((const char *)__mptr - \
+ offsetof(type, member)); \
+ }) \
+ ))
+
+#define container_of_null(ptr, type, member) \
+ ({ \
+ typeof(ptr) _tmp = (ptr); \
+ _tmp ? container_of(_tmp, type, member) : NULL; \
+ })
+
+#define array_size(ar) (sizeof(ar) / sizeof(ar[0]))
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/fifo.h b/lib/fifo.h
deleted file mode 100644
index 6f9c59b5c1..0000000000
--- a/lib/fifo.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* FIFO common header.
- * Copyright (C) 2015 Kunihiro Ishiguro
- *
- * This file is part of Quagga.
- *
- * Quagga is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * Quagga is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; see the file COPYING; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __LIB_FIFO_H__
-#define __LIB_FIFO_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* FIFO -- first in first out structure and macros. */
-struct fifo {
- struct fifo *next;
- struct fifo *prev;
-};
-
-#define FIFO_INIT(F) \
- do { \
- struct fifo *Xfifo = (struct fifo *)(F); \
- Xfifo->next = Xfifo->prev = Xfifo; \
- } while (0)
-
-#define FIFO_ADD(F, N) \
- do { \
- struct fifo *Xfifo = (struct fifo *)(F); \
- struct fifo *Xnode = (struct fifo *)(N); \
- Xnode->next = Xfifo; \
- Xnode->prev = Xfifo->prev; \
- Xfifo->prev = Xfifo->prev->next = Xnode; \
- } while (0)
-
-#define FIFO_DEL(N) \
- do { \
- struct fifo *Xnode = (struct fifo *)(N); \
- Xnode->prev->next = Xnode->next; \
- Xnode->next->prev = Xnode->prev; \
- } while (0)
-
-#define FIFO_HEAD(F) \
- ((((struct fifo *)(F))->next == (struct fifo *)(F)) ? NULL : (F)->next)
-
-#define FIFO_EMPTY(F) (((struct fifo *)(F))->next == (struct fifo *)(F))
-
-#define FIFO_TOP(F) (FIFO_EMPTY(F) ? NULL : ((struct fifo *)(F))->next)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __LIB_FIFO_H__ */
diff --git a/lib/frratomic.h b/lib/frratomic.h
index e86030f83c..1e28253f2b 100644
--- a/lib/frratomic.h
+++ b/lib/frratomic.h
@@ -80,6 +80,9 @@ typedef std::atomic<uint_fast32_t> atomic_uint_fast32_t;
#define atomic_compare_exchange_weak_explicit(atom, expect, desire, mem1, \
mem2) \
__atomic_compare_exchange_n(atom, expect, desire, 1, mem1, mem2)
+#define atomic_compare_exchange_strong_explicit(atom, expect, desire, mem1, \
+ mem2) \
+ __atomic_compare_exchange_n(atom, expect, desire, 0, mem1, mem2)
/* gcc 4.1 and newer,
* clang 3.3 (possibly older)
@@ -152,7 +155,7 @@ typedef std::atomic<uint_fast32_t> atomic_uint_fast32_t;
rval; \
})
-#define atomic_compare_exchange_weak_explicit(atom, expect, desire, mem1, \
+#define atomic_compare_exchange_strong_explicit(atom, expect, desire, mem1, \
mem2) \
({ \
typeof(atom) _atom = (atom); \
@@ -166,6 +169,8 @@ typedef std::atomic<uint_fast32_t> atomic_uint_fast32_t;
*_expect = rval; \
ret; \
})
+#define atomic_compare_exchange_weak_explicit \
+ atomic_compare_exchange_strong_explicit
#define atomic_fetch_and_explicit(ptr, val, mem) \
({ \
diff --git a/lib/memory.h b/lib/memory.h
index 91a02b7966..0002ea3349 100644
--- a/lib/memory.h
+++ b/lib/memory.h
@@ -26,8 +26,6 @@
extern "C" {
#endif
-#define array_size(ar) (sizeof(ar) / sizeof(ar[0]))
-
#if defined(HAVE_MALLOC_SIZE) && !defined(HAVE_MALLOC_USABLE_SIZE)
#define malloc_usable_size(x) malloc_size(x)
#define HAVE_MALLOC_USABLE_SIZE
diff --git a/lib/privs.c b/lib/privs.c
index a19707b1c9..a3314c6c3c 100644
--- a/lib/privs.c
+++ b/lib/privs.c
@@ -917,7 +917,7 @@ void zprivs_init(struct zebra_privs_t *zprivs)
zprivs->user, zprivs->vty_group);
exit(1);
}
- if (i >= ngroups && ngroups < (int)ZEBRA_NUM_OF(groups)) {
+ if (i >= ngroups && ngroups < (int)array_size(groups)) {
groups[i] = zprivs_state.vtygrp;
}
}
diff --git a/lib/qobj.c b/lib/qobj.c
index 811645f3c3..3e3860a96a 100644
--- a/lib/qobj.c
+++ b/lib/qobj.c
@@ -27,21 +27,27 @@
#include "qobj.h"
#include "jhash.h"
-static pthread_rwlock_t nodes_lock;
-static struct hash *nodes = NULL;
-
-static unsigned int qobj_key(void *data)
+static uint32_t qobj_hash(const struct qobj_node *node)
{
- struct qobj_node *node = data;
- return (unsigned int)node->nid;
+ return (uint32_t)node->nid;
}
-static bool qobj_cmp(const void *a, const void *b)
+static int qobj_cmp(const struct qobj_node *na, const struct qobj_node *nb)
{
- const struct qobj_node *na = a, *nb = b;
- return na->nid == nb->nid;
+ if (na->nid < nb->nid)
+ return -1;
+ if (na->nid > nb->nid)
+ return 1;
+ return 0;
}
+DECLARE_HASH(qobj_nodes, struct qobj_node, nodehash,
+ qobj_cmp, qobj_hash)
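+/* generates qobj_nodes_init()/_find()/_add()/_del()/_pop() as used
+ * below; see lib/typesafe.h for the generated API
+ */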
+
+static pthread_rwlock_t nodes_lock;
+static struct qobj_nodes_head nodes = { };
+
+
void qobj_reg(struct qobj_node *node, struct qobj_nodetype *type)
{
node->type = type;
@@ -49,15 +55,15 @@ void qobj_reg(struct qobj_node *node, struct qobj_nodetype *type)
do {
node->nid = (uint64_t)random();
node->nid ^= (uint64_t)random() << 32;
- } while (!node->nid
- || hash_get(nodes, node, hash_alloc_intern) != node);
+ } while (!node->nid || qobj_nodes_find(&nodes, node));
+ qobj_nodes_add(&nodes, node);
pthread_rwlock_unlock(&nodes_lock);
}
void qobj_unreg(struct qobj_node *node)
{
pthread_rwlock_wrlock(&nodes_lock);
- hash_release(nodes, node);
+ qobj_nodes_del(&nodes, node);
pthread_rwlock_unlock(&nodes_lock);
}
@@ -65,7 +71,7 @@ struct qobj_node *qobj_get(uint64_t id)
{
struct qobj_node dummy = {.nid = id}, *rv;
pthread_rwlock_rdlock(&nodes_lock);
- rv = hash_lookup(nodes, &dummy);
+ rv = qobj_nodes_find(&nodes, &dummy);
pthread_rwlock_unlock(&nodes_lock);
return rv;
}
@@ -77,7 +83,7 @@ void *qobj_get_typed(uint64_t id, struct qobj_nodetype *type)
void *rv;
pthread_rwlock_rdlock(&nodes_lock);
- node = hash_lookup(nodes, &dummy);
+ node = qobj_nodes_find(&nodes, &dummy);
/* note: we explicitly hold the lock until after we have checked the
* type.
@@ -96,16 +102,14 @@ void *qobj_get_typed(uint64_t id, struct qobj_nodetype *type)
void qobj_init(void)
{
- if (!nodes) {
- pthread_rwlock_init(&nodes_lock, NULL);
- nodes = hash_create_size(16, qobj_key, qobj_cmp, "QOBJ Hash");
- }
+ pthread_rwlock_init(&nodes_lock, NULL);
+ qobj_nodes_init(&nodes);
}
void qobj_finish(void)
{
- hash_clean(nodes, NULL);
- hash_free(nodes);
- nodes = NULL;
+ struct qobj_node *node;
+ while ((node = qobj_nodes_pop(&nodes)))
+ qobj_nodes_del(&nodes, node);
pthread_rwlock_destroy(&nodes_lock);
}
diff --git a/lib/qobj.h b/lib/qobj.h
index d63988cbab..415eae02ef 100644
--- a/lib/qobj.h
+++ b/lib/qobj.h
@@ -21,6 +21,8 @@
#include <stdlib.h>
#include <stddef.h>
+#include "typesafe.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -69,6 +71,8 @@ struct qobj_nodetype_capnp {
};
#endif
+#include "typesafe.h"
+
/* each different kind of object will have a global variable of this type,
* which can be used by various other pieces to store type-related bits.
* type equality can be tested as pointer equality. (cf. QOBJ_GET_TYPESAFE)
@@ -79,9 +83,12 @@ struct qobj_nodetype {
RESERVED_SPACE_STRUCT(qobj_nodetype_capnp, capnp, 256)
};
+PREDECL_HASH(qobj_nodes)
+
/* anchor to be embedded somewhere in the object's struct */
struct qobj_node {
uint64_t nid;
+ struct qobj_nodes_item nodehash;
struct qobj_nodetype *type;
};
diff --git a/lib/seqlock.c b/lib/seqlock.c
new file mode 100644
index 0000000000..223d14952c
--- /dev/null
+++ b/lib/seqlock.c
@@ -0,0 +1,167 @@
+/*
+ * "Sequence" lock primitive
+ *
+ * Copyright (C) 2015 David Lamparter <equinox@diac24.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301 USA
+ */
+
+#define _GNU_SOURCE
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <unistd.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <pthread.h>
+#include <assert.h>
+
+#include "seqlock.h"
+
+#ifdef HAVE_SYNC_LINUX_FUTEX
+/* Linux-specific - sys_futex() */
+#include <sys/syscall.h>
+#include <linux/futex.h>
+
+static long sys_futex(void *addr1, int op, int val1, struct timespec *timeout,
+ void *addr2, int val3)
+{
+ return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
+}
+
+#define wait_once(sqlo, val) \
+ sys_futex((int *)&sqlo->pos, FUTEX_WAIT, (int)val, NULL, NULL, 0)
+#define wait_poke(sqlo) \
+ sys_futex((int *)&sqlo->pos, FUTEX_WAKE, INT_MAX, NULL, NULL, 0)
+
+#elif defined(HAVE_SYNC_OPENBSD_FUTEX)
+/* OpenBSD variant of the above. untested, not upstream in OpenBSD. */
+#include <sys/syscall.h>
+#include <sys/futex.h>
+
+#define wait_once(sqlo, val) \
+ futex((int *)&sqlo->pos, FUTEX_WAIT, (int)val, NULL, NULL, 0)
+#define wait_poke(sqlo) \
+ futex((int *)&sqlo->pos, FUTEX_WAKE, INT_MAX, NULL, NULL, 0)
+
+#elif defined(HAVE_SYNC_UMTX_OP)
+/* FreeBSD-specific: umtx_op() */
+#include <sys/umtx.h>
+
+#define wait_once(sqlo, val) \
+ _umtx_op((void *)&sqlo->pos, UMTX_OP_WAIT_UINT, val, NULL, NULL)
+#define wait_poke(sqlo) \
+ _umtx_op((void *)&sqlo->pos, UMTX_OP_WAKE, INT_MAX, NULL, NULL)
+
+#else
+/* generic version. used on *BSD, Solaris and OSX.
+ */
+
+#define wait_init(sqlo) do { \
+ pthread_mutex_init(&sqlo->lock, NULL); \
+ pthread_cond_init(&sqlo->wake, NULL); \
+ } while (0)
+#define wait_prep(sqlo) pthread_mutex_lock(&sqlo->lock)
+#define wait_once(sqlo, val) pthread_cond_wait(&sqlo->wake, &sqlo->lock)
+#define wait_done(sqlo) pthread_mutex_unlock(&sqlo->lock)
+#define wait_poke(sqlo) do { \
+ pthread_mutex_lock(&sqlo->lock); \
+ pthread_cond_broadcast(&sqlo->wake); \
+ pthread_mutex_unlock(&sqlo->lock); \
+ } while (0)
+
+#endif
+
+#ifndef wait_init
+#define wait_init(sqlo) /**/
+#define wait_prep(sqlo) /**/
+#define wait_done(sqlo) /**/
+#endif /* wait_init */
+
+
+void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val)
+{
+ seqlock_val_t cur, cal;
+
+ seqlock_assert_valid(val);
+
+ wait_prep(sqlo);
+ while (1) {
+ cur = atomic_load_explicit(&sqlo->pos, memory_order_acquire);
+ if (!(cur & 1))
+ break;
+ cal = cur - val - 1;
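+ /* unsigned wraparound math: cal < 0x80000000 <=> cur > val;
+ * the assert below checks the two are within 2^30 of each other
+ */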
+ assert(cal < 0x40000000 || cal > 0xc0000000);
+ if (cal < 0x80000000)
+ break;
+
+ wait_once(sqlo, cur);
+ }
+ wait_done(sqlo);
+}
+
+bool seqlock_check(struct seqlock *sqlo, seqlock_val_t val)
+{
+ seqlock_val_t cur;
+
+ seqlock_assert_valid(val);
+
+ cur = atomic_load_explicit(&sqlo->pos, memory_order_acquire);
+ if (!(cur & 1))
+ return 1;
+ cur -= val;
+ assert(cur < 0x40000000 || cur > 0xc0000000);
+ return cur < 0x80000000;
+}
+
+void seqlock_acquire_val(struct seqlock *sqlo, seqlock_val_t val)
+{
+ seqlock_assert_valid(val);
+
+ atomic_store_explicit(&sqlo->pos, val, memory_order_release);
+ wait_poke(sqlo);
+}
+
+void seqlock_release(struct seqlock *sqlo)
+{
+ atomic_store_explicit(&sqlo->pos, 0, memory_order_release);
+ wait_poke(sqlo);
+}
+
+void seqlock_init(struct seqlock *sqlo)
+{
+ sqlo->pos = 0;
+ wait_init(sqlo);
+}
+
+
+seqlock_val_t seqlock_cur(struct seqlock *sqlo)
+{
+ return atomic_load_explicit(&sqlo->pos, memory_order_acquire);
+}
+
+seqlock_val_t seqlock_bump(struct seqlock *sqlo)
+{
+ seqlock_val_t val;
+
+ val = atomic_fetch_add_explicit(&sqlo->pos, 2, memory_order_release);
+ wait_poke(sqlo);
+ return val;
+}
diff --git a/lib/seqlock.h b/lib/seqlock.h
new file mode 100644
index 0000000000..eef05a4307
--- /dev/null
+++ b/lib/seqlock.h
@@ -0,0 +1,106 @@
+/*
+ * "Sequence" lock primitive
+ *
+ * Copyright (C) 2015 David Lamparter <equinox@diac24.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301 USA
+ */
+
+#ifndef _SEQLOCK_H
+#define _SEQLOCK_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <pthread.h>
+#include "frratomic.h"
+
+/*
+ * this locking primitive is intended to be used in a 1:N setup.
+ *
+ * - one "counter" seqlock issuing increasing numbers
+ * - multiple seqlock users hold references on these numbers
+ *
+ * this is intended for implementing RCU reference-holding. There is one
+ * global counter, with threads locking a seqlock whenever they take a
+ * reference. A seqlock can also be idle/unlocked.
+ *
+ * The "counter" seqlock will always stay locked; the RCU cleanup thread
+ * continuously counts it up, waiting for threads to release or progress to a
+ * sequence number further ahead. If all threads are > N, references dropped
+ * in N can be freed.
+ *
+ * generally, the locking flow is:
+ *
+ * Thread-A Thread-B
+ *
+ * seqlock_acquire(a)
+ * | running seqlock_wait(b) -- a <= b
+ * seqlock_release() | blocked
+ * OR: seqlock_acquire(a') | -- a' > b
+ * (resumes)
+ */
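+
+/* a rough usage sketch (all names here are illustrative only):
+ *
+ * reader thread:                      RCU cleanup thread:
+ * seqlock_acquire(&mine, &ctr);       cur = seqlock_bump(&ctr);
+ * ... use RCU-held references ...     seqlock_wait(&mine, cur);
+ * seqlock_release(&mine);             ... free refs dropped <= cur ...
+ */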
+
+/* use sequentially increasing "ticket numbers". lowest bit will always
+ * be 1 to have a 'cleared' indication (i.e., counts 1, 3, 5, 7, etc.)
+ */
+typedef _Atomic uint32_t seqlock_ctr_t;
+typedef uint32_t seqlock_val_t;
+#define seqlock_assert_valid(val) assert(val & 1)
+
+
+struct seqlock {
+/* always used */
+ seqlock_ctr_t pos;
+/* used when futexes are not available (i.e. non-Linux) */
+ pthread_mutex_t lock;
+ pthread_cond_t wake;
+};
+
+
+/* sqlo = 0 - init state: not held */
+extern void seqlock_init(struct seqlock *sqlo);
+
+
+/* while (sqlo <= val) - wait until seqlock->pos > val, or seqlock unheld */
+extern void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val);
+extern bool seqlock_check(struct seqlock *sqlo, seqlock_val_t val);
+
+static inline bool seqlock_held(struct seqlock *sqlo)
+{
+ return !!atomic_load_explicit(&sqlo->pos, memory_order_relaxed);
+}
+
+/* sqlo - get seqlock position -- for the "counter" seqlock */
+extern seqlock_val_t seqlock_cur(struct seqlock *sqlo);
+/* sqlo++ - note: like x++, returns previous value, before bumping */
+extern seqlock_val_t seqlock_bump(struct seqlock *sqlo);
+
+
+/* sqlo = val - can be used on held seqlock. */
+extern void seqlock_acquire_val(struct seqlock *sqlo, seqlock_val_t val);
+/* sqlo = ref - standard pattern: acquire relative to other seqlock */
+static inline void seqlock_acquire(struct seqlock *sqlo, struct seqlock *ref)
+{
+ seqlock_acquire_val(sqlo, seqlock_cur(ref));
+}
+
+/* sqlo = 0 - set seqlock position to 0, marking as non-held */
+extern void seqlock_release(struct seqlock *sqlo);
+/* release should normally be followed by a bump on the "counter", if
+ * anything other than reading RCU items was done
+ */
+
+#endif /* _SEQLOCK_H */
diff --git a/lib/sockunion.c b/lib/sockunion.c
index af4f41f37c..bb5426d74a 100644
--- a/lib/sockunion.c
+++ b/lib/sockunion.c
@@ -472,7 +472,7 @@ unsigned int sockunion_hash(const union sockunion *su)
return jhash_1word(su->sin.sin_addr.s_addr, 0);
case AF_INET6:
return jhash2(su->sin6.sin6_addr.s6_addr32,
- ZEBRA_NUM_OF(su->sin6.sin6_addr.s6_addr32), 0);
+ array_size(su->sin6.sin6_addr.s6_addr32), 0);
}
return 0;
}
diff --git a/lib/subdir.am b/lib/subdir.am
index 3b14be4676..7027f3f0da 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -7,6 +7,7 @@ lib_libfrr_la_LIBADD = $(LIBCAP) $(UNWIND_LIBS) $(LIBYANG_LIBS)
lib_libfrr_la_SOURCES = \
lib/agg_table.c \
+ lib/atomlist.c \
lib/bfd.c \
lib/buffer.c \
lib/checksum.c \
@@ -65,6 +66,7 @@ lib_libfrr_la_SOURCES = \
lib/ringbuf.c \
lib/routemap.c \
lib/sbuf.c \
+ lib/seqlock.c \
lib/sha256.c \
lib/sigevent.c \
lib/skiplist.c \
@@ -79,6 +81,8 @@ lib_libfrr_la_SOURCES = \
lib/table.c \
lib/termtable.c \
lib/thread.c \
+ lib/typerb.c \
+ lib/typesafe.c \
lib/vector.c \
lib/vrf.c \
lib/vty.c \
@@ -130,6 +134,7 @@ lib/northbound_cli.lo: lib/northbound_cli_clippy.c
pkginclude_HEADERS += \
lib/agg_table.h \
+ lib/atomlist.h \
lib/bfd.h \
lib/bitfield.h \
lib/buffer.h \
@@ -144,7 +149,6 @@ pkginclude_HEADERS += \
lib/debug.h \
lib/distribute.h \
lib/ferr.h \
- lib/fifo.h \
lib/filter.h \
lib/freebsd-queue.h \
lib/frr_pthread.h \
@@ -193,6 +197,7 @@ pkginclude_HEADERS += \
lib/ringbuf.h \
lib/routemap.h \
lib/sbuf.h \
+ lib/seqlock.h \
lib/sha256.h \
lib/sigevent.h \
lib/skiplist.h \
@@ -206,6 +211,8 @@ pkginclude_HEADERS += \
lib/table.h \
lib/termtable.h \
lib/thread.h \
+ lib/typerb.h \
+ lib/typesafe.h \
lib/vector.h \
lib/vlan.h \
lib/vrf.h \
diff --git a/lib/thread.c b/lib/thread.c
index 2760b83fb3..5ca859a74d 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -40,6 +40,8 @@ DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master")
DEFINE_MTYPE_STATIC(LIB, THREAD_POLL, "Thread Poll Info")
DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")
+DECLARE_LIST(thread_list, struct thread, threaditem)
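+/* generates thread_list_add_tail()/_del()/_pop()/_count() etc. as used
+ * below; see lib/typesafe.h for the generated API
+ */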
+
#if defined(__APPLE__)
#include <mach/mach.h>
#include <mach/mach_time.h>
@@ -435,6 +437,9 @@ struct thread_master *thread_master_create(const char *name)
(bool (*)(const void *, const void *))cpu_record_hash_cmp,
"Thread Hash");
+ thread_list_init(&rv->event);
+ thread_list_init(&rv->ready);
+ thread_list_init(&rv->unuse);
/* Initialize the timer queues */
rv->timer = pqueue_create();
@@ -487,50 +492,6 @@ void thread_master_set_name(struct thread_master *master, const char *name)
pthread_mutex_unlock(&master->mtx);
}
-/* Add a new thread to the list. */
-static void thread_list_add(struct thread_list *list, struct thread *thread)
-{
- thread->next = NULL;
- thread->prev = list->tail;
- if (list->tail)
- list->tail->next = thread;
- else
- list->head = thread;
- list->tail = thread;
- list->count++;
-}
-
-/* Delete a thread from the list. */
-static struct thread *thread_list_delete(struct thread_list *list,
- struct thread *thread)
-{
- if (thread->next)
- thread->next->prev = thread->prev;
- else
- list->tail = thread->prev;
- if (thread->prev)
- thread->prev->next = thread->next;
- else
- list->head = thread->next;
- thread->next = thread->prev = NULL;
- list->count--;
- return thread;
-}
-
-/* Thread list is empty or not. */
-static int thread_empty(struct thread_list *list)
-{
- return list->head ? 0 : 1;
-}
-
-/* Delete top of the list and return it. */
-static struct thread *thread_trim_head(struct thread_list *list)
-{
- if (!thread_empty(list))
- return thread_list_delete(list, list->head);
- return NULL;
-}
-
#define THREAD_UNUSED_DEPTH 10
/* Move thread to unuse list. */
@@ -539,8 +500,6 @@ static void thread_add_unuse(struct thread_master *m, struct thread *thread)
pthread_mutex_t mtxc = thread->mtx;
assert(m != NULL && thread != NULL);
- assert(thread->next == NULL);
- assert(thread->prev == NULL);
thread->hist->total_active--;
memset(thread, 0, sizeof(struct thread));
@@ -549,8 +508,8 @@ static void thread_add_unuse(struct thread_master *m, struct thread *thread)
/* Restore the thread mutex context. */
thread->mtx = mtxc;
- if (m->unuse.count < THREAD_UNUSED_DEPTH) {
- thread_list_add(&m->unuse, thread);
+ if (thread_list_count(&m->unuse) < THREAD_UNUSED_DEPTH) {
+ thread_list_add_tail(&m->unuse, thread);
return;
}
@@ -558,16 +517,13 @@ static void thread_add_unuse(struct thread_master *m, struct thread *thread)
}
/* Free all unused thread. */
-static void thread_list_free(struct thread_master *m, struct thread_list *list)
+static void thread_list_free(struct thread_master *m,
+ struct thread_list_head *list)
{
struct thread *t;
- struct thread *next;
- for (t = list->head; t; t = next) {
- next = t->next;
+ while ((t = thread_list_pop(list)))
thread_free(m, t);
- list->count--;
- }
}
static void thread_array_free(struct thread_master *m,
@@ -609,9 +565,8 @@ void thread_master_free_unused(struct thread_master *m)
pthread_mutex_lock(&m->mtx);
{
struct thread *t;
- while ((t = thread_trim_head(&m->unuse)) != NULL) {
+ while ((t = thread_list_pop(&m->unuse)))
thread_free(m, t);
- }
}
pthread_mutex_unlock(&m->mtx);
}
@@ -690,7 +645,7 @@ static struct thread *thread_get(struct thread_master *m, uint8_t type,
int (*func)(struct thread *), void *arg,
debugargdef)
{
- struct thread *thread = thread_trim_head(&m->unuse);
+ struct thread *thread = thread_list_pop(&m->unuse);
struct cpu_thread_history tmp;
if (!thread) {
@@ -971,7 +926,7 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
pthread_mutex_lock(&thread->mtx);
{
thread->u.val = val;
- thread_list_add(&m->event, thread);
+ thread_list_add_tail(&m->event, thread);
}
pthread_mutex_unlock(&thread->mtx);
@@ -1063,7 +1018,7 @@ static void thread_cancel_rw(struct thread_master *master, int fd, short state)
*/
static void do_thread_cancel(struct thread_master *master)
{
- struct thread_list *list = NULL;
+ struct thread_list_head *list = NULL;
struct pqueue *queue = NULL;
struct thread **thread_array = NULL;
struct thread *thread;
@@ -1078,31 +1033,23 @@ static void do_thread_cancel(struct thread_master *master)
* need to check every thread in the ready queue. */
if (cr->eventobj) {
struct thread *t;
- thread = master->event.head;
-
- while (thread) {
- t = thread;
- thread = t->next;
-
- if (t->arg == cr->eventobj) {
- thread_list_delete(&master->event, t);
- if (t->ref)
- *t->ref = NULL;
- thread_add_unuse(master, t);
- }
+
+ for_each_safe(thread_list, &master->event, t) {
+ if (t->arg != cr->eventobj)
+ continue;
+ thread_list_del(&master->event, t);
+ if (t->ref)
+ *t->ref = NULL;
+ thread_add_unuse(master, t);
}
- thread = master->ready.head;
- while (thread) {
- t = thread;
- thread = t->next;
-
- if (t->arg == cr->eventobj) {
- thread_list_delete(&master->ready, t);
- if (t->ref)
- *t->ref = NULL;
- thread_add_unuse(master, t);
- }
+ for_each_safe(thread_list, &master->ready, t) {
+ if (t->arg != cr->eventobj)
+ continue;
+ thread_list_del(&master->ready, t);
+ if (t->ref)
+ *t->ref = NULL;
+ thread_add_unuse(master, t);
}
continue;
}
@@ -1146,7 +1093,7 @@ static void do_thread_cancel(struct thread_master *master)
assert(thread == queue->array[thread->index]);
pqueue_remove_at(thread->index, queue);
} else if (list) {
- thread_list_delete(list, thread);
+ thread_list_del(list, thread);
} else if (thread_array) {
thread_array[thread->u.fd] = NULL;
} else {
@@ -1301,7 +1248,7 @@ static int thread_process_io_helper(struct thread_master *m,
thread_array = m->write;
thread_array[thread->u.fd] = NULL;
- thread_list_add(&m->ready, thread);
+ thread_list_add_tail(&m->ready, thread);
thread->type = THREAD_READY;
/* if another pthread scheduled this file descriptor for the event we're
* responding to, no problem; we're getting to it now */
@@ -1380,24 +1327,21 @@ static unsigned int thread_process_timers(struct pqueue *queue,
return ready;
pqueue_dequeue(queue);
thread->type = THREAD_READY;
- thread_list_add(&thread->master->ready, thread);
+ thread_list_add_tail(&thread->master->ready, thread);
ready++;
}
return ready;
}
/* process a list en masse, e.g. for event thread lists */
-static unsigned int thread_process(struct thread_list *list)
+static unsigned int thread_process(struct thread_list_head *list)
{
struct thread *thread;
- struct thread *next;
unsigned int ready = 0;
- for (thread = list->head; thread; thread = next) {
- next = thread->next;
- thread_list_delete(list, thread);
+ while ((thread = thread_list_pop(list))) {
thread->type = THREAD_READY;
- thread_list_add(&thread->master->ready, thread);
+ thread_list_add_tail(&thread->master->ready, thread);
ready++;
}
return ready;
@@ -1429,7 +1373,7 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
* Attempt to flush ready queue before going into poll().
* This is performance-critical. Think twice before modifying.
*/
- if ((thread = thread_trim_head(&m->ready))) {
+ if ((thread = thread_list_pop(&m->ready))) {
fetch = thread_run(m, thread, fetch);
if (fetch->ref)
*fetch->ref = NULL;
@@ -1462,10 +1406,11 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
* In every case except the last, we need to hit poll() at least
* once per loop to avoid starvation by events
*/
- if (m->ready.count == 0)
+ if (!thread_list_count(&m->ready))
tw = thread_timer_wait(m->timer, &tv);
- if (m->ready.count != 0 || (tw && !timercmp(tw, &zerotime, >)))
+ if (thread_list_count(&m->ready) ||
+ (tw && !timercmp(tw, &zerotime, >)))
tw = &zerotime;
if (!tw && m->handler.pfdcount == 0) { /* die */
diff --git a/lib/thread.h b/lib/thread.h
index ec774a6543..7897265120 100644
--- a/lib/thread.h
+++ b/lib/thread.h
@@ -26,6 +26,7 @@
#include <poll.h>
#include "monotime.h"
#include "frratomic.h"
+#include "typesafe.h"
#ifdef __cplusplus
extern "C" {
@@ -39,12 +40,7 @@ struct rusage_t {
#define GETRUSAGE(X) thread_getrusage(X)
-/* Linked list of thread. */
-struct thread_list {
- struct thread *head;
- struct thread *tail;
- int count;
-};
+PREDECL_LIST(thread_list)
struct pqueue;
@@ -78,9 +74,7 @@ struct thread_master {
struct thread **read;
struct thread **write;
struct pqueue *timer;
- struct thread_list event;
- struct thread_list ready;
- struct thread_list unuse;
+ struct thread_list_head event, ready, unuse;
struct list *cancel_req;
bool canceled;
pthread_cond_t cancel_cond;
@@ -100,8 +94,7 @@ struct thread_master {
struct thread {
uint8_t type; /* thread type */
uint8_t add_type; /* thread type */
- struct thread *next; /* next pointer of the thread */
- struct thread *prev; /* previous pointer of the thread */
+ struct thread_list_item threaditem;
struct thread **ref; /* external reference (if given) */
struct thread_master *master; /* pointer to the struct thread_master */
int (*func)(struct thread *); /* event function */
diff --git a/lib/typerb.c b/lib/typerb.c
new file mode 100644
index 0000000000..d361e7651e
--- /dev/null
+++ b/lib/typerb.c
@@ -0,0 +1,472 @@
+/* RB-tree */
+
+/*
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "typerb.h"
+
+#define RB_BLACK 0
+#define RB_RED 1
+
+#define rb_entry typed_rb_entry
+#define rbt_tree typed_rb_root
+
+#define RBE_LEFT(_rbe) (_rbe)->rbt_left
+#define RBE_RIGHT(_rbe) (_rbe)->rbt_right
+#define RBE_PARENT(_rbe) (_rbe)->rbt_parent
+#define RBE_COLOR(_rbe) (_rbe)->rbt_color
+
+#define RBH_ROOT(_rbt) (_rbt)->rbt_root
+
+static inline void rbe_set(struct rb_entry *rbe, struct rb_entry *parent)
+{
+ RBE_PARENT(rbe) = parent;
+ RBE_LEFT(rbe) = RBE_RIGHT(rbe) = NULL;
+ RBE_COLOR(rbe) = RB_RED;
+}
+
+static inline void rbe_set_blackred(struct rb_entry *black,
+ struct rb_entry *red)
+{
+ RBE_COLOR(black) = RB_BLACK;
+ RBE_COLOR(red) = RB_RED;
+}
+
+static inline void rbe_rotate_left(struct rbt_tree *rbt, struct rb_entry *rbe)
+{
+ struct rb_entry *parent;
+ struct rb_entry *tmp;
+
+ tmp = RBE_RIGHT(rbe);
+ RBE_RIGHT(rbe) = RBE_LEFT(tmp);
+ if (RBE_RIGHT(rbe) != NULL)
+ RBE_PARENT(RBE_LEFT(tmp)) = rbe;
+
+ parent = RBE_PARENT(rbe);
+ RBE_PARENT(tmp) = parent;
+ if (parent != NULL) {
+ if (rbe == RBE_LEFT(parent))
+ RBE_LEFT(parent) = tmp;
+ else
+ RBE_RIGHT(parent) = tmp;
+ } else
+ RBH_ROOT(rbt) = tmp;
+
+ RBE_LEFT(tmp) = rbe;
+ RBE_PARENT(rbe) = tmp;
+}
+
+static inline void rbe_rotate_right(struct rbt_tree *rbt, struct rb_entry *rbe)
+{
+ struct rb_entry *parent;
+ struct rb_entry *tmp;
+
+ tmp = RBE_LEFT(rbe);
+ RBE_LEFT(rbe) = RBE_RIGHT(tmp);
+ if (RBE_LEFT(rbe) != NULL)
+ RBE_PARENT(RBE_RIGHT(tmp)) = rbe;
+
+ parent = RBE_PARENT(rbe);
+ RBE_PARENT(tmp) = parent;
+ if (parent != NULL) {
+ if (rbe == RBE_LEFT(parent))
+ RBE_LEFT(parent) = tmp;
+ else
+ RBE_RIGHT(parent) = tmp;
+ } else
+ RBH_ROOT(rbt) = tmp;
+
+ RBE_RIGHT(tmp) = rbe;
+ RBE_PARENT(rbe) = tmp;
+}
+
+static inline void rbe_insert_color(struct rbt_tree *rbt, struct rb_entry *rbe)
+{
+ struct rb_entry *parent, *gparent, *tmp;
+
+ rbt->count++;
+
+ while ((parent = RBE_PARENT(rbe)) != NULL
+ && RBE_COLOR(parent) == RB_RED) {
+ gparent = RBE_PARENT(parent);
+
+ if (parent == RBE_LEFT(gparent)) {
+ tmp = RBE_RIGHT(gparent);
+ if (tmp != NULL && RBE_COLOR(tmp) == RB_RED) {
+ RBE_COLOR(tmp) = RB_BLACK;
+ rbe_set_blackred(parent, gparent);
+ rbe = gparent;
+ continue;
+ }
+
+ if (RBE_RIGHT(parent) == rbe) {
+ rbe_rotate_left(rbt, parent);
+ tmp = parent;
+ parent = rbe;
+ rbe = tmp;
+ }
+
+ rbe_set_blackred(parent, gparent);
+ rbe_rotate_right(rbt, gparent);
+ } else {
+ tmp = RBE_LEFT(gparent);
+ if (tmp != NULL && RBE_COLOR(tmp) == RB_RED) {
+ RBE_COLOR(tmp) = RB_BLACK;
+ rbe_set_blackred(parent, gparent);
+ rbe = gparent;
+ continue;
+ }
+
+ if (RBE_LEFT(parent) == rbe) {
+ rbe_rotate_right(rbt, parent);
+ tmp = parent;
+ parent = rbe;
+ rbe = tmp;
+ }
+
+ rbe_set_blackred(parent, gparent);
+ rbe_rotate_left(rbt, gparent);
+ }
+ }
+
+ RBE_COLOR(RBH_ROOT(rbt)) = RB_BLACK;
+}
+
+static inline void rbe_remove_color(struct rbt_tree *rbt,
+ struct rb_entry *parent,
+ struct rb_entry *rbe)
+{
+ struct rb_entry *tmp;
+
+ while ((rbe == NULL || RBE_COLOR(rbe) == RB_BLACK)
+ && rbe != RBH_ROOT(rbt) && parent) {
+ if (RBE_LEFT(parent) == rbe) {
+ tmp = RBE_RIGHT(parent);
+ if (RBE_COLOR(tmp) == RB_RED) {
+ rbe_set_blackred(tmp, parent);
+ rbe_rotate_left(rbt, parent);
+ tmp = RBE_RIGHT(parent);
+ }
+ if ((RBE_LEFT(tmp) == NULL
+ || RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK)
+ && (RBE_RIGHT(tmp) == NULL
+ || RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK)) {
+ RBE_COLOR(tmp) = RB_RED;
+ rbe = parent;
+ parent = RBE_PARENT(rbe);
+ } else {
+ if (RBE_RIGHT(tmp) == NULL
+ || RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK) {
+ struct rb_entry *oleft;
+
+ oleft = RBE_LEFT(tmp);
+ if (oleft != NULL)
+ RBE_COLOR(oleft) = RB_BLACK;
+
+ RBE_COLOR(tmp) = RB_RED;
+ rbe_rotate_right(rbt, tmp);
+ tmp = RBE_RIGHT(parent);
+ }
+
+ RBE_COLOR(tmp) = RBE_COLOR(parent);
+ RBE_COLOR(parent) = RB_BLACK;
+ if (RBE_RIGHT(tmp))
+ RBE_COLOR(RBE_RIGHT(tmp)) = RB_BLACK;
+
+ rbe_rotate_left(rbt, parent);
+ rbe = RBH_ROOT(rbt);
+ break;
+ }
+ } else {
+ tmp = RBE_LEFT(parent);
+ if (RBE_COLOR(tmp) == RB_RED) {
+ rbe_set_blackred(tmp, parent);
+ rbe_rotate_right(rbt, parent);
+ tmp = RBE_LEFT(parent);
+ }
+
+ if ((RBE_LEFT(tmp) == NULL
+ || RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK)
+ && (RBE_RIGHT(tmp) == NULL
+ || RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK)) {
+ RBE_COLOR(tmp) = RB_RED;
+ rbe = parent;
+ parent = RBE_PARENT(rbe);
+ } else {
+ if (RBE_LEFT(tmp) == NULL
+ || RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK) {
+ struct rb_entry *oright;
+
+ oright = RBE_RIGHT(tmp);
+ if (oright != NULL)
+ RBE_COLOR(oright) = RB_BLACK;
+
+ RBE_COLOR(tmp) = RB_RED;
+ rbe_rotate_left(rbt, tmp);
+ tmp = RBE_LEFT(parent);
+ }
+
+ RBE_COLOR(tmp) = RBE_COLOR(parent);
+ RBE_COLOR(parent) = RB_BLACK;
+ if (RBE_LEFT(tmp) != NULL)
+ RBE_COLOR(RBE_LEFT(tmp)) = RB_BLACK;
+
+ rbe_rotate_right(rbt, parent);
+ rbe = RBH_ROOT(rbt);
+ break;
+ }
+ }
+ }
+
+ if (rbe != NULL)
+ RBE_COLOR(rbe) = RB_BLACK;
+}
+
+static inline struct rb_entry *
+rbe_remove(struct rbt_tree *rbt, struct rb_entry *rbe)
+{
+ struct rb_entry *child, *parent, *old = rbe;
+ unsigned int color;
+
+ if (RBE_LEFT(rbe) == NULL)
+ child = RBE_RIGHT(rbe);
+ else if (RBE_RIGHT(rbe) == NULL)
+ child = RBE_LEFT(rbe);
+ else {
+ struct rb_entry *tmp;
+
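+		/* two children: splice out the in-order successor (the
+		 * leftmost node in the right subtree), then move it into
+		 * old's position via the structure copy below
+		 */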
+ rbe = RBE_RIGHT(rbe);
+ while ((tmp = RBE_LEFT(rbe)) != NULL)
+ rbe = tmp;
+
+ child = RBE_RIGHT(rbe);
+ parent = RBE_PARENT(rbe);
+ color = RBE_COLOR(rbe);
+ if (child != NULL)
+ RBE_PARENT(child) = parent;
+ if (parent != NULL) {
+ if (RBE_LEFT(parent) == rbe)
+ RBE_LEFT(parent) = child;
+ else
+ RBE_RIGHT(parent) = child;
+ } else
+ RBH_ROOT(rbt) = child;
+ if (RBE_PARENT(rbe) == old)
+ parent = rbe;
+ *rbe = *old;
+
+ tmp = RBE_PARENT(old);
+ if (tmp != NULL) {
+ if (RBE_LEFT(tmp) == old)
+ RBE_LEFT(tmp) = rbe;
+ else
+ RBE_RIGHT(tmp) = rbe;
+ } else
+ RBH_ROOT(rbt) = rbe;
+
+ RBE_PARENT(RBE_LEFT(old)) = rbe;
+ if (RBE_RIGHT(old))
+ RBE_PARENT(RBE_RIGHT(old)) = rbe;
+
+ goto color;
+ }
+
+ parent = RBE_PARENT(rbe);
+ color = RBE_COLOR(rbe);
+
+ if (child != NULL)
+ RBE_PARENT(child) = parent;
+ if (parent != NULL) {
+ if (RBE_LEFT(parent) == rbe)
+ RBE_LEFT(parent) = child;
+ else
+ RBE_RIGHT(parent) = child;
+ } else
+ RBH_ROOT(rbt) = child;
+color:
+ if (color == RB_BLACK)
+ rbe_remove_color(rbt, parent, child);
+
+ rbt->count--;
+ return (old);
+}
+
+void typed_rb_remove(struct rbt_tree *rbt, struct rb_entry *rbe)
+{
+ rbe_remove(rbt, rbe);
+}
+
+struct typed_rb_entry *typed_rb_insert(struct rbt_tree *rbt,
+ struct rb_entry *rbe, int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b))
+{
+ struct rb_entry *tmp;
+ struct rb_entry *parent = NULL;
+ int comp = 0;
+
+ tmp = RBH_ROOT(rbt);
+ while (tmp != NULL) {
+ parent = tmp;
+
+ comp = cmpfn(rbe, tmp);
+ if (comp < 0)
+ tmp = RBE_LEFT(tmp);
+ else if (comp > 0)
+ tmp = RBE_RIGHT(tmp);
+ else
+ return tmp;
+ }
+
+ rbe_set(rbe, parent);
+
+ if (parent != NULL) {
+ if (comp < 0)
+ RBE_LEFT(parent) = rbe;
+ else
+ RBE_RIGHT(parent) = rbe;
+ } else
+ RBH_ROOT(rbt) = rbe;
+
+ rbe_insert_color(rbt, rbe);
+
+ return NULL;
+}
+
+/* Finds the node with the same key as "key" */
+struct rb_entry *typed_rb_find(struct rbt_tree *rbt, const struct rb_entry *key,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b))
+{
+ struct rb_entry *tmp = RBH_ROOT(rbt);
+ int comp;
+
+ while (tmp != NULL) {
+ comp = cmpfn(key, tmp);
+ if (comp < 0)
+ tmp = RBE_LEFT(tmp);
+ else if (comp > 0)
+ tmp = RBE_RIGHT(tmp);
+ else
+ return tmp;
+ }
+
+ return (NULL);
+}
+
+struct rb_entry *typed_rb_find_gteq(struct rbt_tree *rbt,
+ const struct rb_entry *key,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b))
+{
+ struct rb_entry *tmp = RBH_ROOT(rbt), *best = NULL;
+ int comp;
+
+ while (tmp != NULL) {
+ comp = cmpfn(key, tmp);
+ if (comp < 0) {
+ best = tmp;
+ tmp = RBE_LEFT(tmp);
+ } else if (comp > 0)
+ tmp = RBE_RIGHT(tmp);
+ else
+ return tmp;
+ }
+
+ return best;
+}
+
+struct rb_entry *typed_rb_find_lt(struct rbt_tree *rbt,
+ const struct rb_entry *key,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b))
+{
+ struct rb_entry *tmp = RBH_ROOT(rbt), *best = NULL;
+ int comp;
+
+ while (tmp != NULL) {
+ comp = cmpfn(key, tmp);
+ if (comp <= 0)
+ tmp = RBE_LEFT(tmp);
+ else {
+ best = tmp;
+ tmp = RBE_RIGHT(tmp);
+ }
+ }
+
+ return best;
+}
+
+struct rb_entry *typed_rb_next(struct rb_entry *rbe)
+{
+ if (RBE_RIGHT(rbe) != NULL) {
+ rbe = RBE_RIGHT(rbe);
+ while (RBE_LEFT(rbe) != NULL)
+ rbe = RBE_LEFT(rbe);
+ } else {
+ if (RBE_PARENT(rbe) && (rbe == RBE_LEFT(RBE_PARENT(rbe))))
+ rbe = RBE_PARENT(rbe);
+ else {
+ while (RBE_PARENT(rbe)
+ && (rbe == RBE_RIGHT(RBE_PARENT(rbe))))
+ rbe = RBE_PARENT(rbe);
+ rbe = RBE_PARENT(rbe);
+ }
+ }
+
+ return rbe;
+}
+
+struct rb_entry *typed_rb_min(struct rbt_tree *rbt)
+{
+ struct rb_entry *rbe = RBH_ROOT(rbt);
+ struct rb_entry *parent = NULL;
+
+ while (rbe != NULL) {
+ parent = rbe;
+ rbe = RBE_LEFT(rbe);
+ }
+
+ return parent;
+}
diff --git a/lib/typerb.h b/lib/typerb.h
new file mode 100644
index 0000000000..3d8db06fe0
--- /dev/null
+++ b/lib/typerb.h
@@ -0,0 +1,182 @@
+/*
+ * The following Red-Black tree implementation is based off code with
+ * original copyright:
+ *
+ * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _FRR_TYPERB_H
+#define _FRR_TYPERB_H
+
+#include "typesafe.h"
+
+struct typed_rb_entry {
+ struct typed_rb_entry *rbt_parent;
+ struct typed_rb_entry *rbt_left;
+ struct typed_rb_entry *rbt_right;
+ unsigned int rbt_color;
+};
+
+struct typed_rb_root {
+ struct typed_rb_entry *rbt_root;
+ size_t count;
+};
+
+struct typed_rb_entry *typed_rb_insert(struct typed_rb_root *,
+ struct typed_rb_entry *rbe,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b));
+void typed_rb_remove(struct typed_rb_root *, struct typed_rb_entry *rbe);
+struct typed_rb_entry *typed_rb_find(struct typed_rb_root *,
+ const struct typed_rb_entry *rbe,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b));
+struct typed_rb_entry *typed_rb_find_gteq(struct typed_rb_root *,
+ const struct typed_rb_entry *rbe,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b));
+struct typed_rb_entry *typed_rb_find_lt(struct typed_rb_root *,
+ const struct typed_rb_entry *rbe,
+ int (*cmpfn)(
+ const struct typed_rb_entry *a,
+ const struct typed_rb_entry *b));
+struct typed_rb_entry *typed_rb_min(struct typed_rb_root *);
+struct typed_rb_entry *typed_rb_next(struct typed_rb_entry *);
+
+#define _PREDECL_RBTREE(prefix) \
+struct prefix ## _head { struct typed_rb_root rr; }; \
+struct prefix ## _item { struct typed_rb_entry re; };
+
+#define INIT_RBTREE_UNIQ(var) { }
+#define INIT_RBTREE_NONUNIQ(var) { }
+
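+/* use as (same pattern as the other containers in typesafe.h; "namelist"
+ * and "struct name" are illustrative):
+ *
+ * PREDECL_RBTREE_UNIQ(namelist)
+ * struct name {
+ *   struct namelist_item nlitem;
+ * }
+ * DECLARE_RBTREE_UNIQ(namelist, struct name, nlitem, cmpfunc)
+ */
+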
+#define _DECLARE_RBTREE(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_insert(&h->rr, &item->field.re, cmpfn_uq); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_find_gteq(&h->rr, &item->field.re, cmpfn_nuq); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_find_lt(&h->rr, &item->field.re, cmpfn_nuq); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ typed_rb_remove(&h->rr, &item->field.re); \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_min(&h->rr); \
+ if (!re) \
+ return NULL; \
+ typed_rb_remove(&h->rr, re); \
+ return container_of(re, type, field.re); \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_min(&h->rr); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_next(&item->field.re); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = item ? typed_rb_next(&item->field.re) : NULL; \
+ return container_of_null(re, type, field.re); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->rr.count; \
+} \
+/* ... */
+
+#define PREDECL_RBTREE_UNIQ(prefix) \
+ _PREDECL_RBTREE(prefix)
+#define DECLARE_RBTREE_UNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct typed_rb_entry *a, \
+ const struct typed_rb_entry *b) \
+{ \
+ return cmpfn(container_of(a, type, field.re), \
+ container_of(b, type, field.re)); \
+} \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = typed_rb_find(&h->rr, &item->field.re, &prefix ## __cmp); \
+ return container_of_null(re, type, field.re); \
+} \
+ \
+_DECLARE_RBTREE(prefix, type, field, prefix ## __cmp, prefix ## __cmp) \
+/* ... */
+
+#define PREDECL_RBTREE_NONUNIQ(prefix) \
+ _PREDECL_RBTREE(prefix)
+#define DECLARE_RBTREE_NONUNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct typed_rb_entry *a, \
+ const struct typed_rb_entry *b) \
+{ \
+ return cmpfn(container_of(a, type, field.re), \
+ container_of(b, type, field.re)); \
+} \
+macro_inline int prefix ## __cmp_uq(const struct typed_rb_entry *a, \
+ const struct typed_rb_entry *b) \
+{ \
+ int cmpval = cmpfn(container_of(a, type, field.re), \
+ container_of(b, type, field.re)); \
+ if (cmpval) \
+ return cmpval; \
+ if (a < b) \
+ return -1; \
+ if (a > b) \
+ return 1; \
+ return 0; \
+} \
+ \
+_DECLARE_RBTREE(prefix, type, field, prefix ## __cmp, prefix ## __cmp_uq) \
+/* ... */
+
+#endif /* _FRR_TYPERB_H */
diff --git a/lib/typesafe.c b/lib/typesafe.c
new file mode 100644
index 0000000000..bd269e9b54
--- /dev/null
+++ b/lib/typesafe.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2019 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "typesafe.h"
+#include "memory.h"
+
+DEFINE_MTYPE_STATIC(LIB, TYPEDHASH_BUCKET, "Typed-hash bucket")
+DEFINE_MTYPE_STATIC(LIB, SKIPLIST_OFLOW, "Skiplist overflow")
+
+#if 0
+static void hash_consistency_check(struct thash_head *head)
+{
+ uint32_t i;
+ struct thash_item *item, *prev;
+
+ for (i = 0; i < HASH_SIZE(*head); i++) {
+ item = head->entries[i];
+ prev = NULL;
+ while (item) {
+ assert(HASH_KEY(*head, item->hashval) == i);
+ assert(!prev || item->hashval >= prev->hashval);
+ prev = item;
+ item = item->next;
+ }
+ }
+}
+#else
+#define hash_consistency_check(x)
+#endif
+
+void typesafe_hash_grow(struct thash_head *head)
+{
+ uint32_t newsize = head->count, i, j;
+ uint8_t newshift, delta;
+
+ hash_consistency_check(head);
+
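+	/* round head->count up to the next power of two (bit-smear trick)
+	 * and encode it as a tabshift; e.g. count 5 => newsize 8,
+	 * newshift 4, _HASH_SIZE(4) == 8 buckets
+	 */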
+ newsize |= newsize >> 1;
+ newsize |= newsize >> 2;
+ newsize |= newsize >> 4;
+ newsize |= newsize >> 8;
+ newsize |= newsize >> 16;
+ newsize++;
+ newshift = __builtin_ctz(newsize) + 1;
+
+ if (head->maxshift && newshift > head->maxshift)
+ newshift = head->maxshift;
+ if (newshift == head->tabshift)
+ return;
+ newsize = _HASH_SIZE(newshift);
+
+ head->entries = XREALLOC(MTYPE_TYPEDHASH_BUCKET, head->entries,
+ sizeof(head->entries[0]) * newsize);
+ memset(head->entries + HASH_SIZE(*head), 0,
+ sizeof(head->entries[0]) *
+ (newsize - HASH_SIZE(*head)));
+
+ delta = newshift - head->tabshift;
+
+ i = HASH_SIZE(*head);
+ if (i == 0)
+ goto out;
+ do {
+ struct thash_item **apos, *item;
+
+ i--;
+ apos = &head->entries[i];
+
+ for (j = 0; j < (1U << delta); j++) {
+ item = *apos;
+ *apos = NULL;
+
+ head->entries[(i << delta) + j] = item;
+ apos = &head->entries[(i << delta) + j];
+
+ while ((item = *apos)) {
+ uint32_t midbits;
+ midbits = _HASH_KEY(newshift, item->hashval);
+ midbits &= (1 << delta) - 1;
+ if (midbits > j)
+ break;
+ apos = &item->next;
+ }
+ }
+ } while (i > 0);
+
+out:
+ head->tabshift = newshift;
+ hash_consistency_check(head);
+}
+
+void typesafe_hash_shrink(struct thash_head *head)
+{
+ uint32_t newsize = head->count, i, j;
+ uint8_t newshift, delta;
+
+ hash_consistency_check(head);
+
+ if (!head->count) {
+ XFREE(MTYPE_TYPEDHASH_BUCKET, head->entries);
+ head->tabshift = 0;
+ return;
+ }
+
+ newsize |= newsize >> 1;
+ newsize |= newsize >> 2;
+ newsize |= newsize >> 4;
+ newsize |= newsize >> 8;
+ newsize |= newsize >> 16;
+ newsize++;
+ newshift = __builtin_ctz(newsize) + 1;
+
+ if (head->minshift && newshift < head->minshift)
+ newshift = head->minshift;
+ if (newshift == head->tabshift)
+ return;
+ newsize = _HASH_SIZE(newshift);
+
+ delta = head->tabshift - newshift;
+
+ for (i = 0; i < newsize; i++) {
+ struct thash_item **apos = &head->entries[i];
+
+ for (j = 0; j < (1U << delta); j++) {
+ *apos = head->entries[(i << delta) + j];
+ while (*apos)
+ apos = &(*apos)->next;
+ }
+ }
+ head->entries = XREALLOC(MTYPE_TYPEDHASH_BUCKET, head->entries,
+ sizeof(head->entries[0]) * newsize);
+ head->tabshift = newshift;
+
+ hash_consistency_check(head);
+}
+
+/* skiplist */
+
+static inline struct sskip_item *sl_level_get(struct sskip_item *item,
+ size_t level)
+{
+ if (level < SKIPLIST_OVERFLOW)
+ return item->next[level];
+ if (level == SKIPLIST_OVERFLOW && !((uintptr_t)item->next[level] & 1))
+ return item->next[level];
+
+ uintptr_t ptrval = (uintptr_t)item->next[SKIPLIST_OVERFLOW];
+ ptrval &= UINTPTR_MAX - 3;
+ struct sskip_overflow *oflow = (struct sskip_overflow *)ptrval;
+ return oflow->next[level - SKIPLIST_OVERFLOW];
+}
+
+static inline void sl_level_set(struct sskip_item *item, size_t level,
+ struct sskip_item *value)
+{
+ if (level < SKIPLIST_OVERFLOW)
+ item->next[level] = value;
+ else if (level == SKIPLIST_OVERFLOW && !((uintptr_t)item->next[level] & 1))
+ item->next[level] = value;
+ else {
+ uintptr_t ptrval = (uintptr_t)item->next[SKIPLIST_OVERFLOW];
+ ptrval &= UINTPTR_MAX - 3;
+ struct sskip_overflow *oflow = (struct sskip_overflow *)ptrval;
+ oflow->next[level - SKIPLIST_OVERFLOW] = value;
+ }
+}
+
+struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
+ struct sskip_item *item,
+ int (*cmpfn)(const struct sskip_item *a,
+ const struct sskip_item *b))
+{
+ size_t level = SKIPLIST_MAXDEPTH, newlevel, auxlevel;
+ struct sskip_item *prev = &head->hitem, *next, *auxprev, *auxnext;
+ int cmpval;
+
+ /* level / newlevel are 1-counted here */
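+	/* __builtin_ctz() of a uniform random value gives a geometric
+	 * distribution (P(newlevel > n) == 2^-n), the classic skiplist
+	 * level choice; the result is capped to SKIPLIST_MAXDEPTH below
+	 */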
+ newlevel = __builtin_ctz(random()) + 1;
+ if (newlevel > SKIPLIST_MAXDEPTH)
+ newlevel = SKIPLIST_MAXDEPTH;
+
+ next = NULL;
+ while (level >= newlevel) {
+ next = sl_level_get(prev, level - 1);
+ if (!next) {
+ level--;
+ continue;
+ }
+ cmpval = cmpfn(next, item);
+ if (cmpval < 0) {
+ prev = next;
+ continue;
+ } else if (cmpval == 0) {
+ return next;
+ }
+ level--;
+ }
+
+	/* check for a duplicate item - this could be removed if the code
+	 * doesn't rely on it, but it's not really worth the complication. */
+ auxlevel = level;
+ auxprev = prev;
+ while (auxlevel) {
+ auxlevel--;
+ auxnext = sl_level_get(auxprev, auxlevel);
+ cmpval = 1;
+ while (auxnext && (cmpval = cmpfn(auxnext, item)) < 0) {
+ auxprev = auxnext;
+ auxnext = sl_level_get(auxprev, auxlevel);
+ }
+ if (cmpval == 0)
+ return auxnext;
+	}
+
+ head->count++;
+ memset(item, 0, sizeof(*item));
+ if (newlevel > SKIPLIST_EMBED) {
+ struct sskip_overflow *oflow;
+ oflow = XMALLOC(MTYPE_SKIPLIST_OFLOW, sizeof(void *)
+ * (newlevel - SKIPLIST_OVERFLOW));
+ item->next[SKIPLIST_OVERFLOW] = (struct sskip_item *)
+ ((uintptr_t)oflow | 1);
+ }
+
+ sl_level_set(item, level, next);
+ sl_level_set(prev, level, item);
+	/* level is now 0-counted and < newlevel */
+ while (level) {
+ level--;
+ next = sl_level_get(prev, level);
+ while (next && cmpfn(next, item) < 0) {
+ prev = next;
+ next = sl_level_get(prev, level);
+ }
+
+ sl_level_set(item, level, next);
+ sl_level_set(prev, level, item);
+	}
+ return NULL;
+}
+
+/* NOTE: level counting below is 1-based since that makes the code simpler! */
+
+struct sskip_item *typesafe_skiplist_find(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b))
+{
+ size_t level = SKIPLIST_MAXDEPTH;
+ struct sskip_item *prev = &head->hitem, *next;
+ int cmpval;
+
+ while (level) {
+ next = sl_level_get(prev, level - 1);
+ if (!next) {
+ level--;
+ continue;
+ }
+ cmpval = cmpfn(next, item);
+ if (cmpval < 0) {
+ prev = next;
+ continue;
+ }
+ if (cmpval == 0)
+ return next;
+ level--;
+ }
+ return NULL;
+}
+
+struct sskip_item *typesafe_skiplist_find_gteq(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b))
+{
+ size_t level = SKIPLIST_MAXDEPTH;
+ struct sskip_item *prev = &head->hitem, *next;
+ int cmpval;
+
+ while (level) {
+ next = sl_level_get(prev, level - 1);
+ if (!next) {
+ level--;
+ continue;
+ }
+ cmpval = cmpfn(next, item);
+ if (cmpval < 0) {
+ prev = next;
+ continue;
+ }
+ if (cmpval == 0)
+ return next;
+ level--;
+ }
+ return next;
+}
+
+struct sskip_item *typesafe_skiplist_find_lt(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b))
+{
+ size_t level = SKIPLIST_MAXDEPTH;
+ struct sskip_item *prev = &head->hitem, *next, *best = NULL;
+ int cmpval;
+
+ while (level) {
+ next = sl_level_get(prev, level - 1);
+ if (!next) {
+ level--;
+ continue;
+ }
+ cmpval = cmpfn(next, item);
+ if (cmpval < 0) {
+ best = prev = next;
+ continue;
+ }
+ level--;
+ }
+ return best;
+}
+
+void typesafe_skiplist_del(struct sskip_head *head, struct sskip_item *item,
+ int (*cmpfn)(const struct sskip_item *a,
+ const struct sskip_item *b))
+{
+ size_t level = SKIPLIST_MAXDEPTH;
+ struct sskip_item *prev = &head->hitem, *next;
+ int cmpval;
+
+ while (level) {
+ next = sl_level_get(prev, level - 1);
+ if (!next) {
+ level--;
+ continue;
+ }
+ if (next == item) {
+ sl_level_set(prev, level - 1,
+ sl_level_get(item, level - 1));
+ level--;
+ continue;
+ }
+ cmpval = cmpfn(next, item);
+ if (cmpval < 0) {
+ prev = next;
+ continue;
+ }
+ level--;
+ }
+
+ /* TBD: assert when trying to remove non-existing item? */
+ head->count--;
+
+ if ((uintptr_t)item->next[SKIPLIST_OVERFLOW] & 1) {
+ uintptr_t ptrval = (uintptr_t)item->next[SKIPLIST_OVERFLOW];
+ ptrval &= UINTPTR_MAX - 3;
+ struct sskip_overflow *oflow = (struct sskip_overflow *)ptrval;
+ XFREE(MTYPE_SKIPLIST_OFLOW, oflow);
+ }
+ memset(item, 0, sizeof(*item));
+}
diff --git a/lib/typesafe.h b/lib/typesafe.h
new file mode 100644
index 0000000000..bbf3ce8f1c
--- /dev/null
+++ b/lib/typesafe.h
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _FRR_TYPESAFE_H
+#define _FRR_TYPESAFE_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <assert.h>
+#include "compiler.h"
+
+/* generic macros for all list-like types */
+
+#define for_each(prefix, head, item) \
+ for (item = prefix##_first(head); item; \
+ item = prefix##_next(head, item))
+#define for_each_safe(prefix, head, item) \
+ for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
+ prefix##_next_safe(head, \
+ (item = prefix##_first(head))); \
+ item; \
+ item = prefix##_safe, \
+ prefix##_safe = prefix##_next_safe(head, prefix##_safe))
+#define for_each_from(prefix, head, item, from) \
+ for (item = from, from = prefix##_next_safe(head, item); \
+ item; \
+ item = from, from = prefix##_next_safe(head, from))
+
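+/* e.g., with a hypothetical "namelist" list of struct name (declared as
+ * shown further below), the _safe variant permits deleting the current
+ * item while iterating:
+ *
+ *   struct name *n;
+ *   for_each_safe(namelist, &head, n)
+ *           namelist_del(&head, n);
+ */
+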
+/* single-linked list, unsorted/arbitrary.
+ * can be used as queue with add_tail / pop
+ */
+
+/* don't use these structs directly */
+struct slist_item {
+ struct slist_item *next;
+};
+
+struct slist_head {
+ struct slist_item *first, **last_next;
+ size_t count;
+};
+
+static inline void typesafe_list_add(struct slist_head *head,
+ struct slist_item **pos, struct slist_item *item)
+{
+ item->next = *pos;
+ *pos = item;
+ if (pos == head->last_next)
+ head->last_next = &item->next;
+ head->count++;
+}
+
+/* use as:
+ *
+ * PREDECL_LIST(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_LIST(namelist, struct name, nlitem)
+ */
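+/* a minimal usage sketch under those declarations ("n" and what is done
+ * with it are illustrative):
+ *
+ *   struct namelist_head head = INIT_LIST(head);
+ *   struct name *n = ...;
+ *
+ *   namelist_add_tail(&head, n);
+ *   for_each(namelist, &head, n)
+ *           process(n);
+ */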
+#define PREDECL_LIST(prefix) \
+struct prefix ## _head { struct slist_head sh; }; \
+struct prefix ## _item { struct slist_item si; };
+
+#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
+
+#define DECLARE_LIST(prefix, type, field) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+ h->sh.last_next = &h->sh.first; \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
+{ \
+ typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
+} \
+macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
+{ \
+ typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
+} \
+macro_inline void prefix ## _add_after(struct prefix##_head *h, \
+ type *after, type *item) \
+{ \
+ struct slist_item **nextp; \
+ nextp = after ? &after->field.si.next : &h->sh.first; \
+ typesafe_list_add(&h->sh, nextp, &item->field.si); \
+} \
+/* TODO: del_hint */ \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ struct slist_item **iter = &h->sh.first; \
+ while (*iter && *iter != &item->field.si) \
+ iter = &(*iter)->next; \
+ if (!*iter) \
+ return; \
+ h->sh.count--; \
+ *iter = item->field.si.next; \
+ if (!item->field.si.next) \
+ h->sh.last_next = iter; \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct slist_item *sitem = h->sh.first; \
+ if (!sitem) \
+ return NULL; \
+ h->sh.count--; \
+ h->sh.first = sitem->next; \
+ if (h->sh.first == NULL) \
+ h->sh.last_next = &h->sh.first; \
+ return container_of(sitem, type, field.si); \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ return container_of_null(h->sh.first, type, field.si); \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct slist_item *sitem = &item->field.si; \
+ return container_of_null(sitem->next, type, field.si); \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ struct slist_item *sitem; \
+ if (!item) \
+ return NULL; \
+ sitem = &item->field.si; \
+ return container_of_null(sitem->next, type, field.si); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->sh.count; \
+} \
+/* ... */
+
+/* single-linked list, sorted.
+ * can be used as priority queue with add / pop
+ */
+
+/* don't use these structs directly */
+struct ssort_item {
+ struct ssort_item *next;
+};
+
+struct ssort_head {
+ struct ssort_item *first;
+ size_t count;
+};
+
+/* use as:
+ *
+ * PREDECL_SORTLIST_UNIQ(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_SORTLIST_UNIQ(namelist, struct name, nlitem, cmpfunc)
+ */
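+/* for the _UNIQ variant, namelist_add() returns NULL on success or the
+ * already-present item with an equal key; the _NONUNIQ variant breaks
+ * compare ties by item address, so adding distinct items always succeeds
+ */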
+#define _PREDECL_SORTLIST(prefix) \
+struct prefix ## _head { struct ssort_head sh; }; \
+struct prefix ## _item { struct ssort_item si; };
+
+#define INIT_SORTLIST_UNIQ(var) { }
+#define INIT_SORTLIST_NONUNIQ(var) { }
+
+#define PREDECL_SORTLIST_UNIQ(prefix) \
+ _PREDECL_SORTLIST(prefix)
+#define PREDECL_SORTLIST_NONUNIQ(prefix) \
+ _PREDECL_SORTLIST(prefix)
+
+#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ struct ssort_item **np = &h->sh.first; \
+ int c = 1; \
+ while (*np && (c = cmpfn_uq( \
+ container_of(*np, type, field.si), item)) < 0) \
+ np = &(*np)->next; \
+ if (c == 0) \
+ return container_of(*np, type, field.si); \
+ item->field.si.next = *np; \
+ *np = &item->field.si; \
+ h->sh.count++; \
+ return NULL; \
+} \
+macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct ssort_item *sitem = h->sh.first; \
+ int cmpval = 0; \
+	while (sitem && (cmpval = cmpfn_nuq( \
+			container_of(sitem, type, field.si), item)) < 0) \
+ sitem = sitem->next; \
+ return container_of_null(sitem, type, field.si); \
+} \
+macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct ssort_item *prev = NULL, *sitem = h->sh.first; \
+ int cmpval = 0; \
+	while (sitem && (cmpval = cmpfn_nuq( \
+			container_of(sitem, type, field.si), item)) < 0) \
+ sitem = (prev = sitem)->next; \
+ return container_of_null(prev, type, field.si); \
+} \
+/* TODO: del_hint */ \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ struct ssort_item **iter = &h->sh.first; \
+ while (*iter && *iter != &item->field.si) \
+ iter = &(*iter)->next; \
+ if (!*iter) \
+ return; \
+ h->sh.count--; \
+ *iter = item->field.si.next; \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct ssort_item *sitem = h->sh.first; \
+ if (!sitem) \
+ return NULL; \
+ h->sh.count--; \
+ h->sh.first = sitem->next; \
+ return container_of(sitem, type, field.si); \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ return container_of_null(h->sh.first, type, field.si); \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct ssort_item *sitem = &item->field.si; \
+ return container_of_null(sitem->next, type, field.si); \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ struct ssort_item *sitem; \
+ if (!item) \
+ return NULL; \
+ sitem = &item->field.si; \
+ return container_of_null(sitem->next, type, field.si); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->sh.count; \
+} \
+/* ... */
+
+#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
+ _DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn) \
+ \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ struct ssort_item *sitem = h->sh.first; \
+ int cmpval = 0; \
+	while (sitem && (cmpval = cmpfn( \
+			container_of(sitem, type, field.si), item)) < 0) \
+ sitem = sitem->next; \
+ if (!sitem || cmpval > 0) \
+ return NULL; \
+ return container_of(sitem, type, field.si); \
+} \
+/* ... */
+
+#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
+macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
+{ \
+ int cmpval = cmpfn(a, b); \
+ if (cmpval) \
+ return cmpval; \
+ if (a < b) \
+ return -1; \
+ if (a > b) \
+ return 1; \
+ return 0; \
+} \
+ _DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp) \
+/* ... */
+
+
+/* hash, "sorted" by hash value
+ */
+
+/* don't use these structs directly */
+struct thash_item {
+ struct thash_item *next;
+ uint32_t hashval;
+};
+
+struct thash_head {
+ struct thash_item **entries;
+ uint32_t count;
+
+ uint8_t tabshift;
+ uint8_t minshift, maxshift;
+};
+
+#define _HASH_SIZE(tabshift) \
+ ((1U << (tabshift)) >> 1)
+#define HASH_SIZE(head) \
+ _HASH_SIZE((head).tabshift)
+#define _HASH_KEY(tabshift, val) \
+ ((val) >> (33 - (tabshift)))
+#define HASH_KEY(head, val) \
+ _HASH_KEY((head).tabshift, val)
+#define HASH_GROW_THRESHOLD(head) \
+ ((head).count >= HASH_SIZE(head))
+#define HASH_SHRINK_THRESHOLD(head) \
+ ((head).count <= (HASH_SIZE(head) - 1) / 2)
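+
+/* tabshift == 0 means "no table allocated"; otherwise the table holds
+ * 2^(tabshift - 1) buckets, so _HASH_KEY() can select a bucket from the
+ * top (tabshift - 1) bits of the 32-bit hash value with a single shift
+ */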
+
+extern void typesafe_hash_grow(struct thash_head *head);
+extern void typesafe_hash_shrink(struct thash_head *head);
+
+/* use as:
+ *
+ * PREDECL_HASH(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
+ */
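+/* a sketch of the two callbacks; the "text" member, name_cmp/name_hash,
+ * and the use of jhash() are illustrative assumptions:
+ *
+ *   static int name_cmp(const struct name *a, const struct name *b)
+ *   {
+ *           return strcmp(a->text, b->text);
+ *   }
+ *   static uint32_t name_hash(const struct name *n)
+ *   {
+ *           return jhash(n->text, strlen(n->text), 0xc0ffee);
+ *   }
+ *   DECLARE_HASH(namelist, struct name, nlitem, name_cmp, name_hash)
+ */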
+#define PREDECL_HASH(prefix) \
+struct prefix ## _head { struct thash_head hh; }; \
+struct prefix ## _item { struct thash_item hi; };
+
+#define INIT_HASH(var) { }
+
+#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ assert(h->hh.count == 0); \
+ h->hh.minshift = 0; \
+ typesafe_hash_shrink(&h->hh); \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ h->hh.count++; \
+ if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
+ typesafe_hash_grow(&h->hh); \
+ \
+ uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
+ item->field.hi.hashval = hval; \
+ struct thash_item **np = &h->hh.entries[hbits]; \
+ while (*np && (*np)->hashval < hval) \
+ np = &(*np)->next; \
+	/* hash collisions: check all items with an equal hash value */ \
+	while (*np && (*np)->hashval == hval) { \
+		if (cmpfn(container_of(*np, type, field.hi), item) == 0) { \
+			h->hh.count--; \
+			return container_of(*np, type, field.hi); \
+		} \
+		np = &(*np)->next; \
+	} \
+ item->field.hi.next = *np; \
+ *np = &item->field.hi; \
+ return NULL; \
+} \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ if (!h->hh.tabshift) \
+ return NULL; \
+ uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
+ struct thash_item *hitem = h->hh.entries[hbits]; \
+ while (hitem && hitem->hashval < hval) \
+ hitem = hitem->next; \
+ while (hitem && hitem->hashval == hval) { \
+ if (!cmpfn(container_of(hitem, type, field.hi), item)) \
+ return container_of(hitem, type, field.hi); \
+ hitem = hitem->next; \
+ } \
+ return NULL; \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ if (!h->hh.tabshift) \
+ return; \
+ uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
+ struct thash_item **np = &h->hh.entries[hbits]; \
+ while (*np && (*np)->hashval < hval) \
+ np = &(*np)->next; \
+ while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
+ np = &(*np)->next; \
+ if (*np != &item->field.hi) \
+ return; \
+ *np = item->field.hi.next; \
+ item->field.hi.next = NULL; \
+ h->hh.count--; \
+ if (HASH_SHRINK_THRESHOLD(h->hh)) \
+ typesafe_hash_shrink(&h->hh); \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ uint32_t i; \
+ for (i = 0; i < HASH_SIZE(h->hh); i++) \
+ if (h->hh.entries[i]) { \
+ struct thash_item *hitem = h->hh.entries[i]; \
+ h->hh.entries[i] = hitem->next; \
+ h->hh.count--; \
+ hitem->next = NULL; \
+ if (HASH_SHRINK_THRESHOLD(h->hh)) \
+ typesafe_hash_shrink(&h->hh); \
+ return container_of(hitem, type, field.hi); \
+ } \
+ return NULL; \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ uint32_t i; \
+ for (i = 0; i < HASH_SIZE(h->hh); i++) \
+ if (h->hh.entries[i]) \
+ return container_of(h->hh.entries[i], type, field.hi); \
+ return NULL; \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct thash_item *hitem = &item->field.hi; \
+ if (hitem->next) \
+ return container_of(hitem->next, type, field.hi); \
+ uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
+ for (; i < HASH_SIZE(h->hh); i++) \
+ if (h->hh.entries[i]) \
+ return container_of(h->hh.entries[i], type, field.hi); \
+ return NULL; \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ if (!item) \
+ return NULL; \
+ return prefix ## _next(h, item); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->hh.count; \
+} \
+/* ... */
+
+/* skiplist, sorted.
+ * can be used as priority queue with add / pop
+ */
+
+/* don't use these structs directly */
+#define SKIPLIST_MAXDEPTH 16
+#define SKIPLIST_EMBED 4
+#define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)
+
+struct sskip_item {
+ struct sskip_item *next[SKIPLIST_EMBED];
+};
+
+struct sskip_overflow {
+ struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
+};
+
+struct sskip_head {
+ struct sskip_item hitem;
+ struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
+ size_t count;
+};
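+
+/* next[0 .. SKIPLIST_OVERFLOW-1] always live inline in sskip_item.  The
+ * last embedded slot either holds the level-SKIPLIST_OVERFLOW next
+ * pointer directly, or - tagged by setting bit 0 - points to a separately
+ * allocated sskip_overflow carrying the remaining levels (see
+ * sl_level_get() / sl_level_set() in typesafe.c)
+ */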
+
+/* use as:
+ *
+ * PREDECL_SKIPLIST_UNIQ(namelist)
+ * struct name {
+ * struct namelist_item nlitem;
+ * }
+ * DECLARE_SKIPLIST_UNIQ(namelist, struct name, nlitem, cmpfunc)
+ */
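+/* semantics match the sorted list: the _UNIQ _add() returns NULL on
+ * success or the existing duplicate item, while _NONUNIQ breaks compare
+ * ties by item address
+ */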
+#define _PREDECL_SKIPLIST(prefix) \
+struct prefix ## _head { struct sskip_head sh; }; \
+struct prefix ## _item { struct sskip_item si; };
+
+#define INIT_SKIPLIST_UNIQ(var) { }
+#define INIT_SKIPLIST_NONUNIQ(var) { }
+
+#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
+ \
+macro_inline void prefix ## _init(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+ h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
+ ((uintptr_t)h->sh.overflow | 1); \
+} \
+macro_inline void prefix ## _fini(struct prefix##_head *h) \
+{ \
+ memset(h, 0, sizeof(*h)); \
+} \
+macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
+{ \
+ struct sskip_item *si; \
+ si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
+ return container_of_null(si, type, field.si); \
+} \
+macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
+ &item->field.si, cmpfn_nuq); \
+ return container_of_null(sitem, type, field.si); \
+} \
+macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
+ const type *item) \
+{ \
+ struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
+ &item->field.si, cmpfn_nuq); \
+ return container_of_null(sitem, type, field.si); \
+} \
+macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
+{ \
+ typesafe_skiplist_del(&h->sh, &item->field.si, cmpfn_uq); \
+} \
+macro_inline type *prefix ## _pop(struct prefix##_head *h) \
+{ \
+ struct sskip_item *sitem = h->sh.hitem.next[0]; \
+ if (!sitem) \
+ return NULL; \
+ typesafe_skiplist_del(&h->sh, sitem, cmpfn_uq); \
+ return container_of(sitem, type, field.si); \
+} \
+macro_pure type *prefix ## _first(struct prefix##_head *h) \
+{ \
+ struct sskip_item *first = h->sh.hitem.next[0]; \
+ return container_of_null(first, type, field.si); \
+} \
+macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
+{ \
+ struct sskip_item *next = item->field.si.next[0]; \
+ return container_of_null(next, type, field.si); \
+} \
+macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
+{ \
+ struct sskip_item *next; \
+ next = item ? item->field.si.next[0] : NULL; \
+ return container_of_null(next, type, field.si); \
+} \
+macro_pure size_t prefix ## _count(struct prefix##_head *h) \
+{ \
+ return h->sh.count; \
+} \
+/* ... */
+
+#define PREDECL_SKIPLIST_UNIQ(prefix) \
+ _PREDECL_SKIPLIST(prefix)
+#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct sskip_item *a, \
+ const struct sskip_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.si), \
+ container_of(b, type, field.si)); \
+} \
+macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
+{ \
+ struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
+ &item->field.si, &prefix ## __cmp); \
+ return container_of_null(sitem, type, field.si); \
+} \
+ \
+_DECLARE_SKIPLIST(prefix, type, field, \
+ prefix ## __cmp, prefix ## __cmp) \
+/* ... */
+
+#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
+ _PREDECL_SKIPLIST(prefix)
+#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
+ \
+macro_inline int prefix ## __cmp(const struct sskip_item *a, \
+ const struct sskip_item *b) \
+{ \
+ return cmpfn(container_of(a, type, field.si), \
+ container_of(b, type, field.si)); \
+} \
+macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
+ const struct sskip_item *b) \
+{ \
+ int cmpval = cmpfn(container_of(a, type, field.si), \
+ container_of(b, type, field.si)); \
+ if (cmpval) \
+ return cmpval; \
+ if (a < b) \
+ return -1; \
+ if (a > b) \
+ return 1; \
+ return 0; \
+} \
+ \
+_DECLARE_SKIPLIST(prefix, type, field, \
+ prefix ## __cmp, prefix ## __cmp_uq) \
+/* ... */
+
+
+extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
+ struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b));
+extern struct sskip_item *typesafe_skiplist_find(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b));
+extern struct sskip_item *typesafe_skiplist_find_gteq(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b));
+extern struct sskip_item *typesafe_skiplist_find_lt(struct sskip_head *head,
+ const struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b));
+extern void typesafe_skiplist_del(struct sskip_head *head,
+ struct sskip_item *item, int (*cmpfn)(
+ const struct sskip_item *a,
+ const struct sskip_item *b));
+
+/* this needs to stay at the end because both files include each other.
+ * the resolved order is typesafe.h before typerb.h
+ */
+#include "typerb.h"
+
+#endif /* _FRR_TYPESAFE_H */
diff --git a/lib/zebra.h b/lib/zebra.h
index b96fb5a206..b1ea43c747 100644
--- a/lib/zebra.h
+++ b/lib/zebra.h
@@ -335,45 +335,6 @@ struct in_pktinfo {
#endif /* ndef BYTE_ORDER */
-/* MAX / MIN are not commonly defined, but useful */
-/* note: glibc sys/param.h has #define MIN(a,b) (((a)<(b))?(a):(b)) */
-#ifdef MAX
-#undef MAX
-#endif
-#define MAX(a, b) \
- ({ \
- typeof(a) _max_a = (a); \
- typeof(b) _max_b = (b); \
- _max_a > _max_b ? _max_a : _max_b; \
- })
-#ifdef MIN
-#undef MIN
-#endif
-#define MIN(a, b) \
- ({ \
- typeof(a) _min_a = (a); \
- typeof(b) _min_b = (b); \
- _min_a < _min_b ? _min_a : _min_b; \
- })
-
-#ifndef offsetof
-#ifdef __compiler_offsetof
-#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
-#else
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-#endif
-#endif
-
-#ifndef container_of
-#define container_of(ptr, type, member) \
- ({ \
- const typeof(((type *)0)->member) *__mptr = (ptr); \
- (type *)((char *)__mptr - offsetof(type, member)); \
- })
-#endif
-
-#define ZEBRA_NUM_OF(x) (sizeof (x) / sizeof (x[0]))
-
/* For old definition. */
#ifndef IN6_ARE_ADDR_EQUAL
#define IN6_ARE_ADDR_EQUAL IN6_IS_ADDR_EQUAL