ZEBRA_ROUTE_BFD, bfd, bfdd, '-', 0, 0, 0, "BFD"
ZEBRA_ROUTE_OPENFABRIC, openfabric, fabricd, 'f', 1, 1, 1, "OpenFabric"
ZEBRA_ROUTE_VRRP, vrrp, vrrpd, '-', 0, 0, 0, "VRRP"
+ZEBRA_ROUTE_NHG, nhg, none, '-', 0, 0, 0, "Nexthop Group"
ZEBRA_ROUTE_ALL, wildcard, none, '-', 0, 0, 0, "-"
ZEBRA_ROUTE_BFD, "Bidirectional Forwarding Detection (BFD)"
ZEBRA_ROUTE_VRRP, "Virtual Router Redundancy Protocol (VRRP)"
ZEBRA_ROUTE_OPENFABRIC, "OpenFabric Routing Protocol"
+ZEBRA_ROUTE_NHG, "Zebra Nexthop Groups (NHG)"
return 0;
}
+static void if_nhg_dependents_release(struct interface *ifp)
+{
+ if (!if_nhg_dependents_is_empty(ifp)) {
+ struct nhg_connected *rb_node_dep = NULL;
+ struct zebra_if *zif = (struct zebra_if *)ifp->info;
+
+ RB_FOREACH (rb_node_dep, nhg_connected_head,
+ &zif->nhg_dependents) {
+ rb_node_dep->nhe->ifp = NULL;
+ zebra_nhg_set_invalid(rb_node_dep->nhe);
+ }
+ }
+}
+
/* Called when interface is deleted. */
static int if_zebra_delete_hook(struct interface *ifp)
{
list_delete(&rtadv->AdvDNSSLList);
#endif /* HAVE_RTADV */
+ if_nhg_dependents_release(ifp);
zebra_if_nhg_dependents_free(zebra_if);
XFREE(MTYPE_TMP, zebra_if->desc);
return false;
}
-void if_down_nhg_dependents(const struct interface *ifp)
+static void if_down_nhg_dependents(const struct interface *ifp)
{
if (!if_nhg_dependents_is_empty(ifp)) {
struct nhg_connected *rb_node_dep = NULL;
struct nhg_hash_entry *nhe);
extern unsigned int if_nhg_dependents_count(const struct interface *ifp);
extern bool if_nhg_dependents_is_empty(const struct interface *ifp);
-extern void if_down_nhg_dependents(const struct interface *ifp);
extern void vrf_add_update(struct vrf *vrfp);
#define RIB_KERNEL_ROUTE(R) RKERNEL_ROUTE((R)->type)
/* meta-queue structure:
- * sub-queue 0: connected, kernel
- * sub-queue 1: static
- * sub-queue 2: RIP, RIPng, OSPF, OSPF6, IS-IS, EIGRP, NHRP
- * sub-queue 3: iBGP, eBGP
- * sub-queue 4: any other origin (if any)
+ * sub-queue 0: nexthop group objects
+ * sub-queue 1: connected, kernel
+ * sub-queue 2: static
+ * sub-queue 3: RIP, RIPng, OSPF, OSPF6, IS-IS, EIGRP, NHRP
+ * sub-queue 4: iBGP, eBGP
+ * sub-queue 5: any other origin (if any)
*/
-#define MQ_SIZE 5
+#define MQ_SIZE 6
struct meta_queue {
struct list *subq[MQ_SIZE];
uint32_t size; /* sum of lengths of all subqueues */
#define RIB_ROUTE_QUEUED(x) (1 << (x))
// If MQ_SIZE is modified this value needs to be updated.
-#define RIB_ROUTE_ANY_QUEUED 0x1F
+#define RIB_ROUTE_ANY_QUEUED 0x3F
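One way to keep this mask tied to MQ_SIZE automatically would be to derive it from the macro; a sketch only, not what the patch does (it keeps the literal value above):

#define RIB_ROUTE_ANY_QUEUED ((1 << MQ_SIZE) - 1) /* == 0x3F when MQ_SIZE is 6 */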
/*
* The maximum qindex that can be used.
extern unsigned long rib_score_proto_table(uint8_t proto,
unsigned short instance,
struct route_table *table);
-extern void rib_queue_add(struct route_node *rn);
+
+extern int rib_queue_add(struct route_node *rn);
+
+struct nhg_ctx; /* Forward declaration */
+
+extern int rib_queue_nhg_add(struct nhg_ctx *ctx);
+
extern void meta_queue_free(struct meta_queue *mq);
extern int zebra_rib_labeled_unicast(struct route_entry *re);
extern struct route_table *rib_table_ipv6;
*
* @n: Netlink message header struct
* @req_size: Size allocated for this message
- * @depends_info: Array of depend_info structs
+ * @z_grp: Array of nh_grp structs
 * @count: How many dependencies there are
*/
static void _netlink_nexthop_build_group(struct nlmsghdr *n, size_t req_size,
- const struct depend_info *depends_info,
+ const struct nh_grp *z_grp,
const uint8_t count)
{
struct nexthop_grp grp[count];
if (count) {
for (int i = 0; i < count; i++) {
- grp[i].id = depends_info[i].id;
- grp[i].weight = depends_info[i].weight;
+ grp[i].id = z_grp[i].id;
+ grp[i].weight = z_grp[i].weight;
}
addattr_l(n, req_size, NHA_GROUP, grp, count * sizeof(*grp));
}
addattr32(&req.n, sizeof(req), NHA_ID, id);
if (cmd == RTM_NEWNEXTHOP) {
- if (dplane_ctx_get_nhe_depends_count(ctx))
+ if (dplane_ctx_get_nhe_nh_grp_count(ctx))
_netlink_nexthop_build_group(
&req.n, sizeof(req),
- dplane_ctx_get_nhe_depends_info(ctx),
- dplane_ctx_get_nhe_depends_count(ctx));
+ dplane_ctx_get_nhe_nh_grp(ctx),
+ dplane_ctx_get_nhe_nh_grp_count(ctx));
else {
const struct nexthop *nh =
dplane_ctx_get_nhe_ng(ctx)->nexthop;
*
* Return: New nexthop
*/
-static struct nexthop *netlink_nexthop_process_nh(struct rtattr **tb,
- unsigned char family,
- struct interface **ifp,
- ns_id_t ns_id)
+static struct nexthop netlink_nexthop_process_nh(struct rtattr **tb,
+ unsigned char family,
+ struct interface **ifp,
+ ns_id_t ns_id)
{
- struct nexthop *nh = NULL;
+ struct nexthop nh = {};
void *gate = NULL;
enum nexthop_types_t type = 0;
- int if_index;
- size_t sz;
+ int if_index = 0;
+ size_t sz = 0;
if_index = *(int *)RTA_DATA(tb[NHA_OIF]);
EC_ZEBRA_BAD_NHG_MESSAGE,
"Nexthop gateway with bad address family (%d) received from kernel",
family);
- return NULL;
+ return nh;
}
gate = RTA_DATA(tb[NHA_GATEWAY]);
- } else {
+ } else
type = NEXTHOP_TYPE_IFINDEX;
- }
-
- /* Allocate the new nexthop */
- nh = nexthop_new();
if (type)
- nh->type = type;
+ nh.type = type;
if (gate)
- memcpy(&(nh->gate), gate, sz);
+ memcpy(&(nh.gate), gate, sz);
if (if_index)
- nh->ifindex = if_index;
+ nh.ifindex = if_index;
- *ifp = if_lookup_by_index_per_ns(zebra_ns_lookup(ns_id), nh->ifindex);
- if (ifp) {
- nh->vrf_id = (*ifp)->vrf_id;
- } else {
+ *ifp = if_lookup_by_index_per_ns(zebra_ns_lookup(ns_id), nh.ifindex);
+ if (*ifp)
+ nh.vrf_id = (*ifp)->vrf_id;
+ else {
flog_warn(
EC_ZEBRA_UNKNOWN_INTERFACE,
"%s: Unknown nexthop interface %u received, defaulting to VRF_DEFAULT",
- __PRETTY_FUNCTION__, nh->ifindex);
+ __PRETTY_FUNCTION__, nh.ifindex);
- nh->vrf_id = VRF_DEFAULT;
+ nh.vrf_id = VRF_DEFAULT;
}
if (tb[NHA_ENCAP] && tb[NHA_ENCAP_TYPE]) {
int num_labels = 0;
mpls_label_t labels[MPLS_MAX_LABELS] = {0};
- if (encap_type == LWTUNNEL_ENCAP_MPLS) {
+ if (encap_type == LWTUNNEL_ENCAP_MPLS)
num_labels = parse_encap_mpls(tb[NHA_ENCAP], labels);
- }
- if (num_labels) {
- nexthop_add_labels(nh, ZEBRA_LSP_STATIC, num_labels,
+ if (num_labels)
+ nexthop_add_labels(&nh, ZEBRA_LSP_STATIC, num_labels,
labels);
- }
}
return nh;
}
-/**
- * netlink_nexthop_process_group() - Iterate over nhmsg nexthop group
- *
- * @tb: Netlink RTA data
- * @nhg_depends: Tree head of nexthops in the group
- * @nhg: Nexthop group struct
- *
- * Return: Count of nexthops in the group
- */
static int netlink_nexthop_process_group(struct rtattr **tb,
- struct nexthop_group *nhg,
- struct nhg_connected_head *nhg_depends)
+ struct nh_grp *z_grp)
{
- int count = 0;
+ uint8_t count = 0;
+ /* linux/nexthop.h group struct */
struct nexthop_grp *n_grp = NULL;
- struct nhg_hash_entry *depend = NULL;
n_grp = (struct nexthop_grp *)RTA_DATA(tb[NHA_GROUP]);
count = (RTA_PAYLOAD(tb[NHA_GROUP]) / sizeof(*n_grp));
zlog_debug("Nexthop group type: %d",
*((uint16_t *)RTA_DATA(tb[NHA_GROUP_TYPE])));
- nhg_connected_head_init(nhg_depends);
for (int i = 0; i < count; i++) {
- /* We do not care about nexthop_grp.weight at
- * this time. But we should figure out
- * how to adapt this to our code in
- * the future.
- */
- depend = zebra_nhg_lookup_id(n_grp[i].id);
- if (depend) {
- nhg_connected_head_add(nhg_depends, depend);
- /*
- * If this is a nexthop with its own group
- * dependencies, add them as well. Not sure its
- * even possible to have a group within a group
- * in the kernel.
- */
-
- copy_nexthops(&nhg->nexthop, depend->nhg->nexthop,
- NULL);
- } else {
- flog_err(
- EC_ZEBRA_NHG_SYNC,
- "Received Nexthop Group from the kernel with a dependent Nexthop ID (%u) which we do not have in our table",
- n_grp[i].id);
- }
+ z_grp[i].id = n_grp[i].id;
+ z_grp[i].weight = n_grp[i].weight;
}
return count;
}
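For reference, struct nh_grp is meant to mirror the kernel's group entry; paraphrased from the Linux uapi header include/linux/nexthop.h (kernel header, not part of this patch), struct nexthop_grp looks roughly like:

struct nexthop_grp {
	__u32 id;	/* id of a member nexthop object */
	__u8  weight;	/* weight of this member */
	__u8  resvd1;
	__u16 resvd2;
};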
vrf_id_t vrf_id = 0;
struct interface *ifp = NULL;
struct nhmsg *nhm = NULL;
- /* struct for nexthop group abstraction */
- struct nexthop_group *nhg = NULL;
- struct nexthop *nh = NULL;
- /* If its a group, tree head of nexthops */
- struct nhg_connected_head nhg_depends = {0};
+ struct nexthop nh = {};
+ struct nh_grp grp[MULTIPATH_NUM] = {};
/* Count of nexthops in group array */
- int dep_count = 0;
+ uint8_t grp_count = 0;
/* struct that goes into our tables */
struct nhg_hash_entry *nhe = NULL;
- struct rtattr *tb[NHA_MAX + 1];
+ struct rtattr *tb[NHA_MAX + 1] = {};
nhm = NLMSG_DATA(h);
return -1;
}
- memset(tb, 0, sizeof(tb));
netlink_parse_rtattr(tb, NHA_MAX, RTM_NHA(nhm), len);
nl_family_to_str(family), ns_id);
- nhe = zebra_nhg_lookup_id(id);
-
if (h->nlmsg_type == RTM_NEWNEXTHOP) {
- nhg = nexthop_group_new();
-
if (tb[NHA_GROUP]) {
/**
 * If this is a group message it's only going to have
* an array of nexthop IDs associated with it
*/
- dep_count = netlink_nexthop_process_group(tb, nhg,
- &nhg_depends);
+ grp_count = netlink_nexthop_process_group(tb, grp);
} else {
if (tb[NHA_BLACKHOLE]) {
/**
* traffic, it should not have an OIF, GATEWAY,
* or ENCAP
*/
- nh = nexthop_new();
- nh->type = NEXTHOP_TYPE_BLACKHOLE;
- nh->bh_type = BLACKHOLE_UNSPEC;
- } else if (tb[NHA_OIF]) {
+ nh.type = NEXTHOP_TYPE_BLACKHOLE;
+ nh.bh_type = BLACKHOLE_UNSPEC;
+ } else if (tb[NHA_OIF])
/**
* This is a true new nexthop, so we need
* to parse the gateway and device info
*/
nh = netlink_nexthop_process_nh(tb, family,
&ifp, ns_id);
- }
- if (nh) {
- SET_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE);
- if (nhm->nh_flags & RTNH_F_ONLINK)
- SET_FLAG(nh->flags,
- NEXTHOP_FLAG_ONLINK);
- vrf_id = nh->vrf_id;
- nexthop_group_add_sorted(nhg, nh);
- } else {
+ else {
flog_warn(
EC_ZEBRA_BAD_NHG_MESSAGE,
"Invalid Nexthop message received from the kernel with ID (%u)",
id);
return -1;
}
+ SET_FLAG(nh.flags, NEXTHOP_FLAG_ACTIVE);
+ if (nhm->nh_flags & RTNH_F_ONLINK)
+ SET_FLAG(nh.flags, NEXTHOP_FLAG_ONLINK);
+ vrf_id = nh.vrf_id;
}
- if (!nhg->nexthop) {
- /* Nothing to lookup */
- nexthop_group_free_delete(&nhg);
- nhg_connected_head_free(&nhg_depends);
- return -1;
- }
-
- if (nhe) {
- // TODO: Apparently we don't want changes
- // to already created one in our table.
- // They should be immutable...
- // Gotta figure that one out.
-
- /* This is a change to a group we already have
- */
-
- zebra_nhg_set_invalid(nhe);
- nexthop_group_free_delete(&nhg);
- nhg_connected_head_free(&nhg_depends);
+ // TODO: Apparently we don't want changes
+ // to already created one in our table.
+ // They should be immutable...
+ // Gotta figure that one out.
- } else {
- /* This is a new nexthop group */
- nhe = zebra_nhg_find(nhg, vrf_id, afi, id, &nhg_depends,
- true);
- /* The group was copied over, so free it */
- nexthop_group_free_delete(&nhg);
-
- if (!nhe) {
- flog_err(
- EC_ZEBRA_TABLE_LOOKUP_FAILED,
- "Zebra failed to find or create a nexthop hash entry for ID (%u) from the kernel",
- id);
- return -1;
- }
- nhe->is_kernel_nh = true;
-
- if (id != nhe->id) {
- /* Duplicate but with different ID from
- * the kernel */
+ if (zebra_nhg_kernel_find(id, &nh, grp, grp_count, vrf_id, afi))
+ return -1;
- /* The kernel allows duplicate nexthops
- * as long as they have different IDs.
- * We are ignoring those to prevent
- * syncing problems with the kernel
- * changes.
- */
- flog_warn(
- EC_ZEBRA_DUPLICATE_NHG_MESSAGE,
- "Nexthop Group from kernel with ID (%d) is a duplicate, ignoring",
- id);
- nhg_connected_head_free(&nhg_depends);
- } else {
- SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
- SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
- }
- }
} else if (h->nlmsg_type == RTM_DELNEXTHOP) {
+ // TODO: Add new function in nhgc to handle del
+ nhe = zebra_nhg_lookup_id(id);
if (!nhe) {
flog_warn(
EC_ZEBRA_BAD_NHG_MESSAGE,
"Kernel deleted a nexthop group with ID (%u) that we are still using for a route, sending it back down",
nhe->id);
zebra_nhg_install_kernel(nhe);
- } else {
- zebra_nhg_release(nhe);
- }
+ } else
+ zebra_nhg_set_invalid(nhe);
}
return 0;
bool is_kernel_nh;
struct nexthop_group ng;
- struct depend_info depends_info[MULTIPATH_NUM];
- uint8_t depends_count;
+ struct nh_grp nh_grp[MULTIPATH_NUM];
+ uint8_t nh_grp_count;
};
/*
return &(ctx->u.rinfo.nhe.ng);
}
-const struct depend_info *
-dplane_ctx_get_nhe_depends_info(const struct zebra_dplane_ctx *ctx)
+const struct nh_grp *
+dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->u.rinfo.nhe.depends_info;
+ return ctx->u.rinfo.nhe.nh_grp;
}
-uint8_t dplane_ctx_get_nhe_depends_count(const struct zebra_dplane_ctx *ctx)
+uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
- return ctx->u.rinfo.nhe.depends_count;
+ return ctx->u.rinfo.nhe.nh_grp_count;
}
/* Accessors for LSP information */
struct nhg_connected *rb_node_dep = NULL;
uint8_t i = 0;
+ // TODO: This doesn't work with depends being recursively
+ // resolved nh's as well. Yea, good luck with this one,
+ // future Stephen...
+
RB_FOREACH (rb_node_dep, nhg_connected_head,
&nhe->nhg_depends) {
- ctx->u.rinfo.nhe.depends_info[i].id =
- rb_node_dep->nhe->id;
+ ctx->u.rinfo.nhe.nh_grp[i].id = rb_node_dep->nhe->id;
/* We aren't using weights for anything right now */
- ctx->u.rinfo.nhe.depends_info[i].weight = 0;
+ ctx->u.rinfo.nhe.nh_grp[i].weight = 0;
i++;
}
- ctx->u.rinfo.nhe.depends_count = i;
+ ctx->u.rinfo.nhe.nh_grp_count = i;
}
/* Extract ns info - can't use pointers to 'core'
bool dplane_ctx_get_nhe_is_kernel_nh(const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *
dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx);
-const struct depend_info *
-dplane_ctx_get_nhe_depends_info(const struct zebra_dplane_ctx *ctx);
-uint8_t dplane_ctx_get_nhe_depends_count(const struct zebra_dplane_ctx *ctx);
+const struct nh_grp *
+dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx);
+uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx);
/* Accessors for LSP information */
mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx);
DEFINE_MTYPE_STATIC(ZEBRA, NHG, "Nexthop Group Entry");
DEFINE_MTYPE_STATIC(ZEBRA, NHG_CONNECTED, "Nexthop Group Connected");
+DEFINE_MTYPE_STATIC(ZEBRA, NHG_CTX, "Nexthop Group Context");
static int nhg_connected_cmp(const struct nhg_connected *dep1,
const struct nhg_connected *dep2);
RB_GENERATE(nhg_connected_head, nhg_connected, nhg_entry, nhg_connected_cmp);
+
void nhg_connected_free(struct nhg_connected *dep)
{
XFREE(MTYPE_NHG_CONNECTED, dep);
struct nhg_hash_entry *copy = arg;
struct nhg_connected *rb_node_dep = NULL;
-
nhe = XCALLOC(MTYPE_NHG, sizeof(struct nhg_hash_entry));
-
nhe->id = copy->id;
-
nhe->nhg_depends = copy->nhg_depends;
nhe->nhg = nexthop_group_new();
/* Add to id table as well */
zebra_nhg_insert_id(nhe);
-
- // TODO: This needs to be moved
- // It should only install AFTER it gets
- // the ifp right?
- //
- /* Send it to the kernel */
- if (!nhe->is_kernel_nh)
- zebra_nhg_install_kernel(nhe);
-
return nhe;
}
return (con1->nhe->id - con2->nhe->id);
}
-/**
- * zebra_nhg_find() - Find the zebra nhg in our table, or create it
- *
- * @nhg: Nexthop group we lookup with
- * @vrf_id: VRF id
- * @afi: Address Family type
- * @id: ID we lookup with, 0 means its from us and we
- * need to give it an ID, otherwise its from the
- * kernel as we use the ID it gave us.
- * @nhg_depends: Nexthop dependency tree head
- * @is_kernel_nh: Was the nexthop created by the kernel
- *
- * Return: Hash entry found or created
- *
- * The nhg and n_grp are fundementally the same thing (a group of nexthops).
- * We are just using the nhg representation with routes and the n_grp
- * is what the kernel gives us (a list of IDs). Our nhg_hash_entry
- * will contain both.
- *
- * nhg_hash_entry example:
- *
- * nhe:
- * ->nhg:
- * .nexthop->nexthop->nexthop
- * ->nhg_depends:
- * .nhe->nhe->nhe
- *
- * Routes will use the nhg directly, and any updating of nexthops
- * we have to do or flag setting, we use the nhg_depends.
- *
- */
-struct nhg_hash_entry *zebra_nhg_find(struct nexthop_group *nhg,
- vrf_id_t vrf_id, afi_t afi, uint32_t id,
- struct nhg_connected_head *nhg_depends,
- bool is_kernel_nh)
+static void zebra_nhg_process_grp(struct nexthop_group *nhg,
+ struct nhg_connected_head *depends,
+ struct nh_grp *grp, uint8_t count)
+{
+ nhg_connected_head_init(depends);
+
+ for (int i = 0; i < count; i++) {
+ struct nhg_hash_entry *depend = NULL;
+ /* We do not care about nexthop_grp.weight at
+ * this time. But we should figure out
+ * how to adapt this to our code in
+ * the future.
+ */
+ depend = zebra_nhg_lookup_id(grp[i].id);
+ if (depend) {
+ nhg_connected_head_add(depends, depend);
+ /*
+ * If this is a nexthop with its own group
+ * dependencies, add them as well. Not sure it's
+ * even possible to have a group within a group
+ * in the kernel.
+ */
+
+ copy_nexthops(&nhg->nexthop, depend->nhg->nexthop,
+ NULL);
+ } else {
+ flog_err(
+ EC_ZEBRA_NHG_SYNC,
+ "Received Nexthop Group from the kernel with a dependent Nexthop ID (%u) which we do not have in our table",
+ grp[i].id);
+ }
+ }
+}
+
+
+static struct nhg_hash_entry *
+zebra_nhg_find(uint32_t id, struct nexthop_group *nhg,
+ struct nhg_connected_head *nhg_depends, vrf_id_t vrf_id,
+ afi_t afi, bool is_kernel_nh)
{
- /* lock for getiing and setting the id */
- static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
/* id counter to keep in sync with kernel */
static uint32_t id_counter = 0;
struct nhg_hash_entry lookup = {};
struct nhg_hash_entry *nhe = NULL;
- uint32_t old_id_counter = 0;
-
- pthread_mutex_lock(&lock); /* Lock, set the id counter */
- old_id_counter = id_counter;
+ uint32_t old_id_counter = id_counter;
- if (id) {
- if (id > id_counter) {
- /* Increase our counter so we don't try to create
- * an ID that already exists
- */
- id_counter = id;
- }
+ if (id > id_counter) {
+ /* Increase our counter so we don't try to create
+ * an ID that already exists
+ */
+ id_counter = id;
lookup.id = id;
- } else {
+ } else
lookup.id = ++id_counter;
- }
- lookup.vrf_id = vrf_id;
lookup.afi = afi;
- lookup.nhg = nhg;
- lookup.nhg_depends = *nhg_depends;
+ lookup.vrf_id = vrf_id;
lookup.is_kernel_nh = is_kernel_nh;
+ lookup.nhg = nhg;
+
+ if (nhg_depends)
+ lookup.nhg_depends = *nhg_depends;
if (id)
nhe = zebra_nhg_lookup_id(id);
if (nhe)
id_counter = old_id_counter;
- pthread_mutex_unlock(&lock);
-
if (!nhe)
nhe = hash_get(zrouter.nhgs, &lookup, zebra_nhg_alloc);
return nhe;
}
-/**
- * zebra_nhg_find_nexthop() - Create a group with a single nexthop, find it in
- * our table, or create it
- *
- * @nh: Nexthop to lookup
- * @afi: Address Family type
- *
- * Return: Hash entry found or created
- */
-struct nhg_hash_entry *zebra_nhg_find_nexthop(struct nexthop *nh, afi_t afi)
+/* Find/create a single nexthop */
+static struct nhg_hash_entry *zebra_nhg_find_nexthop(uint32_t id,
+ struct nexthop *nh,
+ afi_t afi,
+ bool is_kernel_nh)
{
+ struct nexthop_group nhg = {};
+
+ _nexthop_group_add_sorted(&nhg, nh);
+
+ return zebra_nhg_find(id, &nhg, NULL, nh->vrf_id, afi, is_kernel_nh);
+}
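Ownership note: the lookup group here is stack-allocated and only links the caller's nexthop; zebra_nhg_alloc() appears to build its own copy for the hash entry (hence the temporary group freed in nhg_ctx_process_new() below), so this helper has nothing to clean up.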
+
+static struct nhg_ctx *nhg_ctx_new(void)
+{
+ struct nhg_ctx *new = NULL;
+
+ new = XCALLOC(MTYPE_NHG_CTX, sizeof(struct nhg_ctx));
+
+ return new;
+}
+
+static void nhg_ctx_free(struct nhg_ctx *ctx)
+{
+ XFREE(MTYPE_NHG_CTX, ctx);
+}
+
+static void nhg_ctx_set_status(struct nhg_ctx *ctx, enum nhg_ctx_result status)
+{
+ ctx->status = status;
+}
+
+static enum nhg_ctx_result nhg_ctx_get_status(const struct nhg_ctx *ctx)
+{
+ return ctx->status;
+}
+
+static void nhg_ctx_set_op(struct nhg_ctx *ctx, enum nhg_ctx_op_e op)
+{
+ ctx->op = op;
+}
+
+static enum nhg_ctx_op_e nhg_ctx_get_op(const struct nhg_ctx *ctx)
+{
+ return ctx->op;
+}
+
+static int nhg_ctx_process_new(struct nhg_ctx *ctx)
+{
+ struct nexthop_group *nhg = NULL;
+ struct nhg_connected_head nhg_depends = {};
struct nhg_hash_entry *nhe = NULL;
- struct nexthop_group *nhg = nexthop_group_new();
+ if (ctx->count) {
+ nhg = nexthop_group_new();
+ zebra_nhg_process_grp(nhg, &nhg_depends, ctx->u.grp,
+ ctx->count);
+ nhe = zebra_nhg_find(ctx->id, nhg, &nhg_depends, ctx->vrf_id,
+ ctx->afi, true);
+ /* These got copied over in zebra_nhg_alloc() */
+ nexthop_group_free_delete(&nhg);
+ } else
+ nhe = zebra_nhg_find_nexthop(ctx->id, &ctx->u.nh, ctx->afi,
+ ctx->is_kernel_nh);
+
+ if (nhe) {
+ if (ctx->id != nhe->id)
+ /* Duplicate but with different ID from
+ * the kernel */
+
+ /* The kernel allows duplicate nexthops
+ * as long as they have different IDs.
+ * We are ignoring those to prevent
+ * syncing problems with the kernel
+ * changes.
+ */
+ flog_warn(
+ EC_ZEBRA_DUPLICATE_NHG_MESSAGE,
+ "Nexthop Group with ID (%d) is a duplicate, ignoring",
+ ctx->id);
+ else {
+ /* It actually created a new nhe */
+ if (nhe->is_kernel_nh) {
+ SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
+ SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
+ }
+ }
+ } else {
+ flog_err(
+ EC_ZEBRA_TABLE_LOOKUP_FAILED,
+ "Zebra failed to find or create a nexthop hash entry for ID (%u)",
+ ctx->id);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void nhg_ctx_process_finish(struct nhg_ctx *ctx)
+{
+ /*
+ * Just freeing for now, maybe do something more in the future
+ * based on flag.
+ */
+
+ if (ctx)
+ nhg_ctx_free(ctx);
+}
+
+int nhg_ctx_process(struct nhg_ctx *ctx)
+{
+ int ret = 0;
+
+ switch (nhg_ctx_get_op(ctx)) {
+ case NHG_CTX_OP_NEW:
+ ret = nhg_ctx_process_new(ctx);
+ break;
+ case NHG_CTX_OP_DEL:
+ case NHG_CTX_OP_NONE:
+ break;
+ }
+
+ nhg_ctx_set_status(ctx, (ret ? NHG_CTX_FAILURE : NHG_CTX_SUCCESS));
+
+ nhg_ctx_process_finish(ctx);
+
+ return ret;
+}
+
+static int queue_add(struct nhg_ctx *ctx)
+{
+ /* If it's queued or already processed, do nothing */
+ if (nhg_ctx_get_status(ctx))
+ return 0;
+
+ if (rib_queue_nhg_add(ctx)) {
+ nhg_ctx_set_status(ctx, NHG_CTX_FAILURE);
+ return -1;
+ }
+
+ nhg_ctx_set_status(ctx, NHG_CTX_QUEUED);
+
+ return 0;
+}
+
+/* Kernel-side, you either get a single new nexthop or an array of IDs */
+int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp,
+ uint8_t count, vrf_id_t vrf_id, afi_t afi)
+{
+ // TODO: Can probably put table lookup
+ // here before queueing? And if deleted, re-send to kernel?
+ // ... Well, if changing the flags it probably needs to be queued
+ // still...
+
+ struct nhg_ctx *ctx = NULL;
+
+ ctx = nhg_ctx_new();
+
+ ctx->id = id;
+ ctx->vrf_id = vrf_id;
+ ctx->afi = afi;
+ ctx->is_kernel_nh = true;
+ ctx->count = count;
+
+ if (count)
+ /* Copy over the array */
+ memcpy(&ctx->u.grp, grp, count * sizeof(struct nh_grp));
+ else
+ ctx->u.nh = *nh;
+
+ nhg_ctx_set_op(ctx, NHG_CTX_OP_NEW);
+
+ if (queue_add(ctx)) {
+ nhg_ctx_process_finish(ctx);
+ return -1;
+ }
+
+ return 0;
+}
- nexthop_group_add_sorted(nhg, nh);
- nhe = zebra_nhg_find(nhg, nh->vrf_id, afi, 0, NULL, false);
+/* Rib-side, you get a nexthop group struct */
+struct nhg_hash_entry *zebra_nhg_rib_find(uint32_t id,
+ struct nexthop_group *nhg,
+ vrf_id_t rt_vrf_id, afi_t rt_afi)
+{
+ struct nhg_hash_entry *nhe = NULL;
+ struct nhg_connected_head nhg_depends = {};
+ // Default the nhe to the afi and vrf of the route
+ afi_t nhg_afi = rt_afi;
+ vrf_id_t nhg_vrf_id = rt_vrf_id;
+
+ /* If it's a group, create a dependency list */
+ if (nhg && nhg->nexthop->next) {
+ struct nexthop *nh = NULL;
+ struct nexthop lookup = {0};
+ struct nhg_hash_entry *depend = NULL;
+
+ nhg_connected_head_init(&nhg_depends);
+
+ for (ALL_NEXTHOPS_PTR(nhg, nh)) {
+ lookup = *nh;
+ /* Clear it, since it's a group */
+ lookup.next = NULL;
+ /* Use the route afi here, since it's a single nh */
+ depend = zebra_nhg_find_nexthop(0, &lookup, rt_afi,
+ false);
+ nhg_connected_head_add(&nhg_depends, depend);
+ }
- nexthop_group_delete(&nhg);
+ /* change the afi/vrf_id since it's a group */
+ nhg_afi = AFI_UNSPEC;
+ nhg_vrf_id = 0;
+ }
+
+ nhe = zebra_nhg_find(id, nhg, &nhg_depends, nhg_vrf_id, nhg_afi, false);
return nhe;
}
*
* @nhe: Nexthop group hash entry
*/
-void zebra_nhg_release(struct nhg_hash_entry *nhe)
+static void zebra_nhg_release(struct nhg_hash_entry *nhe)
{
zlog_debug("Releasing nexthop group with ID (%u)", nhe->id);
hash_release(zrouter.nhgs, nhe);
hash_release(zrouter.nhgs_id, nhe);
+
zebra_nhg_free(nhe);
}
*/
void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
{
+ nhe->refcnt--;
+
if (!zebra_nhg_depends_is_empty(nhe)) {
struct nhg_connected *rb_node_dep = NULL;
}
}
- nhe->refcnt--;
-
- if (!nhe->is_kernel_nh && nhe->refcnt <= 0) {
+ if (!nhe->is_kernel_nh && nhe->refcnt <= 0)
zebra_nhg_uninstall_kernel(nhe);
- zebra_nhg_release(nhe);
- }
}
/**
*/
void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe)
{
+ nhe->refcnt++;
+
if (!zebra_nhg_depends_is_empty(nhe)) {
struct nhg_connected *rb_node_dep = NULL;
zebra_nhg_increment_ref(rb_node_dep->nhe);
}
}
+}
- nhe->refcnt++;
+static bool zebra_nhg_is_valid(struct nhg_hash_entry *nhe)
+{
+ if (nhe->flags & NEXTHOP_GROUP_VALID)
+ return true;
+
+ return false;
+}
+
+bool zebra_nhg_id_is_valid(uint32_t id)
+{
+ struct nhg_hash_entry *nhe = NULL;
+ bool is_valid = false;
+
+ nhe = zebra_nhg_lookup_id(id);
+
+ if (nhe)
+ is_valid = zebra_nhg_is_valid(nhe);
+
+ return is_valid;
}
void zebra_nhg_set_invalid(struct nhg_hash_entry *nhe)
{
+ UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
+ /* Assuming uninstalled as well here */
+ UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
if (!zebra_nhg_dependents_is_empty(nhe)) {
struct nhg_connected *rb_node_dep = NULL;
zebra_nhg_set_invalid(rb_node_dep->nhe);
}
}
-
- UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
- /* Assuming uninstalled as well here */
- UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
}
void zebra_nhg_set_if(struct nhg_hash_entry *nhe, struct interface *ifp)
unsigned int prev_active, new_active;
ifindex_t prev_index;
uint8_t curr_active = 0;
+ afi_t rt_afi = AFI_UNSPEC;
+
+ // TODO: Temporary until we get this function sorted out
+ // a little better.
+ //
+ if (re->nhe_id) {
+ struct nhg_hash_entry *nhe = NULL;
+
+ nhe = zebra_nhg_lookup_id(re->nhe_id);
+
+ if (nhe) {
+ if (!re->ng) {
+ /* This is its first time getting attached */
+ zebra_nhg_increment_ref(nhe);
+ re->ng = nhe->nhg;
+ }
+
+ if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)) {
+ return 1;
+ }
+ } else
+ flog_err(
+ EC_ZEBRA_TABLE_LOOKUP_FAILED,
+ "Zebra failed to find the nexthop hash entry for id=%u in a route entry",
+ re->nhe_id);
+ }
UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
}
+ // TODO: Update this when we have this function
+ // figured out a little better.
+ //
+ struct nhg_hash_entry *new_nhe = NULL;
+
+ rt_afi = family2afi(rn->p.family);
+ // TODO: Add proto type here
+
+ // TODO: Maybe make this an UPDATE message?
+ // Right now we are just creating a new one
+ // and deleting the old.
+ new_nhe = zebra_nhg_rib_find(0, re->ng, re->vrf_id, rt_afi);
+
+ if (new_nhe && (re->nhe_id != new_nhe->id)) {
+ struct nhg_hash_entry *old_nhe =
+ zebra_nhg_lookup_id(re->nhe_id);
+
+ /* It should point to the nhe nexthop group now */
+ if (re->ng)
+ nexthop_group_free_delete(&re->ng);
+ re->ng = new_nhe->nhg;
+ re->nhe_id = new_nhe->id;
+
+ zebra_nhg_increment_ref(new_nhe);
+ if (old_nhe)
+ zebra_nhg_decrement_ref(old_nhe);
+
+ if (curr_active) {
+ SET_FLAG(new_nhe->flags, NEXTHOP_GROUP_VALID);
+ if (!new_nhe->is_kernel_nh)
+ zebra_nhg_install_kernel(new_nhe);
+ }
+ }
+
return curr_active;
}
*/
void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe)
{
- if (!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)) {
+ if (!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
+ && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)) {
nhe->is_kernel_nh = false;
int ret = dplane_nexthop_add(nhe);
switch (ret) {
break;
case ZEBRA_DPLANE_REQUEST_SUCCESS:
UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
+ zebra_nhg_release(nhe);
break;
}
- }
+ } else
+ zebra_nhg_release(nhe);
}
/**
status = dplane_ctx_get_status(ctx);
id = dplane_ctx_get_nhe_id(ctx);
+
nhe = zebra_nhg_lookup_id(id);
if (nhe) {
case DPLANE_OP_NH_DELETE:
if (status == ZEBRA_DPLANE_REQUEST_SUCCESS) {
UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
+ zebra_nhg_release(nhe);
} else {
flog_err(
EC_ZEBRA_DP_DELETE_FAIL,
* It is designed to mimic the netlink nexthop_grp
* struct in include/linux/nexthop.h
*/
-struct depend_info {
+struct nh_grp {
uint32_t id;
uint8_t weight;
};
RB_PROTOTYPE(nhg_connected_head, nhg_connected, nhg_entry, nhg_connected_cmp);
+
+enum nhg_ctx_op_e {
+ NHG_CTX_OP_NONE = 0,
+ NHG_CTX_OP_NEW,
+ NHG_CTX_OP_DEL,
+};
+
+enum nhg_ctx_result {
+ NHG_CTX_NONE = 0,
+ NHG_CTX_QUEUED,
+ NHG_CTX_SUCCESS,
+ NHG_CTX_FAILURE,
+};
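Note that a freshly XCALLOC'd context starts out as NHG_CTX_NONE, and queue_add() treats any other status as already queued or processed.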
+
+/*
+ * Context needed to queue nhg updates on the
+ * work queue.
+ */
+struct nhg_ctx {
+
+ /* Unique ID */
+ uint32_t id;
+
+ vrf_id_t vrf_id;
+ afi_t afi;
+ bool is_kernel_nh;
+
+ /* If it's a group array, how many? */
+ uint8_t count;
+
+ /* It's either a single nexthop or an array of IDs */
+ union {
+ struct nexthop nh;
+ struct nh_grp grp[MULTIPATH_NUM];
+ } u;
+
+ enum nhg_ctx_op_e op;
+ enum nhg_ctx_result status;
+};
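A rough sketch of how a context flows through the system, using only names introduced in this change (producer: the netlink handler via zebra_nhg_kernel_find(); consumer: the ZEBRA_ROUTE_NHG sub-queue of the rib meta queue):

/*
 *   zebra_nhg_kernel_find(id, nh, grp, count, vrf_id, afi)
 *     -> nhg_ctx_new(), fill in id/vrf_id/afi and nh or grp[]
 *     -> nhg_ctx_set_op(ctx, NHG_CTX_OP_NEW)
 *     -> queue_add() -> rib_queue_nhg_add(ctx); status = NHG_CTX_QUEUED
 *
 *   process_subq_nhg() -> nhg_ctx_process(ctx)
 *     -> nhg_ctx_process_new() -> zebra_nhg_find() / zebra_nhg_find_nexthop()
 *     -> status = NHG_CTX_SUCCESS or NHG_CTX_FAILURE
 *     -> nhg_ctx_process_finish() frees the context
 */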
+
+
void zebra_nhg_init(void);
void zebra_nhg_terminate(void);
extern bool zebra_nhg_hash_equal(const void *arg1, const void *arg2);
extern bool zebra_nhg_hash_id_equal(const void *arg1, const void *arg2);
-extern struct nhg_hash_entry *
-zebra_nhg_find(struct nexthop_group *nhg, vrf_id_t vrf_id, afi_t afi,
- uint32_t id, struct nhg_connected_head *nhg_depends,
- bool is_kernel_nh);
+/*
+ * Process a context off of a queue.
+ * Specifically this should be from
+ * the rib meta queue.
+ */
+extern int nhg_ctx_process(struct nhg_ctx *ctx);
-extern struct nhg_hash_entry *zebra_nhg_find_nexthop(struct nexthop *nh,
- afi_t afi);
+/* Find via kernel nh creation */
+extern int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh,
+ struct nh_grp *grp, uint8_t count,
+ vrf_id_t vrf_id, afi_t afi);
+
+/* Find via route creation */
+extern struct nhg_hash_entry *zebra_nhg_rib_find(uint32_t id,
+ struct nexthop_group *nhg,
+ vrf_id_t rt_vrf_id,
+ afi_t rt_afi);
void zebra_nhg_free_members(struct nhg_hash_entry *nhe);
void zebra_nhg_free(void *arg);
-void zebra_nhg_release(struct nhg_hash_entry *nhe);
void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe);
void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe);
+
+extern bool zebra_nhg_id_is_valid(uint32_t id);
void zebra_nhg_set_invalid(struct nhg_hash_entry *nhe);
void zebra_nhg_set_if(struct nhg_hash_entry *nhe, struct interface *ifp);
#include "zebra/zebra_vxlan.h"
#include "zebra/zapi_msg.h"
#include "zebra/zebra_dplane.h"
-#include "zebra/zebra_nhg.h"
DEFINE_MTYPE_STATIC(ZEBRA, RIB_UPDATE_CTX, "Rib update context object");
uint8_t distance;
uint8_t meta_q_map;
} route_info[ZEBRA_ROUTE_MAX] = {
- [ZEBRA_ROUTE_SYSTEM] = {ZEBRA_ROUTE_SYSTEM, 0, 4},
- [ZEBRA_ROUTE_KERNEL] = {ZEBRA_ROUTE_KERNEL, 0, 0},
- [ZEBRA_ROUTE_CONNECT] = {ZEBRA_ROUTE_CONNECT, 0, 0},
- [ZEBRA_ROUTE_STATIC] = {ZEBRA_ROUTE_STATIC, 1, 1},
- [ZEBRA_ROUTE_RIP] = {ZEBRA_ROUTE_RIP, 120, 2},
- [ZEBRA_ROUTE_RIPNG] = {ZEBRA_ROUTE_RIPNG, 120, 2},
- [ZEBRA_ROUTE_OSPF] = {ZEBRA_ROUTE_OSPF, 110, 2},
- [ZEBRA_ROUTE_OSPF6] = {ZEBRA_ROUTE_OSPF6, 110, 2},
- [ZEBRA_ROUTE_ISIS] = {ZEBRA_ROUTE_ISIS, 115, 2},
- [ZEBRA_ROUTE_BGP] = {ZEBRA_ROUTE_BGP, 20 /* IBGP is 200. */, 3},
- [ZEBRA_ROUTE_PIM] = {ZEBRA_ROUTE_PIM, 255, 4},
- [ZEBRA_ROUTE_EIGRP] = {ZEBRA_ROUTE_EIGRP, 90, 2},
- [ZEBRA_ROUTE_NHRP] = {ZEBRA_ROUTE_NHRP, 10, 2},
- [ZEBRA_ROUTE_HSLS] = {ZEBRA_ROUTE_HSLS, 255, 4},
- [ZEBRA_ROUTE_OLSR] = {ZEBRA_ROUTE_OLSR, 255, 4},
- [ZEBRA_ROUTE_TABLE] = {ZEBRA_ROUTE_TABLE, 150, 1},
- [ZEBRA_ROUTE_LDP] = {ZEBRA_ROUTE_LDP, 150, 4},
- [ZEBRA_ROUTE_VNC] = {ZEBRA_ROUTE_VNC, 20, 3},
- [ZEBRA_ROUTE_VNC_DIRECT] = {ZEBRA_ROUTE_VNC_DIRECT, 20, 3},
- [ZEBRA_ROUTE_VNC_DIRECT_RH] = {ZEBRA_ROUTE_VNC_DIRECT_RH, 20, 3},
- [ZEBRA_ROUTE_BGP_DIRECT] = {ZEBRA_ROUTE_BGP_DIRECT, 20, 3},
- [ZEBRA_ROUTE_BGP_DIRECT_EXT] = {ZEBRA_ROUTE_BGP_DIRECT_EXT, 20, 3},
- [ZEBRA_ROUTE_BABEL] = {ZEBRA_ROUTE_BABEL, 100, 2},
- [ZEBRA_ROUTE_SHARP] = {ZEBRA_ROUTE_SHARP, 150, 4},
- [ZEBRA_ROUTE_PBR] = {ZEBRA_ROUTE_PBR, 200, 4},
- [ZEBRA_ROUTE_BFD] = {ZEBRA_ROUTE_BFD, 255, 4},
- [ZEBRA_ROUTE_OPENFABRIC] = {ZEBRA_ROUTE_OPENFABRIC, 115, 2},
- [ZEBRA_ROUTE_VRRP] = {ZEBRA_ROUTE_VRRP, 255, 4}
+ [ZEBRA_ROUTE_NHG] = {ZEBRA_ROUTE_NHG, 255 /* Unneeded for NHGs */, 0},
+ [ZEBRA_ROUTE_SYSTEM] = {ZEBRA_ROUTE_SYSTEM, 0, 5},
+ [ZEBRA_ROUTE_KERNEL] = {ZEBRA_ROUTE_KERNEL, 0, 1},
+ [ZEBRA_ROUTE_CONNECT] = {ZEBRA_ROUTE_CONNECT, 0, 1},
+ [ZEBRA_ROUTE_STATIC] = {ZEBRA_ROUTE_STATIC, 1, 2},
+ [ZEBRA_ROUTE_RIP] = {ZEBRA_ROUTE_RIP, 120, 3},
+ [ZEBRA_ROUTE_RIPNG] = {ZEBRA_ROUTE_RIPNG, 120, 3},
+ [ZEBRA_ROUTE_OSPF] = {ZEBRA_ROUTE_OSPF, 110, 3},
+ [ZEBRA_ROUTE_OSPF6] = {ZEBRA_ROUTE_OSPF6, 110, 3},
+ [ZEBRA_ROUTE_ISIS] = {ZEBRA_ROUTE_ISIS, 115, 3},
+ [ZEBRA_ROUTE_BGP] = {ZEBRA_ROUTE_BGP, 20 /* IBGP is 200. */, 4},
+ [ZEBRA_ROUTE_PIM] = {ZEBRA_ROUTE_PIM, 255, 5},
+ [ZEBRA_ROUTE_EIGRP] = {ZEBRA_ROUTE_EIGRP, 90, 3},
+ [ZEBRA_ROUTE_NHRP] = {ZEBRA_ROUTE_NHRP, 10, 3},
+ [ZEBRA_ROUTE_HSLS] = {ZEBRA_ROUTE_HSLS, 255, 5},
+ [ZEBRA_ROUTE_OLSR] = {ZEBRA_ROUTE_OLSR, 255, 5},
+ [ZEBRA_ROUTE_TABLE] = {ZEBRA_ROUTE_TABLE, 150, 2},
+ [ZEBRA_ROUTE_LDP] = {ZEBRA_ROUTE_LDP, 150, 5},
+ [ZEBRA_ROUTE_VNC] = {ZEBRA_ROUTE_VNC, 20, 4},
+ [ZEBRA_ROUTE_VNC_DIRECT] = {ZEBRA_ROUTE_VNC_DIRECT, 20, 4},
+ [ZEBRA_ROUTE_VNC_DIRECT_RH] = {ZEBRA_ROUTE_VNC_DIRECT_RH, 20, 4},
+ [ZEBRA_ROUTE_BGP_DIRECT] = {ZEBRA_ROUTE_BGP_DIRECT, 20, 4},
+ [ZEBRA_ROUTE_BGP_DIRECT_EXT] = {ZEBRA_ROUTE_BGP_DIRECT_EXT, 20, 4},
+ [ZEBRA_ROUTE_BABEL] = {ZEBRA_ROUTE_BABEL, 100, 3},
+ [ZEBRA_ROUTE_SHARP] = {ZEBRA_ROUTE_SHARP, 150, 5},
+ [ZEBRA_ROUTE_PBR] = {ZEBRA_ROUTE_PBR, 200, 5},
+ [ZEBRA_ROUTE_BFD] = {ZEBRA_ROUTE_BFD, 255, 5},
+ [ZEBRA_ROUTE_OPENFABRIC] = {ZEBRA_ROUTE_OPENFABRIC, 115, 3},
+ [ZEBRA_ROUTE_VRRP] = {ZEBRA_ROUTE_VRRP, 255, 5}
/* Any new route type added to zebra, should be mirrored here */
/* no entry/default: 150 */
rib_table_info_t *info = srcdest_rnode_table_info(rn);
struct zebra_vrf *zvrf = vrf_info_lookup(re->vrf_id);
+ // TODO: Might need to move this?
+ // It checks if the nhe is even valid
+ // before trying to uninstall it. If the
+ // nexthop is invalid/uninstalled, then
+ // this route is not in the kernel anymore
+ // most likely.
+ if (!zebra_nhg_id_is_valid(re->nhe_id))
+ return;
+
if (info->safi != SAFI_UNICAST) {
UNSET_FLAG(re->status, ROUTE_ENTRY_INSTALLED);
for (ALL_NEXTHOPS_PTR(re->ng, nexthop))
return current;
}
+/* Core function for processing nexthop group contexts off the metaq */
+static void rib_nhg_process(struct nhg_ctx *ctx)
+{
+ nhg_ctx_process(ctx);
+}
+
/* Core function for processing routing information base. */
static void rib_process(struct route_node *rn)
{
dplane_ctx_fini(&ctx);
}
-/* Take a list of route_node structs and return 1, if there was a record
- * picked from it and processed by rib_process(). Don't process more,
- * than one RN record; operate only in the specified sub-queue.
- */
-static unsigned int process_subq(struct list *subq, uint8_t qindex)
+static void process_subq_nhg(struct listnode *lnode)
{
- struct listnode *lnode = listhead(subq);
- struct route_node *rnode;
- rib_dest_t *dest;
- struct zebra_vrf *zvrf = NULL;
+ struct nhg_ctx *ctx = NULL;
+ uint8_t qindex = route_info[ZEBRA_ROUTE_NHG].meta_q_map;
- if (!lnode)
- return 0;
+ ctx = listgetdata(lnode);
+
+ if (!ctx)
+ return;
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("NHG Context id=%u dequeued from sub-queue %u",
+ ctx->id, qindex);
+
+ rib_nhg_process(ctx);
+}
+
+static void process_subq_route(struct listnode *lnode, uint8_t qindex)
+{
+ struct route_node *rnode = NULL;
+ rib_dest_t *dest = NULL;
+ struct zebra_vrf *zvrf = NULL;
rnode = listgetdata(lnode);
dest = rib_dest_from_rnode(rnode);
}
#endif
route_unlock_node(rnode);
+}
+
+/* Take a list of route_node structs and return 1 if there was a record
+ * picked from it and processed by rib_process(). Don't process more
+ * than one RN record; operate only in the specified sub-queue.
+ */
+static unsigned int process_subq(struct list *subq, uint8_t qindex)
+{
+ struct listnode *lnode = listhead(subq);
+
+ if (!lnode)
+ return 0;
+
+ if (qindex == route_info[ZEBRA_ROUTE_NHG].meta_q_map)
+ process_subq_nhg(lnode);
+ else
+ process_subq_route(lnode, qindex);
+
list_delete_node(subq, lnode);
+
return 1;
}
* original metaqueue index value will win and we'll end up with
* the route node enqueued once.
*/
-static void rib_meta_queue_add(struct meta_queue *mq, struct route_node *rn)
+static int rib_meta_queue_add(struct meta_queue *mq, void *data)
{
+ struct route_node *rn = NULL;
struct route_entry *re = NULL, *curr_re = NULL;
uint8_t qindex = MQ_SIZE, curr_qindex = MQ_SIZE;
+ rn = (struct route_node *)data;
+
RNODE_FOREACH_RE (rn, curr_re) {
curr_qindex = route_info[curr_re->type].meta_q_map;
}
if (!re)
- return;
+ return -1;
/* Invariant: at this point we always have rn->info set. */
if (CHECK_FLAG(rib_dest_from_rnode(rn)->flags,
rnode_debug(rn, re->vrf_id,
"rn %p is already queued in sub-queue %u",
(void *)rn, qindex);
- return;
+ return -1;
}
SET_FLAG(rib_dest_from_rnode(rn)->flags, RIB_ROUTE_QUEUED(qindex));
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %u",
(void *)rn, qindex);
+
+ return 0;
}
-/* Add route_node to work queue and schedule processing */
-void rib_queue_add(struct route_node *rn)
+static int rib_meta_queue_nhg_add(struct meta_queue *mq, void *data)
{
- assert(rn);
+ struct nhg_ctx *ctx = NULL;
+ uint8_t qindex = route_info[ZEBRA_ROUTE_NHG].meta_q_map;
- /* Pointless to queue a route_node with no RIB entries to add or remove
- */
- if (!rnode_to_ribs(rn)) {
- zlog_debug("%s: called for route_node (%p, %d) with no ribs",
- __func__, (void *)rn, rn->lock);
- zlog_backtrace(LOG_DEBUG);
- return;
- }
+ ctx = (struct nhg_ctx *)data;
+
+ if (!ctx)
+ return -1;
+ listnode_add(mq->subq[qindex], ctx);
+ mq->size++;
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("NHG Context id=%u queued into sub-queue %u",
+ ctx->id, qindex);
+
+ return 0;
+}
+
+static int mq_add_handler(void *data,
+ int (*mq_add_func)(struct meta_queue *mq, void *data))
+{
if (zrouter.ribq == NULL) {
flog_err(EC_ZEBRA_WQ_NONEXISTENT,
"%s: work_queue does not exist!", __func__);
- return;
+ return -1;
}
/*
if (work_queue_empty(zrouter.ribq))
work_queue_add(zrouter.ribq, zrouter.mq);
- rib_meta_queue_add(zrouter.mq, rn);
+ return mq_add_func(zrouter.mq, data);
+}
- return;
+/* Add route_node to work queue and schedule processing */
+int rib_queue_add(struct route_node *rn)
+{
+ assert(rn);
+
+ /* Pointless to queue a route_node with no RIB entries to add or remove
+ */
+ if (!rnode_to_ribs(rn)) {
+ zlog_debug("%s: called for route_node (%p, %d) with no ribs",
+ __func__, (void *)rn, rn->lock);
+ zlog_backtrace(LOG_DEBUG);
+ return -1;
+ }
+
+ return mq_add_handler(rn, &rib_meta_queue_add);
+}
+
+int rib_queue_nhg_add(struct nhg_ctx *ctx)
+{
+ assert(ctx);
+
+ return mq_add_handler(ctx, &rib_meta_queue_nhg_add);
}
/* Create new meta queue.
if (dest->selected_fib == re)
dest->selected_fib = NULL;
- nhe = zebra_nhg_lookup_id(re->nhe_id);
- if (nhe)
- zebra_nhg_decrement_ref(nhe);
+ if (re->nhe_id) {
+ nhe = zebra_nhg_lookup_id(re->nhe_id);
+ if (nhe)
+ zebra_nhg_decrement_ref(nhe);
+ } else if (re->ng)
+ nexthop_group_free_delete(&re->ng);
nexthops_free(re->fib_ng.nexthop);
struct route_table *table;
struct route_node *rn;
struct route_entry *same = NULL;
- struct nhg_hash_entry *nhe = NULL;
- struct nhg_connected_head nhg_depends = {0};
- /* Default to route afi */
- afi_t nhg_afi = afi;
- /* Default to route vrf id */
- vrf_id_t nhg_vrf_id = re->vrf_id;
int ret = 0;
if (!re)
if (src_p)
apply_mask_ipv6(src_p);
- /* If its a group, create a dependency list */
- if (re->ng && re->ng->nexthop->next) {
- struct nexthop *nh = NULL;
- struct nexthop lookup = {0};
- struct nhg_hash_entry *depend = NULL;
-
- nhg_connected_head_init(&nhg_depends);
-
- for (ALL_NEXTHOPS_PTR(re->ng, nh)) {
- lookup = *nh;
- /* Clear it, since its a group */
- lookup.next = NULL;
- /* Use the route afi here, since a single nh */
- depend = zebra_nhg_find_nexthop(&lookup, afi);
- nhg_connected_head_add(&nhg_depends, depend);
- }
-
- /* change the afi for group */
- nhg_afi = AFI_UNSPEC;
- nhg_vrf_id = 0;
- }
-
- // TODO: Add proto type here
- nhe = zebra_nhg_find(re->ng, nhg_vrf_id, nhg_afi, re->nhe_id,
- &nhg_depends, false);
-
- if (nhe) {
- /* It should point to the nhe nexthop group now */
- if (re->ng)
- nexthop_group_free_delete(&re->ng);
- re->ng = nhe->nhg;
- re->nhe_id = nhe->id;
- zebra_nhg_increment_ref(nhe);
- } else {
- flog_err(
- EC_ZEBRA_TABLE_LOOKUP_FAILED,
- "Zebra failed to find or create a nexthop hash entry for id=%u in a route entry",
- re->nhe_id);
- nhg_connected_head_free(&nhg_depends);
- }
-
-
/* Set default distance by route type. */
if (re->distance == 0)
re->distance = route_distance(re->type);
re->tag = tag;
re->nhe_id = nhe_id;
- re->ng = nexthop_group_new();
+ if (!nhe_id) {
+ re->ng = nexthop_group_new();
- /* Add nexthop. */
- nexthop = nexthop_new();
- *nexthop = *nh;
- route_entry_nexthop_add(re, nexthop);
+ /* Add nexthop. */
+ nexthop = nexthop_new();
+ *nexthop = *nh;
+ route_entry_nexthop_add(re, nexthop);
+ }
return rib_add_multipath(afi, safi, p, src_p, re);
}