/*
 * Copy one nexthop group into another: both the resilient-group
 * parameters and the full (recursive) nexthop chain.
 *
 * Fix: removed a stray '+' diff marker that had leaked into the body
 * and made the function invalid C.
 */
void nexthop_group_copy(struct nexthop_group *to,
			const struct nexthop_group *from)
{
	/* Preserve the resilience parameters of the source group. */
	to->nhgr = from->nhgr;

	/* Copy everything, including recursive info */
	copy_nexthops(&to->nexthop, from->nexthop, NULL);
}
extern "C" {
#endif
/*
 * Parameters of a resilient nexthop group (Linux NHA_RES_GROUP).
 *
 * The timer values here appear to be kept in seconds: they are
 * multiplied by 1000 when encoded to netlink (kernel takes
 * milliseconds) -- TODO confirm units against callers.
 *
 * Fix: removed stray '+' diff markers that had leaked into the
 * struct definition.
 */
struct nhg_resilience {
	/* Number of hash buckets the group is spread over;
	 * zero means the group is not resilient.
	 */
	uint16_t buckets;
	/* Idle timer (NHA_RES_GROUP_IDLE_TIMER) */
	uint32_t idle_timer;
	/* Unbalanced timer (NHA_RES_GROUP_UNBALANCED_TIMER) */
	uint32_t unbalanced_timer;
	/* Kernel-reported time the group has been unbalanced
	 * (NHA_RES_GROUP_UNBALANCED_TIME); read-only from our side.
	 */
	uint64_t unbalanced_time;
};
+
/*
* What is a nexthop group?
*
*/
struct nexthop_group {
struct nexthop *nexthop;
+
+ struct nhg_resilience nhgr;
};
struct nexthop_group *nexthop_group_new(void);
/*
 * Encode a nexthop group into a netlink RTM_NEWNEXTHOP request:
 * the NHA_GROUP member array and, when `resilient` is true, a nested
 * NHA_RES_GROUP attribute carrying the parameters from `nhgr`.
 *
 * Returns false when an attribute does not fit within `req_size`.
 *
 * NOTE(review): this span is an elided diff hunk -- lines prefixed
 * with '+'/'-' are hunk markers and parts of the body (e.g. the loop
 * filling grp[]) are not shown here.
 */
static bool _netlink_nexthop_build_group(struct nlmsghdr *n, size_t req_size,
uint32_t id,
const struct nh_grp *z_grp,
- const uint8_t count)
+ const uint8_t count, bool resilient,
+ const struct nhg_resilience *nhgr)
{
struct nexthop_grp grp[count];
/* Need space for max group size, "/", and null term */
if (!nl_attr_put(n, req_size, NHA_GROUP, grp,
count * sizeof(*grp)))
return false;
+
/* Resilient group: nest the hashing parameters under NHA_RES_GROUP. */
+ if (resilient) {
+ struct rtattr *nest;
+
+ nest = nl_attr_nest(n, req_size, NHA_RES_GROUP);
+
+ nl_attr_put16(n, req_size, NHA_RES_GROUP_BUCKETS,
+ nhgr->buckets);
/* Timers are scaled by 1000 on the wire -- presumably stored in
 * seconds while the kernel expects milliseconds; TODO confirm.
 */
+ nl_attr_put32(n, req_size, NHA_RES_GROUP_IDLE_TIMER,
+ nhgr->idle_timer * 1000);
+ nl_attr_put32(n, req_size,
+ NHA_RES_GROUP_UNBALANCED_TIMER,
+ nhgr->unbalanced_timer * 1000);
+ nl_attr_nest_end(n, nest);
+
/* Flag the group's type as resilient. */
+ nl_attr_put16(n, req_size, NHA_GROUP_TYPE,
+ NEXTHOP_GRP_TYPE_RES);
+ }
}
if (IS_ZEBRA_DEBUG_KERNEL)
* other ids.
*/
if (dplane_ctx_get_nhe_nh_grp_count(ctx)) {
+ const struct nexthop_group *nhg;
+ const struct nhg_resilience *nhgr;
+
+ nhg = dplane_ctx_get_nhe_ng(ctx);
+ nhgr = &nhg->nhgr;
if (!_netlink_nexthop_build_group(
&req->n, buflen, id,
dplane_ctx_get_nhe_nh_grp(ctx),
- dplane_ctx_get_nhe_nh_grp_count(ctx)))
+ dplane_ctx_get_nhe_nh_grp_count(ctx),
+ !!nhgr->buckets, nhgr))
return 0;
} else {
const struct nexthop *nh =
}
/*
 * Parse a kernel nexthop-group message: copy the NHA_GROUP member
 * array into `z_grp` (up to `z_grp_size` entries) and, when present,
 * the nested NHA_RES_GROUP parameters into `nhgr`.
 *
 * Returns the number of group members parsed.  `nhgr` is always
 * zeroed first, so callers see all-zero parameters for non-resilient
 * groups.
 *
 * NOTE(review): this span is an elided diff hunk -- lines prefixed
 * with '+'/'-' are hunk markers and parts of the body (e.g. the
 * member-copy loop header) are not shown here.
 */
static int netlink_nexthop_process_group(struct rtattr **tb,
- struct nh_grp *z_grp, int z_grp_size)
+ struct nh_grp *z_grp, int z_grp_size,
+ struct nhg_resilience *nhgr)
{
uint8_t count = 0;
/* linux/nexthop.h group struct */
z_grp[i].id = n_grp[i].id;
/* +1: netlink appears to carry weight minus one -- TODO confirm */
z_grp[i].weight = n_grp[i].weight + 1;
}
+
+ memset(nhgr, 0, sizeof(*nhgr));
+ if (tb[NHA_RES_GROUP]) {
+ struct rtattr *tbn[NHA_RES_GROUP_MAX + 1];
+ struct rtattr *rta;
+ struct rtattr *res_group = tb[NHA_RES_GROUP];
+
+ netlink_parse_rtattr_nested(tbn, NHA_RES_GROUP_MAX, res_group);
+
/* Each sub-attribute is optional; read only what the kernel sent. */
+ if (tbn[NHA_RES_GROUP_BUCKETS]) {
+ rta = tbn[NHA_RES_GROUP_BUCKETS];
+ nhgr->buckets = *(uint16_t *)RTA_DATA(rta);
+ }
+
+ if (tbn[NHA_RES_GROUP_IDLE_TIMER]) {
+ rta = tbn[NHA_RES_GROUP_IDLE_TIMER];
+ nhgr->idle_timer = *(uint32_t *)RTA_DATA(rta);
+ }
+
+ if (tbn[NHA_RES_GROUP_UNBALANCED_TIMER]) {
+ rta = tbn[NHA_RES_GROUP_UNBALANCED_TIMER];
+ nhgr->unbalanced_timer = *(uint32_t *)RTA_DATA(rta);
+ }
+
+ if (tbn[NHA_RES_GROUP_UNBALANCED_TIME]) {
+ rta = tbn[NHA_RES_GROUP_UNBALANCED_TIME];
+ nhgr->unbalanced_time = *(uint64_t *)RTA_DATA(rta);
+ }
+ }
+
return count;
}
if (h->nlmsg_type == RTM_NEWNEXTHOP) {
+ struct nhg_resilience nhgr = {};
+
if (tb[NHA_GROUP]) {
/**
 * If this is a group message, it's only going to have
 * an array of nexthop IDs associated with it
 */
grp_count = netlink_nexthop_process_group(
- tb, grp, array_size(grp));
+ tb, grp, array_size(grp), &nhgr);
} else {
if (tb[NHA_BLACKHOLE]) {
/**
}
if (zebra_nhg_kernel_find(id, &nh, grp, grp_count, vrf_id, afi,
- type, startup))
+ type, startup, &nhgr))
return -1;
} else if (h->nlmsg_type == RTM_DELNEXTHOP)
if (nhe1->afi != nhe2->afi)
return false;
+ if (nhe1->nhg.nhgr.buckets != nhe2->nhg.nhgr.buckets)
+ return false;
+
+ if (nhe1->nhg.nhgr.idle_timer != nhe2->nhg.nhgr.idle_timer)
+ return false;
+
+ if (nhe1->nhg.nhgr.unbalanced_timer != nhe2->nhg.nhgr.unbalanced_timer)
+ return false;
+
/* Nexthops should be in-order, so we simply compare them in-place */
for (nexthop1 = nhe1->nhg.nexthop, nexthop2 = nhe2->nhg.nexthop;
nexthop1 && nexthop2;
/*
 * Turn a kernel-provided member array (`grp`, `count` entries) into a
 * nexthop_group plus its dependency tree, and attach the resilience
 * parameters when the caller supplies them (`resilience` may be NULL).
 *
 * Returns 0 on success.
 *
 * NOTE(review): this span is an elided diff hunk -- lines prefixed
 * with '+'/'-' are hunk markers and the member-resolution loop is not
 * shown here.
 */
static int zebra_nhg_process_grp(struct nexthop_group *nhg,
struct nhg_connected_tree_head *depends,
- struct nh_grp *grp, uint8_t count)
+ struct nh_grp *grp, uint8_t count,
+ struct nhg_resilience *resilience)
{
nhg_connected_tree_init(depends);
copy_nexthops(&nhg->nexthop, depend->nhg.nexthop, NULL);
}
/* Resilience is optional; only copy it when provided. */
+ if (resilience)
+ nhg->nhgr = *resilience;
+
return 0;
}
return ctx->u.grp;
}
+static struct nhg_resilience *nhg_ctx_get_resilience(struct nhg_ctx *ctx)
+{
+ return &ctx->resilience;
+}
+
static struct nhg_ctx *nhg_ctx_new(void)
{
struct nhg_ctx *new;
static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh,
struct nh_grp *grp, vrf_id_t vrf_id,
- afi_t afi, int type, uint8_t count)
+ afi_t afi, int type, uint8_t count,
+ struct nhg_resilience *resilience)
{
struct nhg_ctx *ctx = NULL;
ctx->type = type;
ctx->count = count;
+ if (resilience)
+ ctx->resilience = *resilience;
+
if (count)
/* Copy over the array */
memcpy(&ctx->u.grp, grp, count * sizeof(struct nh_grp));
if (nhg_ctx_get_count(ctx)) {
nhg = nexthop_group_new();
if (zebra_nhg_process_grp(nhg, &nhg_depends,
- nhg_ctx_get_grp(ctx), count)) {
+ nhg_ctx_get_grp(ctx), count,
+ nhg_ctx_get_resilience(ctx))) {
depends_decrement_free(&nhg_depends);
nexthop_group_delete(&nhg);
return -ENOENT;
/* Kernel-side, you either get a single new nexthop or an array of IDs */
int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp,
uint8_t count, vrf_id_t vrf_id, afi_t afi, int type,
- int startup)
+ int startup, struct nhg_resilience *nhgr)
{
struct nhg_ctx *ctx = NULL;
*/
id_counter = id;
- ctx = nhg_ctx_init(id, nh, grp, vrf_id, afi, type, count);
+ ctx = nhg_ctx_init(id, nh, grp, vrf_id, afi, type, count, nhgr);
nhg_ctx_set_op(ctx, NHG_CTX_OP_NEW);
/* Under startup conditions, we need to handle them immediately
{
struct nhg_ctx *ctx = NULL;
- ctx = nhg_ctx_init(id, NULL, NULL, vrf_id, 0, 0, 0);
+ ctx = nhg_ctx_init(id, NULL, NULL, vrf_id, 0, 0, 0, NULL);
nhg_ctx_set_op(ctx, NHG_CTX_OP_DEL);
struct nh_grp grp[MULTIPATH_NUM];
} u;
+ struct nhg_resilience resilience;
enum nhg_ctx_op_e op;
enum nhg_ctx_status status;
};
extern int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh,
struct nh_grp *grp, uint8_t count,
vrf_id_t vrf_id, afi_t afi, int type,
- int startup);
+ int startup,
+ struct nhg_resilience *resilience);
/* Del via kernel */
extern int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id);
vty_out(vty, "\n");
}
+ if (nhe->nhg.nhgr.buckets)
+ vty_out(vty,
+ " Buckets: %u Idle Timer: %u Unbalanced Timer: %u Unbalanced time: %" PRIu64 "\n",
+ nhe->nhg.nhgr.buckets, nhe->nhg.nhgr.idle_timer,
+ nhe->nhg.nhgr.unbalanced_timer,
+ nhe->nhg.nhgr.unbalanced_time);
}
static int show_nexthop_group_id_cmd_helper(struct vty *vty, uint32_t id)