From 569e141113c56d80da9f3720dfaed104a27107ad Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Sat, 22 Oct 2022 15:37:27 -0400 Subject: [PATCH] lib, zebra: Add ability to encode/decode resilient nhg's Add ability to read the nexthop group resilient linux kernel data as well as write it. Signed-off-by: Donald Sharp --- lib/nexthop_group.c | 1 + lib/nexthop_group.h | 9 ++++++ zebra/rt_netlink.c | 68 +++++++++++++++++++++++++++++++++++++++++---- zebra/zebra_nhg.c | 35 +++++++++++++++++++---- zebra/zebra_nhg.h | 4 ++- zebra/zebra_vty.c | 6 ++++ 6 files changed, 111 insertions(+), 12 deletions(-) diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c index f342f50e8f..f6479f8d2c 100644 --- a/lib/nexthop_group.c +++ b/lib/nexthop_group.c @@ -272,6 +272,7 @@ struct nexthop_group *nexthop_group_new(void) void nexthop_group_copy(struct nexthop_group *to, const struct nexthop_group *from) { + to->nhgr = from->nhgr; /* Copy everything, including recursive info */ copy_nexthops(&to->nexthop, from->nexthop, NULL); } diff --git a/lib/nexthop_group.h b/lib/nexthop_group.h index 8e75e5c6ac..7d1b57a2f9 100644 --- a/lib/nexthop_group.h +++ b/lib/nexthop_group.h @@ -28,6 +28,13 @@ extern "C" { #endif +struct nhg_resilience { + uint16_t buckets; + uint32_t idle_timer; + uint32_t unbalanced_timer; + uint64_t unbalanced_time; +}; + /* * What is a nexthop group? * @@ -38,6 +45,8 @@ extern "C" { */ struct nexthop_group { struct nexthop *nexthop; + + struct nhg_resilience nhgr; }; struct nexthop_group *nexthop_group_new(void); diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index 3a8f5264f4..ec818840d1 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -2412,7 +2412,8 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in) static bool _netlink_nexthop_build_group(struct nlmsghdr *n, size_t req_size, uint32_t id, const struct nh_grp *z_grp, - const uint8_t count) + const uint8_t count, bool resilient, + const struct nhg_resilience *nhgr) { struct nexthop_grp grp[count]; /* Need space for max group size, "/", and null term */ @@ -2442,6 +2443,24 @@ static bool _netlink_nexthop_build_group(struct nlmsghdr *n, size_t req_size, if (!nl_attr_put(n, req_size, NHA_GROUP, grp, count * sizeof(*grp))) return false; + + if (resilient) { + struct rtattr *nest; + + nest = nl_attr_nest(n, req_size, NHA_RES_GROUP); + + nl_attr_put16(n, req_size, NHA_RES_GROUP_BUCKETS, + nhgr->buckets); + nl_attr_put32(n, req_size, NHA_RES_GROUP_IDLE_TIMER, + nhgr->idle_timer * 1000); + nl_attr_put32(n, req_size, + NHA_RES_GROUP_UNBALANCED_TIMER, + nhgr->unbalanced_timer * 1000); + nl_attr_nest_end(n, nest); + + nl_attr_put16(n, req_size, NHA_GROUP_TYPE, + NEXTHOP_GRP_TYPE_RES); + } } if (IS_ZEBRA_DEBUG_KERNEL) @@ -2538,10 +2557,16 @@ ssize_t netlink_nexthop_msg_encode(uint16_t cmd, * other ids. 
*/ if (dplane_ctx_get_nhe_nh_grp_count(ctx)) { + const struct nexthop_group *nhg; + const struct nhg_resilience *nhgr; + + nhg = dplane_ctx_get_nhe_ng(ctx); + nhgr = &nhg->nhgr; if (!_netlink_nexthop_build_group( &req->n, buflen, id, dplane_ctx_get_nhe_nh_grp(ctx), - dplane_ctx_get_nhe_nh_grp_count(ctx))) + dplane_ctx_get_nhe_nh_grp_count(ctx), + !!nhgr->buckets, nhgr)) return 0; } else { const struct nexthop *nh = @@ -2985,7 +3010,8 @@ static struct nexthop netlink_nexthop_process_nh(struct rtattr **tb, } static int netlink_nexthop_process_group(struct rtattr **tb, - struct nh_grp *z_grp, int z_grp_size) + struct nh_grp *z_grp, int z_grp_size, + struct nhg_resilience *nhgr) { uint8_t count = 0; /* linux/nexthop.h group struct */ @@ -3004,6 +3030,36 @@ static int netlink_nexthop_process_group(struct rtattr **tb, z_grp[i].id = n_grp[i].id; z_grp[i].weight = n_grp[i].weight + 1; } + + memset(nhgr, 0, sizeof(*nhgr)); + if (tb[NHA_RES_GROUP]) { + struct rtattr *tbn[NHA_RES_GROUP_MAX + 1]; + struct rtattr *rta; + struct rtattr *res_group = tb[NHA_RES_GROUP]; + + netlink_parse_rtattr_nested(tbn, NHA_RES_GROUP_MAX, res_group); + + if (tbn[NHA_RES_GROUP_BUCKETS]) { + rta = tbn[NHA_RES_GROUP_BUCKETS]; + nhgr->buckets = *(uint16_t *)RTA_DATA(rta); + } + + if (tbn[NHA_RES_GROUP_IDLE_TIMER]) { + rta = tbn[NHA_RES_GROUP_IDLE_TIMER]; + nhgr->idle_timer = *(uint32_t *)RTA_DATA(rta); + } + + if (tbn[NHA_RES_GROUP_UNBALANCED_TIMER]) { + rta = tbn[NHA_RES_GROUP_UNBALANCED_TIMER]; + nhgr->unbalanced_timer = *(uint32_t *)RTA_DATA(rta); + } + + if (tbn[NHA_RES_GROUP_UNBALANCED_TIME]) { + rta = tbn[NHA_RES_GROUP_UNBALANCED_TIME]; + nhgr->unbalanced_time = *(uint64_t *)RTA_DATA(rta); + } + } + return count; } @@ -3087,13 +3143,15 @@ int netlink_nexthop_change(struct nlmsghdr *h, ns_id_t ns_id, int startup) if (h->nlmsg_type == RTM_NEWNEXTHOP) { + struct nhg_resilience nhgr = {}; + if (tb[NHA_GROUP]) { /** * If this is a group message its only going to have * an array of nexthop IDs associated with it */ grp_count = netlink_nexthop_process_group( - tb, grp, array_size(grp)); + tb, grp, array_size(grp), &nhgr); } else { if (tb[NHA_BLACKHOLE]) { /** @@ -3125,7 +3183,7 @@ int netlink_nexthop_change(struct nlmsghdr *h, ns_id_t ns_id, int startup) } if (zebra_nhg_kernel_find(id, &nh, grp, grp_count, vrf_id, afi, - type, startup)) + type, startup, &nhgr)) return -1; } else if (h->nlmsg_type == RTM_DELNEXTHOP) diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index ded2bd04bb..07e1e5f305 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -563,6 +563,15 @@ bool zebra_nhg_hash_equal(const void *arg1, const void *arg2) if (nhe1->afi != nhe2->afi) return false; + if (nhe1->nhg.nhgr.buckets != nhe2->nhg.nhgr.buckets) + return false; + + if (nhe1->nhg.nhgr.idle_timer != nhe2->nhg.nhgr.idle_timer) + return false; + + if (nhe1->nhg.nhgr.unbalanced_timer != nhe2->nhg.nhgr.unbalanced_timer) + return false; + /* Nexthops should be in-order, so we simply compare them in-place */ for (nexthop1 = nhe1->nhg.nexthop, nexthop2 = nhe2->nhg.nexthop; nexthop1 && nexthop2; @@ -621,7 +630,8 @@ bool zebra_nhg_hash_id_equal(const void *arg1, const void *arg2) static int zebra_nhg_process_grp(struct nexthop_group *nhg, struct nhg_connected_tree_head *depends, - struct nh_grp *grp, uint8_t count) + struct nh_grp *grp, uint8_t count, + struct nhg_resilience *resilience) { nhg_connected_tree_init(depends); @@ -652,6 +662,9 @@ static int zebra_nhg_process_grp(struct nexthop_group *nhg, copy_nexthops(&nhg->nexthop, depend->nhg.nexthop, 
NULL); } + if (resilience) + nhg->nhgr = *resilience; + return 0; } @@ -985,6 +998,11 @@ static struct nh_grp *nhg_ctx_get_grp(struct nhg_ctx *ctx) return ctx->u.grp; } +static struct nhg_resilience *nhg_ctx_get_resilience(struct nhg_ctx *ctx) +{ + return &ctx->resilience; +} + static struct nhg_ctx *nhg_ctx_new(void) { struct nhg_ctx *new; @@ -1018,7 +1036,8 @@ done: static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh, struct nh_grp *grp, vrf_id_t vrf_id, - afi_t afi, int type, uint8_t count) + afi_t afi, int type, uint8_t count, + struct nhg_resilience *resilience) { struct nhg_ctx *ctx = NULL; @@ -1030,6 +1049,9 @@ static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh, ctx->type = type; ctx->count = count; + if (resilience) + ctx->resilience = *resilience; + if (count) /* Copy over the array */ memcpy(&ctx->u.grp, grp, count * sizeof(struct nh_grp)); @@ -1176,7 +1198,8 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx) if (nhg_ctx_get_count(ctx)) { nhg = nexthop_group_new(); if (zebra_nhg_process_grp(nhg, &nhg_depends, - nhg_ctx_get_grp(ctx), count)) { + nhg_ctx_get_grp(ctx), count, + nhg_ctx_get_resilience(ctx))) { depends_decrement_free(&nhg_depends); nexthop_group_delete(&nhg); return -ENOENT; @@ -1306,7 +1329,7 @@ int nhg_ctx_process(struct nhg_ctx *ctx) /* Kernel-side, you either get a single new nexthop or a array of ID's */ int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp, uint8_t count, vrf_id_t vrf_id, afi_t afi, int type, - int startup) + int startup, struct nhg_resilience *nhgr) { struct nhg_ctx *ctx = NULL; @@ -1320,7 +1343,7 @@ int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp, */ id_counter = id; - ctx = nhg_ctx_init(id, nh, grp, vrf_id, afi, type, count); + ctx = nhg_ctx_init(id, nh, grp, vrf_id, afi, type, count, nhgr); nhg_ctx_set_op(ctx, NHG_CTX_OP_NEW); /* Under statup conditions, we need to handle them immediately @@ -1343,7 +1366,7 @@ int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id) { struct nhg_ctx *ctx = NULL; - ctx = nhg_ctx_init(id, NULL, NULL, vrf_id, 0, 0, 0); + ctx = nhg_ctx_init(id, NULL, NULL, vrf_id, 0, 0, 0, NULL); nhg_ctx_set_op(ctx, NHG_CTX_OP_DEL); diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h index 62f71f943f..9b925bf10f 100644 --- a/zebra/zebra_nhg.h +++ b/zebra/zebra_nhg.h @@ -228,6 +228,7 @@ struct nhg_ctx { struct nh_grp grp[MULTIPATH_NUM]; } u; + struct nhg_resilience resilience; enum nhg_ctx_op_e op; enum nhg_ctx_status status; }; @@ -308,7 +309,8 @@ void nhg_ctx_free(struct nhg_ctx **ctx); extern int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp, uint8_t count, vrf_id_t vrf_id, afi_t afi, int type, - int startup); + int startup, + struct nhg_resilience *resilience); /* Del via kernel */ extern int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id); diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index f68a656710..bb28ecbfff 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -1532,6 +1532,12 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe) vty_out(vty, "\n"); } + if (nhe->nhg.nhgr.buckets) + vty_out(vty, + " Buckets: %u Idle Timer: %u Unbalanced Timer: %u Unbalanced time: %" PRIu64 "\n", + nhe->nhg.nhgr.buckets, nhe->nhg.nhgr.idle_timer, + nhe->nhg.nhgr.unbalanced_timer, + nhe->nhg.nhgr.unbalanced_time); } static int show_nexthop_group_id_cmd_helper(struct vty *vty, uint32_t id) -- 2.39.5
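
Reviewer note, not part of the patch: for anyone reading the decode path above outside the FRR tree, below is a minimal standalone sketch of how the kernel's NHA_RES_GROUP nested attribute is unpacked. The struct and helper names (nhg_resilience_sketch, parse_nested, parse_res_group) are hypothetical, not FRR symbols; only the NHA_RES_GROUP_* attribute ids and the RTA_* macros come from the Linux UAPI headers, and the logic mirrors what the patch's netlink_nexthop_process_group() does in zebra/rt_netlink.c.

/* Minimal sketch, not FRR code: pull the resilient-group parameters out of an
 * NHA_RES_GROUP nested attribute into the same fields the patch stores in
 * struct nhg_resilience. */
#include <stdint.h>
#include <string.h>
#include <linux/rtnetlink.h>
#include <linux/nexthop.h>

struct nhg_resilience_sketch {
	uint16_t buckets;
	uint32_t idle_timer;
	uint32_t unbalanced_timer;
	uint64_t unbalanced_time;	/* report-only: decoded, never encoded */
};

/* Index the attributes nested inside 'nest' by type, similar in spirit to the
 * netlink_parse_rtattr_nested() call used in the patch. */
static void parse_nested(struct rtattr **tb, int max, struct rtattr *nest)
{
	struct rtattr *rta = RTA_DATA(nest);
	int len = RTA_PAYLOAD(nest);

	memset(tb, 0, sizeof(struct rtattr *) * (max + 1));
	while (RTA_OK(rta, len)) {
		if (rta->rta_type <= max)
			tb[rta->rta_type] = rta;
		rta = RTA_NEXT(rta, len);
	}
}

static void parse_res_group(struct rtattr *res_group,
			    struct nhg_resilience_sketch *out)
{
	struct rtattr *tb[NHA_RES_GROUP_MAX + 1];

	memset(out, 0, sizeof(*out));
	parse_nested(tb, NHA_RES_GROUP_MAX, res_group);

	/* Copy the raw attribute payloads, as the patch's read path does. */
	if (tb[NHA_RES_GROUP_BUCKETS])
		out->buckets = *(uint16_t *)RTA_DATA(tb[NHA_RES_GROUP_BUCKETS]);
	if (tb[NHA_RES_GROUP_IDLE_TIMER])
		out->idle_timer =
			*(uint32_t *)RTA_DATA(tb[NHA_RES_GROUP_IDLE_TIMER]);
	if (tb[NHA_RES_GROUP_UNBALANCED_TIMER])
		out->unbalanced_timer =
			*(uint32_t *)RTA_DATA(tb[NHA_RES_GROUP_UNBALANCED_TIMER]);
	if (tb[NHA_RES_GROUP_UNBALANCED_TIME])
		out->unbalanced_time =
			*(uint64_t *)RTA_DATA(tb[NHA_RES_GROUP_UNBALANCED_TIME]);
}

To exercise the read side against a running zebra, a resilient group can also be created from the kernel with a recent iproute2 (assuming nexthops 1 and 2 already exist; option names may vary slightly by version):

    ip nexthop add id 100 group 1/2 type resilient buckets 512 idle_timer 60 unbalanced_timer 300

zebra's 'show nexthop-group rib' output for that id should then include the Buckets / Idle Timer / Unbalanced Timer line added in zebra_vty.c.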