author    Stephen Worley <sworley@cumulusnetworks.com>  2020-03-30 10:26:17 -0400
committer GitHub <noreply@github.com>                   2020-03-30 10:26:17 -0400
commit    ff82bbbb912d84a4a9bc22a7efe7f51adada02eb (patch)
tree      3027b6a8dc9cfa41f5679915ee677bdf36296e86
parent    a326a812b30e5c11b9d67f9a9434342a9cf6008e (diff)
parent    accf6280fcdd0c29897234ad9786d9f3d0509b44 (diff)

Merge pull request #5901 from mjstapp/backup_nh_prep

zebra, lib: Backup nexthop (path) prep work
-rw-r--r--  lib/ipaddr.h           |    2
-rw-r--r--  lib/mpls.c             |    2
-rw-r--r--  lib/mpls.h             |    5
-rw-r--r--  lib/nexthop.c          |   44
-rw-r--r--  lib/nexthop.h          |   22
-rw-r--r--  lib/nexthop_group.c    |   94
-rw-r--r--  lib/nexthop_group.h    |    9
-rw-r--r--  lib/zclient.c          |  109
-rw-r--r--  lib/zclient.h          |   21
-rw-r--r--  sharpd/sharp_globals.h |    4
-rw-r--r--  sharpd/sharp_vty.c     |   57
-rw-r--r--  sharpd/sharp_zebra.c   |   33
-rw-r--r--  sharpd/sharp_zebra.h   |    8
-rw-r--r--  zebra/rib.h            |   10
-rw-r--r--  zebra/rt_netlink.c     |   61
-rw-r--r--  zebra/zapi_msg.c       |  369
-rw-r--r--  zebra/zebra_dplane.c   |   70
-rw-r--r--  zebra/zebra_dplane.h   |    8
-rw-r--r--  zebra/zebra_mpls.c     |   56
-rw-r--r--  zebra/zebra_mpls.h     |    4
-rw-r--r--  zebra/zebra_nhg.c      |  756
-rw-r--r--  zebra/zebra_nhg.h      |   39
-rw-r--r--  zebra/zebra_rib.c      |  237
-rw-r--r--  zebra/zebra_vty.c      |  608
-rw-r--r--  zebra/zebra_vxlan.c    |   45
-rw-r--r--  zebra/zebra_vxlan.h    |    6
26 files changed, 1920 insertions(+), 759 deletions(-)
diff --git a/lib/ipaddr.h b/lib/ipaddr.h
index c6372f1abb..cd7f79a04e 100644
--- a/lib/ipaddr.h
+++ b/lib/ipaddr.h
@@ -112,7 +112,7 @@ static inline void ipv4_to_ipv4_mapped_ipv6(struct in6_addr *in6,
/*
* convert an ipv4 mapped ipv6 address back to ipv4 address
*/
-static inline void ipv4_mapped_ipv6_to_ipv4(struct in6_addr *in6,
+static inline void ipv4_mapped_ipv6_to_ipv4(const struct in6_addr *in6,
struct in_addr *in)
{
memset(in, 0, sizeof(struct in_addr));
diff --git a/lib/mpls.c b/lib/mpls.c
index 759fe1206d..ac5792a686 100644
--- a/lib/mpls.c
+++ b/lib/mpls.c
@@ -79,7 +79,7 @@ int mpls_str2label(const char *label_str, uint8_t *num_labels,
/*
* Label to string conversion, labels in string separated by '/'.
*/
-char *mpls_label2str(uint8_t num_labels, mpls_label_t *labels, char *buf,
+char *mpls_label2str(uint8_t num_labels, const mpls_label_t *labels, char *buf,
int len, int pretty)
{
char label_buf[BUFSIZ];
diff --git a/lib/mpls.h b/lib/mpls.h
index 635ecc77a1..05cf2935e8 100644
--- a/lib/mpls.h
+++ b/lib/mpls.h
@@ -209,10 +209,13 @@ static inline char *label2str(mpls_label_t label, char *buf, size_t len)
int mpls_str2label(const char *label_str, uint8_t *num_labels,
mpls_label_t *labels);
+/* Generic string buffer for label-stack-to-str */
+#define MPLS_LABEL_STRLEN 1024
+
/*
* Label to string conversion, labels in string separated by '/'.
*/
-char *mpls_label2str(uint8_t num_labels, mpls_label_t *labels, char *buf,
+char *mpls_label2str(uint8_t num_labels, const mpls_label_t *labels, char *buf,
int len, int pretty);
#ifdef __cplusplus
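
The new MPLS_LABEL_STRLEN define gives callers a generic buffer size for label-stack-to-string conversion. A minimal sketch of the intended usage, assuming the lib/ headers from this change are available (the label values and helper function are illustrative only):

    #include "zebra.h"
    #include "mpls.h"

    /* Sketch: format a two-label stack into a caller-provided buffer. */
    static void format_labels_example(void)
    {
            mpls_label_t labels[2] = {16, 17}; /* example label values */
            char buf[MPLS_LABEL_STRLEN];

            /* Labels are joined with '/'; the last arg selects "pretty" output */
            mpls_label2str(2, labels, buf, sizeof(buf), 0);
    }
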
diff --git a/lib/nexthop.c b/lib/nexthop.c
index c3be0a71e6..0d239e091b 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -23,11 +23,9 @@
#include "table.h"
#include "memory.h"
#include "command.h"
-#include "if.h"
#include "log.h"
#include "sockunion.h"
#include "linklist.h"
-#include "thread.h"
#include "prefix.h"
#include "nexthop.h"
#include "mpls.h"
@@ -155,7 +153,24 @@ static int _nexthop_cmp_no_labels(const struct nexthop *next1,
}
ret = _nexthop_source_cmp(next1, next2);
+ if (ret != 0)
+ goto done;
+
+ if (!CHECK_FLAG(next1->flags, NEXTHOP_FLAG_HAS_BACKUP) &&
+ CHECK_FLAG(next2->flags, NEXTHOP_FLAG_HAS_BACKUP))
+ return -1;
+
+ if (CHECK_FLAG(next1->flags, NEXTHOP_FLAG_HAS_BACKUP) &&
+ !CHECK_FLAG(next2->flags, NEXTHOP_FLAG_HAS_BACKUP))
+ return 1;
+
+ if (next1->backup_idx < next2->backup_idx)
+ return -1;
+
+ if (next1->backup_idx > next2->backup_idx)
+ return 1;
+done:
return ret;
}
@@ -240,7 +255,7 @@ struct nexthop *nexthop_new(void)
* The linux kernel does some weird stuff with adding +1 to
* all nexthop weights it gets over netlink.
* To handle this, just default everything to 1 right from
- * from the beggining so we don't have to special case
+ * from the beginning so we don't have to special case
* default weights in the linux netlink code.
*
* 1 should be a valid on all platforms anyway.
@@ -393,8 +408,8 @@ struct nexthop *nexthop_from_blackhole(enum blackhole_type bh_type)
}
/* Update nexthop with label information. */
-void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t type,
- uint8_t num_labels, mpls_label_t *label)
+void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype,
+ uint8_t num_labels, const mpls_label_t *labels)
{
struct mpls_label_stack *nh_label;
int i;
@@ -402,13 +417,18 @@ void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t type,
if (num_labels == 0)
return;
- nexthop->nh_label_type = type;
+ /* Enforce limit on label stack size */
+ if (num_labels > MPLS_MAX_LABELS)
+ num_labels = MPLS_MAX_LABELS;
+
+ nexthop->nh_label_type = ltype;
+
nh_label = XCALLOC(MTYPE_NH_LABEL,
sizeof(struct mpls_label_stack)
+ num_labels * sizeof(mpls_label_t));
nh_label->num_labels = num_labels;
for (i = 0; i < num_labels; i++)
- nh_label->label[i] = *(label + i);
+ nh_label->label[i] = *(labels + i);
nexthop->nh_label = nh_label;
}
@@ -503,6 +523,7 @@ unsigned int nexthop_level(struct nexthop *nexthop)
uint32_t nexthop_hash_quick(const struct nexthop *nexthop)
{
uint32_t key = 0x45afe398;
+ uint32_t val;
key = jhash_3words(nexthop->type, nexthop->vrf_id,
nexthop->nh_label_type, key);
@@ -532,8 +553,12 @@ uint32_t nexthop_hash_quick(const struct nexthop *nexthop)
key = jhash_1word(nexthop->nh_label->label[i], key);
}
- key = jhash_2words(nexthop->ifindex,
- CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK),
+ val = 0;
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP))
+ val = (uint32_t)nexthop->backup_idx;
+
+ key = jhash_3words(nexthop->ifindex,
+ CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK), val,
key);
return key;
@@ -573,6 +598,7 @@ void nexthop_copy_no_recurse(struct nexthop *copy,
copy->type = nexthop->type;
copy->flags = nexthop->flags;
copy->weight = nexthop->weight;
+ copy->backup_idx = nexthop->backup_idx;
memcpy(&copy->gate, &nexthop->gate, sizeof(nexthop->gate));
memcpy(&copy->src, &nexthop->src, sizeof(nexthop->src));
memcpy(&copy->rmap_src, &nexthop->rmap_src, sizeof(nexthop->rmap_src));
diff --git a/lib/nexthop.h b/lib/nexthop.h
index 267f9f28ad..c4e88dd844 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -86,6 +86,8 @@ struct nexthop {
* active one
*/
#define NEXTHOP_FLAG_RNH_FILTERED (1 << 5) /* rmap filtered, used by rnh */
+#define NEXTHOP_FLAG_HAS_BACKUP (1 << 6) /* Backup nexthop index is set */
+
#define NEXTHOP_IS_ACTIVE(flags) \
(CHECK_FLAG(flags, NEXTHOP_FLAG_ACTIVE) \
&& !CHECK_FLAG(flags, NEXTHOP_FLAG_DUPLICATE))
@@ -116,15 +118,31 @@ struct nexthop {
/* Weight of the nexthop ( for unequal cost ECMP ) */
uint8_t weight;
+
+ /* Index of a corresponding backup nexthop in a backup list;
+ * only meaningful if the HAS_BACKUP flag is set.
+ */
+ uint8_t backup_idx;
};
+/* Backup index value is limited */
+#define NEXTHOP_BACKUP_IDX_MAX 255
+
+/* Utility to append one nexthop to another. */
+#define NEXTHOP_APPEND(to, new) \
+ do { \
+ (to)->next = (new); \
+ (new)->prev = (to); \
+ (new)->next = NULL; \
+ } while (0)
+
struct nexthop *nexthop_new(void);
void nexthop_free(struct nexthop *nexthop);
void nexthops_free(struct nexthop *nexthop);
-void nexthop_add_labels(struct nexthop *, enum lsp_types_t, uint8_t,
- mpls_label_t *);
+void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype,
+ uint8_t num_labels, const mpls_label_t *labels);
void nexthop_del_labels(struct nexthop *);
/*
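
In this model a primary nexthop does not point directly at its backup: it sets NEXTHOP_FLAG_HAS_BACKUP and records an index into a separate, order-significant backup list, and NEXTHOP_APPEND builds such ordered lists. A minimal sketch of both pieces (the helper functions are hypothetical, mirroring how sharpd and zapi_msg.c use these fields later in this diff):

    #include "zebra.h"
    #include "nexthop.h"

    /* Sketch: mark 'primary' as protected by the backup at index 0. */
    static void mark_primary_with_backup(struct nexthop *primary)
    {
            SET_FLAG(primary->flags, NEXTHOP_FLAG_HAS_BACKUP);
            primary->backup_idx = 0; /* slot in the associated backup list */
    }

    /* Sketch: append 'second' after 'first' in an ordered backup list. */
    static void append_backup(struct nexthop *first, struct nexthop *second)
    {
            NEXTHOP_APPEND(first, second);
    }
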
diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c
index d660428bcd..8c3bbbdcd4 100644
--- a/lib/nexthop_group.c
+++ b/lib/nexthop_group.c
@@ -43,8 +43,12 @@ struct nexthop_hold {
char *intf;
char *labels;
uint32_t weight;
+ int backup_idx; /* Index of backup nexthop, if >= 0 */
};
+/* Invalid/unset value for nexthop_hold's backup_idx */
+#define NHH_BACKUP_IDX_INVALID -1
+
struct nexthop_group_hooks {
void (*new)(const char *name);
void (*add_nexthop)(const struct nexthop_group_cmd *nhg,
@@ -225,6 +229,10 @@ void nexthop_group_copy(struct nexthop_group *to,
void nexthop_group_delete(struct nexthop_group **nhg)
{
+ /* OK to call with NULL group */
+ if ((*nhg) == NULL)
+ return;
+
if ((*nhg)->nexthop)
nexthops_free((*nhg)->nexthop);
@@ -567,11 +575,36 @@ DEFUN_NOSH(no_nexthop_group, no_nexthop_group_cmd, "no nexthop-group NHGNAME",
return CMD_SUCCESS;
}
+DEFPY(nexthop_group_backup, nexthop_group_backup_cmd,
+ "backup-group WORD$name",
+ "Specify a group name containing backup nexthops\n"
+ "The name of the backup group\n")
+{
+ VTY_DECLVAR_CONTEXT(nexthop_group_cmd, nhgc);
+
+ strlcpy(nhgc->backup_list_name, name, sizeof(nhgc->backup_list_name));
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(no_nexthop_group_backup, no_nexthop_group_backup_cmd,
+ "no backup-group [WORD$name]",
+ NO_STR
+ "Clear group name containing backup nexthops\n"
+ "The name of the backup group\n")
+{
+ VTY_DECLVAR_CONTEXT(nexthop_group_cmd, nhgc);
+
+ nhgc->backup_list_name[0] = 0;
+
+ return CMD_SUCCESS;
+}
+
static void nexthop_group_save_nhop(struct nexthop_group_cmd *nhgc,
const char *nhvrf_name,
const union sockunion *addr,
const char *intf, const char *labels,
- const uint32_t weight)
+ const uint32_t weight, int backup_idx)
{
struct nexthop_hold *nh;
@@ -588,6 +621,8 @@ static void nexthop_group_save_nhop(struct nexthop_group_cmd *nhgc,
nh->weight = weight;
+ nh->backup_idx = backup_idx;
+
listnode_add_sort(nhgc->nhg_list, nh);
}
@@ -629,7 +664,7 @@ static bool nexthop_group_parse_nexthop(struct nexthop *nhop,
const union sockunion *addr,
const char *intf, const char *name,
const char *labels, int *lbl_ret,
- uint32_t weight)
+ uint32_t weight, int backup_idx)
{
int ret = 0;
struct vrf *vrf;
@@ -688,6 +723,15 @@ static bool nexthop_group_parse_nexthop(struct nexthop *nhop,
nhop->weight = weight;
+ if (backup_idx != NHH_BACKUP_IDX_INVALID) {
+ /* Validate index value */
+ if (backup_idx > NEXTHOP_BACKUP_IDX_MAX)
+ return false;
+
+ SET_FLAG(nhop->flags, NEXTHOP_FLAG_HAS_BACKUP);
+ nhop->backup_idx = backup_idx;
+ }
+
return true;
}
@@ -699,7 +743,7 @@ static bool nexthop_group_parse_nhh(struct nexthop *nhop,
{
return (nexthop_group_parse_nexthop(nhop, nhh->addr, nhh->intf,
nhh->nhvrf_name, nhh->labels, NULL,
- nhh->weight));
+ nhh->weight, nhh->backup_idx));
}
DEFPY(ecmp_nexthops, ecmp_nexthops_cmd,
@@ -712,6 +756,7 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd,
nexthop-vrf NAME$vrf_name \
|label WORD \
|weight (1-255) \
+ |backup-idx$bi_str (0-254)$idx \
}]",
NO_STR
"Specify one of the nexthops in this ECMP group\n"
@@ -724,16 +769,23 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd,
"Specify label(s) for this nexthop\n"
"One or more labels in the range (16-1048575) separated by '/'\n"
"Weight to be used by the nexthop for purposes of ECMP\n"
- "Weight value to be used\n")
+ "Weight value to be used\n"
+ "Backup nexthop index in another group\n"
+ "Nexthop index value\n")
{
VTY_DECLVAR_CONTEXT(nexthop_group_cmd, nhgc);
struct nexthop nhop;
struct nexthop *nh;
int lbl_ret = 0;
bool legal;
+ int backup_idx = idx;
+ bool add_update = false;
+
+ if (bi_str == NULL)
+ backup_idx = NHH_BACKUP_IDX_INVALID;
legal = nexthop_group_parse_nexthop(&nhop, addr, intf, vrf_name, label,
- &lbl_ret, weight);
+ &lbl_ret, weight, backup_idx);
if (nhop.type == NEXTHOP_TYPE_IPV6
&& IN6_IS_ADDR_LINKLOCAL(&nhop.gate.ipv6)) {
@@ -765,19 +817,30 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd,
nh = nexthop_exists(&nhgc->nhg, &nhop);
- if (no) {
+ if (no || nh) {
+ /* Remove or replace cases */
+
+ /* Remove existing config */
nexthop_group_unsave_nhop(nhgc, vrf_name, addr, intf, label,
weight);
if (nh) {
+ /* Remove nexthop object */
_nexthop_del(&nhgc->nhg, nh);
if (nhg_hooks.del_nexthop)
nhg_hooks.del_nexthop(nhgc, nh);
nexthop_free(nh);
+ nh = NULL;
}
- } else if (!nh) {
- /* must be adding new nexthop since !no and !nexthop_exists */
+ }
+
+ add_update = !no;
+
+ if (add_update) {
+ /* Add or replace cases */
+
+ /* If valid config, add nexthop object */
if (legal) {
nh = nexthop_new();
@@ -785,8 +848,9 @@ DEFPY(ecmp_nexthops, ecmp_nexthops_cmd,
_nexthop_add(&nhgc->nhg.nexthop, nh);
}
+ /* Save config always */
nexthop_group_save_nhop(nhgc, vrf_name, addr, intf, label,
- weight);
+ weight, backup_idx);
if (legal && nhg_hooks.add_nexthop)
nhg_hooks.add_nexthop(nhgc, nh);
@@ -849,6 +913,9 @@ void nexthop_group_write_nexthop(struct vty *vty, struct nexthop *nh)
if (nh->weight)
vty_out(vty, " weight %u", nh->weight);
+ if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_HAS_BACKUP))
+ vty_out(vty, " backup-idx %d", nh->backup_idx);
+
vty_out(vty, "\n");
}
@@ -874,6 +941,9 @@ static void nexthop_group_write_nexthop_internal(struct vty *vty,
if (nh->weight)
vty_out(vty, " weight %u", nh->weight);
+ if (nh->backup_idx != NHH_BACKUP_IDX_INVALID)
+ vty_out(vty, " backup-idx %d", nh->backup_idx);
+
vty_out(vty, "\n");
}
@@ -887,6 +957,10 @@ static int nexthop_group_write(struct vty *vty)
vty_out(vty, "nexthop-group %s\n", nhgc->name);
+ if (nhgc->backup_list_name[0])
+ vty_out(vty, " backup-group %s\n",
+ nhgc->backup_list_name);
+
for (ALL_LIST_ELEMENTS_RO(nhgc->nhg_list, node, nh)) {
vty_out(vty, " ");
nexthop_group_write_nexthop_internal(vty, nh);
@@ -1067,6 +1141,8 @@ void nexthop_group_init(void (*new)(const char *name),
install_element(CONFIG_NODE, &no_nexthop_group_cmd);
install_default(NH_GROUP_NODE);
+ install_element(NH_GROUP_NODE, &nexthop_group_backup_cmd);
+ install_element(NH_GROUP_NODE, &no_nexthop_group_backup_cmd);
install_element(NH_GROUP_NODE, &ecmp_nexthops_cmd);
memset(&nhg_hooks, 0, sizeof(nhg_hooks));
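
Taken together, the new backup-group and backup-idx tokens let one nexthop-group name a second group whose entries act as its backups, referenced by position. An illustrative vtysh-style configuration (group names and addresses are invented for the example):

    nexthop-group primary-paths
     backup-group protect-paths
     nexthop 192.0.2.1 backup-idx 0
     nexthop 192.0.2.2 backup-idx 1
    !
    nexthop-group protect-paths
     nexthop 198.51.100.1
     nexthop 198.51.100.2
    !
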
diff --git a/lib/nexthop_group.h b/lib/nexthop_group.h
index f99a53f694..3a5a1299c1 100644
--- a/lib/nexthop_group.h
+++ b/lib/nexthop_group.h
@@ -57,6 +57,8 @@ void copy_nexthops(struct nexthop **tnh, const struct nexthop *nh,
uint32_t nexthop_group_hash_no_recurse(const struct nexthop_group *nhg);
uint32_t nexthop_group_hash(const struct nexthop_group *nhg);
void nexthop_group_mark_duplicates(struct nexthop_group *nhg);
+
+/* Add a nexthop to a list, enforcing the canonical sort order. */
void nexthop_group_add_sorted(struct nexthop_group *nhg,
struct nexthop *nexthop);
@@ -79,11 +81,16 @@ void nexthop_group_add_sorted(struct nexthop_group *nhg,
(nhop) = nexthop_next(nhop)
+#define NHGC_NAME_SIZE 80
+
struct nexthop_group_cmd {
RB_ENTRY(nexthop_group_cmd) nhgc_entry;
- char name[80];
+ char name[NHGC_NAME_SIZE];
+
+ /* Name of group containing backup nexthops (if set) */
+ char backup_list_name[NHGC_NAME_SIZE];
struct nexthop_group nhg;
diff --git a/lib/zclient.c b/lib/zclient.c
index 1ac0e49e13..d380267a70 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -904,6 +904,7 @@ int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh,
}
}
+ /* If present, set 'weight' flag before encoding flags */
if (api_nh->weight)
SET_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_WEIGHT);
@@ -948,6 +949,10 @@ int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh,
stream_put(s, &(api_nh->rmac),
sizeof(struct ethaddr));
+ /* Index of backup nexthop */
+ if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP))
+ stream_putc(s, api_nh->backup_idx);
+
done:
return ret;
}
@@ -1007,6 +1012,10 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api)
return -1;
}
+ /* We canonicalize the nexthops by sorting them; this allows
+ * zebra to resolve the list of nexthops to a nexthop-group
+ * more efficiently.
+ */
zapi_nexthop_group_sort(api->nexthops, api->nexthop_num);
stream_putw(s, api->nexthop_num);
@@ -1033,6 +1042,50 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api)
}
}
+ /* Backup nexthops */
+ if (CHECK_FLAG(api->message, ZAPI_MESSAGE_BACKUP_NEXTHOPS)) {
+ /* limit the number of nexthops if necessary */
+ if (api->backup_nexthop_num > MULTIPATH_NUM) {
+ char buf[PREFIX2STR_BUFFER];
+
+ prefix2str(&api->prefix, buf, sizeof(buf));
+ flog_err(
+ EC_LIB_ZAPI_ENCODE,
+ "%s: prefix %s: can't encode %u backup nexthops (maximum is %u)",
+ __func__, buf, api->backup_nexthop_num,
+ MULTIPATH_NUM);
+ return -1;
+ }
+
+ /* Note that we do not sort the list of backup nexthops -
+ * this list is treated as an array and indexed by each
+ * primary nexthop that is associated with a backup.
+ */
+
+ stream_putw(s, api->backup_nexthop_num);
+
+ for (i = 0; i < api->backup_nexthop_num; i++) {
+ api_nh = &api->backup_nexthops[i];
+
+ /* MPLS labels for BGP-LU or Segment Routing */
+ if (api_nh->label_num > MPLS_MAX_LABELS) {
+ char buf[PREFIX2STR_BUFFER];
+
+ prefix2str(&api->prefix, buf, sizeof(buf));
+
+ flog_err(EC_LIB_ZAPI_ENCODE,
+ "%s: prefix %s: backup: can't encode %u labels (maximum is %u)",
+ __func__, buf,
+ api_nh->label_num,
+ MPLS_MAX_LABELS);
+ return -1;
+ }
+
+ if (zapi_nexthop_encode(s, api_nh, api->flags) != 0)
+ return -1;
+ }
+ }
+
/* Attributes. */
if (CHECK_FLAG(api->message, ZAPI_MESSAGE_DISTANCE))
stream_putc(s, api->distance);
@@ -1108,6 +1161,10 @@ static int zapi_nexthop_decode(struct stream *s, struct zapi_nexthop *api_nh,
STREAM_GET(&(api_nh->rmac), s,
sizeof(struct ethaddr));
+ /* Backup nexthop index */
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP))
+ STREAM_GETC(s, api_nh->backup_idx);
+
/* Success */
ret = 0;
@@ -1214,6 +1271,24 @@ int zapi_route_decode(struct stream *s, struct zapi_route *api)
}
}
+ /* Backup nexthops. */
+ if (CHECK_FLAG(api->message, ZAPI_MESSAGE_BACKUP_NEXTHOPS)) {
+ STREAM_GETW(s, api->backup_nexthop_num);
+ if (api->backup_nexthop_num > MULTIPATH_NUM) {
+ flog_err(EC_LIB_ZAPI_ENCODE,
+ "%s: invalid number of backup nexthops (%u)",
+ __func__, api->backup_nexthop_num);
+ return -1;
+ }
+
+ for (i = 0; i < api->backup_nexthop_num; i++) {
+ api_nh = &api->backup_nexthops[i];
+
+ if (zapi_nexthop_decode(s, api_nh, api->flags) != 0)
+ return -1;
+ }
+ }
+
/* Attributes. */
if (CHECK_FLAG(api->message, ZAPI_MESSAGE_DISTANCE))
STREAM_GETC(s, api->distance);
@@ -1388,7 +1463,7 @@ stream_failure:
return false;
}
-struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh)
+struct nexthop *nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh)
{
struct nexthop *n = nexthop_new();
@@ -1405,6 +1480,11 @@ struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh)
znh->labels);
}
+ if (CHECK_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) {
+ SET_FLAG(n->flags, NEXTHOP_FLAG_HAS_BACKUP);
+ n->backup_idx = znh->backup_idx;
+ }
+
return n;
}
@@ -1420,10 +1500,16 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh,
znh->type = nh->type;
znh->vrf_id = nh->vrf_id;
+ znh->weight = nh->weight;
znh->ifindex = nh->ifindex;
znh->gate = nh->gate;
if (nh->nh_label && (nh->nh_label->num_labels > 0)) {
+
+ /* Validate */
+ if (nh->nh_label->num_labels > MPLS_MAX_LABELS)
+ return -1;
+
for (i = 0; i < nh->nh_label->num_labels; i++)
znh->labels[i] = nh->nh_label->label[i];
@@ -1431,10 +1517,31 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh,
SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_LABEL);
}
+ if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
+ SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP);
+ znh->backup_idx = nh->backup_idx;
+ }
+
return 0;
}
/*
+ * Wrapper that converts backup nexthop
+ */
+int zapi_backup_nexthop_from_nexthop(struct zapi_nexthop *znh,
+ const struct nexthop *nh)
+{
+ int ret;
+
+ /* Ensure that zapi flags are correct: backups don't have backups */
+ ret = zapi_nexthop_from_nexthop(znh, nh);
+ if (ret == 0)
+ UNSET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP);
+
+ return ret;
+}
+
+/*
* Decode the nexthop-tracking update message
*/
bool zapi_nexthop_update_decode(struct stream *s, struct zapi_route *nhr)
diff --git a/lib/zclient.h b/lib/zclient.h
index 4de42a35bb..e747809f16 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -341,6 +341,9 @@ struct zclient {
#define ZAPI_MESSAGE_TAG 0x08
#define ZAPI_MESSAGE_MTU 0x10
#define ZAPI_MESSAGE_SRCPFX 0x20
+/* Backup nexthops are present */
+#define ZAPI_MESSAGE_BACKUP_NEXTHOPS 0x40
+
/*
* This should only be used by a DAEMON that needs to communicate
* the table being used is not in the VRF. You must pass the
@@ -377,14 +380,21 @@ struct zapi_nexthop {
struct ethaddr rmac;
uint32_t weight;
+
+ /* Index of backup nexthop */
+ uint8_t backup_idx;
};
/*
- * ZAPI nexthop flags values
+ * ZAPI nexthop flags values - we're encoding a single octet
+ * initially, so ensure that the on-the-wire encoding continues
+ * to match the number of valid flags.
*/
+
#define ZAPI_NEXTHOP_FLAG_ONLINK 0x01
#define ZAPI_NEXTHOP_FLAG_LABEL 0x02
#define ZAPI_NEXTHOP_FLAG_WEIGHT 0x04
+#define ZAPI_NEXTHOP_FLAG_HAS_BACKUP 0x08 /* Nexthop has a backup */
/*
* Some of these data structures do not map easily to
@@ -448,6 +458,10 @@ struct zapi_route {
uint16_t nexthop_num;
struct zapi_nexthop nexthops[MULTIPATH_NUM];
+ /* Support backup routes for IP FRR, TI-LFA, traffic engineering */
+ uint16_t backup_nexthop_num;
+ struct zapi_nexthop backup_nexthops[MULTIPATH_NUM];
+
uint8_t distance;
uint32_t metric;
@@ -769,9 +783,12 @@ bool zapi_iptable_notify_decode(struct stream *s,
uint32_t *unique,
enum zapi_iptable_notify_owner *note);
-extern struct nexthop *nexthop_from_zapi_nexthop(struct zapi_nexthop *znh);
+extern struct nexthop *
+nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh);
int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh,
const struct nexthop *nh);
+int zapi_backup_nexthop_from_nexthop(struct zapi_nexthop *znh,
+ const struct nexthop *nh);
extern bool zapi_nexthop_update_decode(struct stream *s,
struct zapi_route *nhr);
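
A ZAPI client advertises backups by setting ZAPI_MESSAGE_BACKUP_NEXTHOPS, filling the backup_nexthops array, and having each protected primary nexthop reference a slot in that array via backup_idx. A minimal sketch of the pattern, modelled on the sharpd changes later in this diff (the function and the single-path layout are illustrative; gate/type/vrf setup is omitted):

    #include "zebra.h"
    #include "zclient.h"

    /* Sketch: one primary nexthop protected by one backup nexthop. */
    static void add_backup_to_route(struct zapi_route *api)
    {
            struct zapi_nexthop *api_nh = &api->nexthops[0];

            /* Primary nexthop references backup slot 0 */
            SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP);
            api_nh->backup_idx = 0;
            api->nexthop_num = 1;

            /* The backup list is an indexed array and is not sorted */
            SET_FLAG(api->message, ZAPI_MESSAGE_BACKUP_NEXTHOPS);
            api->backup_nexthop_num = 1;
            /* api->backup_nexthops[0] gate/type/vrf would be filled in here */
    }
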
diff --git a/sharpd/sharp_globals.h b/sharpd/sharp_globals.h
index 4e5c933667..8eba57f4dd 100644
--- a/sharpd/sharp_globals.h
+++ b/sharpd/sharp_globals.h
@@ -28,9 +28,11 @@ struct sharp_routes {
/* The original prefix for route installation */
struct prefix orig_prefix;
- /* The nexthop group we are using for installation */
+ /* The nexthop info we are using for installation */
struct nexthop nhop;
+ struct nexthop backup_nhop;
struct nexthop_group nhop_group;
+ struct nexthop_group backup_nhop_group;
uint32_t total_routes;
uint32_t installed_routes;
diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c
index aa3d85624b..8a787c8e83 100644
--- a/sharpd/sharp_vty.c
+++ b/sharpd/sharp_vty.c
@@ -162,7 +162,12 @@ DEFPY (install_routes_data_dump,
DEFPY (install_routes,
install_routes_cmd,
- "sharp install routes [vrf NAME$vrf_name] <A.B.C.D$start4|X:X::X:X$start6> <nexthop <A.B.C.D$nexthop4|X:X::X:X$nexthop6>|nexthop-group NHGNAME$nexthop_group> (1-1000000)$routes [instance (0-255)$instance] [repeat (2-1000)$rpt]",
+ "sharp install routes [vrf NAME$vrf_name]\
+ <A.B.C.D$start4|X:X::X:X$start6>\
+ <nexthop <A.B.C.D$nexthop4|X:X::X:X$nexthop6>|\
+ nexthop-group NHGNAME$nexthop_group>\
+ [backup$backup <A.B.C.D$backup_nexthop4|X:X::X:X$backup_nexthop6>] \
+ (1-1000000)$routes [instance (0-255)$instance] [repeat (2-1000)$rpt]",
"Sharp routing Protocol\n"
"install some routes\n"
"Routes to install\n"
@@ -175,6 +180,9 @@ DEFPY (install_routes,
"V6 Nexthop address to use\n"
"Nexthop-Group to use\n"
"The Name of the nexthop-group\n"
+ "Backup nexthop to use(Can be an IPv4 or IPv6 address)\n"
+ "Backup V4 Nexthop address to use\n"
+ "Backup V6 Nexthop address to use\n"
"How many to create\n"
"Instance to use\n"
"Instance\n"
@@ -197,6 +205,8 @@ DEFPY (install_routes,
memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix));
memset(&sg.r.nhop, 0, sizeof(sg.r.nhop));
memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group));
+ memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop));
+ memset(&sg.r.backup_nhop_group, 0, sizeof(sg.r.nhop_group));
if (start4.s_addr != 0) {
prefix.family = AF_INET;
@@ -219,6 +229,12 @@ DEFPY (install_routes,
return CMD_WARNING;
}
+ /* Explicit backup not available with named nexthop-group */
+ if (backup && nexthop_group) {
+ vty_out(vty, "%% Invalid: cannot specify both nexthop-group and backup\n");
+ return CMD_WARNING;
+ }
+
if (nexthop_group) {
struct nexthop_group_cmd *nhgc = nhgc_find(nexthop_group);
if (!nhgc) {
@@ -229,6 +245,22 @@ DEFPY (install_routes,
}
sg.r.nhop_group.nexthop = nhgc->nhg.nexthop;
+
+ /* Use group's backup nexthop info if present */
+ if (nhgc->backup_list_name[0]) {
+ struct nexthop_group_cmd *bnhgc =
+ nhgc_find(nhgc->backup_list_name);
+
+ if (!bnhgc) {
+ vty_out(vty, "%% Backup group %s not found for group %s\n",
+ nhgc->backup_list_name,
+ nhgc->name);
+ return CMD_WARNING;
+ }
+
+ sg.r.backup_nhop.vrf_id = vrf->vrf_id;
+ sg.r.backup_nhop_group.nexthop = bnhgc->nhg.nexthop;
+ }
} else {
if (nexthop4.s_addr != INADDR_ANY) {
sg.r.nhop.gate.ipv4 = nexthop4;
@@ -242,11 +274,30 @@ DEFPY (install_routes,
sg.r.nhop_group.nexthop = &sg.r.nhop;
}
+ /* Use single backup nexthop if specified */
+ if (backup) {
+ /* Set flag and index in primary nexthop */
+ SET_FLAG(sg.r.nhop.flags, NEXTHOP_FLAG_HAS_BACKUP);
+ sg.r.nhop.backup_idx = 0;
+
+ if (backup_nexthop4.s_addr != INADDR_ANY) {
+ sg.r.backup_nhop.gate.ipv4 = backup_nexthop4;
+ sg.r.backup_nhop.type = NEXTHOP_TYPE_IPV4;
+ } else {
+ sg.r.backup_nhop.gate.ipv6 = backup_nexthop6;
+ sg.r.backup_nhop.type = NEXTHOP_TYPE_IPV6;
+ }
+
+ sg.r.backup_nhop.vrf_id = vrf->vrf_id;
+ sg.r.backup_nhop_group.nexthop = &sg.r.backup_nhop;
+ }
+
sg.r.inst = instance;
sg.r.vrf_id = vrf->vrf_id;
rts = routes;
- sharp_install_routes_helper(&prefix, sg.r.vrf_id,
- sg.r.inst, &sg.r.nhop_group, rts);
+ sharp_install_routes_helper(&prefix, sg.r.vrf_id, sg.r.inst,
+ &sg.r.nhop_group, &sg.r.backup_nhop_group,
+ rts);
return CMD_SUCCESS;
}
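
With the extended command, a single backup nexthop can be attached to every generated route for testing; for example (addresses illustrative):

    sharp install routes 10.0.0.0 nexthop 192.0.2.1 backup 192.0.2.2 100
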
diff --git a/sharpd/sharp_zebra.c b/sharpd/sharp_zebra.c
index 258a0a06dd..e1bd6f5722 100644
--- a/sharpd/sharp_zebra.c
+++ b/sharpd/sharp_zebra.c
@@ -143,7 +143,9 @@ int sharp_install_lsps_helper(bool install_p, const struct prefix *p,
}
void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id,
- uint8_t instance, struct nexthop_group *nhg,
+ uint8_t instance,
+ const struct nexthop_group *nhg,
+ const struct nexthop_group *backup_nhg,
uint32_t routes)
{
uint32_t temp, i;
@@ -157,9 +159,13 @@ void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id,
} else
temp = ntohl(p->u.val32[3]);
+ /* Only use backup route/nexthops if present */
+ if (backup_nhg && (backup_nhg->nexthop == NULL))
+ backup_nhg = NULL;
+
monotime(&sg.r.t_start);
for (i = 0; i < routes; i++) {
- route_add(p, vrf_id, (uint8_t)instance, nhg);
+ route_add(p, vrf_id, (uint8_t)instance, nhg, backup_nhg);
if (v4)
p->u.prefix4.s_addr = htonl(++temp);
else
@@ -209,6 +215,7 @@ static void handle_repeated(bool installed)
sg.r.installed_routes = 0;
sharp_install_routes_helper(&p, sg.r.vrf_id, sg.r.inst,
&sg.r.nhop_group,
+ &sg.r.backup_nhop_group,
sg.r.total_routes);
}
}
@@ -276,8 +283,9 @@ void vrf_label_add(vrf_id_t vrf_id, afi_t afi, mpls_label_t label)
zclient_send_vrf_label(zclient, vrf_id, afi, label, ZEBRA_LSP_SHARP);
}
-void route_add(struct prefix *p, vrf_id_t vrf_id,
- uint8_t instance, struct nexthop_group *nhg)
+void route_add(const struct prefix *p, vrf_id_t vrf_id,
+ uint8_t instance, const struct nexthop_group *nhg,
+ const struct nexthop_group *backup_nhg)
{
struct zapi_route api;
struct zapi_nexthop *api_nh;
@@ -298,10 +306,27 @@ void route_add(struct prefix *p, vrf_id_t vrf_id,
api_nh = &api.nexthops[i];
zapi_nexthop_from_nexthop(api_nh, nh);
+
i++;
}
api.nexthop_num = i;
+ /* Include backup nexthops, if present */
+ if (backup_nhg && backup_nhg->nexthop) {
+ SET_FLAG(api.message, ZAPI_MESSAGE_BACKUP_NEXTHOPS);
+
+ i = 0;
+ for (ALL_NEXTHOPS_PTR(backup_nhg, nh)) {
+ api_nh = &api.backup_nexthops[i];
+
+ zapi_backup_nexthop_from_nexthop(api_nh, nh);
+
+ i++;
+ }
+
+ api.backup_nexthop_num = i;
+ }
+
zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
}
diff --git a/sharpd/sharp_zebra.h b/sharpd/sharp_zebra.h
index c995d557af..926bff676b 100644
--- a/sharpd/sharp_zebra.h
+++ b/sharpd/sharp_zebra.h
@@ -25,15 +25,17 @@
extern void sharp_zebra_init(void);
extern void vrf_label_add(vrf_id_t vrf_id, afi_t afi, mpls_label_t label);
-extern void route_add(struct prefix *p, vrf_id_t, uint8_t instance,
- struct nexthop_group *nhg);
+extern void route_add(const struct prefix *p, vrf_id_t, uint8_t instance,
+ const struct nexthop_group *nhg,
+ const struct nexthop_group *backup_nhg);
extern void route_delete(struct prefix *p, vrf_id_t vrf_id, uint8_t instance);
extern void sharp_zebra_nexthop_watch(struct prefix *p, vrf_id_t vrf_id,
bool import, bool watch, bool connected);
extern void sharp_install_routes_helper(struct prefix *p, vrf_id_t vrf_id,
uint8_t instance,
- struct nexthop_group *nhg,
+ const struct nexthop_group *nhg,
+ const struct nexthop_group *backup_nhg,
uint32_t routes);
extern void sharp_remove_routes_helper(struct prefix *p, vrf_id_t vrf_id,
uint8_t instance, uint32_t routes);
diff --git a/zebra/rib.h b/zebra/rib.h
index 931c97638e..3717a12814 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -107,7 +107,7 @@ struct route_entry {
/* Uptime. */
time_t uptime;
- /* Type fo this route. */
+ /* Type of this route. */
int type;
/* VRF identifier. */
@@ -347,10 +347,16 @@ extern int rib_add(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
struct prefix_ipv6 *src_p, const struct nexthop *nh,
uint32_t nhe_id, uint32_t table_id, uint32_t metric,
uint32_t mtu, uint8_t distance, route_tag_t tag);
-
+/*
+ * Multipath route apis.
+ */
extern int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
struct prefix_ipv6 *src_p, struct route_entry *re,
struct nexthop_group *ng);
+extern int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p,
+ struct prefix_ipv6 *src_p,
+ struct route_entry *re,
+ struct nhg_hash_entry *nhe);
extern void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
unsigned short instance, int flags, struct prefix *p,
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 5a1ae2c217..84c9bd098e 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -1982,6 +1982,12 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx)
addattr32(&req.n, req_size, NHA_ID, id);
if (cmd == RTM_NEWNEXTHOP) {
+ /*
+ * We distinguish between a "group", which is a collection
+ * of ids, and a singleton nexthop with an id. The
+ * group is installed as an id that just refers to a list of
+ * other ids.
+ */
if (dplane_ctx_get_nhe_nh_grp_count(ctx))
_netlink_nexthop_build_group(
&req.n, req_size, id,
@@ -2068,14 +2074,13 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx)
}
}
- nexthop_done:
- if (IS_ZEBRA_DEBUG_KERNEL) {
- char buf[NEXTHOP_STRLEN];
+nexthop_done:
+
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: ID (%u): %pNHv (%u) %s ",
+ __func__, id, nh, nh->vrf_id,
+ label_buf);
- snprintfrr(buf, sizeof(buf), "%pNHv", nh);
- zlog_debug("%s: ID (%u): %s (%u) %s ", __func__,
- id, buf, nh->vrf_id, label_buf);
- }
}
req.nhm.nh_protocol = zebra2proto(dplane_ctx_get_nhe_type(ctx));
@@ -2103,43 +2108,19 @@ static int netlink_nexthop(int cmd, struct zebra_dplane_ctx *ctx)
*/
enum zebra_dplane_result kernel_nexthop_update(struct zebra_dplane_ctx *ctx)
{
+ enum dplane_op_e op;
int cmd = 0;
int ret = 0;
- switch (dplane_ctx_get_op(ctx)) {
- case DPLANE_OP_NH_DELETE:
- cmd = RTM_DELNEXTHOP;
- break;
- case DPLANE_OP_NH_INSTALL:
- case DPLANE_OP_NH_UPDATE:
+ op = dplane_ctx_get_op(ctx);
+ if (op == DPLANE_OP_NH_INSTALL || op == DPLANE_OP_NH_UPDATE)
cmd = RTM_NEWNEXTHOP;
- break;
- case DPLANE_OP_ROUTE_INSTALL:
- case DPLANE_OP_ROUTE_UPDATE:
- case DPLANE_OP_ROUTE_DELETE:
- case DPLANE_OP_ROUTE_NOTIFY:
- case DPLANE_OP_LSP_INSTALL:
- case DPLANE_OP_LSP_UPDATE:
- case DPLANE_OP_LSP_DELETE:
- case DPLANE_OP_LSP_NOTIFY:
- case DPLANE_OP_PW_INSTALL:
- case DPLANE_OP_PW_UNINSTALL:
- case DPLANE_OP_SYS_ROUTE_ADD:
- case DPLANE_OP_SYS_ROUTE_DELETE:
- case DPLANE_OP_ADDR_INSTALL:
- case DPLANE_OP_ADDR_UNINSTALL:
- case DPLANE_OP_MAC_INSTALL:
- case DPLANE_OP_MAC_DELETE:
- case DPLANE_OP_NEIGH_INSTALL:
- case DPLANE_OP_NEIGH_UPDATE:
- case DPLANE_OP_NEIGH_DELETE:
- case DPLANE_OP_VTEP_ADD:
- case DPLANE_OP_VTEP_DELETE:
- case DPLANE_OP_NONE:
- flog_err(
- EC_ZEBRA_NHG_FIB_UPDATE,
- "Context received for kernel nexthop update with incorrect OP code (%u)",
- dplane_ctx_get_op(ctx));
+ else if (op == DPLANE_OP_NH_DELETE)
+ cmd = RTM_DELNEXTHOP;
+ else {
+ flog_err(EC_ZEBRA_NHG_FIB_UPDATE,
+ "Context received for kernel nexthop update with incorrect OP code (%u)",
+ op);
return ZEBRA_DPLANE_REQUEST_FAILURE;
}
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 2190bfab4f..aabe533ee6 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -1413,6 +1413,132 @@ void zserv_nexthop_num_warn(const char *caller, const struct prefix *p,
}
}
+/*
+ * Create a new nexthop based on a zapi nexthop.
+ */
+static struct nexthop *nexthop_from_zapi(struct route_entry *re,
+ const struct zapi_nexthop *api_nh,
+ const struct zapi_route *api)
+{
+ struct nexthop *nexthop = NULL;
+ struct ipaddr vtep_ip;
+ struct interface *ifp;
+ char nhbuf[INET6_ADDRSTRLEN] = "";
+
+ switch (api_nh->type) {
+ case NEXTHOP_TYPE_IFINDEX:
+ nexthop = nexthop_from_ifindex(api_nh->ifindex, api_nh->vrf_id);
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ if (IS_ZEBRA_DEBUG_RECV) {
+ inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf,
+ sizeof(nhbuf));
+ zlog_debug("%s: nh=%s, vrf_id=%d", __func__,
+ nhbuf, api_nh->vrf_id);
+ }
+ nexthop = nexthop_from_ipv4(&api_nh->gate.ipv4, NULL,
+ api_nh->vrf_id);
+ break;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ if (IS_ZEBRA_DEBUG_RECV) {
+ inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf,
+ sizeof(nhbuf));
+ zlog_debug("%s: nh=%s, vrf_id=%d, ifindex=%d",
+ __func__, nhbuf, api_nh->vrf_id,
+ api_nh->ifindex);
+ }
+
+ nexthop = nexthop_from_ipv4_ifindex(
+ &api_nh->gate.ipv4, NULL, api_nh->ifindex,
+ api_nh->vrf_id);
+
+ ifp = if_lookup_by_index(api_nh->ifindex, api_nh->vrf_id);
+ if (ifp && connected_is_unnumbered(ifp))
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK);
+
+ /* Special handling for IPv4 routes sourced from EVPN:
+ * the nexthop and associated MAC need to be installed.
+ */
+ if (CHECK_FLAG(api->flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ memset(&vtep_ip, 0, sizeof(struct ipaddr));
+ vtep_ip.ipa_type = IPADDR_V4;
+ memcpy(&(vtep_ip.ipaddr_v4), &(api_nh->gate.ipv4),
+ sizeof(struct in_addr));
+ zebra_vxlan_evpn_vrf_route_add(
+ api_nh->vrf_id, &api_nh->rmac,
+ &vtep_ip, &api->prefix);
+ }
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ if (IS_ZEBRA_DEBUG_RECV) {
+ inet_ntop(AF_INET6, &api_nh->gate.ipv6, nhbuf,
+ sizeof(nhbuf));
+ zlog_debug("%s: nh=%s, vrf_id=%d", __func__,
+ nhbuf, api_nh->vrf_id);
+ }
+ nexthop = nexthop_from_ipv6(&api_nh->gate.ipv6, api_nh->vrf_id);
+ break;
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ if (IS_ZEBRA_DEBUG_RECV) {
+ inet_ntop(AF_INET6, &api_nh->gate.ipv6, nhbuf,
+ sizeof(nhbuf));
+ zlog_debug("%s: nh=%s, vrf_id=%d, ifindex=%d",
+ __func__, nhbuf, api_nh->vrf_id,
+ api_nh->ifindex);
+ }
+ nexthop = nexthop_from_ipv6_ifindex(&api_nh->gate.ipv6,
+ api_nh->ifindex,
+ api_nh->vrf_id);
+
+ /* Special handling for IPv6 routes sourced from EVPN:
+ * the nexthop and associated MAC need to be installed.
+ */
+ if (CHECK_FLAG(api->flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ memset(&vtep_ip, 0, sizeof(struct ipaddr));
+ vtep_ip.ipa_type = IPADDR_V6;
+ memcpy(&vtep_ip.ipaddr_v6, &(api_nh->gate.ipv6),
+ sizeof(struct in6_addr));
+ zebra_vxlan_evpn_vrf_route_add(
+ api_nh->vrf_id, &api_nh->rmac,
+ &vtep_ip, &api->prefix);
+ }
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: nh blackhole %d",
+ __func__, api_nh->bh_type);
+
+ nexthop = nexthop_from_blackhole(api_nh->bh_type);
+ break;
+ }
+
+ /* Return early if we couldn't process the zapi nexthop */
+ if (nexthop == NULL) {
+ goto done;
+ }
+
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK))
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK);
+
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_WEIGHT))
+ nexthop->weight = api_nh->weight;
+
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) {
+ if (api_nh->backup_idx < api->backup_nexthop_num) {
+ /* Capture backup info */
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP);
+ nexthop->backup_idx = api_nh->backup_idx;
+ } else {
+ /* Warn about invalid backup index */
+ if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_EVENT)
+ zlog_debug("%s: invalid backup nh idx %d",
+ __func__, api_nh->backup_idx);
+ }
+ }
+done:
+ return nexthop;
+}
+
static void zread_route_add(ZAPI_HANDLER_ARGS)
{
struct stream *s;
@@ -1421,12 +1547,15 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
afi_t afi;
struct prefix_ipv6 *src_p = NULL;
struct route_entry *re;
- struct nexthop *nexthop = NULL;
+ struct nexthop *nexthop = NULL, *last_nh;
struct nexthop_group *ng = NULL;
+ struct nhg_backup_info *bnhg = NULL;
int i, ret;
vrf_id_t vrf_id;
- struct ipaddr vtep_ip;
- struct interface *ifp;
+ struct nhg_hash_entry nhe;
+ enum lsp_types_t label_type;
+ char nhbuf[NEXTHOP_STRLEN];
+ char labelbuf[MPLS_LABEL_STRLEN];
s = msg;
if (zapi_route_decode(s, &api) < 0) {
@@ -1440,8 +1569,8 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
char buf_prefix[PREFIX_STRLEN];
prefix2str(&api.prefix, buf_prefix, sizeof(buf_prefix));
- zlog_debug("%s: p=%s, flags=0x%x",
- __func__, buf_prefix, api.flags);
+ zlog_debug("%s: p=%s, msg flags=0x%x, flags=0x%x",
+ __func__, buf_prefix, (int)api.message, api.flags);
}
/* Allocate new route. */
@@ -1469,6 +1598,15 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
return;
}
+ /* Report misuse of the backup flag */
+ if (CHECK_FLAG(api.message, ZAPI_MESSAGE_BACKUP_NEXTHOPS) &&
+ api.backup_nexthop_num == 0) {
+ if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_EVENT)
+ zlog_debug("%s: client %s: BACKUP flag set but no backup nexthops, prefix %pFX",
+ __func__,
+ zebra_route_string(client->proto), &api.prefix);
+ }
+
/* Use temporary list of nexthops */
ng = nexthop_group_new();
@@ -1479,130 +1617,138 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
*/
for (i = 0; i < api.nexthop_num; i++) {
api_nh = &api.nexthops[i];
- ifindex_t ifindex = 0;
- nexthop = NULL;
+ /* Convert zapi nexthop */
+ nexthop = nexthop_from_zapi(re, api_nh, &api);
+ if (!nexthop) {
+ flog_warn(
+ EC_ZEBRA_NEXTHOP_CREATION_FAILED,
+ "%s: Nexthops Specified: %d but we failed to properly create one",
+ __func__, api.nexthop_num);
+ nexthop_group_delete(&ng);
+ XFREE(MTYPE_RE, re);
+ return;
+ }
- if (IS_ZEBRA_DEBUG_RECV)
- zlog_debug("nh type %d", api_nh->type);
+ /* MPLS labels for BGP-LU or Segment Routing */
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_LABEL)
+ && api_nh->type != NEXTHOP_TYPE_IFINDEX
+ && api_nh->type != NEXTHOP_TYPE_BLACKHOLE
+ && api_nh->label_num > 0) {
- switch (api_nh->type) {
- case NEXTHOP_TYPE_IFINDEX:
- nexthop = nexthop_from_ifindex(api_nh->ifindex,
- api_nh->vrf_id);
- break;
- case NEXTHOP_TYPE_IPV4:
- if (IS_ZEBRA_DEBUG_RECV) {
- char nhbuf[INET6_ADDRSTRLEN] = {0};
+ label_type = lsp_type_from_re_type(client->proto);
+ nexthop_add_labels(nexthop, label_type,
+ api_nh->label_num,
+ &api_nh->labels[0]);
+ }
- inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf,
- INET6_ADDRSTRLEN);
- zlog_debug("%s: nh=%s, vrf_id=%d", __func__,
- nhbuf, api_nh->vrf_id);
- }
- nexthop = nexthop_from_ipv4(&api_nh->gate.ipv4,
- NULL, api_nh->vrf_id);
- break;
- case NEXTHOP_TYPE_IPV4_IFINDEX:
+ if (IS_ZEBRA_DEBUG_RECV) {
+ labelbuf[0] = '\0';
+ nhbuf[0] = '\0';
- memset(&vtep_ip, 0, sizeof(struct ipaddr));
- ifindex = api_nh->ifindex;
- if (IS_ZEBRA_DEBUG_RECV) {
- char nhbuf[INET6_ADDRSTRLEN] = {0};
+ nexthop2str(nexthop, nhbuf, sizeof(nhbuf));
- inet_ntop(AF_INET, &api_nh->gate.ipv4, nhbuf,
- INET6_ADDRSTRLEN);
- zlog_debug(
- "%s: nh=%s, vrf_id=%d (re->vrf_id=%d), ifindex=%d",
- __func__, nhbuf, api_nh->vrf_id,
- re->vrf_id, ifindex);
- }
- nexthop = nexthop_from_ipv4_ifindex(
- &api_nh->gate.ipv4, NULL, ifindex,
- api_nh->vrf_id);
-
- ifp = if_lookup_by_index(ifindex, api_nh->vrf_id);
- if (ifp && connected_is_unnumbered(ifp))
- SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK);
- /* Special handling for IPv4 routes sourced from EVPN:
- * the nexthop and associated MAC need to be installed.
- */
- if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
- vtep_ip.ipa_type = IPADDR_V4;
- memcpy(&(vtep_ip.ipaddr_v4),
- &(api_nh->gate.ipv4),
- sizeof(struct in_addr));
- zebra_vxlan_evpn_vrf_route_add(
- api_nh->vrf_id, &api_nh->rmac,
- &vtep_ip, &api.prefix);
- }
- break;
- case NEXTHOP_TYPE_IPV6:
- nexthop = nexthop_from_ipv6(&api_nh->gate.ipv6,
- api_nh->vrf_id);
- break;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- memset(&vtep_ip, 0, sizeof(struct ipaddr));
- ifindex = api_nh->ifindex;
- nexthop = nexthop_from_ipv6_ifindex(&api_nh->gate.ipv6,
- ifindex,
- api_nh->vrf_id);
-
- /* Special handling for IPv6 routes sourced from EVPN:
- * the nexthop and associated MAC need to be installed.
- */
- if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
- vtep_ip.ipa_type = IPADDR_V6;
- memcpy(&vtep_ip.ipaddr_v6, &(api_nh->gate.ipv6),
- sizeof(struct in6_addr));
- zebra_vxlan_evpn_vrf_route_add(
- api_nh->vrf_id, &api_nh->rmac,
- &vtep_ip, &api.prefix);
+ if (nexthop->nh_label &&
+ nexthop->nh_label->num_labels > 0) {
+ mpls_label2str(nexthop->nh_label->num_labels,
+ nexthop->nh_label->label,
+ labelbuf, sizeof(labelbuf),
+ false);
}
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- nexthop = nexthop_from_blackhole(api_nh->bh_type);
- break;
+
+ zlog_debug("%s: nh=%s, vrf_id=%d %s",
+ __func__, nhbuf, api_nh->vrf_id, labelbuf);
}
+ /* Add new nexthop to temporary list. This list is
+ * canonicalized - sorted - so that it can be hashed later
+ * in route processing. We expect that the sender has sent
+ * the list sorted, and the zapi client api attempts to enforce
+ * that, so this should be inexpensive - but it is necessary
+ * to support shared nexthop-groups.
+ */
+ nexthop_group_add_sorted(ng, nexthop);
+ }
+
+ /* Allocate temporary list of backup nexthops, if necessary */
+ if (api.backup_nexthop_num > 0) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: adding %d backup nexthops",
+ __func__, api.backup_nexthop_num);
+
+ bnhg = zebra_nhg_backup_alloc();
+ nexthop = NULL;
+ last_nh = NULL;
+ }
+
+ /* Copy backup nexthops also, if present */
+ for (i = 0; i < api.backup_nexthop_num; i++) {
+ api_nh = &api.backup_nexthops[i];
+
+ /* Convert zapi backup nexthop */
+ nexthop = nexthop_from_zapi(re, api_nh, &api);
if (!nexthop) {
flog_warn(
EC_ZEBRA_NEXTHOP_CREATION_FAILED,
- "%s: Nexthops Specified: %d but we failed to properly create one",
- __func__, api.nexthop_num);
+ "%s: Backup Nexthops Specified: %d but we failed to properly create one",
+ __func__, api.backup_nexthop_num);
nexthop_group_delete(&ng);
+ zebra_nhg_backup_free(&bnhg);
XFREE(MTYPE_RE, re);
return;
}
- if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK))
- SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK);
-
- if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_WEIGHT))
- nexthop->weight = api_nh->weight;
+ /* Backup nexthops can't have backups; that's not valid. */
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
+ if (IS_ZEBRA_DEBUG_RECV) {
+ nexthop2str(nexthop, nhbuf, sizeof(nhbuf));
+ zlog_debug("%s: backup nh %s with BACKUP flag!",
+ __func__, nhbuf);
+ }
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP);
+ nexthop->backup_idx = 0;
+ }
/* MPLS labels for BGP-LU or Segment Routing */
if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_LABEL)
&& api_nh->type != NEXTHOP_TYPE_IFINDEX
- && api_nh->type != NEXTHOP_TYPE_BLACKHOLE) {
- enum lsp_types_t label_type;
+ && api_nh->type != NEXTHOP_TYPE_BLACKHOLE
+ && api_nh->label_num > 0) {
label_type = lsp_type_from_re_type(client->proto);
-
- if (IS_ZEBRA_DEBUG_RECV) {
- zlog_debug(
- "%s: adding %d labels of type %d (1st=%u)",
- __func__, api_nh->label_num, label_type,
- api_nh->labels[0]);
- }
-
nexthop_add_labels(nexthop, label_type,
api_nh->label_num,
&api_nh->labels[0]);
}
- /* Add new nexthop to temporary list */
- nexthop_group_add_sorted(ng, nexthop);
+ if (IS_ZEBRA_DEBUG_RECV) {
+ labelbuf[0] = '\0';
+ nhbuf[0] = '\0';
+
+ nexthop2str(nexthop, nhbuf, sizeof(nhbuf));
+
+ if (nexthop->nh_label &&
+ nexthop->nh_label->num_labels > 0) {
+ mpls_label2str(nexthop->nh_label->num_labels,
+ nexthop->nh_label->label,
+ labelbuf, sizeof(labelbuf),
+ false);
+ }
+
+ zlog_debug("%s: backup nh=%s, vrf_id=%d %s",
+ __func__, nhbuf, api_nh->vrf_id, labelbuf);
+ }
+
+ /* Note that the order of the backup nexthops is significant,
+ * so we don't sort this list as we do the primary nexthops,
+ * we just append.
+ */
+ if (last_nh)
+ NEXTHOP_APPEND(last_nh, nexthop);
+ else
+ bnhg->nhe->nhg.nexthop = nexthop;
+
+ last_nh = nexthop;
}
if (CHECK_FLAG(api.message, ZAPI_MESSAGE_DISTANCE))
@@ -1620,6 +1766,7 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
"%s: Received SRC Prefix but afi is not v6",
__func__);
nexthop_group_delete(&ng);
+ zebra_nhg_backup_free(&bnhg);
XFREE(MTYPE_RE, re);
return;
}
@@ -1631,10 +1778,28 @@ static void zread_route_add(ZAPI_HANDLER_ARGS)
"%s: Received safi: %d but we can only accept UNICAST or MULTICAST",
__func__, api.safi);
nexthop_group_delete(&ng);
+ zebra_nhg_backup_free(&bnhg);
XFREE(MTYPE_RE, re);
return;
}
- ret = rib_add_multipath(afi, api.safi, &api.prefix, src_p, re, ng);
+
+ /* Include backup info with the route. We use a temporary nhe here;
+ * if this is a new/unknown nhe, a new copy will be allocated
+ * and stored.
+ */
+ zebra_nhe_init(&nhe, afi, ng->nexthop);
+ nhe.nhg.nexthop = ng->nexthop;
+ nhe.backup_info = bnhg;
+ ret = rib_add_multipath_nhe(afi, api.safi, &api.prefix, src_p,
+ re, &nhe);
+
+ /* At this point, these allocations are not needed: 're' has been
+ * retained or freed, and if 're' still exists, it is using
+ * a reference to a shared group object.
+ */
+ nexthop_group_delete(&ng);
+ if (bnhg)
+ zebra_nhg_backup_free(&bnhg);
/* Stats */
switch (api.prefix.family) {
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 459d2bc620..a2365ee76b 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -113,10 +113,15 @@ struct dplane_route_info {
struct dplane_nexthop_info nhe;
/* Nexthops */
+ uint32_t zd_nhg_id;
struct nexthop_group zd_ng;
+ /* Backup nexthops (if present) */
+ struct nexthop_group backup_ng;
+
/* "Previous" nexthops, used only in route updates without netlink */
struct nexthop_group zd_old_ng;
+ struct nexthop_group old_backup_ng;
/* TODO -- use fixed array of nexthops, to avoid mallocs? */
@@ -472,6 +477,14 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
(*pctx)->u.rinfo.zd_ng.nexthop = NULL;
}
+ /* Free backup info also (if present) */
+ if ((*pctx)->u.rinfo.backup_ng.nexthop) {
+ /* This deals with recursive nexthops too */
+ nexthops_free((*pctx)->u.rinfo.backup_ng.nexthop);
+
+ (*pctx)->u.rinfo.backup_ng.nexthop = NULL;
+ }
+
if ((*pctx)->u.rinfo.zd_old_ng.nexthop) {
/* This deals with recursive nexthops too */
nexthops_free((*pctx)->u.rinfo.zd_old_ng.nexthop);
@@ -479,6 +492,13 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
(*pctx)->u.rinfo.zd_old_ng.nexthop = NULL;
}
+ if ((*pctx)->u.rinfo.old_backup_ng.nexthop) {
+ /* This deals with recursive nexthops too */
+ nexthops_free((*pctx)->u.rinfo.old_backup_ng.nexthop);
+
+ (*pctx)->u.rinfo.old_backup_ng.nexthop = NULL;
+ }
+
break;
case DPLANE_OP_NH_INSTALL:
@@ -1038,6 +1058,12 @@ void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
nexthop_group_copy_nh_sorted(&(ctx->u.rinfo.zd_ng), nh);
}
+uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return ctx->u.rinfo.zd_nhg_id;
+}
+
const struct nexthop_group *dplane_ctx_get_ng(
const struct zebra_dplane_ctx *ctx)
{
@@ -1046,14 +1072,30 @@ const struct nexthop_group *dplane_ctx_get_ng(
return &(ctx->u.rinfo.zd_ng);
}
-const struct nexthop_group *dplane_ctx_get_old_ng(
- const struct zebra_dplane_ctx *ctx)
+const struct nexthop_group *
+dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return &(ctx->u.rinfo.backup_ng);
+}
+
+const struct nexthop_group *
+dplane_ctx_get_old_ng(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
return &(ctx->u.rinfo.zd_old_ng);
}
+const struct nexthop_group *
+dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return &(ctx->u.rinfo.old_backup_ng);
+}
+
const struct zebra_dplane_info *dplane_ctx_get_ns(
const struct zebra_dplane_ctx *ctx)
{
@@ -1514,6 +1556,13 @@ static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
/* Copy nexthops; recursive info is included too */
copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop),
re->nhe->nhg.nexthop, NULL);
+ ctx->u.rinfo.zd_nhg_id = re->nhe->id;
+
+ /* Copy backup nexthop info, if present */
+ if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
+ copy_nexthops(&(ctx->u.rinfo.backup_ng.nexthop),
+ re->nhe->backup_info->nhe->nhg.nexthop, NULL);
+ }
/* Ensure that the dplane nexthops' flags are clear. */
for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop))
@@ -1532,9 +1581,8 @@ static int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx,
dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_ROUTE_UPDATE));
#ifdef HAVE_NETLINK
- if (re->nhe_id) {
- struct nhg_hash_entry *nhe =
- zebra_nhg_resolve(zebra_nhg_lookup_id(re->nhe_id));
+ if (re->nhe) {
+ struct nhg_hash_entry *nhe = zebra_nhg_resolve(re->nhe);
ctx->u.rinfo.nhe.id = nhe->id;
/*
@@ -1581,7 +1629,6 @@ static int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx,
{
struct zebra_vrf *zvrf = NULL;
struct zebra_ns *zns = NULL;
-
int ret = EINVAL;
if (!ctx || !nhe)
@@ -1850,6 +1897,17 @@ dplane_route_update_internal(struct route_node *rn,
*/
copy_nexthops(&(ctx->u.rinfo.zd_old_ng.nexthop),
old_re->nhe->nhg.nexthop, NULL);
+
+ if (zebra_nhg_get_backup_nhg(old_re->nhe) != NULL) {
+ struct nexthop_group *nhg;
+ struct nexthop **nh;
+
+ nhg = zebra_nhg_get_backup_nhg(old_re->nhe);
+ nh = &(ctx->u.rinfo.old_backup_ng.nexthop);
+
+ if (nhg->nexthop)
+ copy_nexthops(nh, nhg->nexthop, NULL);
+ }
#endif /* !HAVE_NETLINK */
}
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index c0b04e71b0..9ce4df197c 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -270,11 +270,19 @@ void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance);
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh);
+
+uint32_t dplane_ctx_get_nhg_id(const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *dplane_ctx_get_ng(
const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *dplane_ctx_get_old_ng(
const struct zebra_dplane_ctx *ctx);
+/* Backup nexthop information (list of nexthops) if present. */
+const struct nexthop_group *
+dplane_ctx_get_backup_ng(const struct zebra_dplane_ctx *ctx);
+const struct nexthop_group *
+dplane_ctx_get_old_backup_ng(const struct zebra_dplane_ctx *ctx);
+
/* Accessors for nexthop information */
uint32_t dplane_ctx_get_nhe_id(const struct zebra_dplane_ctx *ctx);
afi_t dplane_ctx_get_nhe_afi(const struct zebra_dplane_ctx *ctx);
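
The new accessors expose the backup nexthop list to dataplane consumers alongside the primary list. A minimal sketch of how a provider might walk both sets carried in a route context (the function is hypothetical; ALL_NEXTHOPS_PTR comes from nexthop_group.h):

    #include "zebra.h"
    #include "nexthop_group.h"
    #include "zebra/zebra_dplane.h"

    /* Sketch: visit primary, then backup, nexthops in a route update ctx. */
    static void walk_route_ctx_nexthops(const struct zebra_dplane_ctx *ctx)
    {
            const struct nexthop_group *nhg = dplane_ctx_get_ng(ctx);
            const struct nexthop_group *bnhg = dplane_ctx_get_backup_ng(ctx);
            struct nexthop *nh;

            for (ALL_NEXTHOPS_PTR(nhg, nh)) {
                    /* ... program primary path 'nh' ... */
            }

            for (ALL_NEXTHOPS_PTR(bnhg, nh)) {
                    /* ... program backup path 'nh' ... */
            }
    }
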
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index d373fdf370..999e91486d 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -98,14 +98,14 @@ static void lsp_free(struct hash *lsp_table, zebra_lsp_t **plsp);
static char *nhlfe2str(zebra_nhlfe_t *nhlfe, char *buf, int size);
static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex);
+ const union g_addr *gate, ifindex_t ifindex);
static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype, union g_addr *gate,
- ifindex_t ifindex);
+ enum nexthop_types_t gtype,
+ const union g_addr *gate, ifindex_t ifindex);
static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype, union g_addr *gate,
- ifindex_t ifindex, uint8_t num_labels,
- mpls_label_t *labels);
+ enum nexthop_types_t gtype,
+ const union g_addr *gate, ifindex_t ifindex,
+ uint8_t num_labels, mpls_label_t *labels);
static int nhlfe_del(zebra_nhlfe_t *snhlfe);
static void nhlfe_out_label_update(zebra_nhlfe_t *nhlfe,
struct mpls_label_stack *nh_label);
@@ -117,13 +117,13 @@ static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty);
static void lsp_print(zebra_lsp_t *lsp, void *ctxt);
static void *slsp_alloc(void *p);
static int snhlfe_match(zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex);
+ const union g_addr *gate, ifindex_t ifindex);
static zebra_snhlfe_t *snhlfe_find(zebra_slsp_t *slsp,
enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex);
+ const union g_addr *gate, ifindex_t ifindex);
static zebra_snhlfe_t *snhlfe_add(zebra_slsp_t *slsp,
enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex,
+ const union g_addr *gate, ifindex_t ifindex,
mpls_label_t out_label);
static int snhlfe_del(zebra_snhlfe_t *snhlfe);
static int snhlfe_del_all(zebra_slsp_t *slsp);
@@ -960,7 +960,7 @@ static wq_item_status lsp_process(struct work_queue *wq, void *data)
UNSET_FLAG(lsp->flags, LSP_FLAG_CHANGED);
/* We leave the INSTALLED flag set here
- * so we know an update in in-flight.
+ * so we know an update is in-flight.
*/
/*
@@ -1149,7 +1149,7 @@ static char *nhlfe2str(zebra_nhlfe_t *nhlfe, char *buf, int size)
* Check if NHLFE matches with search info passed.
*/
static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex)
+ const union g_addr *gate, ifindex_t ifindex)
{
struct nexthop *nhop;
int cmp = 1;
@@ -1191,8 +1191,8 @@ static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype,
* Locate NHLFE that matches with passed info.
*/
static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype, union g_addr *gate,
- ifindex_t ifindex)
+ enum nexthop_types_t gtype,
+ const union g_addr *gate, ifindex_t ifindex)
{
zebra_nhlfe_t *nhlfe;
@@ -1214,9 +1214,9 @@ static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
* check done.
*/
static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype, union g_addr *gate,
- ifindex_t ifindex, uint8_t num_labels,
- mpls_label_t labels[])
+ enum nexthop_types_t gtype,
+ const union g_addr *gate, ifindex_t ifindex,
+ uint8_t num_labels, mpls_label_t labels[])
{
zebra_nhlfe_t *nhlfe;
struct nexthop *nexthop;
@@ -1520,7 +1520,7 @@ static struct list *hash_get_sorted_list(struct hash *hash, void *cmp)
/*
* Compare two LSPs based on their label values.
*/
-static int lsp_cmp(zebra_lsp_t *lsp1, zebra_lsp_t *lsp2)
+static int lsp_cmp(const zebra_lsp_t *lsp1, const zebra_lsp_t *lsp2)
{
if (lsp1->ile.in_label < lsp2->ile.in_label)
return -1;
@@ -1547,7 +1547,7 @@ static void *slsp_alloc(void *p)
/*
* Compare two static LSPs based on their label values.
*/
-static int slsp_cmp(zebra_slsp_t *slsp1, zebra_slsp_t *slsp2)
+static int slsp_cmp(const zebra_slsp_t *slsp1, const zebra_slsp_t *slsp2)
{
if (slsp1->ile.in_label < slsp2->ile.in_label)
return -1;
@@ -1562,7 +1562,7 @@ static int slsp_cmp(zebra_slsp_t *slsp1, zebra_slsp_t *slsp2)
* Check if static NHLFE matches with search info passed.
*/
static int snhlfe_match(zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex)
+ const union g_addr *gate, ifindex_t ifindex)
{
int cmp = 1;
@@ -1593,7 +1593,7 @@ static int snhlfe_match(zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype,
*/
static zebra_snhlfe_t *snhlfe_find(zebra_slsp_t *slsp,
enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex)
+ const union g_addr *gate, ifindex_t ifindex)
{
zebra_snhlfe_t *snhlfe;
@@ -1615,7 +1615,7 @@ static zebra_snhlfe_t *snhlfe_find(zebra_slsp_t *slsp,
*/
static zebra_snhlfe_t *snhlfe_add(zebra_slsp_t *slsp,
enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex,
+ const union g_addr *gate, ifindex_t ifindex,
mpls_label_t out_label)
{
zebra_snhlfe_t *snhlfe;
@@ -2746,7 +2746,7 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
mpls_label_t in_label, uint8_t num_out_labels,
mpls_label_t out_labels[], enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex)
+ const union g_addr *gate, ifindex_t ifindex)
{
struct hash *lsp_table;
zebra_ile_t tmp_ile;
@@ -2759,11 +2759,12 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
if (!lsp_table)
return -1;
- /* If entry is present, exit. */
+ /* Find or create LSP object */
tmp_ile.in_label = in_label;
lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
if (!lsp)
return -1;
+
nhlfe = nhlfe_find(lsp, type, gtype, gate, ifindex);
if (nhlfe) {
struct nexthop *nh = nhlfe->nexthop;
@@ -2780,8 +2781,8 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
return 0;
if (IS_ZEBRA_DEBUG_MPLS) {
- char buf2[BUFSIZ];
- char buf3[BUFSIZ];
+ char buf2[MPLS_LABEL_STRLEN];
+ char buf3[MPLS_LABEL_STRLEN];
nhlfe2str(nhlfe, buf, BUFSIZ);
mpls_label2str(num_out_labels, out_labels, buf2,
@@ -2842,7 +2843,7 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
*/
int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
mpls_label_t in_label, enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex)
+ const union g_addr *gate, ifindex_t ifindex)
{
struct hash *lsp_table;
zebra_ile_t tmp_ile;
@@ -3056,11 +3057,12 @@ int zebra_mpls_static_lsp_add(struct zebra_vrf *zvrf, mpls_label_t in_label,
if (!slsp_table)
return -1;
- /* If entry is present, exit. */
+ /* Find or create LSP. */
tmp_ile.in_label = in_label;
slsp = hash_get(slsp_table, &tmp_ile, slsp_alloc);
if (!slsp)
return -1;
+
snhlfe = snhlfe_find(slsp, gtype, gate, ifindex);
if (snhlfe) {
if (snhlfe->out_label == out_label)
diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h
index 2489e8e510..33cb614346 100644
--- a/zebra/zebra_mpls.h
+++ b/zebra/zebra_mpls.h
@@ -288,7 +288,7 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
mpls_label_t in_label, uint8_t num_out_labels,
mpls_label_t out_labels[], enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex);
+ const union g_addr *gate, ifindex_t ifindex);
/*
* Uninstall a particular NHLFE in the forwarding table. If this is
@@ -296,7 +296,7 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
*/
int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
mpls_label_t in_label, enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex);
+ const union g_addr *gate, ifindex_t ifindex);
/*
* Uninstall all NHLFEs for a particular LSP forwarding entry.
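
A minimal caller sketch (hypothetical helper, not part of this diff) for the
const-qualified declarations above; the ZEBRA_LSP_STATIC type and the single
out-label are assumptions made only for illustration:

/* Sketch: install one NHLFE for an incoming label, gateway passed as const */
static int example_install_lsp(struct zebra_vrf *zvrf, mpls_label_t in_label,
			       mpls_label_t out_label,
			       const union g_addr *gate, ifindex_t ifindex)
{
	mpls_label_t out_labels[1] = { out_label };

	return mpls_lsp_install(zvrf, ZEBRA_LSP_STATIC, in_label, 1,
				out_labels, NEXTHOP_TYPE_IPV4, gate, ifindex);
}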
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index dc0af050d7..fceddcb745 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -63,6 +63,9 @@ static struct nhg_hash_entry *
depends_find_id_add(struct nhg_connected_tree_head *head, uint32_t id);
static void depends_decrement_free(struct nhg_connected_tree_head *head);
+static struct nhg_backup_info *
+nhg_backup_copy(const struct nhg_backup_info *orig);
+
static void nhg_connected_free(struct nhg_connected *dep)
{
@@ -295,7 +298,7 @@ static void zebra_nhg_set_if(struct nhg_hash_entry *nhe, struct interface *ifp)
static void
zebra_nhg_connect_depends(struct nhg_hash_entry *nhe,
- struct nhg_connected_tree_head nhg_depends)
+ struct nhg_connected_tree_head *nhg_depends)
{
struct nhg_connected *rb_node_dep = NULL;
@@ -304,31 +307,58 @@ zebra_nhg_connect_depends(struct nhg_hash_entry *nhe,
 	 * for now. Otherwise, there might be a time trade-off for repeated
 	 * alloc/frees at startup.
*/
- nhe->nhg_depends = nhg_depends;
+ nhe->nhg_depends = *nhg_depends;
/* Attach backpointer to anything that it depends on */
zebra_nhg_dependents_init(nhe);
if (!zebra_nhg_depends_is_empty(nhe)) {
frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nhe %p (%u), dep %p (%u)",
+ __func__, nhe, nhe->id,
+ rb_node_dep->nhe,
+ rb_node_dep->nhe->id);
+
zebra_nhg_dependents_add(rb_node_dep->nhe, nhe);
}
}
+}
- /* Add the ifp now if its not a group or recursive and has ifindex */
- if (zebra_nhg_depends_is_empty(nhe) && nhe->nhg.nexthop
- && nhe->nhg.nexthop->ifindex) {
- struct interface *ifp = NULL;
+/* Init an nhe, for use in a hash lookup for example */
+void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi,
+ const struct nexthop *nh)
+{
+ memset(nhe, 0, sizeof(struct nhg_hash_entry));
+ nhe->vrf_id = VRF_DEFAULT;
+ nhe->type = ZEBRA_ROUTE_NHG;
+ nhe->afi = AFI_UNSPEC;
- ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex,
- nhe->nhg.nexthop->vrf_id);
- if (ifp)
- zebra_nhg_set_if(nhe, ifp);
- else
- flog_err(
- EC_ZEBRA_IF_LOOKUP_FAILED,
- "Zebra failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u",
- nhe->nhg.nexthop->ifindex,
- nhe->nhg.nexthop->vrf_id, nhe->id);
+ /* There are some special rules that apply to groups representing
+ * a single nexthop.
+ */
+ if (nh && (nh->next == NULL)) {
+ switch (nh->type) {
+ case (NEXTHOP_TYPE_IFINDEX):
+ case (NEXTHOP_TYPE_BLACKHOLE):
+ /*
+			 * This switch case handles setting the afi differently
+			 * for ipv4/v6 routes. Ifindex/blackhole nexthop
+			 * objects cannot be ambiguous; they must be Address
+			 * Family specific. If we get here, we will either use
+			 * the AF of the route, or the AF passed in from the
+			 * kernel.
+ */
+ nhe->afi = afi;
+ break;
+ case (NEXTHOP_TYPE_IPV4_IFINDEX):
+ case (NEXTHOP_TYPE_IPV4):
+ nhe->afi = AFI_IP;
+ break;
+ case (NEXTHOP_TYPE_IPV6_IFINDEX):
+ case (NEXTHOP_TYPE_IPV6):
+ nhe->afi = AFI_IP6;
+ break;
+ }
}
}
@@ -341,7 +371,7 @@ struct nhg_hash_entry *zebra_nhg_alloc(void)
return nhe;
}
-static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *copy,
+static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *orig,
uint32_t id)
{
struct nhg_hash_entry *nhe;
@@ -350,14 +380,18 @@ static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *copy,
nhe->id = id;
- nexthop_group_copy(&(nhe->nhg), &(copy->nhg));
+ nexthop_group_copy(&(nhe->nhg), &(orig->nhg));
- nhe->vrf_id = copy->vrf_id;
- nhe->afi = copy->afi;
- nhe->type = copy->type ? copy->type : ZEBRA_ROUTE_NHG;
+ nhe->vrf_id = orig->vrf_id;
+ nhe->afi = orig->afi;
+ nhe->type = orig->type ? orig->type : ZEBRA_ROUTE_NHG;
nhe->refcnt = 0;
nhe->dplane_ref = zebra_router_get_next_sequence();
+ /* Copy backup info also, if present */
+ if (orig->backup_info)
+ nhe->backup_info = nhg_backup_copy(orig->backup_info);
+
return nhe;
}
@@ -372,7 +406,25 @@ static void *zebra_nhg_hash_alloc(void *arg)
/* Mark duplicate nexthops in a group at creation time. */
nexthop_group_mark_duplicates(&(nhe->nhg));
- zebra_nhg_connect_depends(nhe, copy->nhg_depends);
+ zebra_nhg_connect_depends(nhe, &(copy->nhg_depends));
+
+ /* Add the ifp now if it's not a group or recursive and has ifindex */
+ if (zebra_nhg_depends_is_empty(nhe) && nhe->nhg.nexthop
+ && nhe->nhg.nexthop->ifindex) {
+ struct interface *ifp = NULL;
+
+ ifp = if_lookup_by_index(nhe->nhg.nexthop->ifindex,
+ nhe->nhg.nexthop->vrf_id);
+ if (ifp)
+ zebra_nhg_set_if(nhe, ifp);
+ else
+ flog_err(
+ EC_ZEBRA_IF_LOOKUP_FAILED,
+ "Zebra failed to lookup an interface with ifindex=%d in vrf=%u for NHE id=%u",
+ nhe->nhg.nexthop->ifindex,
+ nhe->nhg.nexthop->vrf_id, nhe->id);
+ }
+
zebra_nhg_insert_id(nhe);
return nhe;
@@ -381,12 +433,17 @@ static void *zebra_nhg_hash_alloc(void *arg)
uint32_t zebra_nhg_hash_key(const void *arg)
{
const struct nhg_hash_entry *nhe = arg;
+ uint32_t val, key = 0x5a351234;
+
+ val = nexthop_group_hash(&(nhe->nhg));
+ if (nhe->backup_info) {
+ val = jhash_2words(val,
+ nexthop_group_hash(
+ &(nhe->backup_info->nhe->nhg)),
+ key);
+ }
- uint32_t key = 0x5a351234;
-
- key = jhash_3words(nhe->vrf_id, nhe->afi,
- nexthop_group_hash(&(nhe->nhg)),
- key);
+ key = jhash_3words(nhe->vrf_id, nhe->afi, val, key);
return key;
}
@@ -398,6 +455,50 @@ uint32_t zebra_nhg_id_key(const void *arg)
return nhe->id;
}
+/* Helper with common nhg/nhe nexthop comparison logic */
+static bool nhg_compare_nexthops(const struct nexthop *nh1,
+ const struct nexthop *nh2)
+{
+ if (nh1 && !nh2)
+ return false;
+
+ if (!nh1 && nh2)
+ return false;
+
+ /*
+ * We have to check the active flag of each individual one,
+ * not just the overall active_num. This solves the special case
+ * issue of a route with a nexthop group with one nexthop
+ * resolving to itself and thus marking it inactive. If we
+ * have two different routes each wanting to mark a different
+ * nexthop inactive, they need to hash to two different groups.
+ *
+ * If we just hashed on num_active, they would hash the same
+ * which is incorrect.
+ *
+ * ex)
+ * 1.1.1.0/24
+ * -> 1.1.1.1 dummy1 (inactive)
+ * -> 1.1.2.1 dummy2
+ *
+ * 1.1.2.0/24
+ * -> 1.1.1.1 dummy1
+ * -> 1.1.2.1 dummy2 (inactive)
+ *
+ * Without checking each individual one, they would hash to
+ * the same group and both have 1.1.1.1 dummy1 marked inactive.
+ *
+ */
+ if (CHECK_FLAG(nh1->flags, NEXTHOP_FLAG_ACTIVE)
+ != CHECK_FLAG(nh2->flags, NEXTHOP_FLAG_ACTIVE))
+ return false;
+
+ if (!nexthop_same(nh1, nh2))
+ return false;
+
+ return true;
+}
+
bool zebra_nhg_hash_equal(const void *arg1, const void *arg2)
{
const struct nhg_hash_entry *nhe1 = arg1;
@@ -415,45 +516,44 @@ bool zebra_nhg_hash_equal(const void *arg1, const void *arg2)
if (nhe1->afi != nhe2->afi)
return false;
- /* Nexthops should be sorted */
+ /* Nexthops should be in-order, so we simply compare them in-place */
for (nexthop1 = nhe1->nhg.nexthop, nexthop2 = nhe2->nhg.nexthop;
nexthop1 || nexthop2;
nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
- if (nexthop1 && !nexthop2)
- return false;
- if (!nexthop1 && nexthop2)
+ if (!nhg_compare_nexthops(nexthop1, nexthop2))
return false;
+ }
- /*
- * We have to check the active flag of each individual one,
- * not just the overall active_num. This solves the special case
- * issue of a route with a nexthop group with one nexthop
- * resolving to itself and thus marking it inactive. If we
- * have two different routes each wanting to mark a different
- * nexthop inactive, they need to hash to two different groups.
- *
- * If we just hashed on num_active, they would hash the same
- * which is incorrect.
- *
- * ex)
- * 1.1.1.0/24
- * -> 1.1.1.1 dummy1 (inactive)
- * -> 1.1.2.1 dummy2
- *
- * 1.1.2.0/24
- * -> 1.1.1.1 dummy1
- * -> 1.1.2.1 dummy2 (inactive)
- *
- * Without checking each individual one, they would hash to
- * the same group and both have 1.1.1.1 dummy1 marked inactive.
- *
- */
- if (CHECK_FLAG(nexthop1->flags, NEXTHOP_FLAG_ACTIVE)
- != CHECK_FLAG(nexthop2->flags, NEXTHOP_FLAG_ACTIVE))
- return false;
+ /* If there's no backup info, comparison is done. */
+ if ((nhe1->backup_info == NULL) && (nhe2->backup_info == NULL))
+ return true;
+
+ /* Compare backup info also - test the easy things first */
+ if (nhe1->backup_info && (nhe2->backup_info == NULL))
+ return false;
+ if (nhe2->backup_info && (nhe1->backup_info == NULL))
+ return false;
+
+	/* Check that both backup lists have the same length before comparing */
+ for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop,
+ nexthop2 = nhe2->backup_info->nhe->nhg.nexthop;
+ nexthop1 && nexthop2;
+ nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
+ ;
+ }
- if (!nexthop_same(nexthop1, nexthop2))
+ /* Did we find the end of one list before the other? */
+ if (nexthop1 || nexthop2)
+ return false;
+
+ /* Have to compare the backup nexthops */
+ for (nexthop1 = nhe1->backup_info->nhe->nhg.nexthop,
+ nexthop2 = nhe2->backup_info->nhe->nhg.nexthop;
+ nexthop1 || nexthop2;
+ nexthop1 = nexthop1->next, nexthop2 = nexthop2->next) {
+
+ if (!nhg_compare_nexthops(nexthop1, nexthop2))
return false;
}
@@ -512,29 +612,185 @@ static void handle_recursive_depend(struct nhg_connected_tree_head *nhg_depends,
resolved_ng.nexthop = nh;
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: head %p, nh %pNHv",
+ __func__, nhg_depends, nh);
+
depend = zebra_nhg_rib_find(0, &resolved_ng, afi);
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nh %pNHv => %p (%u)",
+ __func__, nh, depend,
+ depend ? depend->id : 0);
+
if (depend)
depends_add(nhg_depends, depend);
}
+/*
+ * Lookup an nhe in the global hash, using data from another nhe. If 'lookup'
+ * has an id value, that's used. Create a new global/shared nhe if not found.
+ */
+static bool zebra_nhe_find(struct nhg_hash_entry **nhe, /* return value */
+ struct nhg_hash_entry *lookup,
+ struct nhg_connected_tree_head *nhg_depends,
+ afi_t afi)
+{
+ bool created = false;
+ bool recursive = false;
+ struct nhg_hash_entry *newnhe, *backup_nhe;
+ struct nexthop *nh = NULL;
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: id %u, lookup %p, vrf %d, type %d, depends %p",
+ __func__, lookup->id, lookup,
+ lookup->vrf_id, lookup->type,
+ nhg_depends);
+
+ if (lookup->id)
+ (*nhe) = zebra_nhg_lookup_id(lookup->id);
+ else
+ (*nhe) = hash_lookup(zrouter.nhgs, lookup);
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: lookup => %p (%u)",
+ __func__, (*nhe),
+ (*nhe) ? (*nhe)->id : 0);
+
+ /* If we found an existing object, we're done */
+ if (*nhe)
+ goto done;
+
+ /* We're going to create/insert a new nhe:
+ * assign the next global id value if necessary.
+ */
+ if (lookup->id == 0)
+ lookup->id = ++id_counter;
+ newnhe = hash_get(zrouter.nhgs, lookup, zebra_nhg_hash_alloc);
+ created = true;
+
+ /* Mail back the new object */
+ *nhe = newnhe;
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: => created %p (%u)", __func__, newnhe,
+ newnhe->id);
+
+ /* Only hash/lookup the depends if the first lookup
+ * fails to find something. This should hopefully save a
+ * lot of cycles for larger ecmp sizes.
+ */
+ if (nhg_depends) {
+ /* If you don't want to hash on each nexthop in the
+ * nexthop group struct you can pass the depends
+ * directly. Kernel-side we do this since it just looks
+ * them up via IDs.
+ */
+ zebra_nhg_connect_depends(newnhe, nhg_depends);
+ goto done;
+ }
+
+ /* Prepare dependency relationships if this is not a
+ * singleton nexthop. There are two cases: a single
+ * recursive nexthop, where we need a relationship to the
+ * resolving nexthop; or a group of nexthops, where we need
+ * relationships with the corresponding singletons.
+ */
+ zebra_nhg_depends_init(lookup);
+
+ nh = newnhe->nhg.nexthop;
+
+ if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE))
+ SET_FLAG(newnhe->flags, NEXTHOP_GROUP_VALID);
+
+ if (nh->next == NULL) {
+ if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
+ /* Single recursive nexthop */
+ handle_recursive_depend(&newnhe->nhg_depends,
+ nh->resolved, afi);
+ recursive = true;
+ }
+ } else {
+ /* List of nexthops */
+ for (nh = newnhe->nhg.nexthop; nh; nh = nh->next) {
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: depends NH %pNHv %s",
+ __func__, nh,
+ CHECK_FLAG(nh->flags,
+ NEXTHOP_FLAG_RECURSIVE) ?
+ "(R)" : "");
+
+ depends_find_add(&newnhe->nhg_depends, nh, afi);
+ }
+ }
+
+ if (recursive)
+ SET_FLAG((*nhe)->flags, NEXTHOP_GROUP_RECURSIVE);
+
+ if (zebra_nhg_get_backup_nhg(newnhe) == NULL ||
+ zebra_nhg_get_backup_nhg(newnhe)->nexthop == NULL)
+ goto done;
+
+ /* If there are backup nexthops, add them to the backup
+ * depends tree. The rules here are a little different.
+ */
+ recursive = false;
+ backup_nhe = newnhe->backup_info->nhe;
+
+ nh = backup_nhe->nhg.nexthop;
+
+ /* Singleton recursive NH */
+ if (nh->next == NULL &&
+ CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: backup depend NH %pNHv (R)",
+ __func__, nh);
+
+ /* Single recursive nexthop */
+ handle_recursive_depend(&backup_nhe->nhg_depends,
+ nh->resolved, afi);
+ recursive = true;
+ } else {
+ /* One or more backup NHs */
+ for (; nh; nh = nh->next) {
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: backup depend NH %pNHv %s",
+ __func__, nh,
+ CHECK_FLAG(nh->flags,
+ NEXTHOP_FLAG_RECURSIVE) ?
+ "(R)" : "");
+
+ depends_find_add(&backup_nhe->nhg_depends,
+ nh, afi);
+ }
+ }
+
+ if (recursive)
+ SET_FLAG(backup_nhe->flags, NEXTHOP_GROUP_RECURSIVE);
+
+done:
+
+ return created;
+}
+
+/*
+ * Lookup or create an nhe, based on an nhg or an nhe id.
+ */
static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id,
struct nexthop_group *nhg,
struct nhg_connected_tree_head *nhg_depends,
vrf_id_t vrf_id, afi_t afi, int type)
{
struct nhg_hash_entry lookup = {};
-
- uint32_t old_id_counter = id_counter;
-
bool created = false;
- bool recursive = false;
- /*
- * If it has an id at this point, we must have gotten it from the kernel
- */
- lookup.id = id ? id : ++id_counter;
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: id %u, nhg %p, vrf %d, type %d, depends %p",
+ __func__, id, nhg, vrf_id, type,
+ nhg_depends);
+ /* Use a temporary nhe and call into the superset/common code */
+ lookup.id = id;
lookup.type = type ? type : ZEBRA_ROUTE_NHG;
lookup.nhg = *nhg;
@@ -567,53 +823,8 @@ static bool zebra_nhg_find(struct nhg_hash_entry **nhe, uint32_t id,
}
}
- if (id)
- (*nhe) = zebra_nhg_lookup_id(id);
- else
- (*nhe) = hash_lookup(zrouter.nhgs, &lookup);
-
- /* If it found an nhe in our tables, this new ID is unused */
- if (*nhe)
- id_counter = old_id_counter;
-
- if (!(*nhe)) {
- /* Only hash/lookup the depends if the first lookup
- * fails to find something. This should hopefully save a
- * lot of cycles for larger ecmp sizes.
- */
- if (nhg_depends)
- /* If you don't want to hash on each nexthop in the
- * nexthop group struct you can pass the depends
- * directly. Kernel-side we do this since it just looks
- * them up via IDs.
- */
- lookup.nhg_depends = *nhg_depends;
- else {
- if (nhg->nexthop->next) {
- zebra_nhg_depends_init(&lookup);
-
- /* If its a group, create a dependency tree */
- struct nexthop *nh = NULL;
-
- for (nh = nhg->nexthop; nh; nh = nh->next)
- depends_find_add(&lookup.nhg_depends,
- nh, afi);
- } else if (CHECK_FLAG(nhg->nexthop->flags,
- NEXTHOP_FLAG_RECURSIVE)) {
- zebra_nhg_depends_init(&lookup);
- handle_recursive_depend(&lookup.nhg_depends,
- nhg->nexthop->resolved,
- afi);
- recursive = true;
- }
- }
-
- (*nhe) = hash_get(zrouter.nhgs, &lookup, zebra_nhg_hash_alloc);
- created = true;
+ created = zebra_nhe_find(nhe, &lookup, nhg_depends, afi);
- if (recursive)
- SET_FLAG((*nhe)->flags, NEXTHOP_GROUP_RECURSIVE);
- }
return created;
}
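
A hedged sketch (hypothetical static helper, not part of this patch) of how the
boolean "created" contract of zebra_nhe_find() above might be consumed; the
caller prepares a temporary nhe, as zebra_nhg_find() does:

/* Sketch: resolve a temporary nhe into the shared hash, logging inserts */
static struct nhg_hash_entry *example_get_shared_nhe(struct nhg_hash_entry *tmp,
						     afi_t afi)
{
	struct nhg_hash_entry *nhe = NULL;

	/* false => an existing shared nhe was returned; true => new insert */
	if (zebra_nhe_find(&nhe, tmp, NULL, afi))
		zlog_debug("%s: created shared nhe %u", __func__, nhe->id);

	return nhe;
}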
@@ -629,6 +840,10 @@ zebra_nhg_find_nexthop(uint32_t id, struct nexthop *nh, afi_t afi, int type)
zebra_nhg_find(&nhe, id, &nhg, NULL, vrf_id, afi, type);
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nh %pNHv => %p (%u)",
+ __func__, nh, nhe, nhe ? nhe->id : 0);
+
return nhe;
}
@@ -807,6 +1022,9 @@ done:
static void zebra_nhg_release(struct nhg_hash_entry *nhe)
{
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nhe %p (%u)", __func__, nhe, nhe->id);
+
/* Remove it from any lists it may be on */
zebra_nhg_depends_release(nhe);
zebra_nhg_dependents_release(nhe);
@@ -872,6 +1090,10 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx)
lookup = zebra_nhg_lookup_id(id);
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: id %u, count %d, lookup => %p",
+ __func__, id, count, lookup);
+
if (lookup) {
/* This is already present in our table, hence an update
 		 * that we did not initiate.
@@ -919,6 +1141,11 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx)
*/
kernel_nhe = zebra_nhg_copy(nhe, id);
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: copying kernel nhe (%u), dup of %u",
+ __func__, id, nhe->id);
+
zebra_nhg_insert_id(kernel_nhe);
zebra_nhg_set_unhashable(kernel_nhe);
} else if (zebra_nhg_contains_unhashable(nhe)) {
@@ -926,10 +1153,18 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx)
 		 * depend, so let's mark this group as unhashable as well
* and release it from the non-ID hash.
*/
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nhe %p (%u) unhashable",
+ __func__, nhe, nhe->id);
+
hash_release(zrouter.nhgs, nhe);
zebra_nhg_set_unhashable(nhe);
} else {
/* It actually created a new nhe */
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nhe %p (%u) is new",
+ __func__, nhe, nhe->id);
+
SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
}
@@ -1038,6 +1273,10 @@ int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp,
{
struct nhg_ctx *ctx = NULL;
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nh %pNHv, id %u, count %d",
+ __func__, nh, id, (int)count);
+
if (id > id_counter)
/* Increase our counter so we don't try to create
* an ID that already exists
@@ -1111,12 +1350,17 @@ static struct nhg_hash_entry *depends_find_singleton(const struct nexthop *nh,
/* The copy may have allocated labels; free them if necessary. */
nexthop_del_labels(&lookup);
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nh %pNHv => %p (%u)",
+ __func__, nh, nhe, nhe ? nhe->id : 0);
+
return nhe;
}
static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi)
{
struct nhg_hash_entry *nhe = NULL;
+ char rbuf[10];
if (!nh)
goto done;
@@ -1124,10 +1368,18 @@ static struct nhg_hash_entry *depends_find(const struct nexthop *nh, afi_t afi)
/* We are separating these functions out to increase handling speed
* in the non-recursive case (by not alloc/freeing)
*/
- if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE))
+ if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)) {
nhe = depends_find_recursive(nh, afi);
- else
+ strlcpy(rbuf, "(R)", sizeof(rbuf));
+ } else {
nhe = depends_find_singleton(nh, afi);
+ rbuf[0] = '\0';
+ }
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nh %pNHv %s => %p (%u)",
+ __func__, nh, rbuf,
+ nhe, nhe ? nhe->id : 0);
done:
return nhe;
@@ -1136,6 +1388,10 @@ done:
static void depends_add(struct nhg_connected_tree_head *head,
struct nhg_hash_entry *depend)
{
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: head %p nh %pNHv",
+ __func__, head, depend->nhg.nexthop);
+
/* If NULL is returned, it was successfully added and
* needs to have its refcnt incremented.
*
@@ -1154,6 +1410,10 @@ depends_find_add(struct nhg_connected_tree_head *head, struct nexthop *nh,
depend = depends_find(nh, afi);
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nh %pNHv => %p",
+ __func__, nh, depend);
+
if (depend)
depends_add(head, depend);
@@ -1179,7 +1439,7 @@ static void depends_decrement_free(struct nhg_connected_tree_head *head)
nhg_connected_tree_free(head);
}
-/* Rib-side, you get a nexthop group struct */
+/* Find an nhe based on a list of nexthops */
struct nhg_hash_entry *
zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi)
{
@@ -1195,13 +1455,107 @@ zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi)
zebra_nhg_find(&nhe, id, nhg, NULL, vrf_id, rt_afi, 0);
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: => nhe %p (%u)",
+ __func__, nhe, nhe ? nhe->id : 0);
+
return nhe;
}
+/* Find an nhe based on a route's nhe */
+struct nhg_hash_entry *
+zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi)
+{
+ struct nhg_hash_entry *nhe = NULL;
+
+ if (!(rt_nhe && rt_nhe->nhg.nexthop)) {
+ flog_err(EC_ZEBRA_TABLE_LOOKUP_FAILED,
+ "No nexthop passed to %s", __func__);
+ return NULL;
+ }
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: rt_nhe %p (%u)",
+ __func__, rt_nhe,
+ rt_nhe ? rt_nhe->id : 0);
+
+ zebra_nhe_find(&nhe, rt_nhe, NULL, rt_afi);
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: => nhe %p (%u)",
+ __func__, nhe, nhe ? nhe->id : 0);
+
+ return nhe;
+}
+
+/*
+ * Allocate backup nexthop info object. Typically these are embedded in
+ * nhg_hash_entry objects.
+ */
+struct nhg_backup_info *zebra_nhg_backup_alloc(void)
+{
+ struct nhg_backup_info *p;
+
+ p = XCALLOC(MTYPE_NHG, sizeof(struct nhg_backup_info));
+
+ p->nhe = zebra_nhg_alloc();
+
+ /* Identify the embedded group used to hold the list of backups */
+ SET_FLAG(p->nhe->flags, NEXTHOP_GROUP_BACKUP);
+
+ return p;
+}
+
+/*
+ * Free backup nexthop info object, deal with any embedded allocations
+ */
+void zebra_nhg_backup_free(struct nhg_backup_info **p)
+{
+ if (p && *p) {
+ if ((*p)->nhe)
+ zebra_nhg_free((*p)->nhe);
+
+ XFREE(MTYPE_NHG, (*p));
+ }
+}
+
+/* Accessor for backup nexthop group */
+struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe)
+{
+ struct nexthop_group *p = NULL;
+
+ if (nhe) {
+ if (nhe->backup_info && nhe->backup_info->nhe)
+ p = &(nhe->backup_info->nhe->nhg);
+ }
+
+ return p;
+}
+
+/*
+ * Helper to return a copy of a backup_info - note that this is a shallow
+ * copy, meant to be used when creating a new nhe from info passed in with
+ * a route e.g.
+ */
+static struct nhg_backup_info *
+nhg_backup_copy(const struct nhg_backup_info *orig)
+{
+ struct nhg_backup_info *b;
+
+ b = zebra_nhg_backup_alloc();
+
+ /* Copy list of nexthops */
+ nexthop_group_copy(&(b->nhe->nhg), &(orig->nhe->nhg));
+
+ return b;
+}
+
static void zebra_nhg_free_members(struct nhg_hash_entry *nhe)
{
nexthops_free(nhe->nhg.nexthop);
+ zebra_nhg_backup_free(&nhe->backup_info);
+
/* Decrement to remove connection ref */
nhg_connected_tree_decrement_ref(&nhe->nhg_depends);
nhg_connected_tree_free(&nhe->nhg_depends);
@@ -1210,6 +1564,21 @@ static void zebra_nhg_free_members(struct nhg_hash_entry *nhe)
void zebra_nhg_free(struct nhg_hash_entry *nhe)
{
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL) {
+ /* Group or singleton? */
+ if (nhe->nhg.nexthop && nhe->nhg.nexthop->next)
+ zlog_debug("%s: nhe %p (%u), refcnt %d",
+ __func__, nhe,
+ (nhe ? nhe->id : 0),
+ (nhe ? nhe->refcnt : 0));
+ else
+ zlog_debug("%s: nhe %p (%u), refcnt %d, NH %pNHv",
+ __func__, nhe,
+ (nhe ? nhe->id : 0),
+ (nhe ? nhe->refcnt : 0),
+ nhe->nhg.nexthop);
+ }
+
if (nhe->refcnt)
zlog_debug("nhe_id=%u hash refcnt=%d", nhe->id, nhe->refcnt);
@@ -1225,6 +1594,11 @@ void zebra_nhg_hash_free(void *p)
void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
{
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nhe %p (%u) %d => %d",
+ __func__, nhe, nhe->id, nhe->refcnt,
+ nhe->refcnt - 1);
+
nhe->refcnt--;
if (!zebra_nhg_depends_is_empty(nhe))
@@ -1236,6 +1610,11 @@ void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe)
void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe)
{
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: nhe %p (%u) %d => %d",
+ __func__, nhe, nhe->id, nhe->refcnt,
+ nhe->refcnt + 1);
+
nhe->refcnt++;
if (!zebra_nhg_depends_is_empty(nhe))
@@ -1385,6 +1764,10 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
nexthop->resolved = NULL;
re->nexthop_mtu = 0;
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: re %p, nexthop %pNHv",
+ __func__, re, nexthop);
+
/*
* If the kernel has sent us a NEW route, then
* by golly gee whiz it's a good route.
@@ -1533,6 +1916,12 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
|| nexthop->type == NEXTHOP_TYPE_IPV6)
nexthop->ifindex = newhop->ifindex;
}
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: CONNECT match %p (%u), newhop %pNHv",
+ __func__, match,
+ match->nhe->id, newhop);
+
return 1;
} else if (CHECK_FLAG(re->flags, ZEBRA_FLAG_ALLOW_RECURSION)) {
resolved = 0;
@@ -1543,6 +1932,11 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
if (!nexthop_valid_resolve(nexthop, newhop))
continue;
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: RECURSIVE match %p (%u), newhop %pNHv",
+ __func__, match,
+ match->nhe->id, newhop);
+
SET_FLAG(nexthop->flags,
NEXTHOP_FLAG_RECURSIVE);
nexthop_set_resolved(afi, newhop, nexthop);
@@ -1565,6 +1959,11 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
if (!nexthop_valid_resolve(nexthop, newhop))
continue;
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("%s: STATIC match %p (%u), newhop %pNHv",
+ __func__, match,
+ match->nhe->id, newhop);
+
SET_FLAG(nexthop->flags,
NEXTHOP_FLAG_RECURSIVE);
nexthop_set_resolved(afi, newhop, nexthop);
@@ -1683,11 +2082,11 @@ static unsigned nexthop_active_check(struct route_node *rn,
default:
break;
}
+
if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug(
- " %s: Unable to find a active nexthop",
- __func__);
+ zlog_debug(" %s: Unable to find active nexthop",
+ __func__);
return 0;
}
@@ -1768,45 +2167,37 @@ done:
}
/*
- * Iterate over all nexthops of the given RIB entry and refresh their
- * ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag,
- * the whole re structure is flagged with ROUTE_ENTRY_CHANGED.
- *
- * Return value is the new number of active nexthops.
+ * Process a list of nexthops, given the head of the list, determining
+ * whether each one is ACTIVE/installable at this time.
*/
-int nexthop_active_update(struct route_node *rn, struct route_entry *re)
+static uint32_t nexthop_list_active_update(struct route_node *rn,
+ struct route_entry *re,
+ struct nexthop *nexthop)
{
- struct nexthop_group new_grp = {};
- struct nexthop *nexthop;
union g_addr prev_src;
unsigned int prev_active, new_active;
ifindex_t prev_index;
- uint8_t curr_active = 0;
+ uint32_t counter = 0;
- afi_t rt_afi = family2afi(rn->p.family);
-
- UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
-
- /* Copy over the nexthops in current state */
- nexthop_group_copy(&new_grp, &(re->nhe->nhg));
-
- for (nexthop = new_grp.nexthop; nexthop; nexthop = nexthop->next) {
+ /* Process nexthops one-by-one */
+ for ( ; nexthop; nexthop = nexthop->next) {
/* No protocol daemon provides src and so we're skipping
- * tracking it */
+ * tracking it
+ */
prev_src = nexthop->rmap_src;
prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
prev_index = nexthop->ifindex;
/*
* We need to respect the multipath_num here
 		 * as what we should be able to install from
- * a multipath perpsective should not be a data plane
+ * a multipath perspective should not be a data plane
* decision point.
*/
new_active =
nexthop_active_check(rn, re, nexthop);
- if (new_active && curr_active >= zrouter.multipath_num) {
+ if (new_active && counter >= zrouter.multipath_num) {
struct nexthop *nh;
/* Set it and its resolved nexthop as inactive. */
@@ -1817,7 +2208,7 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re)
}
if (new_active)
- curr_active++;
+ counter++;
/* Don't allow src setting on IPv6 addr for now */
if (prev_active != new_active || prev_index != nexthop->ifindex
@@ -1833,14 +2224,79 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re)
SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
}
+ return counter;
+}
+
+/*
+ * Iterate over all nexthops of the given RIB entry and refresh their
+ * ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag,
+ * the whole re structure is flagged with ROUTE_ENTRY_CHANGED.
+ *
+ * Return value is the new number of active nexthops.
+ */
+int nexthop_active_update(struct route_node *rn, struct route_entry *re)
+{
+ struct nhg_hash_entry *curr_nhe;
+ uint32_t curr_active = 0, backup_active = 0;
+
+ afi_t rt_afi = family2afi(rn->p.family);
+
+ UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
+
+ /* Make a local copy of the existing nhe, so we don't work on/modify
+ * the shared nhe.
+ */
+ curr_nhe = zebra_nhg_copy(re->nhe, re->nhe->id);
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: re %p nhe %p (%u), curr_nhe %p",
+ __func__, re, re->nhe, re->nhe->id,
+ curr_nhe);
+
+ /* Clear the existing id, if any: this will avoid any confusion
+ * if the id exists, and will also force the creation
+ * of a new nhe reflecting the changes we may make in this local copy.
+ */
+ curr_nhe->id = 0;
+
+ /* Process nexthops */
+ curr_active = nexthop_list_active_update(rn, re, curr_nhe->nhg.nexthop);
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: re %p curr_active %u", __func__, re,
+ curr_active);
+
+ /* If there are no backup nexthops, we are done */
+ if (zebra_nhg_get_backup_nhg(curr_nhe) == NULL)
+ goto backups_done;
+
+ backup_active = nexthop_list_active_update(
+ rn, re, zebra_nhg_get_backup_nhg(curr_nhe)->nexthop);
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: re %p backup_active %u", __func__, re,
+ backup_active);
+
+backups_done:
+
+ /*
+ * Ref or create an nhe that matches the current state of the
+ * nexthop(s).
+ */
if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) {
struct nhg_hash_entry *new_nhe = NULL;
- new_nhe = zebra_nhg_rib_find(0, &new_grp, rt_afi);
+ new_nhe = zebra_nhg_rib_find_nhe(curr_nhe, rt_afi);
+
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: re %p CHANGED: nhe %p (%u) => new_nhe %p (%u)",
+ __func__, re, re->nhe,
+ re->nhe->id, new_nhe, new_nhe->id);
route_entry_update_nhe(re, new_nhe);
}
+
/* Walk the NHE depends tree and toggle NEXTHOP_GROUP_VALID
* flag where appropriate.
*/
@@ -1848,11 +2304,11 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re)
zebra_nhg_set_valid_if_active(re->nhe);
/*
- * Do not need these nexthops anymore since they
- * were either copied over into an nhe or not
+ * Do not need the old / copied nhe anymore since it
+ * was either copied over into a new nhe or not
* used at all.
*/
- nexthops_free(new_grp.nexthop);
+ zebra_nhg_free(curr_nhe);
return curr_active;
}
@@ -1950,6 +2406,16 @@ static uint8_t zebra_nhg_nhe2grp_internal(struct nh_grp *grp,
}
}
+ if (nhe->backup_info == NULL || nhe->backup_info->nhe == NULL)
+ goto done;
+
+ /* TODO -- For now, we are not trying to use or install any
+ * backup info in this nexthop-id path: we aren't prepared
+ * to use the backups here yet. We're just debugging what we find.
+ */
+ if (IS_ZEBRA_DEBUG_NHG_DETAIL)
+ zlog_debug("%s: skipping backup nhe", __func__);
+
done:
return i;
}
@@ -2036,7 +2502,7 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx)
id = dplane_ctx_get_nhe_id(ctx);
- if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug(
"Nexthop dplane ctx %p, op %s, nexthop ID (%u), result %s",
ctx, dplane_op2str(op), id, dplane_res2str(status));
diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h
index dc3a47c020..0a9e97ab48 100644
--- a/zebra/zebra_nhg.h
+++ b/zebra/zebra_nhg.h
@@ -50,6 +50,9 @@ struct nhg_hash_entry {
struct nexthop_group nhg;
+ /* If supported, a mapping of backup nexthops. */
+ struct nhg_backup_info *backup_info;
+
/* If this is not a group, it
* will be a single nexthop
* and must have an interface
@@ -72,6 +75,7 @@ struct nhg_hash_entry {
* faster with ID's.
*/
struct nhg_connected_tree_head nhg_depends, nhg_dependents;
+
/*
* Is this nexthop group valid, ie all nexthops are fully resolved.
* What is fully resolved? It's a nexthop that is either self contained
@@ -102,11 +106,25 @@ struct nhg_hash_entry {
* from the kernel. Therefore, it is unhashable.
*/
#define NEXTHOP_GROUP_UNHASHABLE (1 << 4)
+
+/*
+ * Backup nexthop support - identify groups that are backups for
+ * another group.
+ */
+#define NEXTHOP_GROUP_BACKUP (1 << 5)
+
};
/* Was this one we created, either this session or previously? */
#define ZEBRA_NHG_CREATED(NHE) ((NHE->type) == ZEBRA_ROUTE_NHG)
+/*
+ * Backup nexthops: this is a group object itself, so
+ * that the backup nexthops can use the same code as a normal object.
+ */
+struct nhg_backup_info {
+ struct nhg_hash_entry *nhe;
+};
enum nhg_ctx_op_e {
NHG_CTX_OP_NONE = 0,
@@ -162,13 +180,26 @@ bool zebra_nhg_kernel_nexthops_enabled(void);
/**
* NHE abstracted tree functions.
- * Use these where possible instead of the direct ones access ones.
+ * Use these where possible instead of direct access.
*/
struct nhg_hash_entry *zebra_nhg_alloc(void);
void zebra_nhg_free(struct nhg_hash_entry *nhe);
/* In order to clear a generic hash, we need a generic api, sigh. */
void zebra_nhg_hash_free(void *p);
+/* Init an nhe, for use in a hash lookup for example. There's some fuzziness
+ * if the nhe represents only a single nexthop, so we try to capture that
+ * variant also.
+ */
+void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi,
+ const struct nexthop *nh);
+
+/* Allocate, free backup nexthop info objects */
+struct nhg_backup_info *zebra_nhg_backup_alloc(void);
+void zebra_nhg_backup_free(struct nhg_backup_info **p);
+
+struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe);
+
extern struct nhg_hash_entry *zebra_nhg_resolve(struct nhg_hash_entry *nhe);
extern unsigned int zebra_nhg_depends_count(const struct nhg_hash_entry *nhe);
@@ -203,10 +234,14 @@ extern int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh,
/* Del via kernel */
extern int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id);
-/* Find via route creation */
+/* Find an nhe based on a nexthop_group */
extern struct nhg_hash_entry *
zebra_nhg_rib_find(uint32_t id, struct nexthop_group *nhg, afi_t rt_afi);
+/* Find an nhe based on a route's nhe, used during route creation */
+struct nhg_hash_entry *
+zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi);
+
/* Reference counter functions */
extern void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe);
extern void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe);
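
To make the backup plumbing concrete, a small hypothetical sketch (not part of
this patch) that attaches a backup group to an nhe with the helpers declared
above and reads it back through the accessor; the helper name and the
ready-made 'backup_list' are assumptions:

/* Sketch: backup nexthops live in an embedded nhe, so they can reuse the
 * normal nexthop-group code paths.
 */
static void example_attach_backups(struct nhg_hash_entry *nhe,
				   struct nexthop *backup_list)
{
	struct nexthop_group *bnhg;

	if (nhe->backup_info == NULL)
		nhe->backup_info = zebra_nhg_backup_alloc();

	/* The embedded nhe's group holds the list of backups */
	nhe->backup_info->nhe->nhg.nexthop = backup_list;

	/* Accessor returns &(nhe->backup_info->nhe->nhg), or NULL */
	bnhg = zebra_nhg_get_backup_nhg(nhe);
	if (bnhg)
		zlog_debug("%u backup nexthop(s) attached",
			   nexthop_group_nexthop_num(bnhg));
}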
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index dc54dee785..58967de778 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -213,7 +213,7 @@ static void route_entry_attach_ref(struct route_entry *re,
int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new)
{
- struct nhg_hash_entry *old = NULL;
+ struct nhg_hash_entry *old;
int ret = 0;
if (new == NULL) {
@@ -223,7 +223,7 @@ int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new)
goto done;
}
- if (re->nhe_id != new->id) {
+ if ((re->nhe_id != 0) && (re->nhe_id != new->id)) {
old = re->nhe;
route_entry_attach_ref(re, new);
@@ -2338,7 +2338,6 @@ static void rib_addnode(struct route_node *rn,
void rib_unlink(struct route_node *rn, struct route_entry *re)
{
rib_dest_t *dest;
- struct nhg_hash_entry *nhe = NULL;
assert(rn && re);
@@ -2353,11 +2352,10 @@ void rib_unlink(struct route_node *rn, struct route_entry *re)
if (dest->selected_fib == re)
dest->selected_fib = NULL;
- if (re->nhe_id) {
- nhe = zebra_nhg_lookup_id(re->nhe_id);
- if (nhe)
- zebra_nhg_decrement_ref(nhe);
- } else if (re->nhe->nhg.nexthop)
+ if (re->nhe && re->nhe_id) {
+ assert(re->nhe->id == re->nhe_id);
+ zebra_nhg_decrement_ref(re->nhe);
+ } else if (re->nhe && re->nhe->nhg.nexthop)
nexthops_free(re->nhe->nhg.nexthop);
nexthops_free(re->fib_ng.nexthop);
@@ -2396,11 +2394,75 @@ void rib_delnode(struct route_node *rn, struct route_entry *re)
}
}
+/*
+ * Helper that debugs a single nexthop within a route-entry
+ */
+static void _route_entry_dump_nh(const struct route_entry *re,
+ const char *straddr,
+ const struct nexthop *nexthop)
+{
+ char nhname[PREFIX_STRLEN];
+ char backup_str[50];
+ char wgt_str[50];
+ struct interface *ifp;
+ struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id);
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_BLACKHOLE:
+ sprintf(nhname, "Blackhole");
+ break;
+ case NEXTHOP_TYPE_IFINDEX:
+ ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
+ sprintf(nhname, "%s", ifp ? ifp->name : "Unknown");
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ /* fallthrough */
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ inet_ntop(AF_INET, &nexthop->gate, nhname, INET6_ADDRSTRLEN);
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ inet_ntop(AF_INET6, &nexthop->gate, nhname, INET6_ADDRSTRLEN);
+ break;
+ }
+
+ backup_str[0] = '\0';
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
+ snprintf(backup_str, sizeof(backup_str), "backup %d,",
+ (int)nexthop->backup_idx);
+ }
+
+ wgt_str[0] = '\0';
+ if (nexthop->weight)
+ snprintf(wgt_str, sizeof(wgt_str), "wgt %d,", nexthop->weight);
+
+ zlog_debug("%s: %s %s[%u] vrf %s(%u) %s%s with flags %s%s%s%s%s",
+ straddr, (nexthop->rparent ? " NH" : "NH"), nhname,
+ nexthop->ifindex, vrf ? vrf->name : "Unknown",
+ nexthop->vrf_id,
+ wgt_str, backup_str,
+ (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)
+ ? "ACTIVE "
+ : ""),
+ (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)
+ ? "FIB "
+ : ""),
+ (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)
+ ? "RECURSIVE "
+ : ""),
+ (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)
+ ? "ONLINK "
+ : ""),
+ (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)
+ ? "DUPLICATE "
+ : ""));
+
+}
+
/* This function dumps the contents of a given RE entry into
* standard debug log. Calling function name and IP prefix in
* question are passed as 1st and 2nd arguments.
*/
-
void _route_entry_dump(const char *func, union prefixconstptr pp,
union prefixconstptr src_pp,
const struct route_entry *re)
@@ -2409,9 +2471,9 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
bool is_srcdst = src_p && src_p->prefixlen;
char straddr[PREFIX_STRLEN];
char srcaddr[PREFIX_STRLEN];
- char nhname[PREFIX_STRLEN];
struct nexthop *nexthop;
struct vrf *vrf = vrf_lookup_by_id(re->vrf_id);
+ struct nexthop_group *nhg;
zlog_debug("%s: dumping RE entry %p for %s%s%s vrf %s(%u)", func,
(const void *)re, prefix2str(pp, straddr, sizeof(straddr)),
@@ -2422,65 +2484,32 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
zlog_debug("%s: uptime == %lu, type == %u, instance == %d, table == %d",
straddr, (unsigned long)re->uptime, re->type, re->instance,
re->table);
- zlog_debug(
- "%s: metric == %u, mtu == %u, distance == %u, flags == %u, status == %u",
- straddr, re->metric, re->mtu, re->distance, re->flags, re->status);
+ zlog_debug("%s: metric == %u, mtu == %u, distance == %u, flags == %u, status == %u",
+ straddr, re->metric, re->mtu, re->distance, re->flags,
+ re->status);
zlog_debug("%s: nexthop_num == %u, nexthop_active_num == %u", straddr,
nexthop_group_nexthop_num(&(re->nhe->nhg)),
nexthop_group_active_nexthop_num(&(re->nhe->nhg)));
- for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) {
- struct interface *ifp;
- struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id);
+ /* Dump nexthops */
+ for (ALL_NEXTHOPS(re->nhe->nhg, nexthop))
+ _route_entry_dump_nh(re, straddr, nexthop);
- switch (nexthop->type) {
- case NEXTHOP_TYPE_BLACKHOLE:
- sprintf(nhname, "Blackhole");
- break;
- case NEXTHOP_TYPE_IFINDEX:
- ifp = if_lookup_by_index(nexthop->ifindex,
- nexthop->vrf_id);
- sprintf(nhname, "%s", ifp ? ifp->name : "Unknown");
- break;
- case NEXTHOP_TYPE_IPV4:
- /* fallthrough */
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- inet_ntop(AF_INET, &nexthop->gate, nhname,
- INET6_ADDRSTRLEN);
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- inet_ntop(AF_INET6, &nexthop->gate, nhname,
- INET6_ADDRSTRLEN);
- break;
- }
- zlog_debug("%s: %s %s[%u] vrf %s(%u) with flags %s%s%s%s%s",
- straddr, (nexthop->rparent ? " NH" : "NH"), nhname,
- nexthop->ifindex, vrf ? vrf->name : "Unknown",
- nexthop->vrf_id,
- (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)
- ? "ACTIVE "
- : ""),
- (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)
- ? "FIB "
- : ""),
- (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)
- ? "RECURSIVE "
- : ""),
- (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)
- ? "ONLINK "
- : ""),
- (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)
- ? "DUPLICATE "
- : ""));
+ if (zebra_nhg_get_backup_nhg(re->nhe)) {
+ zlog_debug("%s: backup nexthops:", straddr);
+
+ nhg = zebra_nhg_get_backup_nhg(re->nhe);
+ for (ALL_NEXTHOPS_PTR(nhg, nexthop))
+ _route_entry_dump_nh(re, straddr, nexthop);
}
+
zlog_debug("%s: dump complete", straddr);
}
-/* This is an exported helper to rtm_read() to dump the strange
+/*
+ * This is an exported helper to rtm_read() to dump the strange
* RE entry found by rib_lookup_ipv4_route()
*/
-
void rib_lookup_and_dump(struct prefix_ipv4 *p, vrf_id_t vrf_id)
{
struct route_table *table;
@@ -2574,9 +2603,16 @@ void rib_lookup_and_pushup(struct prefix_ipv4 *p, vrf_id_t vrf_id)
}
}
-int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
- struct prefix_ipv6 *src_p, struct route_entry *re,
- struct nexthop_group *ng)
+/*
+ * Internal route-add implementation; there are a couple of different public
+ * signatures. Callers in this path are responsible for the memory they
+ * allocate: if they allocate a nexthop_group or backup nexthop info, they
+ * must free those objects. If this returns < 0, an error has occurred and the
+ * route_entry 're' has not been captured; the caller should free that also.
+ */
+int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p,
+ struct prefix_ipv6 *src_p, struct route_entry *re,
+ struct nhg_hash_entry *re_nhe)
{
struct nhg_hash_entry *nhe = NULL;
struct route_table *table;
@@ -2584,41 +2620,31 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
struct route_entry *same = NULL;
int ret = 0;
- if (!re)
- return 0;
+ if (!re || !re_nhe)
+ return -1;
assert(!src_p || !src_p->prefixlen || afi == AFI_IP6);
/* Lookup table. */
table = zebra_vrf_get_table_with_table_id(afi, safi, re->vrf_id,
re->table);
- if (!table) {
- if (ng)
- nexthop_group_delete(&ng);
- XFREE(MTYPE_RE, re);
- return 0;
- }
+ if (!table)
+ return -1;
- if (re->nhe_id) {
- nhe = zebra_nhg_lookup_id(re->nhe_id);
+ if (re_nhe->id > 0) {
+ nhe = zebra_nhg_lookup_id(re_nhe->id);
if (!nhe) {
flog_err(
EC_ZEBRA_TABLE_LOOKUP_FAILED,
"Zebra failed to find the nexthop hash entry for id=%u in a route entry",
- re->nhe_id);
- XFREE(MTYPE_RE, re);
+ re_nhe->id);
+
return -1;
}
} else {
- nhe = zebra_nhg_rib_find(0, ng, afi);
-
- /*
- * The nexthops got copied over into an nhe,
- * so free them now.
- */
- nexthop_group_delete(&ng);
-
+ /* Lookup nhe from route information */
+ nhe = zebra_nhg_rib_find_nhe(re_nhe, afi);
if (!nhe) {
char buf[PREFIX_STRLEN] = "";
char buf2[PREFIX_STRLEN] = "";
@@ -2631,7 +2657,6 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
src_p ? prefix2str(src_p, buf2, sizeof(buf2))
: "");
- XFREE(MTYPE_RE, re);
return -1;
}
}
@@ -2709,15 +2734,51 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
ret = 1;
/* Free implicit route.*/
- if (same) {
+ if (same)
rib_delnode(rn, same);
- ret = -1;
- }
route_unlock_node(rn);
return ret;
}
+/*
+ * Add a single route.
+ */
+int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
+ struct prefix_ipv6 *src_p, struct route_entry *re,
+ struct nexthop_group *ng)
+{
+ int ret;
+ struct nhg_hash_entry nhe;
+
+ if (!re)
+ return -1;
+
+ /* We either need nexthop(s) or an existing nexthop id */
+ if (ng == NULL && re->nhe_id == 0)
+ return -1;
+
+ /*
+ * Use a temporary nhe to convey info to the common/main api.
+ */
+ zebra_nhe_init(&nhe, afi, (ng ? ng->nexthop : NULL));
+ if (ng)
+ nhe.nhg.nexthop = ng->nexthop;
+ else if (re->nhe_id > 0)
+ nhe.id = re->nhe_id;
+
+ ret = rib_add_multipath_nhe(afi, safi, p, src_p, re, &nhe);
+
+ /* In this path, the callers expect memory to be freed. */
+ nexthop_group_delete(&ng);
+
+ /* In error cases, free the route also */
+ if (ret < 0)
+ XFREE(MTYPE_RE, re);
+
+ return ret;
+}
+
void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
unsigned short instance, int flags, struct prefix *p,
struct prefix_ipv6 *src_p, const struct nexthop *nh,
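
A caller sketch (hypothetical names, not part of this patch) illustrating the
ownership rules described above: rib_add_multipath() always consumes the
nexthop_group, and on error it frees the route_entry as well:

/* Sketch: after this call neither 'ng' nor, on error, 're' may be used again */
static void example_install_route(afi_t afi, struct prefix *p,
				  struct route_entry *re,
				  struct nexthop_group *ng)
{
	if (rib_add_multipath(afi, SAFI_UNICAST, p, NULL, re, ng) < 0)
		zlog_debug("route add failed; 're' and 'ng' already freed");
}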
@@ -3188,6 +3249,9 @@ void rib_sweep_table(struct route_table *table)
if (!table)
return;
+ if (IS_ZEBRA_DEBUG_RIB)
+ zlog_debug("%s: starting", __func__);
+
for (rn = route_top(table); rn; rn = srcdest_route_next(rn)) {
RNODE_FOREACH_RE_SAFE (rn, re, next) {
@@ -3234,6 +3298,9 @@ void rib_sweep_table(struct route_table *table)
rib_delnode(rn, re);
}
}
+
+ if (IS_ZEBRA_DEBUG_RIB)
+ zlog_debug("%s: ends", __func__);
}
/* Sweep all RIB tables. */
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index ccc6e9e46b..590ec57087 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -164,7 +164,8 @@ DEFUN (show_ip_rpf_addr,
return CMD_SUCCESS;
}
-static char re_status_output_char(struct route_entry *re, struct nexthop *nhop)
+static char re_status_output_char(const struct route_entry *re,
+ const struct nexthop *nhop)
{
if (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)) {
if (!CHECK_FLAG(nhop->flags, NEXTHOP_FLAG_DUPLICATE) &&
@@ -187,6 +188,152 @@ static char re_status_output_char(struct route_entry *re, struct nexthop *nhop)
return ' ';
}
+/*
+ * TODO -- Show backup nexthop info
+ */
+static void show_nh_backup_helper(struct vty *vty,
+ const struct nhg_hash_entry *nhe,
+ const struct nexthop *nexthop)
+{
+ /* Double-check that there _is_ a backup */
+ if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP))
+ return;
+
+ /* Locate the backup nexthop */
+
+ /* Format the backup (indented) */
+
+}
+
+/*
+ * Helper api to format output for a nexthop, used in the 'detailed'
+ * output path.
+ */
+static void show_nexthop_detail_helper(struct vty *vty,
+ const struct route_entry *re,
+ const struct nexthop *nexthop)
+{
+ char addrstr[32];
+ char buf[MPLS_LABEL_STRLEN];
+
+ vty_out(vty, " %c%s",
+ re_status_output_char(re, nexthop),
+ nexthop->rparent ? " " : "");
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ vty_out(vty, " %s",
+ inet_ntoa(nexthop->gate.ipv4));
+ if (nexthop->ifindex)
+ vty_out(vty, ", via %s",
+ ifindex2ifname(
+ nexthop->ifindex,
+ nexthop->vrf_id));
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ vty_out(vty, " %s",
+ inet_ntop(AF_INET6, &nexthop->gate.ipv6,
+ buf, sizeof(buf)));
+ if (nexthop->ifindex)
+ vty_out(vty, ", via %s",
+ ifindex2ifname(
+ nexthop->ifindex,
+ nexthop->vrf_id));
+ break;
+
+ case NEXTHOP_TYPE_IFINDEX:
+ vty_out(vty, " directly connected, %s",
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ vty_out(vty, " unreachable");
+ switch (nexthop->bh_type) {
+ case BLACKHOLE_REJECT:
+ vty_out(vty, " (ICMP unreachable)");
+ break;
+ case BLACKHOLE_ADMINPROHIB:
+ vty_out(vty,
+ " (ICMP admin-prohibited)");
+ break;
+ case BLACKHOLE_NULL:
+ vty_out(vty, " (blackhole)");
+ break;
+ case BLACKHOLE_UNSPEC:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if ((re->vrf_id != nexthop->vrf_id)
+ && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) {
+ struct vrf *vrf =
+ vrf_lookup_by_id(nexthop->vrf_id);
+
+ if (vrf)
+ vty_out(vty, "(vrf %s)", vrf->name);
+ else
+ vty_out(vty, "(vrf UNKNOWN)");
+ }
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE))
+ vty_out(vty, " (duplicate nexthop removed)");
+
+ if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+ vty_out(vty, " inactive");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
+ vty_out(vty, " onlink");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ vty_out(vty, " (recursive)");
+
+ /* Source specified? */
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ if (nexthop->src.ipv4.s_addr) {
+ if (inet_ntop(AF_INET, &nexthop->src.ipv4,
+ addrstr, sizeof(addrstr)))
+ vty_out(vty, ", src %s",
+ addrstr);
+ }
+ break;
+
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ if (!IPV6_ADDR_SAME(&nexthop->src.ipv6,
+ &in6addr_any)) {
+ if (inet_ntop(AF_INET6, &nexthop->src.ipv6,
+ addrstr, sizeof(addrstr)))
+ vty_out(vty, ", src %s",
+ addrstr);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (re->nexthop_mtu)
+ vty_out(vty, ", mtu %u", re->nexthop_mtu);
+
+ /* Label information */
+ if (nexthop->nh_label && nexthop->nh_label->num_labels) {
+ vty_out(vty, ", label %s",
+ mpls_label2str(nexthop->nh_label->num_labels,
+ nexthop->nh_label->label, buf,
+ sizeof(buf), 1 /*pretty*/));
+ }
+
+ if (nexthop->weight)
+ vty_out(vty, ", weight %u", nexthop->weight);
+}
+
/* New RIB. Detailed information for IPv4 route. */
static void vty_show_ip_route_detail(struct vty *vty, struct route_node *rn,
int mcast, bool use_fib, bool show_ng)
@@ -253,129 +400,122 @@ static void vty_show_ip_route_detail(struct vty *vty, struct route_node *rn,
vty_out(vty, " Nexthop Group ID: %u\n", re->nhe_id);
for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) {
- char addrstr[32];
-
- vty_out(vty, " %c%s",
- re_status_output_char(re, nexthop),
- nexthop->rparent ? " " : "");
-
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- vty_out(vty, " %s",
- inet_ntoa(nexthop->gate.ipv4));
- if (nexthop->ifindex)
- vty_out(vty, ", via %s",
- ifindex2ifname(
- nexthop->ifindex,
- nexthop->vrf_id));
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- vty_out(vty, " %s",
- inet_ntop(AF_INET6, &nexthop->gate.ipv6,
- buf, sizeof(buf)));
- if (nexthop->ifindex)
- vty_out(vty, ", via %s",
- ifindex2ifname(
- nexthop->ifindex,
- nexthop->vrf_id));
- break;
- case NEXTHOP_TYPE_IFINDEX:
- vty_out(vty, " directly connected, %s",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- vty_out(vty, " unreachable");
- switch (nexthop->bh_type) {
- case BLACKHOLE_REJECT:
- vty_out(vty, " (ICMP unreachable)");
- break;
- case BLACKHOLE_ADMINPROHIB:
- vty_out(vty,
- " (ICMP admin-prohibited)");
- break;
- case BLACKHOLE_NULL:
- vty_out(vty, " (blackhole)");
- break;
- case BLACKHOLE_UNSPEC:
- break;
- }
- break;
- default:
- break;
- }
-
- if ((re->vrf_id != nexthop->vrf_id)
- && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) {
- struct vrf *vrf =
- vrf_lookup_by_id(nexthop->vrf_id);
-
- if (vrf)
- vty_out(vty, "(vrf %s)", vrf->name);
- else
- vty_out(vty, "(vrf UNKNOWN)");
- }
+ /* Use helper to format each nexthop */
+ show_nexthop_detail_helper(vty, re, nexthop);
+ vty_out(vty, "\n");
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE))
- vty_out(vty, " (duplicate nexthop removed)");
+ /* Include backup info, if present */
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP))
+ show_nh_backup_helper(vty, re->nhe, nexthop);
+ }
+ vty_out(vty, "\n");
+ }
+}
- if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
- vty_out(vty, " inactive");
+/*
+ * Helper for nexthop output, used in the 'show ip route' path
+ */
+static void show_route_nexthop_helper(struct vty *vty,
+ const struct route_entry *re,
+ const struct nexthop *nexthop)
+{
+ char buf[MPLS_LABEL_STRLEN];
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ vty_out(vty, " via %s", inet_ntoa(nexthop->gate.ipv4));
+ if (nexthop->ifindex)
+ vty_out(vty, ", %s",
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ vty_out(vty, " via %s",
+ inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf,
+ sizeof(buf)));
+ if (nexthop->ifindex)
+ vty_out(vty, ", %s",
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
+ break;
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
- vty_out(vty, " onlink");
+ case NEXTHOP_TYPE_IFINDEX:
+ vty_out(vty, " is directly connected, %s",
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ vty_out(vty, " unreachable");
+ switch (nexthop->bh_type) {
+ case BLACKHOLE_REJECT:
+ vty_out(vty, " (ICMP unreachable)");
+ break;
+ case BLACKHOLE_ADMINPROHIB:
+ vty_out(vty, " (ICMP admin-prohibited)");
+ break;
+ case BLACKHOLE_NULL:
+ vty_out(vty, " (blackhole)");
+ break;
+ case BLACKHOLE_UNSPEC:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
- vty_out(vty, " (recursive)");
+ if ((re == NULL || (nexthop->vrf_id != re->vrf_id)) &&
+ (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) {
+ struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id);
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (nexthop->src.ipv4.s_addr) {
- if (inet_ntop(AF_INET,
- &nexthop->src.ipv4,
- addrstr, sizeof(addrstr)))
- vty_out(vty, ", src %s",
- addrstr);
- }
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (!IPV6_ADDR_SAME(&nexthop->src.ipv6,
- &in6addr_any)) {
- if (inet_ntop(AF_INET6,
- &nexthop->src.ipv6,
- addrstr, sizeof(addrstr)))
- vty_out(vty, ", src %s",
- addrstr);
- }
- break;
- default:
- break;
- }
+ if (vrf)
+ vty_out(vty, " (vrf %s)", vrf->name);
+ else
+ vty_out(vty, " (vrf UNKNOWN)");
+ }
- if (re->nexthop_mtu)
- vty_out(vty, ", mtu %u", re->nexthop_mtu);
+ if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+ vty_out(vty, " inactive");
- /* Label information */
- if (nexthop->nh_label
- && nexthop->nh_label->num_labels) {
- vty_out(vty, ", label %s",
- mpls_label2str(
- nexthop->nh_label->num_labels,
- nexthop->nh_label->label, buf,
- sizeof(buf), 1));
- }
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
+ vty_out(vty, " onlink");
- if (nexthop->weight)
- vty_out(vty, ", weight %u", nexthop->weight);
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ vty_out(vty, " (recursive)");
- vty_out(vty, "\n");
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ if (nexthop->src.ipv4.s_addr) {
+ if (inet_ntop(AF_INET, &nexthop->src.ipv4, buf,
+ sizeof(buf)))
+ vty_out(vty, ", src %s", buf);
}
- vty_out(vty, "\n");
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any)) {
+ if (inet_ntop(AF_INET6, &nexthop->src.ipv6, buf,
+ sizeof(buf)))
+ vty_out(vty, ", src %s", buf);
+ }
+ break;
+ default:
+ break;
}
+
+ /* Label information */
+ if (nexthop->nh_label && nexthop->nh_label->num_labels) {
+ vty_out(vty, ", label %s",
+ mpls_label2str(nexthop->nh_label->num_labels,
+ nexthop->nh_label->label, buf,
+ sizeof(buf), 1));
+ }
+
+ if ((re == NULL) && nexthop->weight)
+ vty_out(vty, ", weight %u", nexthop->weight);
}
static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
@@ -660,105 +800,43 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
len - 3 + (2 * nexthop_level(nexthop)), ' ');
}
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- vty_out(vty, " via %s", inet_ntoa(nexthop->gate.ipv4));
- if (nexthop->ifindex)
- vty_out(vty, ", %s",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- vty_out(vty, " via %s",
- inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf,
- sizeof(buf)));
- if (nexthop->ifindex)
- vty_out(vty, ", %s",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- break;
-
- case NEXTHOP_TYPE_IFINDEX:
- vty_out(vty, " is directly connected, %s",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- vty_out(vty, " unreachable");
- switch (nexthop->bh_type) {
- case BLACKHOLE_REJECT:
- vty_out(vty, " (ICMP unreachable)");
- break;
- case BLACKHOLE_ADMINPROHIB:
- vty_out(vty, " (ICMP admin-prohibited)");
- break;
- case BLACKHOLE_NULL:
- vty_out(vty, " (blackhole)");
- break;
- case BLACKHOLE_UNSPEC:
- break;
- }
- break;
- default:
- break;
- }
-
- if ((nexthop->vrf_id != re->vrf_id)
- && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE)) {
- struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id);
+ show_route_nexthop_helper(vty, re, nexthop);
- if (vrf)
- vty_out(vty, "(vrf %s)", vrf->name);
- else
- vty_out(vty, "(vrf UNKNOWN)");
- }
-
- if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
- vty_out(vty, " inactive");
+ vty_out(vty, ", %s\n", up_str);
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
- vty_out(vty, " onlink");
+ /* Check for backup info */
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
+ struct nexthop *backup;
+ int i;
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
- vty_out(vty, " (recursive)");
+ if (re->nhe->backup_info == NULL ||
+ re->nhe->backup_info->nhe == NULL)
+ continue;
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (nexthop->src.ipv4.s_addr) {
- if (inet_ntop(AF_INET, &nexthop->src.ipv4, buf,
- sizeof(buf)))
- vty_out(vty, ", src %s", buf);
- }
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any)) {
- if (inet_ntop(AF_INET6, &nexthop->src.ipv6, buf,
- sizeof(buf)))
- vty_out(vty, ", src %s", buf);
+ i = 0;
+ for (ALL_NEXTHOPS(re->nhe->backup_info->nhe->nhg,
+ backup)) {
+ if (i == nexthop->backup_idx)
+ break;
+ i++;
}
- break;
- default:
- break;
- }
- /* Label information */
- if (nexthop->nh_label && nexthop->nh_label->num_labels) {
- vty_out(vty, ", label %s",
- mpls_label2str(nexthop->nh_label->num_labels,
- nexthop->nh_label->label, buf,
- sizeof(buf), 1));
+ /* Print useful backup info */
+ if (backup) {
+ /* TODO -- install state is not accurate */
+ vty_out(vty, " %*c [backup %d]",
+ /*re_status_output_char(re, backup),*/
+ len - 3 + (2 * nexthop_level(nexthop)),
+ ' ', nexthop->backup_idx);
+ show_route_nexthop_helper(vty, re, backup);
+ vty_out(vty, "\n");
+ }
}
-
- vty_out(vty, ", %s\n", up_str);
}
}
static void vty_show_ip_route_detail_json(struct vty *vty,
- struct route_node *rn, bool use_fib)
+ struct route_node *rn, bool use_fib)
{
json_object *json = NULL;
json_object *json_prefix = NULL;
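In the route display above, a primary nexthop flagged NEXTHOP_FLAG_HAS_BACKUP carries a backup_idx into the route's backup group (re->nhe->backup_info->nhe->nhg); the loop walks that group to the matching position and, if an entry exists there, prints it indented under the primary as "[backup N]". A compilable sketch of just the index walk, with a hypothetical find_backup_by_index() over simplified list nodes (not FRR's struct nexthop): when the index runs past the end, the walk ends on NULL and the caller's "if (backup)" check skips the printout.

/* Illustrative sketch only -- hypothetical names and types. */
#include <stddef.h>
#include <stdio.h>

struct demo_nexthop {
	struct demo_nexthop *next;
	const char *label;
};

static struct demo_nexthop *
find_backup_by_index(struct demo_nexthop *backup_head, int backup_idx)
{
	struct demo_nexthop *backup;
	int i = 0;

	for (backup = backup_head; backup; backup = backup->next) {
		if (i == backup_idx)
			break;
		i++;
	}

	return backup;  /* NULL when backup_idx is out of range */
}

int main(void)
{
	struct demo_nexthop b1 = { NULL, "backup-1" };
	struct demo_nexthop b0 = { &b1, "backup-0" };

	printf("%s\n", find_backup_by_index(&b0, 1)->label);
	printf("%s\n", find_backup_by_index(&b0, 5) ? "found" : "INVALID");
	return 0;
}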
@@ -1028,9 +1106,8 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe)
{
struct nexthop *nexthop = NULL;
struct nhg_connected *rb_node_dep = NULL;
- char buf[SRCDEST2STR_BUFFER];
-
struct vrf *nhe_vrf = vrf_lookup_by_id(nhe->vrf_id);
+ struct nexthop_group *backup_nhg;
vty_out(vty, "ID: %u\n", nhe->id);
vty_out(vty, " RefCnt: %d\n", nhe->refcnt);
@@ -1062,6 +1139,7 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe)
vty_out(vty, "\n");
}
+ /* Output nexthops */
for (ALL_NEXTHOPS(nhe->nhg, nexthop)) {
if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
vty_out(vty, " ");
@@ -1069,100 +1147,56 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe)
/* Make recursive nexthops a bit more clear */
vty_out(vty, " ");
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- vty_out(vty, " %s", inet_ntoa(nexthop->gate.ipv4));
- if (nexthop->ifindex)
- vty_out(vty, ", %s",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- vty_out(vty, " %s",
- inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf,
- sizeof(buf)));
- if (nexthop->ifindex)
- vty_out(vty, ", %s",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- break;
+ show_route_nexthop_helper(vty, NULL, nexthop);
- case NEXTHOP_TYPE_IFINDEX:
- vty_out(vty, " directly connected %s",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- vty_out(vty, " unreachable");
- switch (nexthop->bh_type) {
- case BLACKHOLE_REJECT:
- vty_out(vty, " (ICMP unreachable)");
- break;
- case BLACKHOLE_ADMINPROHIB:
- vty_out(vty, " (ICMP admin-prohibited)");
- break;
- case BLACKHOLE_NULL:
- vty_out(vty, " (blackhole)");
- break;
- case BLACKHOLE_UNSPEC:
- break;
- }
- break;
- default:
- break;
+ if (nhe->backup_info == NULL || nhe->backup_info->nhe == NULL) {
+ if (CHECK_FLAG(nexthop->flags,
+ NEXTHOP_FLAG_HAS_BACKUP))
+ vty_out(vty, " [backup %d]",
+ nexthop->backup_idx);
+
+ vty_out(vty, "\n");
+ continue;
}
- struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id);
+ /* TODO -- print more useful backup info */
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
+ struct nexthop *backup;
+ int i;
- if (vrf)
- vty_out(vty, " (vrf %s)", vrf->name);
- else
- vty_out(vty, " (vrf UNKNOWN)");
+ i = 0;
+ for (ALL_NEXTHOPS(nhe->backup_info->nhe->nhg, backup)) {
+ if (i == nexthop->backup_idx)
+ break;
+ i++;
+ }
- if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
- vty_out(vty, " inactive");
+ /* TODO */
+ if (backup)
+ vty_out(vty, " [backup %d]",
+ nexthop->backup_idx);
+ else
+ vty_out(vty, " [backup INVALID]");
+ }
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
- vty_out(vty, " onlink");
+ vty_out(vty, "\n");
+ }
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
- vty_out(vty, " (recursive)");
+ /* Output backup nexthops (if any) */
+ backup_nhg = zebra_nhg_get_backup_nhg(nhe);
+ if (backup_nhg) {
+ vty_out(vty, " Backups:\n");
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (nexthop->src.ipv4.s_addr) {
- if (inet_ntop(AF_INET, &nexthop->src.ipv4, buf,
- sizeof(buf)))
- vty_out(vty, ", src %s", buf);
- }
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any)) {
- if (inet_ntop(AF_INET6, &nexthop->src.ipv6, buf,
- sizeof(buf)))
- vty_out(vty, ", src %s", buf);
- }
- break;
- default:
- break;
- }
+ for (ALL_NEXTHOPS_PTR(backup_nhg, nexthop)) {
+ if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ vty_out(vty, " ");
+ else
+ /* Make recursive nexthops a bit more clear */
+ vty_out(vty, " ");
- /* Label information */
- if (nexthop->nh_label && nexthop->nh_label->num_labels) {
- vty_out(vty, ", label %s",
- mpls_label2str(nexthop->nh_label->num_labels,
- nexthop->nh_label->label, buf,
- sizeof(buf), 1));
+ show_route_nexthop_helper(vty, NULL, nexthop);
+ vty_out(vty, "\n");
}
-
- if (nexthop->weight)
- vty_out(vty, ", weight %u", nexthop->weight);
-
- vty_out(vty, "\n");
}
if (!zebra_nhg_dependents_is_empty(nhe)) {
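With these changes show_nexthop_group_out() makes two passes: it prints the group's primary nexthops through the shared helper, tagging each one that has a backup assigned with "[backup N]" (or "[backup INVALID]" when the index does not resolve), and then, if zebra_nhg_get_backup_nhg(nhe) returns a backup group, prints that group under a "Backups:" heading with the same helper via ALL_NEXTHOPS_PTR. A rough stand-alone sketch of that two-pass layout, with pre-rendered strings standing in for real nexthops:

/* Illustrative sketch only -- simplified stand-ins, not FRR's types. */
#include <stdio.h>

struct demo_nh {
	const char *text;   /* pre-rendered nexthop text */
	int backup_idx;     /* -1 when no backup is assigned */
};

static void show_group(const struct demo_nh *prim, int nprim,
		       const struct demo_nh *backup, int nbackup)
{
	int i;

	/* Pass 1: primary nexthops, tagged with their backup index. */
	for (i = 0; i < nprim; i++) {
		printf("          %s", prim[i].text);
		if (prim[i].backup_idx >= 0)
			printf(" [backup %d]", prim[i].backup_idx);
		printf("\n");
	}

	/* Pass 2: the backup group, if one exists. */
	if (nbackup > 0) {
		printf("     Backups:\n");
		for (i = 0; i < nbackup; i++)
			printf("          %s\n", backup[i].text);
	}
}

int main(void)
{
	struct demo_nh prim[] = {
		{ "192.0.2.1, eth0", 0 },
		{ "192.0.2.2, eth1", -1 },
	};
	struct demo_nh backup[] = { { "198.51.100.1, eth2", -1 } };

	show_group(prim, 2, backup, 1);
	return 0;
}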
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index 2e1daa6fdf..aa2e5c91c9 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -121,11 +121,11 @@ static struct interface *zvni_map_to_macvlan(struct interface *br_if,
/* l3-vni next-hop neigh related APIs */
static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni,
- struct ipaddr *ip);
+ const struct ipaddr *ip);
static void *zl3vni_nh_alloc(void *p);
static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni,
- struct ipaddr *vtep_ip,
- struct ethaddr *rmac);
+ const struct ipaddr *vtep_ip,
+ const struct ethaddr *rmac);
static int zl3vni_nh_del(zebra_l3vni_t *zl3vni, zebra_neigh_t *n);
static int zl3vni_nh_install(zebra_l3vni_t *zl3vni, zebra_neigh_t *n);
static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n);
@@ -133,10 +133,10 @@ static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n);
/* l3-vni rmac related APIs */
static void zl3vni_print_rmac_hash(struct hash_bucket *, void *);
static zebra_mac_t *zl3vni_rmac_lookup(zebra_l3vni_t *zl3vni,
- struct ethaddr *rmac);
+ const struct ethaddr *rmac);
static void *zl3vni_rmac_alloc(void *p);
static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni,
- struct ethaddr *rmac);
+ const struct ethaddr *rmac);
static int zl3vni_rmac_del(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac);
static int zl3vni_rmac_install(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac);
static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac);
@@ -4434,7 +4434,7 @@ static void zl3vni_cleanup_all(struct hash_bucket *bucket, void *args)
}
static void rb_find_or_add_host(struct host_rb_tree_entry *hrbe,
- struct prefix *host)
+ const struct prefix *host)
{
struct host_rb_entry lookup;
struct host_rb_entry *hle;
@@ -4473,7 +4473,7 @@ static void rb_delete_host(struct host_rb_tree_entry *hrbe, struct prefix *host)
* Look up MAC hash entry.
*/
static zebra_mac_t *zl3vni_rmac_lookup(zebra_l3vni_t *zl3vni,
- struct ethaddr *rmac)
+ const struct ethaddr *rmac)
{
zebra_mac_t tmp;
zebra_mac_t *pmac;
@@ -4502,7 +4502,8 @@ static void *zl3vni_rmac_alloc(void *p)
/*
* Add RMAC entry to l3-vni
*/
-static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni, struct ethaddr *rmac)
+static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni,
+ const struct ethaddr *rmac)
{
zebra_mac_t tmp_rmac;
zebra_mac_t *zrmac = NULL;
@@ -4632,9 +4633,10 @@ static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
}
/* handle rmac add */
-static int zl3vni_remote_rmac_add(zebra_l3vni_t *zl3vni, struct ethaddr *rmac,
- struct ipaddr *vtep_ip,
- struct prefix *host_prefix)
+static int zl3vni_remote_rmac_add(zebra_l3vni_t *zl3vni,
+ const struct ethaddr *rmac,
+ const struct ipaddr *vtep_ip,
+ const struct prefix *host_prefix)
{
char buf[ETHER_ADDR_STRLEN];
char buf1[INET6_ADDRSTRLEN];
@@ -4709,7 +4711,8 @@ static void zl3vni_remote_rmac_del(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac,
/*
* Look up nh hash entry on a l3-vni.
*/
-static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni, struct ipaddr *ip)
+static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni,
+ const struct ipaddr *ip)
{
zebra_neigh_t tmp;
zebra_neigh_t *n;
@@ -4739,8 +4742,9 @@ static void *zl3vni_nh_alloc(void *p)
/*
* Add neighbor entry.
*/
-static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni, struct ipaddr *ip,
- struct ethaddr *mac)
+static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac)
{
zebra_neigh_t tmp_n;
zebra_neigh_t *n = NULL;
@@ -4822,9 +4826,10 @@ static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
}
/* add remote vtep as a neigh entry */
-static int zl3vni_remote_nh_add(zebra_l3vni_t *zl3vni, struct ipaddr *vtep_ip,
- struct ethaddr *rmac,
- struct prefix *host_prefix)
+static int zl3vni_remote_nh_add(zebra_l3vni_t *zl3vni,
+ const struct ipaddr *vtep_ip,
+ const struct ethaddr *rmac,
+ const struct prefix *host_prefix)
{
char buf[ETHER_ADDR_STRLEN];
char buf1[ETHER_ADDR_STRLEN];
@@ -5960,9 +5965,9 @@ int is_l3vni_for_prefix_routes_only(vni_t vni)
}
/* handle evpn route in vrf table */
-void zebra_vxlan_evpn_vrf_route_add(vrf_id_t vrf_id, struct ethaddr *rmac,
- struct ipaddr *vtep_ip,
- struct prefix *host_prefix)
+void zebra_vxlan_evpn_vrf_route_add(vrf_id_t vrf_id, const struct ethaddr *rmac,
+ const struct ipaddr *vtep_ip,
+ const struct prefix *host_prefix)
{
zebra_l3vni_t *zl3vni = NULL;
struct ipaddr ipv4_vtep;
diff --git a/zebra/zebra_vxlan.h b/zebra/zebra_vxlan.h
index 6ca93f6cb6..a5c13a59e3 100644
--- a/zebra/zebra_vxlan.h
+++ b/zebra/zebra_vxlan.h
@@ -199,9 +199,9 @@ extern void zebra_vxlan_cleanup_tables(struct zebra_vrf *);
extern void zebra_vxlan_init(void);
extern void zebra_vxlan_disable(void);
extern void zebra_vxlan_evpn_vrf_route_add(vrf_id_t vrf_id,
- struct ethaddr *rmac,
- struct ipaddr *ip,
- struct prefix *host_prefix);
+ const struct ethaddr *rmac,
+ const struct ipaddr *ip,
+ const struct prefix *host_prefix);
extern void zebra_vxlan_evpn_vrf_route_del(vrf_id_t vrf_id,
struct ipaddr *vtep_ip,
struct prefix *host_prefix);
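The zebra_vxlan changes are const-correctness plumbing: the RMAC/next-hop lookup and add paths, and zebra_vxlan_evpn_vrf_route_add(), now take const pointers for the MAC, VTEP IP and host prefix they only read, so callers holding read-only data can pass it directly and the compiler enforces that these paths do not modify it. A tiny illustration of the same idea with hypothetical demo types, not FRR's:

/* Illustrative sketch only -- hypothetical demo types. */
#include <stdio.h>
#include <string.h>

struct demo_mac {
	unsigned char octet[6];
};

struct demo_entry {
	struct demo_mac mac;
	const char *name;
};

/* const on the key documents (and enforces) that *rmac is only read. */
static const struct demo_entry *
demo_rmac_lookup(const struct demo_entry *table, size_t n,
		 const struct demo_mac *rmac)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (memcmp(&table[i].mac, rmac, sizeof(*rmac)) == 0)
			return &table[i];
	}
	return NULL;
}

int main(void)
{
	const struct demo_mac key = { { 0x52, 0x54, 0x00, 0xaa, 0xbb, 0xcc } };
	const struct demo_entry table[] = {
		{ { { 0x52, 0x54, 0x00, 0xaa, 0xbb, 0xcc } }, "vtep-1" },
	};
	const struct demo_entry *e = demo_rmac_lookup(table, 1, &key);

	printf("%s\n", e ? e->name : "not found");
	return 0;
}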