-rw-r--r--  .clang-format | 2
-rw-r--r--  babeld/message.c | 4
-rw-r--r--  bgpd/bgp_attr.c | 10
-rw-r--r--  bgpd/bgp_attr.h | 2
-rw-r--r--  bgpd/bgp_ecommunity.c | 221
-rw-r--r--  bgpd/bgp_ecommunity.h | 34
-rw-r--r--  bgpd/bgp_io.c | 2
-rw-r--r--  bgpd/bgp_mplsvpn.c | 35
-rw-r--r--  bgpd/bgp_nht.c | 12
-rw-r--r--  bgpd/bgp_routemap.c | 180
-rw-r--r--  bgpd/bgp_routemap_nb.c | 7
-rw-r--r--  bgpd/bgp_routemap_nb.h | 4
-rw-r--r--  bgpd/bgp_routemap_nb_config.c | 52
-rw-r--r--  bgpd/bgp_zebra.c | 13
-rw-r--r--  bgpd/bgpd.c | 3
-rw-r--r--  doc/developer/building.rst | 5
-rw-r--r--  doc/developer/subdir.am | 4
-rw-r--r--  doc/developer/topotests.rst | 2
-rw-r--r--  doc/user/bgp.rst | 11
-rw-r--r--  doc/user/ripngd.rst | 47
-rw-r--r--  isisd/isis_zebra.c | 6
-rw-r--r--  lib/darr.c | 114
-rw-r--r--  lib/darr.h | 363
-rw-r--r--  lib/mgmt.proto | 71
-rw-r--r--  lib/mgmt_be_client.c | 7
-rw-r--r--  lib/mgmt_fe_client.c | 117
-rw-r--r--  lib/mgmt_fe_client.h | 32
-rw-r--r--  lib/routemap.h | 3
-rw-r--r--  lib/routemap_cli.c | 5
-rw-r--r--  lib/subdir.am | 2
-rw-r--r--  lib/vty.c | 51
-rw-r--r--  lib/vty.h | 8
-rw-r--r--  mgmtd/mgmt_be_adapter.c | 159
-rw-r--r--  mgmtd/mgmt_fe_adapter.c | 439
-rw-r--r--  mgmtd/mgmt_fe_adapter.h | 32
-rw-r--r--  mgmtd/mgmt_txn.c | 906
-rw-r--r--  mgmtd/mgmt_txn.h | 25
-rw-r--r--  mgmtd/mgmt_vty.c | 4
-rw-r--r--  ospf6d/ospf6_main.c | 29
-rw-r--r--  ospf6d/ospf6_message.c | 11
-rw-r--r--  ospf6d/ospf6d.c | 33
-rw-r--r--  ospf6d/ospf6d.h | 9
-rw-r--r--  ospf6d/subdir.am | 1
-rw-r--r--  ospfd/ospf_main.c | 29
-rw-r--r--  ospfd/ospf_packet.c | 10
-rw-r--r--  ospfd/ospf_zebra.c | 6
-rw-r--r--  ospfd/ospfd.c | 2
-rw-r--r--  ospfd/ospfd.h | 3
-rw-r--r--  pbrd/pbr_vty.c | 5
-rw-r--r--  pimd/pim_iface.c | 13
-rw-r--r--  ripngd/ripng_routemap.c | 72
-rw-r--r--  staticd/static_bfd.c | 5
-rw-r--r--  tests/lib/subdir.am | 7
-rw-r--r--  tests/lib/test_darr.c | 279
-rw-r--r--  tests/topotests/bgp_color_extcommunities/__init__.py | 0
-rw-r--r--  tests/topotests/bgp_color_extcommunities/r1/bgpd.conf | 17
-rw-r--r--  tests/topotests/bgp_color_extcommunities/r1/zebra.conf | 3
-rw-r--r--  tests/topotests/bgp_color_extcommunities/r2/bgpd.conf | 4
-rw-r--r--  tests/topotests/bgp_color_extcommunities/r2/zebra.conf | 4
-rw-r--r--  tests/topotests/bgp_color_extcommunities/test_bgp_color_extcommunities.py | 125
-rw-r--r--  tests/topotests/bgp_set_aspath_replace/r1/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_set_aspath_replace/test_bgp_set_aspath_replace.py | 37
-rw-r--r--  tests/topotests/bgp_vpnv4_asbr/test_bgp_vpnv4_asbr.py | 3
-rw-r--r--  tests/topotests/multicast_pim_uplink_topo2/multicast_pim_uplink_topo2.json | 288
-rw-r--r--  tests/topotests/multicast_pim_uplink_topo2/test_multicast_pim_uplink_topo2.py | 1349
-rw-r--r--  tests/topotests/multicast_pim_uplink_topo3/multicast_pim_uplink_topo3.json | 295
-rw-r--r--  tests/topotests/multicast_pim_uplink_topo3/test_multicast_pim_uplink_topo3.py | 916
-rw-r--r--  tests/topotests/ripng_route_map/__init__.py | 0
-rw-r--r--  tests/topotests/ripng_route_map/r1/frr.conf | 21
-rw-r--r--  tests/topotests/ripng_route_map/r2/frr.conf | 14
-rw-r--r--  tests/topotests/ripng_route_map/r3/frr.conf | 14
-rw-r--r--  tests/topotests/ripng_route_map/test_ripng_route_map.py | 79
-rw-r--r--  yang/frr-bgp-route-map.yang | 35
-rw-r--r--  zebra/zebra_opaque.c | 1
-rw-r--r--  zebra/zebra_rib.c | 8
-rw-r--r--  zebra/zebra_vxlan.c | 2
-rw-r--r--  zebra/zserv.c | 23
77 files changed, 5411 insertions, 1336 deletions
diff --git a/.clang-format b/.clang-format
index 12362fae96..3971384a36 100644
--- a/.clang-format
+++ b/.clang-format
@@ -87,6 +87,8 @@ ForEachMacros:
# ospfd outliers:
- 'LSDB_LOOP'
# first git grep
+ - 'darr_foreach_p'
+ - 'darr_foreach_i'
- 'frr_each'
- 'frr_each_safe'
- 'frr_each_from'
diff --git a/babeld/message.c b/babeld/message.c
index d4ddebff08..f8549329c6 100644
--- a/babeld/message.c
+++ b/babeld/message.c
@@ -556,7 +556,7 @@ parse_packet(const unsigned char *from, struct interface *ifp,
int rc;
rc = network_address(message[2], message + 4, len - 2,
nh);
- if(rc < 0) {
+ if(rc <= 0) {
have_v4_nh = 0;
have_v6_nh = 0;
goto fail;
@@ -731,7 +731,7 @@ parse_packet(const unsigned char *from, struct interface *ifp,
DO_NTOHS(seqno, message + 4);
rc = network_prefix(message[2], message[3], 0,
message + 16, NULL, len - 14, prefix);
- if(rc < 0) goto fail;
+ if(rc <= 0) goto fail;
plen = message[3] + (message[2] == 1 ? 96 : 0);
debugf(BABEL_DEBUG_COMMON,"Received request (%d) for %s from %s on %s (%s, %d).",
message[6],
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index 221605d985..34422bf514 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -2206,6 +2206,16 @@ cluster_list_ignore:
return bgp_attr_ignore(peer, args->type);
}
+/* get locally configured or received srte-color value */
+uint32_t bgp_attr_get_color(struct attr *attr)
+{
+ if (attr->srte_color)
+ return attr->srte_color;
+ if (attr->ecommunity)
+ return ecommunity_select_color(attr->ecommunity);
+ return 0;
+}
+
/* Multiprotocol reachability information parse. */
int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,
struct bgp_nlri *mp_update)
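The new bgp_attr_get_color() helper gives a locally set srte_color precedence over a color carried in the extended-community list. A minimal standalone sketch of that precedence (illustration only; the struct and values below are made up and are not FRR types):

#include <stdint.h>
#include <stdio.h>

struct demo_attr {
	uint32_t srte_color; /* locally configured SR-TE color, 0 if unset */
	uint32_t ecom_color; /* color from the received ecommunity, 0 if none */
};

/* mirrors the precedence implemented by bgp_attr_get_color() */
static uint32_t demo_get_color(const struct demo_attr *attr)
{
	if (attr->srte_color)
		return attr->srte_color;
	return attr->ecom_color;
}

int main(void)
{
	struct demo_attr received = { .srte_color = 0, .ecom_color = 200 };
	struct demo_attr local = { .srte_color = 100, .ecom_color = 200 };

	/* prints "200 100": the local configuration wins when present */
	printf("%u %u\n", demo_get_color(&received), demo_get_color(&local));
	return 0;
}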
diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h
index 6cd34d301c..415df2ce53 100644
--- a/bgpd/bgp_attr.h
+++ b/bgpd/bgp_attr.h
@@ -466,6 +466,8 @@ extern void bgp_packet_mpunreach_end(struct stream *s, size_t attrlen_pnt);
extern enum bgp_attr_parse_ret bgp_attr_nexthop_valid(struct peer *peer,
struct attr *attr);
+extern uint32_t bgp_attr_get_color(struct attr *attr);
+
static inline bool bgp_rmap_nhop_changed(uint32_t out_rmap_flags,
uint32_t in_rmap_flags)
{
diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c
index 29b2250747..c408edb166 100644
--- a/bgpd/bgp_ecommunity.c
+++ b/bgpd/bgp_ecommunity.c
@@ -355,6 +355,22 @@ bool ecommunity_cmp(const void *arg1, const void *arg2)
ecom1->unit_size) == 0);
}
+static void ecommunity_color_str(char *buf, size_t bufsz, uint8_t *ptr)
+{
+ /*
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | 0x03 | Sub-Type(0x0b) | Flags |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Color Value |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ uint32_t colorid;
+
+ memcpy(&colorid, ptr + 3, 4);
+ colorid = ntohl(colorid);
+ snprintf(buf, bufsz, "Color:%d", colorid);
+}
+
/* Initialize Extended Comminities related hash. */
void ecommunity_init(void)
{
@@ -373,6 +389,7 @@ enum ecommunity_token {
ecommunity_token_rt,
ecommunity_token_nt,
ecommunity_token_soo,
+ ecommunity_token_color,
ecommunity_token_val,
ecommunity_token_rt6,
ecommunity_token_val6,
@@ -510,6 +527,9 @@ static int ecommunity_encode_internal(uint8_t type, uint8_t sub_type,
memcpy(&eval6->val[2], ip6, sizeof(struct in6_addr));
eval6->val[18] = (val >> 8) & 0xff;
eval6->val[19] = val & 0xff;
+ } else if (type == ECOMMUNITY_ENCODE_OPAQUE &&
+ sub_type == ECOMMUNITY_COLOR) {
+ encode_color(val, eval);
} else {
encode_route_target_as4(as, val, eval, trans);
}
@@ -543,10 +563,15 @@ static const char *ecommunity_gettoken(const char *str, void *eval_ptr,
struct in6_addr ip6;
as_t as = 0;
uint32_t val = 0;
- uint8_t ecomm_type;
+ uint32_t val_color = 0;
+ uint8_t ecomm_type = 0;
+ uint8_t sub_type = 0;
char buf[INET_ADDRSTRLEN + 1];
struct ecommunity_val *eval = (struct ecommunity_val *)eval_ptr;
uint64_t tmp_as = 0;
+ static const char str_color[5] = "color";
+ const char *ptr_color;
+ bool val_color_set = false;
/* Skip white space. */
while (isspace((unsigned char)*p)) {
@@ -558,54 +583,49 @@ static const char *ecommunity_gettoken(const char *str, void *eval_ptr,
if (*p == '\0')
return NULL;
- /* "rt", "nt", and "soo" keyword parse. */
- if (!isdigit((unsigned char)*p)) {
- /* "rt" match check. */
- if (tolower((unsigned char)*p) == 'r') {
+ /* "rt", "nt", "soo", and "color" keyword parse. */
+ /* "rt" */
+ if (tolower((unsigned char)*p) == 'r') {
+ p++;
+ if (tolower((unsigned char)*p) == 't') {
p++;
- if (tolower((unsigned char)*p) == 't') {
- p++;
- if (*p != '\0' && tolower((int)*p) == '6')
- *token = ecommunity_token_rt6;
- else
- *token = ecommunity_token_rt;
- return p;
- }
- if (isspace((unsigned char)*p) || *p == '\0') {
+ if (*p != '\0' && tolower((int)*p) == '6')
+ *token = ecommunity_token_rt6;
+ else
*token = ecommunity_token_rt;
- return p;
- }
- goto error;
+ return p;
+ }
+ if (isspace((unsigned char)*p) || *p == '\0') {
+ *token = ecommunity_token_rt;
+ return p;
}
- /* "nt" match check. */
- if (tolower((unsigned char)*p) == 'n') {
+ goto error;
+ }
+
+ /* "nt" */
+ if (tolower((unsigned char)*p) == 'n') {
+ p++;
+ if (tolower((unsigned char)*p) == 't') {
p++;
- if (tolower((unsigned char)*p) == 't') {
- p++;
- *token = ecommunity_token_nt;
- return p;
- }
- if (isspace((unsigned char)*p) || *p == '\0') {
- *token = ecommunity_token_nt;
- return p;
- }
- goto error;
+ *token = ecommunity_token_nt;
+ return p;
}
- /* "soo" match check. */
- else if (tolower((unsigned char)*p) == 's') {
+ if (isspace((unsigned char)*p) || *p == '\0') {
+ *token = ecommunity_token_nt;
+ return p;
+ }
+ goto error;
+ }
+
+ /* "soo" */
+ if (tolower((unsigned char)*p) == 's') {
+ p++;
+ if (tolower((unsigned char)*p) == 'o') {
p++;
if (tolower((unsigned char)*p) == 'o') {
p++;
- if (tolower((unsigned char)*p) == 'o') {
- p++;
- *token = ecommunity_token_soo;
- return p;
- }
- if (isspace((unsigned char)*p) || *p == '\0') {
- *token = ecommunity_token_soo;
- return p;
- }
- goto error;
+ *token = ecommunity_token_soo;
+ return p;
}
if (isspace((unsigned char)*p) || *p == '\0') {
*token = ecommunity_token_soo;
@@ -613,9 +633,29 @@ static const char *ecommunity_gettoken(const char *str, void *eval_ptr,
}
goto error;
}
+ if (isspace((unsigned char)*p) || *p == '\0') {
+ *token = ecommunity_token_soo;
+ return p;
+ }
goto error;
}
+ /* "color" */
+ if (tolower((unsigned char)*p) == 'c') {
+ ptr_color = &str_color[0];
+ for (unsigned int i = 0; i < 5; i++) {
+ if (tolower((unsigned char)*p) != *ptr_color)
+ break;
+
+ p++;
+ ptr_color++;
+ }
+ if (isspace((unsigned char)*p) || *p == '\0') {
+ *token = ecommunity_token_color;
+ return p;
+ }
+ goto error;
+ }
/* What a mess, there are several possibilities:
*
* a) A.B.C.D:MN
@@ -716,17 +756,24 @@ static const char *ecommunity_gettoken(const char *str, void *eval_ptr,
} else {
digit = 1;
- /* We're past the IP/ASN part */
+ /* We're past the IP/ASN part,
+ * or we have a color
+ */
if (separator) {
val *= 10;
val += (*p - '0');
+ val_color_set = false;
+ } else {
+ val_color *= 10;
+ val_color += (*p - '0');
+ val_color_set = true;
}
}
p++;
}
/* Low digit part must be there. */
- if (!digit || !separator)
+ if (!digit && (!separator || !val_color_set))
goto error;
/* Encode result into extended community. */
@@ -734,9 +781,15 @@ static const char *ecommunity_gettoken(const char *str, void *eval_ptr,
ecomm_type = ECOMMUNITY_ENCODE_IP;
else if (as > BGP_AS_MAX)
ecomm_type = ECOMMUNITY_ENCODE_AS4;
- else
+ else if (as > 0)
ecomm_type = ECOMMUNITY_ENCODE_AS;
- if (ecommunity_encode(ecomm_type, type, 1, as, ip, val, eval))
+ else if (val_color) {
+ ecomm_type = ECOMMUNITY_ENCODE_OPAQUE;
+ sub_type = ECOMMUNITY_COLOR;
+ val = val_color;
+ }
+
+ if (ecommunity_encode(ecomm_type, sub_type, 1, as, ip, val, eval))
goto error;
*token = ecommunity_token_val;
return p;
@@ -763,6 +816,7 @@ static struct ecommunity *ecommunity_str2com_internal(const char *str, int type,
case ecommunity_token_nt:
case ecommunity_token_rt6:
case ecommunity_token_soo:
+ case ecommunity_token_color:
if (!keyword_included || keyword) {
if (ecom)
ecommunity_free(&ecom);
@@ -771,15 +825,14 @@ static struct ecommunity *ecommunity_str2com_internal(const char *str, int type,
keyword = 1;
if (token == ecommunity_token_rt ||
- token == ecommunity_token_rt6) {
+ token == ecommunity_token_rt6)
type = ECOMMUNITY_ROUTE_TARGET;
- }
- if (token == ecommunity_token_soo) {
+ if (token == ecommunity_token_soo)
type = ECOMMUNITY_SITE_ORIGIN;
- }
- if (token == ecommunity_token_nt) {
+ if (token == ecommunity_token_nt)
type = ECOMMUNITY_NODE_TARGET;
- }
+ if (token == ecommunity_token_color)
+ type = ECOMMUNITY_COLOR;
break;
case ecommunity_token_val:
if (keyword_included) {
@@ -990,26 +1043,26 @@ static int ecommunity_lb_str(char *buf, size_t bufsz, const uint8_t *pnt,
}
/* Convert extended community attribute to string.
-
- Due to historical reason of industry standard implementation, there
- are three types of format.
-
- route-map set extcommunity format
- "rt 100:1 100:2soo 100:3"
-
- extcommunity-list
- "rt 100:1 rt 100:2 soo 100:3show [ip] bgp" and extcommunity-list regular expression matching
- "RT:100:1 RT:100:2 SoO:100:3"
-
- For each formath please use below definition for format:
-
- ECOMMUNITY_FORMAT_ROUTE_MAP
- ECOMMUNITY_FORMAT_COMMUNITY_LIST
- ECOMMUNITY_FORMAT_DISPLAY
-
- Filter is added to display only ECOMMUNITY_ROUTE_TARGET in some cases.
- 0 value displays all
-*/
+ * Due to historical reason of industry standard implementation, there
+ * are three types of format:
+ *
+ * route-map set extcommunity format:
+ * "rt 100:1 100:2soo 100:3"
+ *
+ * extcommunity-list:
+ * "rt 100:1 rt 100:2 soo 100:3"
+ *
+ * show bgp:
+ * "RT:100:1 RT:100:2 SoO:100:3"
+ *
+ * For each format please use below definition for format:
+ * ECOMMUNITY_FORMAT_ROUTE_MAP
+ * ECOMMUNITY_FORMAT_COMMUNITY_LIST
+ * ECOMMUNITY_FORMAT_DISPLAY
+ *
+ * Filter is added to display only ECOMMUNITY_ROUTE_TARGET in some cases.
+ * 0 value displays all.
+ */
char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter)
{
uint32_t i;
@@ -1086,6 +1139,9 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter)
} else if (*pnt == ECOMMUNITY_EVPN_SUBTYPE_DEF_GW) {
strlcpy(encbuf, "Default Gateway",
sizeof(encbuf));
+ } else if (*pnt == ECOMMUNITY_COLOR) {
+ ecommunity_color_str(encbuf, sizeof(encbuf),
+ pnt);
} else {
unk_ecom = 1;
}
@@ -1353,6 +1409,29 @@ bool ecommunity_match(const struct ecommunity *ecom1,
return false;
}
+/* return the last occurrence of color */
+/* it will be the greatest color value */
+extern uint32_t ecommunity_select_color(const struct ecommunity *ecom)
+{
+
+ uint32_t aux_color = 0;
+ uint8_t *p;
+ uint32_t c = 0;
+
+ /* Walk all entries and remember the last color value seen. */
+
+ for (p = ecom->val; c < ecom->size; p += ecom->unit_size, c++) {
+ if (p == NULL)
+ break;
+
+ if (p[0] == ECOMMUNITY_ENCODE_OPAQUE &&
+ p[1] == ECOMMUNITY_COLOR)
+ ptr_get_be32((const uint8_t *)&p[4], &aux_color);
+ }
+ return aux_color;
+}
+
+
/* return first occurence of type */
extern struct ecommunity_val *ecommunity_lookup(const struct ecommunity *ecom,
uint8_t type, uint8_t subtype)
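ecommunity_select_color() above scans every 8-byte extended community and keeps the value of the last opaque/color entry (type 0x03, sub-type 0x0b), so when several color communities are attached the last one in the list wins. A standalone sketch of that selection (illustration only, not FRR code; the two sample values are made up):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* two color extended communities: Color:100 followed by Color:300 */
	uint8_t vals[2][8] = {
		{ 0x03, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 100 },
		{ 0x03, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x01, 0x2c },
	};
	uint32_t color = 0, tmp;

	for (int i = 0; i < 2; i++) {
		if (vals[i][0] == 0x03 && vals[i][1] == 0x0b) {
			/* bytes 4-7 carry the color in network byte order */
			memcpy(&tmp, &vals[i][4], 4);
			color = ntohl(tmp);
		}
	}
	printf("selected color: %u\n", color); /* 300 -- last entry wins */
	return 0;
}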
diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h
index d62dc2e84c..7dc04d206a 100644
--- a/bgpd/bgp_ecommunity.h
+++ b/bgpd/bgp_ecommunity.h
@@ -46,6 +46,8 @@
#define ECOMMUNITY_REDIRECT_VRF 0x08
#define ECOMMUNITY_TRAFFIC_MARKING 0x09
#define ECOMMUNITY_REDIRECT_IP_NH 0x00
+#define ECOMMUNITY_COLOR 0x0b /* RFC9012 - color */
+
/* from IANA: bgp-extended-communities/bgp-extended-communities.xhtml
* 0x0c Flow-spec Redirect to IPv4 - draft-ietf-idr-flowspec-redirect
*/
@@ -290,6 +292,35 @@ static inline void encode_node_target(struct in_addr *node_id,
eval->val[7] = ECOMMUNITY_NODE_TARGET_RESERVED;
}
+/*
+ * Encode the BGP Color extended community.
+ * It is a transitive opaque extended community (RFC 9012, Section 4.3).
+ * The Flags field is set to zero: RFC 9012 Section 14.10 notes that no flag
+ * values have been registered, and Section 4.3 requires the field to be set
+ * to zero by the originator and ignored by the receiver.
+ */
+static inline void encode_color(uint32_t color_id, struct ecommunity_val *eval)
+{
+ /*
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | 0x03 | Sub-Type(0x0b) | Flags |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Color Value |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ memset(eval, 0, sizeof(*eval));
+ eval->val[0] = ECOMMUNITY_ENCODE_OPAQUE;
+ eval->val[1] = ECOMMUNITY_COLOR;
+ eval->val[2] = 0x00;
+ eval->val[3] = 0x00;
+ eval->val[4] = (color_id >> 24) & 0xff;
+ eval->val[5] = (color_id >> 16) & 0xff;
+ eval->val[6] = (color_id >> 8) & 0xff;
+ eval->val[7] = color_id & 0xff;
+}
+
extern void ecommunity_init(void);
extern void ecommunity_finish(void);
extern void ecommunity_free(struct ecommunity **);
@@ -314,10 +345,11 @@ extern void ecommunity_strfree(char **s);
extern bool ecommunity_include(struct ecommunity *e1, struct ecommunity *e2);
extern bool ecommunity_match(const struct ecommunity *,
const struct ecommunity *);
-extern char *ecommunity_str(struct ecommunity *);
+extern char *ecommunity_str(struct ecommunity *ecom);
extern struct ecommunity_val *ecommunity_lookup(const struct ecommunity *,
uint8_t, uint8_t);
+extern uint32_t ecommunity_select_color(const struct ecommunity *ecom);
extern bool ecommunity_add_val(struct ecommunity *ecom,
struct ecommunity_val *eval,
bool unique, bool overwrite);
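For reference, the 8-byte value that encode_color() above produces for, say, color 100 is 0x03 0x0b 0x00 0x00 0x00 0x00 0x00 0x64: type, sub-type, two zero flag bytes, then the color in network byte order. A tiny self-contained check of that layout (illustrative only; it does not use the FRR headers and the color value is arbitrary):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t color = 100;
	/* same byte layout as encode_color() writes into eval->val[] */
	uint8_t val[8] = { 0x03, 0x0b, 0x00, 0x00,
			   (color >> 24) & 0xff, (color >> 16) & 0xff,
			   (color >> 8) & 0xff, color & 0xff };

	assert(val[4] == 0 && val[5] == 0 && val[6] == 0 && val[7] == 100);
	return 0;
}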
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index 650adc1c9a..e9178fd8fc 100644
--- a/bgpd/bgp_io.c
+++ b/bgpd/bgp_io.c
@@ -225,7 +225,7 @@ static void bgp_process_reads(struct event *thread)
peer = EVENT_ARG(thread);
- if (peer->fd < 0 || bm->terminating)
+ if (bm->terminating || peer->fd < 0)
return;
struct frr_pthread *fpt = bgp_pth_io;
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 068dc44bc6..9d2335a03c 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -4023,14 +4023,21 @@ static void bgp_mplsvpn_nh_label_bind_send_nexthop_label(
}
p = &pfx_nh;
if (nh->nh_label) {
- if (nh->nh_label->num_labels >
- MPLS_MAX_LABELS - num_labels)
- lsp_num_labels = MPLS_MAX_LABELS - num_labels;
- else
- lsp_num_labels = nh->nh_label->num_labels;
+ if (nh->nh_label->num_labels + 1 > MPLS_MAX_LABELS) {
+ /* label stack overflow. no label switching will be performed
+ */
+ flog_err(EC_BGP_LABEL,
+ "%s [Error] BGP label %u->%u to %pFX, forged label stack too big: %u. Abort LSP installation",
+ bmnc->bgp_vpn->name_pretty,
+ bmnc->new_label, bmnc->orig_label,
+ &bmnc->nexthop,
+ nh->nh_label->num_labels + 1);
+ return;
+ }
+ lsp_num_labels = nh->nh_label->num_labels;
for (i = 0; i < lsp_num_labels; i++)
label[num_labels + i] = nh->nh_label->label[i];
- num_labels += lsp_num_labels;
+ num_labels = lsp_num_labels;
}
label[num_labels] = bmnc->orig_label;
num_labels += 1;
@@ -4239,15 +4246,13 @@ void bgp_mplsvpn_nh_label_bind_register_local_label(struct bgp *bgp,
return;
bgp_mplsvpn_path_nh_label_bind_unlink(pi);
- if (bmnc) {
- /* updates NHT pi list reference */
- LIST_INSERT_HEAD(&(bmnc->paths), pi,
- mplsvpn.bmnc.nh_label_bind_thread);
- pi->mplsvpn.bmnc.nh_label_bind_cache = bmnc;
- pi->mplsvpn.bmnc.nh_label_bind_cache->path_count++;
- SET_FLAG(pi->flags, BGP_PATH_MPLSVPN_NH_LABEL_BIND);
- bmnc->last_update = monotime(NULL);
- }
+
+ /* updates NHT pi list reference */
+ LIST_INSERT_HEAD(&(bmnc->paths), pi, mplsvpn.bmnc.nh_label_bind_thread);
+ pi->mplsvpn.bmnc.nh_label_bind_cache = bmnc;
+ pi->mplsvpn.bmnc.nh_label_bind_cache->path_count++;
+ SET_FLAG(pi->flags, BGP_PATH_MPLSVPN_NH_LABEL_BIND);
+ bmnc->last_update = monotime(NULL);
/* Add or update the selected nexthop */
if (!bmnc->nh)
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index ba5b0c7a7d..a46616803c 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -32,6 +32,7 @@
#include "bgpd/bgp_evpn.h"
#include "bgpd/bgp_rd.h"
#include "bgpd/bgp_mplsvpn.h"
+#include "bgpd/bgp_ecommunity.h"
extern struct zclient *zclient;
@@ -322,7 +323,10 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
return 0;
}
- srte_color = pi->attr->srte_color;
+ if (CHECK_FLAG(pi->attr->flag,
+ ATTR_FLAG_BIT(BGP_ATTR_SRTE_COLOR)))
+ srte_color = bgp_attr_get_color(pi->attr);
+
} else if (peer) {
/*
* Gather the ifindex for if up/down events to be
@@ -1249,9 +1253,9 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
else if (bpi_ultimate->extra)
bpi_ultimate->extra->igpmetric = 0;
- if (CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_METRIC_CHANGED)
- || CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED)
- || path->attr->srte_color != 0)
+ if (CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_METRIC_CHANGED) ||
+ CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED) ||
+ bgp_attr_get_color(path->attr))
SET_FLAG(path->flags, BGP_PATH_IGP_CHANGED);
path_valid = CHECK_FLAG(path->flags, BGP_PATH_VALID);
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index d29b91b48f..2f6480b753 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -2385,8 +2385,10 @@ route_set_aspath_replace(void *rule, const struct prefix *dummy, void *object)
struct aspath *aspath_new;
const char *replace = rule;
struct bgp_path_info *path = object;
- as_t own_asn = path->peer->change_local_as ? path->peer->change_local_as
- : path->peer->local_as;
+ as_t replace_asn = 0;
+ as_t configured_asn;
+ char *buf;
+ char src_asn[ASN_STRING_MAX_SIZE];
if (path->peer->sort != BGP_PEER_EBGP) {
zlog_warn(
@@ -2394,6 +2396,29 @@ route_set_aspath_replace(void *rule, const struct prefix *dummy, void *object)
return RMAP_NOOP;
}
+ buf = strchr(replace, ' ');
+ if (!buf) {
+ configured_asn = path->peer->change_local_as
+ ? path->peer->change_local_as
+ : path->peer->local_as;
+ } else {
+ memcpy(src_asn, replace, (size_t)(buf - replace));
+ src_asn[(size_t)(buf - replace)] = '\0';
+ replace = src_asn;
+ buf++;
+ if (!asn_str2asn(buf, &configured_asn)) {
+ zlog_warn(
+ "`set as-path replace`, invalid configured AS %s",
+ buf);
+ return RMAP_NOOP;
+ }
+ }
+
+ if (!strmatch(replace, "any") && !asn_str2asn(replace, &replace_asn)) {
+ zlog_warn("`set as-path replace`, invalid AS %s", replace);
+ return RMAP_NOOP;
+ }
+
if (path->attr->aspath->refcnt)
aspath_new = aspath_dup(path->attr->aspath);
else
@@ -2401,13 +2426,10 @@ route_set_aspath_replace(void *rule, const struct prefix *dummy, void *object)
if (strmatch(replace, "any")) {
path->attr->aspath =
- aspath_replace_all_asn(aspath_new, own_asn);
- } else {
- as_t replace_asn = strtoul(replace, NULL, 10);
-
+ aspath_replace_all_asn(aspath_new, configured_asn);
+ } else
path->attr->aspath = aspath_replace_specific_asn(
- aspath_new, replace_asn, own_asn);
- }
+ aspath_new, replace_asn, configured_asn);
aspath_free(aspath_new);
@@ -3092,6 +3114,44 @@ static void *route_set_ecommunity_lb_compile(const char *arg)
return rels;
}
+static enum route_map_cmd_result_t
+route_set_ecommunity_color(void *rule, const struct prefix *prefix,
+ void *object)
+{
+ struct bgp_path_info *path;
+
+ path = object;
+
+ route_set_ecommunity(rule, prefix, object);
+
+ path->attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_SRTE_COLOR);
+ return RMAP_OKAY;
+}
+
+static void *route_set_ecommunity_color_compile(const char *arg)
+{
+ struct rmap_ecom_set *rcs;
+ struct ecommunity *ecom;
+
+ ecom = ecommunity_str2com(arg, ECOMMUNITY_COLOR, 0);
+ if (!ecom)
+ return NULL;
+
+ rcs = XCALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(struct rmap_ecom_set));
+ rcs->ecom = ecommunity_intern(ecom);
+ rcs->none = false;
+
+ return rcs;
+}
+
+static const struct route_map_rule_cmd route_set_ecommunity_color_cmd = {
+ "extcommunity color",
+ route_set_ecommunity_color,
+ route_set_ecommunity_color_compile,
+ route_set_ecommunity_free,
+};
+
+
static void route_set_ecommunity_lb_free(void *rule)
{
XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
@@ -5837,41 +5897,48 @@ DEFUN_YANG (set_aspath_prepend_lastas,
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY_YANG (set_aspath_replace_asn,
- set_aspath_replace_asn_cmd,
- "set as-path replace <any|ASNUM>$replace",
- SET_STR
- "Transform BGP AS_PATH attribute\n"
- "Replace AS number to local AS number\n"
- "Replace any AS number to local AS number\n"
- "Replace a specific AS number in plain or dotted format to local AS number\n")
+DEFPY_YANG(set_aspath_replace_asn, set_aspath_replace_asn_cmd,
+ "set as-path replace <any|ASNUM>$replace [<ASNUM>$configured_asn]",
+ SET_STR
+ "Transform BGP AS_PATH attribute\n"
+ "Replace AS number to local or configured AS number\n"
+ "Replace any AS number to local or configured AS number\n"
+ "Replace a specific AS number to local or configured AS number\n"
+ "Define the configured AS number\n")
{
const char *xpath =
"./set-action[action='frr-bgp-route-map:as-path-replace']";
char xpath_value[XPATH_MAXLEN];
- as_t as_value;
+ as_t as_value, as_configured_value;
+ char replace_value[ASN_STRING_MAX_SIZE * 2];
if (!strmatch(replace, "any") && !asn_str2asn(replace, &as_value)) {
vty_out(vty, "%% Invalid AS value %s\n", replace);
return CMD_WARNING_CONFIG_FAILED;
}
-
- nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+ if (configured_asn_str &&
+ !asn_str2asn(configured_asn_str, &as_configured_value)) {
+ vty_out(vty, "%% Invalid AS configured value %s\n",
+ configured_asn_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
snprintf(xpath_value, sizeof(xpath_value),
"%s/rmap-set-action/frr-bgp-route-map:replace-as-path", xpath);
- nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, replace);
+ snprintf(replace_value, sizeof(replace_value), "%s%s%s", replace,
+ configured_asn_str ? " " : "",
+ configured_asn_str ? configured_asn_str : "");
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, replace_value);
return nb_cli_apply_changes(vty, NULL);
}
-DEFPY_YANG (no_set_aspath_replace_asn,
- no_set_aspath_replace_asn_cmd,
- "no set as-path replace [<any|ASNUM>]",
- NO_STR
- SET_STR
- "Transform BGP AS_PATH attribute\n"
- "Replace AS number to local AS number\n"
- "Replace any AS number to local AS number\n"
- "Replace a specific AS number in plain or dotted format to local AS number\n")
+DEFPY_YANG(no_set_aspath_replace_asn, no_set_aspath_replace_asn_cmd,
+ "no set as-path replace [<any|ASNUM>] [<ASNUM>$configured_asn]",
+ NO_STR SET_STR
+ "Transform BGP AS_PATH attribute\n"
+ "Replace AS number to local or configured AS number\n"
+ "Replace any AS number to local or configured AS number\n"
+ "Replace a specific AS number to local or configured AS number\n"
+ "Define the configured AS number\n")
{
const char *xpath =
"./set-action[action='frr-bgp-route-map:as-path-replace']";
@@ -6585,6 +6652,57 @@ DEFPY_YANG (no_set_ecommunity_nt,
return nb_cli_apply_changes(vty, NULL);
}
+DEFPY_YANG(set_ecommunity_color, set_ecommunity_color_cmd,
+ "set extcommunity color RTLIST...",
+ SET_STR
+ "BGP extended community attribute\n"
+ "Color extended community\n"
+ "Color ID\n")
+{
+ int idx_color = 3;
+ char *str;
+ int ret;
+ const char *xpath =
+ "./set-action[action='frr-bgp-route-map:set-extcommunity-color']";
+ char xpath_value[XPATH_MAXLEN];
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+
+ snprintf(xpath_value, sizeof(xpath_value),
+ "%s/rmap-set-action/frr-bgp-route-map:extcommunity-color",
+ xpath);
+ str = argv_concat(argv, argc, idx_color);
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, str);
+ ret = nb_cli_apply_changes(vty, NULL);
+ XFREE(MTYPE_TMP, str);
+ return ret;
+}
+
+DEFPY_YANG(no_set_ecommunity_color_all, no_set_ecommunity_color_all_cmd,
+ "no set extcommunity color",
+ NO_STR SET_STR
+ "BGP extended community attribute\n"
+ "Color extended community\n")
+{
+ const char *xpath =
+ "./set-action[action='frr-bgp-route-map:set-extcommunity-color']";
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY_YANG(no_set_ecommunity_color, no_set_ecommunity_color_cmd,
+ "no set extcommunity color RTLIST...",
+ NO_STR SET_STR
+ "BGP extended community attribute\n"
+ "Color extended community\n"
+ "Color ID\n")
+{
+ const char *xpath =
+ "./set-action[action='frr-bgp-route-map:set-extcommunity-color']";
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+ return nb_cli_apply_changes(vty, NULL);
+}
+
ALIAS_YANG (no_set_ecommunity_nt,
no_set_ecommunity_nt_short_cmd,
"no set extcommunity nt",
@@ -7438,6 +7556,7 @@ void bgp_route_map_init(void)
route_map_install_set(&route_set_ecommunity_nt_cmd);
route_map_install_set(&route_set_ecommunity_soo_cmd);
route_map_install_set(&route_set_ecommunity_lb_cmd);
+ route_map_install_set(&route_set_ecommunity_color_cmd);
route_map_install_set(&route_set_ecommunity_none_cmd);
route_map_install_set(&route_set_tag_cmd);
route_map_install_set(&route_set_label_index_cmd);
@@ -7542,6 +7661,9 @@ void bgp_route_map_init(void)
install_element(RMAP_NODE, &set_ecommunity_nt_cmd);
install_element(RMAP_NODE, &no_set_ecommunity_nt_cmd);
install_element(RMAP_NODE, &no_set_ecommunity_nt_short_cmd);
+ install_element(RMAP_NODE, &set_ecommunity_color_cmd);
+ install_element(RMAP_NODE, &no_set_ecommunity_color_cmd);
+ install_element(RMAP_NODE, &no_set_ecommunity_color_all_cmd);
#ifdef KEEP_OLD_VPN_COMMANDS
install_element(RMAP_NODE, &set_vpn_nexthop_cmd);
install_element(RMAP_NODE, &no_set_vpn_nexthop_cmd);
diff --git a/bgpd/bgp_routemap_nb.c b/bgpd/bgp_routemap_nb.c
index 282ebe9116..ae695a6f80 100644
--- a/bgpd/bgp_routemap_nb.c
+++ b/bgpd/bgp_routemap_nb.c
@@ -401,6 +401,13 @@ const struct frr_yang_module_info frr_bgp_route_map_info = {
}
},
{
+ .xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-color",
+ .cbs = {
+ .modify = lib_route_map_entry_set_action_rmap_set_action_extcommunity_color_modify,
+ .destroy = lib_route_map_entry_set_action_rmap_set_action_extcommunity_color_destroy,
+ }
+ },
+ {
.xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-lb/two-octet-as-specific",
.cbs = {
.modify = lib_route_map_entry_set_action_rmap_set_action_extcommunity_lb_two_octet_as_specific_modify,
diff --git a/bgpd/bgp_routemap_nb.h b/bgpd/bgp_routemap_nb.h
index 7066fdb419..3ff58f71a7 100644
--- a/bgpd/bgp_routemap_nb.h
+++ b/bgpd/bgp_routemap_nb.h
@@ -153,6 +153,10 @@ int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_modify(
struct nb_cb_modify_args *args);
int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_destroy(
struct nb_cb_destroy_args *args);
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_color_modify(
+ struct nb_cb_modify_args *args);
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_color_destroy(
+ struct nb_cb_destroy_args *args);
int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_modify(
struct nb_cb_modify_args *args);
int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_destroy(
diff --git a/bgpd/bgp_routemap_nb_config.c b/bgpd/bgp_routemap_nb_config.c
index 02564b0004..03b588a33b 100644
--- a/bgpd/bgp_routemap_nb_config.c
+++ b/bgpd/bgp_routemap_nb_config.c
@@ -2954,6 +2954,58 @@ lib_route_map_entry_set_action_rmap_set_action_extcommunity_lb_bandwidth_destroy
/*
* XPath:
+ * /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-color
+ */
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_color_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct routemap_hook_context *rhc;
+ const char *str;
+ int rv;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ /* Add configuration. */
+ rhc = nb_running_get_entry(args->dnode, NULL, true);
+ str = yang_dnode_get_string(args->dnode, NULL);
+
+ /* Set destroy information. */
+ rhc->rhc_shook = generic_set_delete;
+ rhc->rhc_rule = "extcommunity color";
+ rhc->rhc_event = RMAP_EVENT_SET_DELETED;
+
+ rv = generic_set_add(rhc->rhc_rmi, "extcommunity color", str,
+ args->errmsg, args->errmsg_len);
+ if (rv != CMD_SUCCESS) {
+ rhc->rhc_shook = NULL;
+ return NB_ERR_INCONSISTENCY;
+ }
+ }
+
+ return NB_OK;
+}
+
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_color_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return lib_route_map_entry_match_destroy(args);
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-lb/two-octet-as-specific
*/
int
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 3e1fdc6284..e6fa94733f 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -1075,11 +1075,14 @@ static void bgp_zebra_tm_connect(struct event *t)
ret = tm_table_manager_connect(zclient);
}
if (ret < 0) {
- zlog_info("Error connecting to table manager!");
+ zlog_err("Error connecting to table manager!");
bgp_tm_status_connected = false;
} else {
- if (!bgp_tm_status_connected)
- zlog_debug("Connecting to table manager. Success");
+ if (!bgp_tm_status_connected) {
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug(
+ "Connecting to table manager. Success");
+ }
bgp_tm_status_connected = true;
if (!bgp_tm_chunk_obtained) {
if (bgp_zebra_get_table_range(bgp_tm_chunk_size,
@@ -1122,7 +1125,7 @@ void bgp_zebra_init_tm_connect(struct bgp *bgp)
bgp_tm_min = bgp_tm_max = 0;
bgp_tm_chunk_size = BGP_FLOWSPEC_TABLE_CHUNK;
bgp_tm_bgp = bgp;
- event_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
+ event_add_timer(bm->master, bgp_zebra_tm_connect, zclient_sync, delay,
&bgp_tm_thread_connect);
}
@@ -1442,7 +1445,7 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
if (CHECK_FLAG(info->attr->flag,
ATTR_FLAG_BIT(BGP_ATTR_SRTE_COLOR)))
- api_nh->srte_color = info->attr->srte_color;
+ api_nh->srte_color = bgp_attr_get_color(info->attr);
if (bgp_debug_zebra(&api.prefix)) {
if (mpinfo->extra) {
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 619925ab2c..f0c8998259 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -8333,7 +8333,8 @@ struct peer *peer_lookup_in_view(struct vty *vty, struct bgp *bgp,
if (!peer) {
group = peer_group_lookup(bgp, ip_str);
- peer = listnode_head(group->peer);
+ if (group)
+ peer = listnode_head(group->peer);
}
if (!peer) {
diff --git a/doc/developer/building.rst b/doc/developer/building.rst
index 2d8cc209b0..dac0f9a84d 100644
--- a/doc/developer/building.rst
+++ b/doc/developer/building.rst
@@ -9,13 +9,13 @@ Building FRR
static-linking
building-frr-for-alpine
+ building-frr-for-archlinux
building-frr-for-centos6
building-frr-for-centos7
building-frr-for-centos8
building-frr-for-debian8
building-frr-for-debian9
building-frr-for-fedora
- building-frr-for-opensuse
building-frr-for-freebsd9
building-frr-for-freebsd10
building-frr-for-freebsd11
@@ -23,11 +23,12 @@ Building FRR
building-frr-for-netbsd6
building-frr-for-netbsd7
building-frr-for-openbsd6
+ building-frr-for-opensuse
building-frr-for-openwrt
building-frr-for-ubuntu1404
building-frr-for-ubuntu1604
building-frr-for-ubuntu1804
building-frr-for-ubuntu2004
- building-frr-for-archlinux
+ building-frr-for-ubuntu2204
building-docker
cross-compiling
diff --git a/doc/developer/subdir.am b/doc/developer/subdir.am
index 2563082c46..840afa9f74 100644
--- a/doc/developer/subdir.am
+++ b/doc/developer/subdir.am
@@ -6,12 +6,12 @@ dev_RSTFILES = \
doc/developer/bgp-typecodes.rst \
doc/developer/bgpd.rst \
doc/developer/building-frr-for-alpine.rst \
+ doc/developer/building-frr-for-archlinux.rst \
doc/developer/building-frr-for-centos6.rst \
doc/developer/building-frr-for-centos7.rst \
doc/developer/building-frr-for-debian8.rst \
doc/developer/building-frr-for-debian9.rst \
doc/developer/building-frr-for-fedora.rst \
- doc/developer/building-frr-for-opensuse.rst \
doc/developer/building-frr-for-freebsd10.rst \
doc/developer/building-frr-for-freebsd11.rst \
doc/developer/building-frr-for-freebsd13.rst \
@@ -19,11 +19,13 @@ dev_RSTFILES = \
doc/developer/building-frr-for-netbsd6.rst \
doc/developer/building-frr-for-netbsd7.rst \
doc/developer/building-frr-for-openbsd6.rst \
+ doc/developer/building-frr-for-opensuse.rst \
doc/developer/building-frr-for-openwrt.rst \
doc/developer/building-frr-for-ubuntu1404.rst \
doc/developer/building-frr-for-ubuntu1604.rst \
doc/developer/building-frr-for-ubuntu1804.rst \
doc/developer/building-frr-for-ubuntu2004.rst \
+ doc/developer/building-frr-for-ubuntu2204.rst \
doc/developer/building-libunwind-note.rst \
doc/developer/building-libyang.rst \
doc/developer/building.rst \
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index 1c2d6b3bdb..87edad40bb 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -443,7 +443,7 @@ as shown in the examples below.
For each capture a window is opened displaying a live summary of the captured
packets. Additionally, the entire packet stream is captured in a pcap file in
-the tests log directory e.g.,::
+the tests log directory e.g.,:
.. code:: console
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index df42e4d10a..f09512de32 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -2105,10 +2105,11 @@ Using AS Path in Route Map
Prepend the existing last AS number (the leftmost ASN) to the AS_PATH.
The no form of this command removes this set operation from the route-map.
-.. clicmd:: set as-path replace <any|ASN>
+.. clicmd:: set as-path replace <any|ASN> [<ASN>]
- Replace a specific AS number to local AS number. ``any`` replaces each
- AS number in the AS-PATH with the local AS number.
+ Replace a specific AS number with the local AS number or a configured AS
+ number. ``any`` replaces each AS number in the AS-PATH with either the
+ local AS number or the configured AS number.
.. clicmd:: set as-path exclude all
@@ -2620,6 +2621,10 @@ BGP Extended Communities in Route Map
This command sets Site of Origin value.
+.. clicmd:: set extcommunity color EXTCOMMUNITY
+
+ This command sets the color extended community value(s).
+
.. clicmd:: set extcommunity bandwidth <(1-25600) | cumulative | num-multipaths> [non-transitive]
This command sets the BGP link-bandwidth extended community for the prefix
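A hypothetical route-map configuration exercising the commands documented above; the ASNs and the color value are examples only:

route-map SET-COLOR permit 10
 set extcommunity color 100
!
route-map REPLACE-ANY permit 10
 set as-path replace any 65010
!
route-map REPLACE-ONE permit 10
 set as-path replace 65001 65010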
diff --git a/doc/user/ripngd.rst b/doc/user/ripngd.rst
index 4c9b734d88..1e78294f32 100644
--- a/doc/user/ripngd.rst
+++ b/doc/user/ripngd.rst
@@ -92,6 +92,53 @@ RIPng routes can be filtered by a distribute-list.
`distribute-list` can be applied to both incoming and outgoing data.
+.. _ripng-route-map:
+
+RIPng route-map
+===============
+
+Usage of *ripngd*'s route-map support.
+
+Route-map statement (:ref:`route-map`) is needed to use route-map
+functionality.
+
+.. clicmd:: match interface WORD
+
+ This command matches an interface. The notation differs from Cisco's:
+ Cisco accepts a list of interfaces (NAME1 NAME2 ... NAMEN), while ripngd
+ allows only one name (this may change in the future). The semantics also
+ differ: for Cisco the interface is the one carrying the next-hop of the
+ route (somewhat similar to the "ipv6 next-hop" statement), whereas for
+ ripngd it is the interface the route will be sent out of. The difference
+ exists because the next-hop of the same route must differ per outgoing
+ interface.
+
+.. clicmd:: match ipv6 address WORD
+
+.. clicmd:: match ipv6 address prefix-list WORD
+
+ Match if route destination is permitted by access-list/prefix-list.
+
+.. clicmd:: match metric (0-4294967295)
+
+ This command matches the metric value of RIPng updates. For compatibility
+ with other protocols the range is shown as (0-4294967295), but for RIPng
+ only values in the range (0-16) make sense.
+
+.. clicmd:: set ipv6 next-hop local IPV6_ADDRESS
+
+ Set the link-local IPv6 nexthop address.
+
+.. clicmd:: set metric (1-16)
+
+ Set a metric for the matched route when sending announcements. The metric value
+ range is very large for compatibility with other protocols. For RIPng, valid
+ metric values are from 1 to 16.
+
+.. clicmd:: set tag (1-4294967295)
+
+ Set a tag on the matched route.
+
+
Sample configuration
====================
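A hypothetical ripngd route-map using the match/set clauses documented above; the interface name and the metric/tag values are examples only:

route-map RIPNG-POLICY permit 10
 match interface eth0
 match metric 2
 set metric 5
 set tag 100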
diff --git a/isisd/isis_zebra.c b/isisd/isis_zebra.c
index 59b80c1e20..95bd37812f 100644
--- a/isisd/isis_zebra.c
+++ b/isisd/isis_zebra.c
@@ -772,9 +772,9 @@ static int isis_opaque_msg_handler(ZAPI_CALLBACK_ARGS)
switch (info.type) {
case LINK_STATE_SYNC:
- STREAM_GETC(s, dst.proto);
- STREAM_GETW(s, dst.instance);
- STREAM_GETL(s, dst.session_id);
+ dst.proto = info.src_proto;
+ dst.instance = info.src_instance;
+ dst.session_id = info.src_session_id;
dst.type = LINK_STATE_SYNC;
ret = isis_te_sync_ted(dst);
break;
diff --git a/lib/darr.c b/lib/darr.c
new file mode 100644
index 0000000000..2c8b7b8778
--- /dev/null
+++ b/lib/darr.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * June 23 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ *
+ */
+#include <zebra.h>
+#include "darr.h"
+
+void __dar_resize(void **a, uint count, size_t esize);
+
+static uint _msb(uint count)
+{
+ uint bit = 0;
+ int msb = 0;
+
+ while (count) {
+ if (count & 1)
+ msb = bit;
+ count >>= 1;
+ bit += 1;
+ }
+ return msb;
+}
+
+static uint darr_next_count(uint count, size_t esize)
+{
+ uint ncount;
+
+ if (esize > sizeof(long long) && count == 1)
+ /* treat like a pointer */
+ ncount = 1;
+ else {
+ uint msb = _msb(count);
+
+ ncount = 1ull << msb;
+ /* if the users count wasn't a pow2 make it the next pow2. */
+ if (ncount != count) {
+ assert(ncount < count);
+ ncount <<= 1;
+ if (esize < sizeof(long long) && ncount < 8)
+ ncount = 8;
+ }
+ }
+ return ncount;
+}
+
+static size_t darr_size(uint count, size_t esize)
+{
+ return count * esize + sizeof(struct darr_metadata);
+}
+
+void *__darr_resize(void *a, uint count, size_t esize)
+{
+ uint ncount = darr_next_count(count, esize);
+ size_t osz = (a == NULL) ? 0 : darr_size(darr_cap(a), esize);
+ size_t sz = darr_size(ncount, esize);
+ struct darr_metadata *dm = realloc(a ? _darr_meta(a) : NULL, sz);
+ /* do *not* use a */
+
+ assert(dm);
+ if (sz > osz)
+ memset((char *)dm + osz, 0, sz - osz);
+
+ dm->cap = ncount;
+
+ return (void *)(dm + 1);
+}
+
+
+void *__darr_insert_n(void *a, uint at, uint count, size_t esize, bool zero)
+{
+
+ struct darr_metadata *dm;
+ uint olen, nlen;
+
+ if (!a)
+ a = __darr_resize(NULL, at + count, esize);
+ dm = (struct darr_metadata *)a - 1;
+ olen = dm->len;
+
+ /* see if the user is expanding first using `at` */
+ if (at >= olen)
+ nlen = at + count;
+ else
+ nlen = olen + count;
+
+ if (nlen > dm->cap) {
+ a = __darr_resize(a, nlen, esize);
+ dm = (struct darr_metadata *)a - 1;
+ }
+
+#define _a_at(i) ((char *)a + ((i)*esize))
+ if (at < olen)
+ memmove(_a_at(at + count), _a_at(at), esize * (olen - at));
+
+ dm->len = nlen;
+
+ if (zero) {
+ if (at >= olen) {
+ at -= olen;
+ count += olen;
+ }
+ memset(_a_at(at), 0, esize * count);
+ }
+
+ return (void *)a;
+#undef _a_at
+}
diff --git a/lib/darr.h b/lib/darr.h
new file mode 100644
index 0000000000..ca46fb3054
--- /dev/null
+++ b/lib/darr.h
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * June 23 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ *
+ * API functions:
+ * ==============
+ * - darr_append
+ * - darr_append_n
+ * - darr_append_nz
+ * - darr_cap
+ * - darr_ensure_cap
+ * - darr_ensure_i
+ * - darr_foreach_i
+ * - darr_foreach_p
+ * - darr_free
+ * - darr_insert
+ * - darr_insertz
+ * - darr_insert_n
+ * - darr_insert_nz
+ * - darr_len
+ * - darr_maxi
+ * - darr_pop
+ * - darr_push
+ * - darr_pushz
+ * - darr_remove
+ * - darr_remove_n
+ * - darr_reset
+ * - darr_setlen
+ */
+/*
+ * A few assured items
+ *
+ * - DAs will never have capacity 0 unless they are NULL pointers.
+ */
+#include <zebra.h>
+
+struct darr_metadata {
+ uint len;
+ uint cap;
+};
+void *__darr_insert_n(void *a, uint at, uint count, size_t esize, bool zero);
+void *__darr_resize(void *a, uint count, size_t esize);
+
+#define _darr_esize(A) sizeof((A)[0])
+#define darr_esize(A) sizeof((A)[0])
+#define _darr_len(A) _darr_meta(A)->len
+#define _darr_meta(A) (((struct darr_metadata *)(A)) - 1)
+#define _darr_resize(A, C) ({ (A) = __darr_resize((A), C, _darr_esize(A)); })
+
+/* Get the current capacity of the array */
+#define darr_cap(A) (((A) == NULL) ? 0 : _darr_meta(A)->cap)
+
+/* Get the largest possible index one can `darr_ensure_i` w/o resizing */
+#define darr_maxi(A) ((int)darr_cap(A) - 1)
+
+/**
+ * Get the current length of the array.
+ *
+ * As long as `A` is non-NULL, this macro may be used as an L-value to modify
+ * the length of the array.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ *
+ * Return:
+ * The current length of the array.
+ */
+#define darr_len(A) (((A) == NULL) ? 0 : _darr_meta(A)->len)
+
+/**
+ * Set the current length of the array `A` to 0.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ */
+#define darr_reset(A) \
+ do { \
+ if ((A)) \
+ _darr_len(A) = 0; \
+ } while (0)
+
+/**
+ * Set the current length of the array `A` to `L`.
+ *
+ * This function does *not* guarantee the memory is valid to L,
+ * use `darr_ensure` or `darr_ensure_cap` for that.
+ *
+ * Args:
+ * A: The dynamic array, can only be NULL if (L) == 0.
+ * L: The new length of the array.
+ */
+#define darr_setlen(A, L) \
+ do { \
+ assert((A) || !(L)); \
+ if ((A)) { \
+ /* have to cast to avoid compiler warning for "0" */ \
+ assert((long long)darr_cap(A) >= (L)); \
+ _darr_len(A) = (L); \
+ } \
+ } while (0)
+
+/**
+ * Free memory allocated for the dynamic array `A`
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ */
+
+#define darr_free(A) \
+ do { \
+ if ((A)) { \
+ free(_darr_meta(A)); \
+ (A) = NULL; \
+ } \
+ } while (0)
+
+/**
+ * Make sure that there is room in the dynamic array `A` for `C` elements.
+ *
+ * The value `A` may be changed as a result of this call in which case any
+ * pointers into the previous memory block are no longer valid. The `A` value
+ * is guaranteed not to change if there is sufficient capacity in the array.
+ *
+ * Args:
+ * A: (IN/OUT) the dynamic array, can be NULL.
+ * C: the number of elements to guarantee capacity for
+ *
+ * Return:
+ * A pointer to the (possibly moved) array.
+ */
+#define darr_ensure_cap(A, C) \
+ ({ \
+ if (darr_cap(A) < (C)) \
+ _darr_resize((A), (C)); \
+ (A); \
+ })
+
+/**
+ * Return a pointer to the (I)th element of array `A`, making sure there is
+ * room for the element.
+ *
+ * If the array length is less than `I + 1` then the length is set to `I + 1`.
+ *
+ * The value `A` may be changed as a result of this call in which case any
+ * pointers into the previous memory block are no longer valid. The `A` value
+ * is guaranteed not to change if there is sufficient capacity in the array.
+ *
+ * Args:
+ *
+ * A: (IN/OUT) the dynamic array, can be NULL.
+ * I: the index to guarantee memory exists for
+ *
+ * Return:
+ * A pointer to the (I)th element in `A`
+ */
+#define darr_ensure_i(A, I) \
+ ({ \
+ if ((int)(I) > darr_maxi(A)) \
+ _darr_resize((A), (I) + 1); \
+ if ((I) + 1 > _darr_len(A)) \
+ _darr_len(A) = (I) + 1; \
+ &(A)[I]; \
+ })
+
+#define _darr_insert_n(A, I, N, Z) \
+ ({ \
+ (A) = __darr_insert_n(A, I, N, _darr_esize(A), Z); \
+ &(A)[I]; \
+ })
+/**
+ * Insert N uninitialized elements in the array at index `I`.
+ *
+ * Previous elements from `I` are shifted right by `N`. Array length is
+ * increased by `N`.
+ *
+ * The value `A` may be changed as a result of this call in which case any
+ * pointers into the previous memory block are no longer valid. The `A` value
+ * is guaranteed not to change if there is sufficient capacity in the array.
+ *
+ * The `z` variant zeros new elements.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ *
+ * Return:
+ * A pointer to the first inserted element in the array.
+ */
+#define darr_insert_n(A, I, N) _darr_insert_n(A, I, N, false)
+#define darr_insert_nz(A, I, N) _darr_insert_n(A, I, N, true)
+
+/**
+ * Insert an uninitialized element in the array at index `I`.
+ *
+ * Previous elements from `I` are shifted right by 1. Array length is
+ * increased by 1.
+ *
+ * The value `A` may be changed as a result of this call in which case any
+ * pointers into the previous memory block are no longer valid. The `A` value
+ * is guaranteed not to change if there is sufficient capacity in the array.
+ *
+ * The `z` variant zeros the new element.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ *
+ * Return:
+ * A pointer to the element in the array.
+ */
+#define darr_insert(A, I) _darr_insert_n(A, I, 1, false)
+#define darr_insertz(A, I) _darr_insert_n(A, I, 1, true)
+
+/**
+ * Remove `N` elements from the array starting at index `I`.
+ *
+ * Elements from `I` + `N` are shifted left by `N`. Array length is reduced by
+ * `N`.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ */
+#define darr_remove_n(A, I, N) \
+ do { \
+ uint __i = (I); \
+ uint __n = (N); \
+ uint __len = darr_len(A); \
+ if (!__len) \
+ break; \
+ else if (__i + __n < __len) { \
+ memmove(&(A)[__i], &(A)[__i + __n], \
+ _darr_esize(A) * (__len - (__i + __n))); \
+ _darr_len(A) = __len - __n; \
+ } else \
+ _darr_len(A) = __i; \
+ } while (0)
+
+/**
+ * Remove the `I`th element from the array.
+ *
+ * Previous elements from `I` + 1 are shifted left by 1. Array length is
+ * reduced by 1.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ */
+#define darr_remove(A, I) darr_remove_n(A, I, 1)
+
+
+#define _darr_append_n(A, N, Z) \
+ ({ \
+ uint __len = darr_len(A); \
+ darr_ensure_cap(A, __len + (N)); \
+ _darr_len(A) = __len + (N); \
+ if (Z) \
+ memset(&(A)[__len], 0, (N)*_darr_esize(A)); \
+ &(A)[__len]; \
+ })
+/**
+ * Extend the array's length by `N`.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ *
+ * The `z` variant zeros new elements.
+ *
+ * Return:
+ * A pointer to the first of the added elements at the end of the array.
+ */
+#define darr_append_n(A, N) _darr_append_n(A, N, false)
+#define darr_append_nz(A, N) _darr_append_n(A, N, true)
+
+/**
+ * Extend the array's length by 1.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ *
+ * The `z` variant zeros the new element.
+ *
+ * Return:
+ * A pointer to the new element at the end of the array.
+ */
+#define darr_append(A) _darr_append_n(A, 1, false)
+#define darr_appendz(A) _darr_append_n(A, 1, true)
+
+/**
+ * Append an element `E` onto the array `A`, extending its length by 1.
+ *
+ * The `z` variant zeros the new element.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ *
+ * Return:
+ * The pushed value (darr_push) or a pointer to the new element (darr_pushz).
+ */
+#define darr_push(A, E) (*darr_append(A) = (E))
+#define darr_pushz(A) (darr_appendz(A))
+
+
+/**
+ * Pop the last `N` elements from the array decrementing the length by `N`.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ */
+#define darr_pop_n(A, N) \
+ do { \
+ if ((A) && (N) >= _darr_len(A)) \
+ darr_reset(A); \
+ else \
+ _darr_len(A) -= (N); \
+ } while (0)
+
+
+/**
+ * Pop the last element from the array decrementing the length by 1.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ *
+ * Return:
+ * The element just popped.
+ */
+#define darr_pop(A) \
+ ({ \
+ uint __len = _darr_len(A); \
+ assert(__len); \
+ darr_remove(A, __len - 1); \
+ /* count on fact that we don't resize */ \
+ (A)[__len - 1]; \
+ })
+
+/**
+ * Return the address at the end of the array -- useful for iterating
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ *
+ * Return:
+ * The address of the end of the array (past the last element) or NULL
+ * if `A` is NULL.
+ */
+#define darr_end(A) ((A) + darr_len(A))
+
+/**
+ * Iterate over array `A` using a pointer to each element in `P`.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ * P: A variable with the same type as A used as the iterator.
+ */
+#define darr_foreach_p(A, P) for ((P) = (A); (P) < darr_end(A); (P)++)
+
+/**
+ * Iterate over array `A`s indices.
+ *
+ * Args:
+ * A: The dynamic array, can be NULL.
+ * I: A uint variable to store the current element index in.
+ */
+#define darr_foreach_i(A, I) for ((I) = 0; (I) < darr_len(A); (I)++)
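A short usage sketch for the dynamic-array API introduced above (hypothetical; it builds only inside the FRR tree because darr.h pulls in zebra.h):

#include <stdio.h>
#include "lib/darr.h"

int main(void)
{
	int *nums = NULL; /* a darr is just a typed pointer, NULL when empty */
	uint i;

	*darr_append(nums) = 1; /* grow by one element and write the new slot */
	darr_push(nums, 2);
	darr_push(nums, 3);

	darr_foreach_i (nums, i)
		printf("nums[%u] = %d\n", i, nums[i]);

	printf("len %u cap %u\n", darr_len(nums), darr_cap(nums));
	darr_free(nums); /* frees the metadata header and resets nums to NULL */
	return 0;
}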
diff --git a/lib/mgmt.proto b/lib/mgmt.proto
index ac44eefd9e..9e4b39abe4 100644
--- a/lib/mgmt.proto
+++ b/lib/mgmt.proto
@@ -136,28 +136,6 @@ message BeOperDataGetReply {
optional YangDataReply data = 5;
}
-message BeOperDataNotify {
- required YangDataReply data = 5;
-}
-
-message BeConfigCmdReq {
- required string cmd = 1;
-}
-
-message BeConfigCmdReply {
- required bool success = 1;
- required string error_if_any = 2;
-}
-
-message BeShowCmdReq {
- required string cmd = 1;
-}
-
-message BeShowCmdReply {
- required bool success = 1;
- required string cmd_ouput = 2;
-}
-
//
// Any message on the MGMTD Backend Interface.
//
@@ -173,11 +151,6 @@ message BeMessage {
BeCfgDataApplyReply cfg_apply_reply = 9;
BeOperDataGetReq get_req = 10;
BeOperDataGetReply get_reply = 11;
- BeOperDataNotify notify_data = 12;
- BeConfigCmdReq cfg_cmd_req = 13;
- BeConfigCmdReply cfg_cmd_reply = 14;
- BeShowCmdReq show_cmd_req = 15;
- BeShowCmdReply show_cmd_reply = 16;
}
}
@@ -267,36 +240,22 @@ message FeCommitConfigReply {
optional string error_if_any = 8;
}
-message FeGetConfigReq {
- required uint64 session_id = 1;
- required DatastoreId ds_id = 2;
- required uint64 req_id = 3;
- repeated YangGetDataReq data = 4;
-}
-
-message FeGetConfigReply {
+message FeGetReq {
required uint64 session_id = 1;
- required DatastoreId ds_id = 2;
- required uint64 req_id = 3;
- required bool success = 4;
- optional string error_if_any = 5;
- optional YangDataReply data = 6;
-}
-
-message FeGetDataReq {
- required uint64 session_id = 1;
- required DatastoreId ds_id = 2;
- required uint64 req_id = 3;
- repeated YangGetDataReq data = 4;
+ required bool config = 2;
+ required DatastoreId ds_id = 3;
+ required uint64 req_id = 4;
+ repeated YangGetDataReq data = 5;
}
-message FeGetDataReply {
+message FeGetReply {
required uint64 session_id = 1;
- required DatastoreId ds_id = 2;
- required uint64 req_id = 3;
- required bool success = 4;
- optional string error_if_any = 5;
- optional YangDataReply data = 6;
+ required bool config = 2;
+ required DatastoreId ds_id = 3;
+ required uint64 req_id = 4;
+ required bool success = 5;
+ optional string error_if_any = 6;
+ optional YangDataReply data = 7;
}
message FeNotifyDataReq {
@@ -322,10 +281,8 @@ message FeMessage {
FeSetConfigReply setcfg_reply = 8;
FeCommitConfigReq commcfg_req = 9;
FeCommitConfigReply commcfg_reply = 10;
- FeGetConfigReq getcfg_req = 11;
- FeGetConfigReply getcfg_reply = 12;
- FeGetDataReq getdata_req = 13;
- FeGetDataReply getdata_reply = 14;
+ FeGetReq get_req = 11;
+ FeGetReply get_reply = 12;
FeNotifyDataReq notify_data_req = 15;
FeRegisterNotifyReq regnotify_req = 16;
}
diff --git a/lib/mgmt_be_client.c b/lib/mgmt_be_client.c
index fdeff3ec0a..7bd9980357 100644
--- a/lib/mgmt_be_client.c
+++ b/lib/mgmt_be_client.c
@@ -768,9 +768,6 @@ static int mgmt_be_client_handle_msg(struct mgmt_be_client *client_ctx,
client_ctx, (uint64_t)be_msg->cfg_apply_req->txn_id);
break;
case MGMTD__BE_MESSAGE__MESSAGE_GET_REQ:
- case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
- case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REQ:
- case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REQ:
MGMTD_BE_CLIENT_ERR("Got unhandled message type %u",
be_msg->message_case);
/*
@@ -781,13 +778,11 @@ static int mgmt_be_client_handle_msg(struct mgmt_be_client *client_ctx,
* NOTE: The following messages are always sent from Backend
* clients to MGMTd only and/or need not be handled here.
*/
+ case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
case MGMTD__BE_MESSAGE__MESSAGE_GET_REPLY:
case MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY:
case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY:
case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
- case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REPLY:
- case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REPLY:
- case MGMTD__BE_MESSAGE__MESSAGE_NOTIFY_DATA:
case MGMTD__BE_MESSAGE__MESSAGE__NOT_SET:
default:
/*
diff --git a/lib/mgmt_fe_client.c b/lib/mgmt_fe_client.c
index 45d57175d6..da19db463f 100644
--- a/lib/mgmt_fe_client.c
+++ b/lib/mgmt_fe_client.c
@@ -247,58 +247,31 @@ int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client,
return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-int mgmt_fe_send_getcfg_req(struct mgmt_fe_client *client, uint64_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq *data_req[],
- int num_data_reqs)
+int mgmt_fe_send_get_req(struct mgmt_fe_client *client, uint64_t session_id,
+ uint64_t req_id, bool is_config,
+ Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq *data_req[], int num_data_reqs)
{
(void)req_id;
Mgmtd__FeMessage fe_msg;
- Mgmtd__FeGetConfigReq getcfg_req;
+ Mgmtd__FeGetReq getcfg_req;
- mgmtd__fe_get_config_req__init(&getcfg_req);
+ mgmtd__fe_get_req__init(&getcfg_req);
getcfg_req.session_id = session_id;
+ getcfg_req.config = is_config;
getcfg_req.ds_id = ds_id;
getcfg_req.req_id = req_id;
getcfg_req.data = data_req;
getcfg_req.n_data = (size_t)num_data_reqs;
mgmtd__fe_message__init(&fe_msg);
- fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ;
- fe_msg.getcfg_req = &getcfg_req;
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GET_REQ;
+ fe_msg.get_req = &getcfg_req;
- MGMTD_FE_CLIENT_DBG(
- "Sending GET_CONFIG_REQ message for DS:%s session-id %" PRIu64
- " (#xpaths:%d)",
- dsid2name(ds_id), session_id, num_data_reqs);
-
- return mgmt_fe_client_send_msg(client, &fe_msg, false);
-}
-
-int mgmt_fe_send_getdata_req(struct mgmt_fe_client *client, uint64_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq *data_req[],
- int num_data_reqs)
-{
- (void)req_id;
- Mgmtd__FeMessage fe_msg;
- Mgmtd__FeGetDataReq getdata_req;
-
- mgmtd__fe_get_data_req__init(&getdata_req);
- getdata_req.session_id = session_id;
- getdata_req.ds_id = ds_id;
- getdata_req.req_id = req_id;
- getdata_req.data = data_req;
- getdata_req.n_data = (size_t)num_data_reqs;
-
- mgmtd__fe_message__init(&fe_msg);
- fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ;
- fe_msg.getdata_req = &getdata_req;
-
- MGMTD_FE_CLIENT_DBG(
- "Sending GET_CONFIG_REQ message for DS:%s session-id %" PRIu64
- " (#xpaths:%d)",
- dsid2name(ds_id), session_id, num_data_reqs);
+ MGMTD_FE_CLIENT_DBG("Sending GET_REQ (iscfg %d) message for DS:%s session-id %" PRIu64
+ " (#xpaths:%d)",
+ is_config, dsid2name(ds_id), session_id,
+ num_data_reqs);
return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
@@ -434,58 +407,33 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client *client,
fe_msg->commcfg_reply->validate_only,
fe_msg->commcfg_reply->error_if_any);
break;
- case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY:
- MGMTD_FE_CLIENT_DBG("Got GETCFG_REPLY for session-id %" PRIu64,
- fe_msg->getcfg_reply->session_id);
+ case MGMTD__FE_MESSAGE__MESSAGE_GET_REPLY:
+ MGMTD_FE_CLIENT_DBG("Got GET_REPLY for session-id %" PRIu64,
+ fe_msg->get_reply->session_id);
- session = mgmt_fe_find_session_by_session_id(
- client, fe_msg->getcfg_reply->session_id);
+ session =
+ mgmt_fe_find_session_by_session_id(client,
+ fe_msg->get_reply
+ ->session_id);
if (session && session->client &&
session->client->cbs.get_data_notify)
(*session->client->cbs.get_data_notify)(
client, client->user_data, session->client_id,
- fe_msg->getcfg_reply->session_id,
- session->user_ctx, fe_msg->getcfg_reply->req_id,
- fe_msg->getcfg_reply->success,
- fe_msg->getcfg_reply->ds_id,
- fe_msg->getcfg_reply->data
- ? fe_msg->getcfg_reply->data->data
- : NULL,
- fe_msg->getcfg_reply->data
- ? fe_msg->getcfg_reply->data->n_data
- : 0,
- fe_msg->getcfg_reply->data
- ? fe_msg->getcfg_reply->data->next_indx
- : 0,
- fe_msg->getcfg_reply->error_if_any);
- break;
- case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY:
- MGMTD_FE_CLIENT_DBG("Got GETDATA_REPLY for session-id %" PRIu64,
- fe_msg->getdata_reply->session_id);
-
- session = mgmt_fe_find_session_by_session_id(
- client, fe_msg->getdata_reply->session_id);
-
- if (session && session->client &&
- session->client->cbs.get_data_notify)
- (*session->client->cbs.get_data_notify)(
- client, client->user_data, session->client_id,
- fe_msg->getdata_reply->session_id,
- session->user_ctx,
- fe_msg->getdata_reply->req_id,
- fe_msg->getdata_reply->success,
- fe_msg->getdata_reply->ds_id,
- fe_msg->getdata_reply->data
- ? fe_msg->getdata_reply->data->data
+ fe_msg->get_reply->session_id,
+ session->user_ctx, fe_msg->get_reply->req_id,
+ fe_msg->get_reply->success,
+ fe_msg->get_reply->ds_id,
+ fe_msg->get_reply->data
+ ? fe_msg->get_reply->data->data
: NULL,
- fe_msg->getdata_reply->data
- ? fe_msg->getdata_reply->data->n_data
+ fe_msg->get_reply->data
+ ? fe_msg->get_reply->data->n_data
: 0,
- fe_msg->getdata_reply->data
- ? fe_msg->getdata_reply->data->next_indx
+ fe_msg->get_reply->data
+ ? fe_msg->get_reply->data->next_indx
: 0,
- fe_msg->getdata_reply->error_if_any);
+ fe_msg->get_reply->error_if_any);
break;
case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
@@ -502,8 +450,7 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client *client,
case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ:
case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ:
case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ:
- case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ:
- case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ:
+ case MGMTD__FE_MESSAGE__MESSAGE_GET_REQ:
case MGMTD__FE_MESSAGE__MESSAGE__NOT_SET:
default:
/*
diff --git a/lib/mgmt_fe_client.h b/lib/mgmt_fe_client.h
index 532fee4397..286141da44 100644
--- a/lib/mgmt_fe_client.h
+++ b/lib/mgmt_fe_client.h
@@ -294,7 +294,10 @@ extern int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client,
bool validate_only, bool abort);
/*
- * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
+ * Send GET_REQ to MGMTD for one or more data item(s).
+ *
+ * If is_config is true, config is fetched from the MGMTD datastore;
+ * otherwise operational state is queried from the backend clients.
*
* lib_hndl
* Client library handler.
@@ -302,6 +305,9 @@ extern int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client,
* session_id
* Client session ID.
*
+ * is_config
+ * True for get-config, false for get-data.
+ *
* req_id
* Client request ID.
*
@@ -309,31 +315,19 @@ extern int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client,
* Datastore ID (Running/Candidate)
*
* data_req
- * Get config requested.
+ * Array of xpaths requested.
*
* num_req
- * Number of get config requests.
+ * Number of xpaths requested.
*
* Returns:
* 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern int mgmt_fe_send_getcfg_req(struct mgmt_fe_client *client,
- uint64_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq **data_req,
- int num_reqs);
+extern int mgmt_fe_send_get_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ bool is_config, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq **data_req, int num_reqs);
-/*
- * Send GET_DATA_REQ to MGMTD for one or more data item(s).
- *
- * Similar to get config request but supports getting data
- * from operational ds aka backend clients directly.
- */
-extern int mgmt_fe_send_getdata_req(struct mgmt_fe_client *client,
- uint64_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq **data_req,
- int num_reqs);
/*
* Send NOTIFY_REGISTER_REQ to MGMTD daemon.
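
For orientation, a minimal sketch of how a frontend client could drive the consolidated request is shown below. Only mgmt_fe_send_get_req(), the MGMTD_DS_OPERATIONAL constant and the Mgmtd__YangData/Mgmtd__YangGetDataReq message types come from this change; the helper name, the xpath and the session/request IDs are illustrative assumptions, and the protobuf-c initializers follow the generated-code naming used elsewhere in this diff.

/*
 * Hypothetical caller (not part of this change): fetch one operational
 * xpath over an already-established frontend session.
 */
#include "mgmt_fe_client.h"

static int example_get_oper_state(struct mgmt_fe_client *client,
				  uint64_t session_id, uint64_t req_id)
{
	Mgmtd__YangData data;
	Mgmtd__YangGetDataReq get;
	Mgmtd__YangGetDataReq *getp = &get;

	mgmtd__yang_data__init(&data);
	data.xpath = (char *)"/frr-interface:lib/interface";

	mgmtd__yang_get_data_req__init(&get);
	get.data = &data;

	/*
	 * is_config == false queries operational state via the backend
	 * clients, so the datastore must be MGMTD_DS_OPERATIONAL; passing
	 * true instead reads config from the candidate or running DS.
	 */
	return mgmt_fe_send_get_req(client, session_id, req_id,
				    false /* is_config */,
				    MGMTD_DS_OPERATIONAL, &getp, 1);
}
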
diff --git a/lib/routemap.h b/lib/routemap.h
index 7277744dc5..a83ef9c967 100644
--- a/lib/routemap.h
+++ b/lib/routemap.h
@@ -362,6 +362,9 @@ DECLARE_QOBJ_TYPE(route_map);
(strmatch(A, "frr-bgp-route-map:set-extcommunity-soo"))
#define IS_SET_EXTCOMMUNITY_LB(A) \
(strmatch(A, "frr-bgp-route-map:set-extcommunity-lb"))
+#define IS_SET_EXTCOMMUNITY_COLOR(A) \
+ (strmatch(A, "frr-bgp-route-map:set-extcommunity-color"))
+
#define IS_SET_AGGREGATOR(A) \
(strmatch(A, "frr-bgp-route-map:aggregator"))
#define IS_SET_AS_PREPEND(A) \
diff --git a/lib/routemap_cli.c b/lib/routemap_cli.c
index 0ccc78e838..c1bdd28eab 100644
--- a/lib/routemap_cli.c
+++ b/lib/routemap_cli.c
@@ -1259,6 +1259,11 @@ void route_map_action_show(struct vty *vty, const struct lyd_node *dnode,
strlcat(str, " non-transitive", sizeof(str));
vty_out(vty, " set extcommunity bandwidth %s\n", str);
+ } else if (IS_SET_EXTCOMMUNITY_COLOR(action)) {
+ vty_out(vty, " set extcommunity color %s\n",
+ yang_dnode_get_string(
+ dnode,
+ "./rmap-set-action/frr-bgp-route-map:extcommunity-color"));
} else if (IS_SET_EXTCOMMUNITY_NONE(action)) {
if (yang_dnode_get_bool(
dnode,
diff --git a/lib/subdir.am b/lib/subdir.am
index c046c3c43c..d7b28ffbd5 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -25,6 +25,7 @@ lib_libfrr_la_SOURCES = \
lib/command_parse.y \
lib/cspf.c \
lib/csv.c \
+ lib/darr.c \
lib/debug.c \
lib/defaults.c \
lib/distribute.c \
@@ -209,6 +210,7 @@ pkginclude_HEADERS += \
lib/compiler.h \
lib/cspf.h \
lib/csv.h \
+ lib/darr.h \
lib/db.h \
lib/debug.h \
lib/defaults.h \
diff --git a/lib/vty.c b/lib/vty.c
index fc6bed6a0a..c9de00a271 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -3822,8 +3822,9 @@ int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort)
return 0;
}
-int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
- const char **xpath_list, int num_req)
+int vty_mgmt_send_get_req(struct vty *vty, bool is_config,
+ Mgmtd__DatastoreId datastore, const char **xpath_list,
+ int num_req)
{
Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
@@ -3841,13 +3842,11 @@ int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
get_req[i].data = &yang_data[i];
getreq[i] = &get_req[i];
}
- if (mgmt_fe_send_getcfg_req(mgmt_fe_client, vty->mgmt_session_id,
- vty->mgmt_req_id, datastore, getreq,
- num_req)) {
- zlog_err(
- "Failed to send GET-CONFIG to MGMTD for req-id %" PRIu64
- ".",
- vty->mgmt_req_id);
+ if (mgmt_fe_send_get_req(mgmt_fe_client, vty->mgmt_session_id,
+ vty->mgmt_req_id, is_config, datastore, getreq,
+ num_req)) {
+ zlog_err("Failed to send GET- to MGMTD for req-id %" PRIu64 ".",
+ vty->mgmt_req_id);
vty_out(vty, "Failed to send GET-CONFIG to MGMTD!\n");
return -1;
}
@@ -3857,40 +3856,6 @@ int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
return 0;
}
-int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
- const char **xpath_list, int num_req)
-{
- Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
- Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
- Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
- int i;
-
- vty->mgmt_req_id++;
-
- for (i = 0; i < num_req; i++) {
- mgmt_yang_get_data_req_init(&get_req[i]);
- mgmt_yang_data_init(&yang_data[i]);
-
- yang_data->xpath = (char *)xpath_list[i];
-
- get_req[i].data = &yang_data[i];
- getreq[i] = &get_req[i];
- }
- if (mgmt_fe_send_getdata_req(mgmt_fe_client, vty->mgmt_session_id,
- vty->mgmt_req_id, datastore, getreq,
- num_req)) {
- zlog_err("Failed to send GET-DATA to MGMTD for req-id %" PRIu64
- ".",
- vty->mgmt_req_id);
- vty_out(vty, "Failed to send GET-DATA to MGMTD!\n");
- return -1;
- }
-
- vty->mgmt_req_pending_cmd = "MESSAGE_GETDATA_REQ";
-
- return 0;
-}
-
/* Install vty's own commands like `who' command. */
void vty_init(struct event_loop *master_thread, bool do_command_logging)
{
diff --git a/lib/vty.h b/lib/vty.h
index 8fb1483e5b..ac3d2e5019 100644
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -411,11 +411,9 @@ extern bool mgmt_vty_read_configs(void);
extern int vty_mgmt_send_config_data(struct vty *vty, bool implicit_commit);
extern int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only,
bool abort);
-extern int vty_mgmt_send_get_config(struct vty *vty,
- Mgmtd__DatastoreId datastore,
- const char **xpath_list, int num_req);
-extern int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
- const char **xpath_list, int num_req);
+extern int vty_mgmt_send_get_req(struct vty *vty, bool is_config,
+ Mgmtd__DatastoreId datastore,
+ const char **xpath_list, int num_req);
extern int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
bool lock, bool scok);
extern void vty_mgmt_resume_response(struct vty *vty, bool success);
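
The same consolidation is visible at the vty layer: callers now choose the datastore to match the is_config flag rather than calling separate get-config/get-data functions. A rough sketch, assuming an invented wrapper (only vty_mgmt_send_get_req() and the MGMTD_DS_* constants used elsewhere in this diff are real; the include list is the minimal guess for the prototype):

#include "vty.h"

/*
 * Hypothetical wrapper: config reads target the running (or candidate)
 * datastore, operational reads must target the operational datastore.
 */
static int example_send_get(struct vty *vty, bool want_config)
{
	const char *xpaths[] = { "/frr-vrf:lib" };

	return vty_mgmt_send_get_req(vty, want_config,
				     want_config ? MGMTD_DS_RUNNING
						 : MGMTD_DS_OPERATIONAL,
				     xpaths, 1);
}
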
diff --git a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c
index 512cc49feb..399fdafded 100644
--- a/mgmtd/mgmt_be_adapter.c
+++ b/mgmtd/mgmt_be_adapter.c
@@ -8,6 +8,7 @@
*/
#include <zebra.h>
+#include "darr.h"
#include "frrevent.h"
#include "sockopt.h"
#include "network.h"
@@ -28,23 +29,9 @@
frr_each_safe (mgmt_be_adapters, &mgmt_be_adapters, (adapter))
/*
- * Static mapping of YANG XPath regular expressions and
- * the corresponding interested backend clients.
- * NOTE: Thiis is a static mapping defined by all MGMTD
- * backend client modules (for now, till we develop a
- * more dynamic way of creating and updating this map).
- * A running map is created by MGMTD in run-time to
- * handle real-time mapping of YANG xpaths to one or
- * more interested backend client adapters.
- *
- * Please see xpath_map_reg[] in lib/mgmt_be_client.c
- * for the actual map
+ * Mapping of YANG XPath regular expressions to
+ * their corresponding backend clients.
*/
-struct mgmt_be_xpath_map_init {
- const char *xpath_regexp;
- uint subscr_info[MGMTD_BE_CLIENT_ID_MAX];
-};
-
struct mgmt_be_xpath_map {
char *xpath_regexp;
uint subscr_info[MGMTD_BE_CLIENT_ID_MAX];
@@ -67,55 +54,6 @@ struct mgmt_be_get_adapter_config_params {
};
/*
- * Static mapping of YANG XPath regular expressions and
- * the corresponding interested backend clients.
- * NOTE: Thiis is a static mapping defined by all MGMTD
- * backend client modules (for now, till we develop a
- * more dynamic way of creating and updating this map).
- * A running map is created by MGMTD in run-time to
- * handle real-time mapping of YANG xpaths to one or
- * more interested backend client adapters.
- */
-static const struct mgmt_be_xpath_map_init mgmt_xpath_map_init[] = {
- {
- .xpath_regexp = "/frr-vrf:lib/*",
- .subscr_info =
- {
-#if HAVE_STATICD
- [MGMTD_BE_CLIENT_ID_STATICD] =
- MGMT_SUBSCR_VALIDATE_CFG |
- MGMT_SUBSCR_NOTIFY_CFG,
-#endif
- },
- },
- {
- .xpath_regexp = "/frr-interface:lib/*",
- .subscr_info =
- {
-#if HAVE_STATICD
- [MGMTD_BE_CLIENT_ID_STATICD] =
- MGMT_SUBSCR_VALIDATE_CFG |
- MGMT_SUBSCR_NOTIFY_CFG,
-#endif
- },
- },
-
- {
- .xpath_regexp =
- "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/*",
- .subscr_info =
- {
-#if HAVE_STATICD
- [MGMTD_BE_CLIENT_ID_STATICD] =
- MGMT_SUBSCR_VALIDATE_CFG |
- MGMT_SUBSCR_NOTIFY_CFG,
-#endif
- },
- },
-};
-
-
-/*
* Each client gets their own map, but also union all the strings into the
* above map as well.
*/
@@ -145,12 +83,15 @@ static struct mgmt_be_client_xpath_map
#endif
};
-#define MGMTD_BE_MAX_NUM_XPATH_MAP 256
-
-/* We would like to have a better ADT than one with O(n)
- comparisons */
+/*
+ * We would like to have a better ADT than one with O(n) comparisons
+ *
+ * Perhaps it's possible to sort this array in a way that allows binary search
+ * to find the start, then walk until no possible match can follow? Intuition
+ * says this probably involves exact match/no-match on a stem in the map array
+ * or something like that.
+ */
static struct mgmt_be_xpath_map *mgmt_xpath_map;
-static uint mgmt_num_xpath_maps;
static struct event_loop *mgmt_loop;
static struct msg_server mgmt_be_server = {.fd = -1};
@@ -193,34 +134,52 @@ mgmt_be_find_adapter_by_name(const char *name)
return NULL;
}
+static void mgmt_register_client_xpath(enum mgmt_be_client_id id,
+ const char *xpath, uint subscribed)
+{
+ struct mgmt_be_xpath_map *map;
+
+ darr_foreach_p (mgmt_xpath_map, map)
+ if (!strcmp(xpath, map->xpath_regexp)) {
+ map->subscr_info[id] = subscribed;
+ return;
+ }
+ /* we didn't find a matching entry */
+ map = darr_append(mgmt_xpath_map);
+ map->xpath_regexp = XSTRDUP(MTYPE_MGMTD_XPATH, xpath);
+ map->subscr_info[id] = subscribed;
+}
+
+/*
+ * Load the initial mapping from the static per-client xpath maps
+ */
static void mgmt_be_xpath_map_init(void)
{
- uint i;
+ struct mgmt_be_client_xpath *init, *end;
+ enum mgmt_be_client_id id;
MGMTD_BE_ADAPTER_DBG("Init XPath Maps");
- mgmt_num_xpath_maps = array_size(mgmt_xpath_map_init);
- mgmt_xpath_map =
- calloc(1, sizeof(*mgmt_xpath_map) * mgmt_num_xpath_maps);
- for (i = 0; i < mgmt_num_xpath_maps; i++) {
- MGMTD_BE_ADAPTER_DBG(" - XPATH: '%s'",
- mgmt_xpath_map_init[i].xpath_regexp);
- mgmt_xpath_map[i].xpath_regexp = XSTRDUP(
- MTYPE_MGMTD_XPATH, mgmt_xpath_map_init[i].xpath_regexp);
- memcpy(mgmt_xpath_map[i].subscr_info,
- mgmt_xpath_map_init[i].subscr_info,
- sizeof(mgmt_xpath_map_init[i].subscr_info));
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ init = mgmt_client_xpaths[id].xpaths;
+ end = init + mgmt_client_xpaths[id].nxpaths;
+ for (; init < end; init++) {
+ MGMTD_BE_ADAPTER_DBG(" - XPATH: '%s'", init->xpath);
+ mgmt_register_client_xpath(id, init->xpath,
+ init->subscribed);
+ }
}
- MGMTD_BE_ADAPTER_DBG("Total XPath Maps: %u", mgmt_num_xpath_maps);
+
+ MGMTD_BE_ADAPTER_DBG("Total XPath Maps: %u", darr_len(mgmt_xpath_map));
}
static void mgmt_be_xpath_map_cleanup(void)
{
- uint i;
+ struct mgmt_be_xpath_map *map;
- for (i = 0; i < mgmt_num_xpath_maps; i++)
- XFREE(MTYPE_MGMTD_XPATH, mgmt_xpath_map[i].xpath_regexp);
- free(mgmt_xpath_map);
+ darr_foreach_p (mgmt_xpath_map, map)
+ XFREE(MTYPE_MGMTD_XPATH, map->xpath_regexp);
+ darr_free(mgmt_xpath_map);
}
static int mgmt_be_eval_regexp_match(const char *xpath_regexp,
@@ -532,9 +491,6 @@ mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
be_msg->cfg_apply_reply->error_if_any, adapter);
break;
case MGMTD__BE_MESSAGE__MESSAGE_GET_REPLY:
- case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REPLY:
- case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REPLY:
- case MGMTD__BE_MESSAGE__MESSAGE_NOTIFY_DATA:
/*
* TODO: Add handling code in future.
*/
@@ -548,8 +504,6 @@ mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
case MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ:
case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ:
case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ:
- case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REQ:
- case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REQ:
case MGMTD__BE_MESSAGE__MESSAGE__NOT_SET:
default:
/*
@@ -835,19 +789,17 @@ int mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
void mgmt_be_get_subscr_info_for_xpath(
const char *xpath, struct mgmt_be_client_subscr_info *subscr_info)
{
+ struct mgmt_be_xpath_map *map;
enum mgmt_be_client_id id;
- uint i;
memset(subscr_info, 0, sizeof(*subscr_info));
MGMTD_BE_ADAPTER_DBG("XPATH: '%s'", xpath);
- for (i = 0; i < mgmt_num_xpath_maps; i++) {
- if (!mgmt_be_eval_regexp_match(mgmt_xpath_map[i].xpath_regexp,
- xpath))
+ darr_foreach_p (mgmt_xpath_map, map) {
+ if (!mgmt_be_eval_regexp_match(map->xpath_regexp, xpath))
continue;
FOREACH_MGMTD_BE_CLIENT_ID (id) {
- subscr_info->xpath_subscr[id] |=
- mgmt_xpath_map[i].subscr_info[id];
+ subscr_info->xpath_subscr[id] |= map->subscr_info[id];
}
}
@@ -928,18 +880,17 @@ void mgmt_be_adapter_status_write(struct vty *vty)
void mgmt_be_xpath_register_write(struct vty *vty)
{
- uint indx;
+ struct mgmt_be_xpath_map *map;
enum mgmt_be_client_id id;
struct mgmt_be_client_adapter *adapter;
uint info;
vty_out(vty, "MGMTD Backend XPath Registry\n");
- for (indx = 0; indx < mgmt_num_xpath_maps; indx++) {
- vty_out(vty, " - XPATH: '%s'\n",
- mgmt_xpath_map[indx].xpath_regexp);
+ darr_foreach_p (mgmt_xpath_map, map) {
+ vty_out(vty, " - XPATH: '%s'\n", map->xpath_regexp);
FOREACH_MGMTD_BE_CLIENT_ID (id) {
- info = mgmt_xpath_map[indx].subscr_info[id];
+ info = map->subscr_info[id];
if (!info)
continue;
vty_out(vty,
@@ -954,7 +905,7 @@ void mgmt_be_xpath_register_write(struct vty *vty)
}
}
- vty_out(vty, "Total XPath Registries: %u\n", mgmt_num_xpath_maps);
+ vty_out(vty, "Total XPath Registries: %u\n", darr_len(mgmt_xpath_map));
}
void mgmt_be_xpath_subscr_info_write(struct vty *vty, const char *xpath)
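
The xpath-map rework above is a straightforward application of the new lib/darr dynamic arrays: a NULL pointer is an empty array, darr_append() grows it and hands back the new slot, darr_foreach_p() iterates by element pointer, and darr_free() releases the whole array. A toy illustration of that pattern (the entry type, field names and subscription-merging behaviour are invented; only the darr calls mirror the adapter code):

#include <string.h>

#include "darr.h"

struct xmap {
	const char *xpath;
	unsigned int subscribed;
};

/* NULL is a valid, empty darr dynamic array */
static struct xmap *xmap_array;

static void xmap_register(const char *xpath, unsigned int subscribed)
{
	struct xmap *ent;

	/* Update an existing entry if the key is already present... */
	darr_foreach_p (xmap_array, ent)
		if (!strcmp(ent->xpath, xpath)) {
			ent->subscribed |= subscribed;
			return;
		}

	/* ...otherwise append a new slot and fill it in. */
	ent = darr_append(xmap_array);
	ent->xpath = xpath;
	ent->subscribed = subscribed;
}

static void xmap_cleanup(void)
{
	/* Release the array when done */
	darr_free(xmap_array);
}
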
diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c
index 70c08d5cb4..c12d8646b1 100644
--- a/mgmtd/mgmt_fe_adapter.c
+++ b/mgmtd/mgmt_fe_adapter.c
@@ -273,9 +273,8 @@ mgmt_fe_create_session(struct mgmt_fe_client_adapter *adapter,
return session;
}
-static int mgmt_fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
- Mgmtd__FeMessage *fe_msg,
- bool short_circuit_ok)
+static int fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
+ Mgmtd__FeMessage *fe_msg, bool short_circuit_ok)
{
return msg_conn_send_msg(
adapter->conn, MGMT_MSG_VERSION_PROTOBUF, fe_msg,
@@ -284,10 +283,9 @@ static int mgmt_fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
short_circuit_ok);
}
-static int
-mgmt_fe_send_session_reply(struct mgmt_fe_client_adapter *adapter,
- struct mgmt_fe_session_ctx *session,
- bool create, bool success)
+static int fe_adapter_send_session_reply(struct mgmt_fe_client_adapter *adapter,
+ struct mgmt_fe_session_ctx *session,
+ bool create, bool success)
{
Mgmtd__FeMessage fe_msg;
Mgmtd__FeSessionReply session_reply;
@@ -309,13 +307,13 @@ mgmt_fe_send_session_reply(struct mgmt_fe_client_adapter *adapter,
"Sending SESSION_REPLY message to MGMTD Frontend client '%s'",
adapter->name);
- return mgmt_fe_adapter_send_msg(adapter, &fe_msg, true);
+ return fe_adapter_send_msg(adapter, &fe_msg, true);
}
-static int mgmt_fe_send_lockds_reply(struct mgmt_fe_session_ctx *session,
- Mgmtd__DatastoreId ds_id,
- uint64_t req_id, bool lock_ds,
- bool success, const char *error_if_any)
+static int fe_adapter_send_lockds_reply(struct mgmt_fe_session_ctx *session,
+ Mgmtd__DatastoreId ds_id,
+ uint64_t req_id, bool lock_ds,
+ bool success, const char *error_if_any)
{
Mgmtd__FeMessage fe_msg;
Mgmtd__FeLockDsReply lockds_reply;
@@ -340,10 +338,10 @@ static int mgmt_fe_send_lockds_reply(struct mgmt_fe_session_ctx *session,
"Sending LOCK_DS_REPLY message to MGMTD Frontend client '%s' scok: %d",
session->adapter->name, scok);
- return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg, scok);
+ return fe_adapter_send_msg(session->adapter, &fe_msg, scok);
}
-static int mgmt_fe_send_setcfg_reply(struct mgmt_fe_session_ctx *session,
+static int fe_adapter_send_set_cfg_reply(struct mgmt_fe_session_ctx *session,
Mgmtd__DatastoreId ds_id,
uint64_t req_id, bool success,
const char *error_if_any,
@@ -387,10 +385,10 @@ static int mgmt_fe_send_setcfg_reply(struct mgmt_fe_session_ctx *session,
gettimeofday(&session->adapter->setcfg_stats.last_end, NULL);
mgmt_fe_adapter_compute_set_cfg_timers(&session->adapter->setcfg_stats);
- return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg, false);
+ return fe_adapter_send_msg(session->adapter, &fe_msg, false);
}
-static int mgmt_fe_send_commitcfg_reply(
+static int fe_adapter_send_commit_cfg_reply(
struct mgmt_fe_session_ctx *session, Mgmtd__DatastoreId src_ds_id,
Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, enum mgmt_result result,
bool validate_only, const char *error_if_any)
@@ -433,83 +431,43 @@ static int mgmt_fe_send_commitcfg_reply(
if (mm->perf_stats_en)
gettimeofday(&session->adapter->cmt_stats.last_end, NULL);
mgmt_fe_session_compute_commit_timers(&session->adapter->cmt_stats);
- return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg, false);
-}
-
-static int mgmt_fe_send_getcfg_reply(struct mgmt_fe_session_ctx *session,
- Mgmtd__DatastoreId ds_id,
- uint64_t req_id, bool success,
- Mgmtd__YangDataReply *data,
- const char *error_if_any)
-{
- Mgmtd__FeMessage fe_msg;
- Mgmtd__FeGetConfigReply getcfg_reply;
-
- assert(session->adapter);
-
- mgmtd__fe_get_config_reply__init(&getcfg_reply);
- getcfg_reply.session_id = session->session_id;
- getcfg_reply.ds_id = ds_id;
- getcfg_reply.req_id = req_id;
- getcfg_reply.success = success;
- getcfg_reply.data = data;
- if (error_if_any)
- getcfg_reply.error_if_any = (char *)error_if_any;
-
- mgmtd__fe_message__init(&fe_msg);
- fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY;
- fe_msg.getcfg_reply = &getcfg_reply;
-
- MGMTD_FE_ADAPTER_DBG(
- "Sending GET_CONFIG_REPLY message to MGMTD Frontend client '%s'",
- session->adapter->name);
-
- /*
- * Cleanup the SHOW transaction associated with this session.
- */
- if (session->txn_id && (!success || (data && data->next_indx < 0)))
- mgmt_fe_session_register_event(
- session, MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
-
- return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg, false);
+ return fe_adapter_send_msg(session->adapter, &fe_msg, false);
}
-static int mgmt_fe_send_getdata_reply(struct mgmt_fe_session_ctx *session,
- Mgmtd__DatastoreId ds_id,
- uint64_t req_id, bool success,
- Mgmtd__YangDataReply *data,
- const char *error_if_any)
+static int fe_adapter_send_get_reply(struct mgmt_fe_session_ctx *session,
+ Mgmtd__DatastoreId ds_id, uint64_t req_id,
+ bool success, Mgmtd__YangDataReply *data,
+ const char *error_if_any)
{
Mgmtd__FeMessage fe_msg;
- Mgmtd__FeGetDataReply getdata_reply;
+ Mgmtd__FeGetReply get_reply;
assert(session->adapter);
- mgmtd__fe_get_data_reply__init(&getdata_reply);
- getdata_reply.session_id = session->session_id;
- getdata_reply.ds_id = ds_id;
- getdata_reply.req_id = req_id;
- getdata_reply.success = success;
- getdata_reply.data = data;
+ mgmtd__fe_get_reply__init(&get_reply);
+ get_reply.session_id = session->session_id;
+ get_reply.ds_id = ds_id;
+ get_reply.req_id = req_id;
+ get_reply.success = success;
+ get_reply.data = data;
if (error_if_any)
- getdata_reply.error_if_any = (char *)error_if_any;
+ get_reply.error_if_any = (char *)error_if_any;
mgmtd__fe_message__init(&fe_msg);
- fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY;
- fe_msg.getdata_reply = &getdata_reply;
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GET_REPLY;
+ fe_msg.get_reply = &get_reply;
- MGMTD_FE_ADAPTER_DBG(
- "Sending GET_DATA_REPLY message to MGMTD Frontend client '%s'",
- session->adapter->name);
+ MGMTD_FE_ADAPTER_DBG("Sending GET_REPLY message to MGMTD Frontend client '%s'",
+ session->adapter->name);
/*
* Cleanup the SHOW transaction associated with this session.
*/
if (session->txn_id && (!success || (data && data->next_indx < 0)))
- mgmt_fe_session_register_event(
- session, MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
+ mgmt_fe_session_register_event(session,
+ MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
- return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg, false);
+ return fe_adapter_send_msg(session->adapter, &fe_msg, false);
}
static void mgmt_fe_session_cfg_txn_clnup(struct event *thread)
@@ -562,19 +520,6 @@ mgmt_fe_find_adapter_by_fd(int conn_fd)
return NULL;
}
-static struct mgmt_fe_client_adapter *
-mgmt_fe_find_adapter_by_name(const char *name)
-{
- struct mgmt_fe_client_adapter *adapter;
-
- FOREACH_ADAPTER_IN_LIST (adapter) {
- if (!strncmp(adapter->name, name, sizeof(adapter->name)))
- return adapter;
- }
-
- return NULL;
-}
-
static void mgmt_fe_adapter_delete(struct mgmt_fe_client_adapter *adapter)
{
struct mgmt_fe_session_ctx *session;
@@ -631,7 +576,7 @@ mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
if (lockds_req->ds_id != MGMTD_DS_CANDIDATE &&
lockds_req->ds_id != MGMTD_DS_RUNNING) {
- mgmt_fe_send_lockds_reply(
+ fe_adapter_send_lockds_reply(
session, lockds_req->ds_id, lockds_req->req_id,
lockds_req->lock, false,
"Lock/Unlock on DS other than candidate or running DS not supported");
@@ -640,10 +585,10 @@ mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
ds_ctx = mgmt_ds_get_ctx_by_id(mm, lockds_req->ds_id);
if (!ds_ctx) {
- mgmt_fe_send_lockds_reply(
- session, lockds_req->ds_id, lockds_req->req_id,
- lockds_req->lock, false,
- "Failed to retrieve handle for DS!");
+ fe_adapter_send_lockds_reply(session, lockds_req->ds_id,
+ lockds_req->req_id,
+ lockds_req->lock, false,
+ "Failed to retrieve handle for DS!");
return -1;
}
@@ -651,7 +596,7 @@ mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
if (mgmt_fe_session_write_lock_ds(lockds_req->ds_id,
ds_ctx, session)
!= 0) {
- mgmt_fe_send_lockds_reply(
+ fe_adapter_send_lockds_reply(
session, lockds_req->ds_id, lockds_req->req_id,
lockds_req->lock, false,
"Lock already taken on DS by another session!");
@@ -659,7 +604,7 @@ mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
}
} else {
if (!session->ds_locked[lockds_req->ds_id]) {
- mgmt_fe_send_lockds_reply(
+ fe_adapter_send_lockds_reply(
session, lockds_req->ds_id, lockds_req->req_id,
lockds_req->lock, false,
"Lock on DS was not taken by this session!");
@@ -669,10 +614,9 @@ mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
mgmt_fe_session_unlock_ds(lockds_req->ds_id, ds_ctx, session);
}
- if (mgmt_fe_send_lockds_reply(session, lockds_req->ds_id,
- lockds_req->req_id, lockds_req->lock,
- true, NULL)
- != 0) {
+ if (fe_adapter_send_lockds_reply(session, lockds_req->ds_id,
+ lockds_req->req_id, lockds_req->lock,
+ true, NULL) != 0) {
MGMTD_FE_ADAPTER_DBG(
"Failed to send LOCK_DS_REPLY for DS %u session-id: %" PRIu64
" from %s",
@@ -700,7 +644,7 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
/* MGMTD currently only supports editing the candidate DS. */
if (setcfg_req->ds_id != MGMTD_DS_CANDIDATE) {
- mgmt_fe_send_setcfg_reply(
+ fe_adapter_send_set_cfg_reply(
session, setcfg_req->ds_id, setcfg_req->req_id, false,
"Set-Config on datastores other than Candidate DS not supported",
setcfg_req->implicit_commit);
@@ -712,7 +656,7 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
/* MGMTD currently only supports targetting the running DS. */
if (setcfg_req->implicit_commit &&
setcfg_req->commit_ds_id != MGMTD_DS_RUNNING) {
- mgmt_fe_send_setcfg_reply(
+ fe_adapter_send_set_cfg_reply(
session, setcfg_req->ds_id, setcfg_req->req_id, false,
"Implicit commit on datastores other than running DS not supported",
setcfg_req->implicit_commit);
@@ -723,10 +667,10 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
/* User should have write lock to change the DS */
if (!session->ds_locked[setcfg_req->ds_id]) {
- mgmt_fe_send_setcfg_reply(session, setcfg_req->ds_id,
- setcfg_req->req_id, false,
- "Candidate DS is not locked",
- setcfg_req->implicit_commit);
+ fe_adapter_send_set_cfg_reply(session, setcfg_req->ds_id,
+ setcfg_req->req_id, false,
+ "Candidate DS is not locked",
+ setcfg_req->implicit_commit);
return 0;
}
@@ -738,7 +682,7 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
session->cfg_txn_id = mgmt_create_txn(session->session_id,
MGMTD_TXN_TYPE_CONFIG);
if (session->cfg_txn_id == MGMTD_SESSION_ID_NONE) {
- mgmt_fe_send_setcfg_reply(
+ fe_adapter_send_set_cfg_reply(
session, setcfg_req->ds_id, setcfg_req->req_id,
false,
"Failed to create a Configuration session!",
@@ -761,7 +705,7 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
* In this scenario need to skip cleanup of the txn,
* so setting implicit commit to false.
*/
- mgmt_fe_send_setcfg_reply(
+ fe_adapter_send_set_cfg_reply(
session, setcfg_req->ds_id, setcfg_req->req_id,
false,
"A Configuration transaction is already in progress!",
@@ -771,16 +715,16 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
}
/* Create the SETConfig request under the transaction. */
- if (mgmt_txn_send_set_config_req(
- session->cfg_txn_id, setcfg_req->req_id, setcfg_req->ds_id,
- ds_ctx, setcfg_req->data, setcfg_req->n_data,
- setcfg_req->implicit_commit, setcfg_req->commit_ds_id,
- dst_ds_ctx)
- != 0) {
- mgmt_fe_send_setcfg_reply(
- session, setcfg_req->ds_id, setcfg_req->req_id, false,
- "Request processing for SET-CONFIG failed!",
- setcfg_req->implicit_commit);
+ if (mgmt_txn_send_set_config_req(session->cfg_txn_id, setcfg_req->req_id,
+ setcfg_req->ds_id, ds_ctx,
+ setcfg_req->data, setcfg_req->n_data,
+ setcfg_req->implicit_commit,
+ setcfg_req->commit_ds_id,
+ dst_ds_ctx) != 0) {
+ fe_adapter_send_set_cfg_reply(session, setcfg_req->ds_id,
+ setcfg_req->req_id, false,
+ "Request processing for SET-CONFIG failed!",
+ setcfg_req->implicit_commit);
/* delete transaction if we just created it */
if (txn_created)
@@ -790,33 +734,27 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
return 0;
}
-static int
-mgmt_fe_session_handle_getcfg_req_msg(struct mgmt_fe_session_ctx *session,
- Mgmtd__FeGetConfigReq *getcfg_req)
+static int mgmt_fe_session_handle_get_req_msg(struct mgmt_fe_session_ctx *session,
+ Mgmtd__FeGetReq *get_req)
{
struct mgmt_ds_ctx *ds_ctx;
struct nb_config *cfg_root = NULL;
-
- /*
- * Get the DS handle.
- */
- ds_ctx = mgmt_ds_get_ctx_by_id(mm, getcfg_req->ds_id);
- if (!ds_ctx) {
- mgmt_fe_send_getcfg_reply(session, getcfg_req->ds_id,
- getcfg_req->req_id, false, NULL,
- "No such DS exists!");
- return 0;
- }
-
- /* GETCFG must be on candidate or running DS */
- if (getcfg_req->ds_id != MGMTD_DS_CANDIDATE
- && getcfg_req->ds_id != MGMTD_DS_RUNNING) {
- mgmt_fe_send_getcfg_reply(
- session, getcfg_req->ds_id, getcfg_req->req_id, false,
- NULL,
- "Get-Config on datastores other than Candidate or Running DS not permitted!");
+ Mgmtd__DatastoreId ds_id = get_req->ds_id;
+ uint64_t req_id = get_req->req_id;
+ bool is_cfg = get_req->config;
+ bool ds_ok = true;
+
+ if (is_cfg && ds_id != MGMTD_DS_CANDIDATE && ds_id != MGMTD_DS_RUNNING)
+ ds_ok = false;
+ else if (!is_cfg && ds_id != MGMTD_DS_OPERATIONAL)
+ ds_ok = false;
+ if (!ds_ok) {
+ fe_adapter_send_get_reply(session, ds_id, req_id, false, NULL,
+ "get-req on unsupported datastore");
return 0;
}
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, ds_id);
+ assert(ds_ctx);
if (session->txn_id == MGMTD_TXN_ID_NONE) {
/*
@@ -825,44 +763,43 @@ mgmt_fe_session_handle_getcfg_req_msg(struct mgmt_fe_session_ctx *session,
session->txn_id = mgmt_create_txn(session->session_id,
MGMTD_TXN_TYPE_SHOW);
if (session->txn_id == MGMTD_SESSION_ID_NONE) {
- mgmt_fe_send_getcfg_reply(
- session, getcfg_req->ds_id, getcfg_req->req_id,
- false, NULL,
- "Failed to create a Show transaction!");
- goto mgmt_fe_sess_handle_getcfg_req_failed;
+ fe_adapter_send_get_reply(session, ds_id, req_id, false,
+ NULL,
+ "Failed to create a Show transaction!");
+ return -1;
}
MGMTD_FE_ADAPTER_DBG("Created new show txn-id: %" PRIu64
" for session-id: %" PRIu64,
session->txn_id, session->session_id);
} else {
- MGMTD_FE_ADAPTER_DBG("Show txn-id: %" PRIu64
- " for session-id: %" PRIu64
- " already created",
+ fe_adapter_send_get_reply(session, ds_id, req_id, false, NULL,
+ "Request processing for GET failed!");
+ MGMTD_FE_ADAPTER_DBG("Transaction in progress txn-id: %" PRIu64
+ " for session-id: %" PRIu64,
session->txn_id, session->session_id);
+ return -1;
}
/*
* Get a copy of the datastore config root, avoids locking.
*/
- cfg_root = nb_config_dup(mgmt_ds_get_nb_config(ds_ctx));
+ if (is_cfg)
+ cfg_root = nb_config_dup(mgmt_ds_get_nb_config(ds_ctx));
/*
- * Create a GETConfig request under the transaction.
+ * Create a GET request under the transaction.
*/
- if (mgmt_txn_send_get_config_req(
- session->txn_id, getcfg_req->req_id, getcfg_req->ds_id,
- cfg_root, getcfg_req->data, getcfg_req->n_data) != 0) {
- mgmt_fe_send_getcfg_reply(
- session, getcfg_req->ds_id, getcfg_req->req_id, false,
- NULL, "Request processing for GET-CONFIG failed!");
- goto mgmt_fe_sess_handle_getcfg_req_failed;
+ if (mgmt_txn_send_get_req(session->txn_id, req_id, ds_id, cfg_root,
+ get_req->data, get_req->n_data)) {
+ fe_adapter_send_get_reply(session, ds_id, req_id, false, NULL,
+ "Request processing for GET failed!");
+
+ goto failed;
}
return 0;
-
-mgmt_fe_sess_handle_getcfg_req_failed:
-
+failed:
if (cfg_root)
nb_config_free(cfg_root);
/*
@@ -874,79 +811,6 @@ mgmt_fe_sess_handle_getcfg_req_failed:
return -1;
}
-static int
-mgmt_fe_session_handle_getdata_req_msg(struct mgmt_fe_session_ctx *session,
- Mgmtd__FeGetDataReq *getdata_req)
-{
- struct mgmt_ds_ctx *ds_ctx;
-
- /*
- * Get the DS handle.
- */
- ds_ctx = mgmt_ds_get_ctx_by_id(mm, getdata_req->ds_id);
- if (!ds_ctx) {
- mgmt_fe_send_getdata_reply(session, getdata_req->ds_id,
- getdata_req->req_id, false, NULL,
- "No such DS exists!");
- return 0;
- }
-
- /* GETDATA must be on operational DS */
- if (getdata_req->ds_id != MGMTD_DS_OPERATIONAL) {
- mgmt_fe_send_getdata_reply(
- session, getdata_req->ds_id, getdata_req->req_id, false,
- NULL,
- "Get-Data on datastore other than Operational DS not permitted!");
- return 0;
- }
-
- if (session->txn_id == MGMTD_TXN_ID_NONE) {
- /*
- * Start a SHOW Transaction (if not started already)
- */
- session->txn_id = mgmt_create_txn(session->session_id,
- MGMTD_TXN_TYPE_SHOW);
- if (session->txn_id == MGMTD_SESSION_ID_NONE) {
- mgmt_fe_send_getdata_reply(
- session, getdata_req->ds_id,
- getdata_req->req_id, false, NULL,
- "Failed to create a Show transaction!");
- goto mgmt_fe_sess_handle_getdata_req_failed;
- }
-
- MGMTD_FE_ADAPTER_DBG("Created new Show Txn %" PRIu64
- " for session %" PRIu64,
- session->txn_id, session->session_id);
- } else {
- MGMTD_FE_ADAPTER_DBG("Show txn-id: %" PRIu64
- " for session %" PRIu64 " already created",
- session->txn_id, session->session_id);
- }
-
- /*
- * Create a GETData request under the transaction.
- */
- if (mgmt_txn_send_get_data_req(session->txn_id, getdata_req->req_id,
- getdata_req->ds_id, getdata_req->data,
- getdata_req->n_data) != 0) {
- mgmt_fe_send_getdata_reply(
- session, getdata_req->ds_id, getdata_req->req_id, false,
- NULL, "Request processing for GET-CONFIG failed!");
- goto mgmt_fe_sess_handle_getdata_req_failed;
- }
-
- return 0;
-
-mgmt_fe_sess_handle_getdata_req_failed:
-
- /*
- * Destroy the transaction created recently.
- */
- if (session->txn_id != MGMTD_TXN_ID_NONE)
- mgmt_destroy_txn(&session->txn_id);
-
- return -1;
-}
static int mgmt_fe_session_handle_commit_config_req_msg(
struct mgmt_fe_session_ctx *session,
@@ -961,7 +825,7 @@ static int mgmt_fe_session_handle_commit_config_req_msg(
/* Validate source and dest DS */
if (commcfg_req->src_ds_id != MGMTD_DS_CANDIDATE ||
commcfg_req->dst_ds_id != MGMTD_DS_RUNNING) {
- mgmt_fe_send_commitcfg_reply(
+ fe_adapter_send_commit_cfg_reply(
session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
commcfg_req->validate_only,
@@ -976,7 +840,7 @@ static int mgmt_fe_session_handle_commit_config_req_msg(
/* User should have lock on both source and dest DS */
if (!session->ds_locked[commcfg_req->dst_ds_id] ||
!session->ds_locked[commcfg_req->src_ds_id]) {
- mgmt_fe_send_commitcfg_reply(
+ fe_adapter_send_commit_cfg_reply(
session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
commcfg_req->req_id, MGMTD_DS_LOCK_FAILED,
commcfg_req->validate_only,
@@ -991,11 +855,10 @@ static int mgmt_fe_session_handle_commit_config_req_msg(
session->cfg_txn_id = mgmt_create_txn(session->session_id,
MGMTD_TXN_TYPE_CONFIG);
if (session->cfg_txn_id == MGMTD_SESSION_ID_NONE) {
- mgmt_fe_send_commitcfg_reply(
+ fe_adapter_send_commit_cfg_reply(
session, commcfg_req->src_ds_id,
commcfg_req->dst_ds_id, commcfg_req->req_id,
- MGMTD_INTERNAL_ERROR,
- commcfg_req->validate_only,
+ MGMTD_INTERNAL_ERROR, commcfg_req->validate_only,
"Failed to create a Configuration session!");
return 0;
}
@@ -1013,7 +876,7 @@ static int mgmt_fe_session_handle_commit_config_req_msg(
commcfg_req->src_ds_id, src_ds_ctx, commcfg_req->dst_ds_id,
dst_ds_ctx, commcfg_req->validate_only, commcfg_req->abort,
false) != 0) {
- mgmt_fe_send_commitcfg_reply(
+ fe_adapter_send_commit_cfg_reply(
session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
commcfg_req->validate_only,
@@ -1058,8 +921,8 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
session = mgmt_fe_create_session(
adapter, fe_msg->session_req->client_conn_id);
- mgmt_fe_send_session_reply(adapter, session, true,
- session ? true : false);
+ fe_adapter_send_session_reply(adapter, session, true,
+ session ? true : false);
} else if (
!fe_msg->session_req->create
&& fe_msg->session_req->id_case
@@ -1071,8 +934,8 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
session = mgmt_session_id2ctx(
fe_msg->session_req->session_id);
- mgmt_fe_send_session_reply(adapter, session, false,
- true);
+ fe_adapter_send_session_reply(adapter, session, false,
+ true);
mgmt_fe_cleanup_session(&session);
}
break;
@@ -1116,29 +979,15 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
mgmt_fe_session_handle_commit_config_req_msg(
session, fe_msg->commcfg_req);
break;
- case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ:
- session = mgmt_session_id2ctx(
- fe_msg->getcfg_req->session_id);
- MGMTD_FE_ADAPTER_DBG(
- "Got GETCFG_REQ for DS:%s (xpaths: %d) on session-id %" PRIu64
- " from '%s'",
- mgmt_ds_id2name(fe_msg->getcfg_req->ds_id),
- (int)fe_msg->getcfg_req->n_data,
- fe_msg->getcfg_req->session_id, adapter->name);
- mgmt_fe_session_handle_getcfg_req_msg(
- session, fe_msg->getcfg_req);
- break;
- case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ:
- session = mgmt_session_id2ctx(
- fe_msg->getdata_req->session_id);
- MGMTD_FE_ADAPTER_DBG(
- "Got GETDATA_REQ for DS:%s (xpaths: %d) on session-id %" PRIu64
- " from '%s'",
- mgmt_ds_id2name(fe_msg->getdata_req->ds_id),
- (int)fe_msg->getdata_req->n_data,
- fe_msg->getdata_req->session_id, adapter->name);
- mgmt_fe_session_handle_getdata_req_msg(
- session, fe_msg->getdata_req);
+ case MGMTD__FE_MESSAGE__MESSAGE_GET_REQ:
+ session = mgmt_session_id2ctx(fe_msg->get_req->session_id);
+ MGMTD_FE_ADAPTER_DBG("Got GET_REQ (iscfg %d) for DS:%s (xpaths: %d) on session-id %" PRIu64
+ " from '%s'",
+ (int)fe_msg->get_req->config,
+ mgmt_ds_id2name(fe_msg->get_req->ds_id),
+ (int)fe_msg->get_req->n_data,
+ fe_msg->get_req->session_id, adapter->name);
+ mgmt_fe_session_handle_get_req_msg(session, fe_msg->get_req);
break;
case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
@@ -1157,8 +1006,7 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY:
case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY:
case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY:
- case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY:
- case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY:
+ case MGMTD__FE_MESSAGE__MESSAGE_GET_REPLY:
case MGMTD__FE_MESSAGE__MESSAGE__NOT_SET:
default:
/*
@@ -1293,11 +1141,6 @@ struct msg_conn *mgmt_fe_create_adapter(int conn_fd, union sockunion *from)
return adapter->conn;
}
-struct mgmt_fe_client_adapter *mgmt_fe_get_adapter(const char *name)
-{
- return mgmt_fe_find_adapter_by_name(name);
-}
-
int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
Mgmtd__DatastoreId ds_id, uint64_t req_id,
enum mgmt_result result,
@@ -1316,9 +1159,10 @@ int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
return -1;
}
- return mgmt_fe_send_setcfg_reply(
- session, ds_id, req_id, result == MGMTD_SUCCESS ? true : false,
- error_if_any, implicit_commit);
+ return fe_adapter_send_set_cfg_reply(session, ds_id, req_id,
+ result == MGMTD_SUCCESS ? true
+ : false,
+ error_if_any, implicit_commit);
}
int mgmt_fe_send_commit_cfg_reply(uint64_t session_id, uint64_t txn_id,
@@ -1334,16 +1178,16 @@ int mgmt_fe_send_commit_cfg_reply(uint64_t session_id, uint64_t txn_id,
if (!session || session->cfg_txn_id != txn_id)
return -1;
- return mgmt_fe_send_commitcfg_reply(session, src_ds_id, dst_ds_id,
+ return fe_adapter_send_commit_cfg_reply(session, src_ds_id, dst_ds_id,
req_id, result, validate_only,
error_if_any);
}
-int mgmt_fe_send_get_cfg_reply(uint64_t session_id, uint64_t txn_id,
- Mgmtd__DatastoreId ds_id, uint64_t req_id,
- enum mgmt_result result,
- Mgmtd__YangDataReply *data_resp,
- const char *error_if_any)
+int mgmt_fe_send_get_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id, uint64_t req_id,
+ enum mgmt_result result,
+ Mgmtd__YangDataReply *data_resp,
+ const char *error_if_any)
{
struct mgmt_fe_session_ctx *session;
@@ -1351,34 +1195,9 @@ int mgmt_fe_send_get_cfg_reply(uint64_t session_id, uint64_t txn_id,
if (!session || session->txn_id != txn_id)
return -1;
- return mgmt_fe_send_getcfg_reply(session, ds_id, req_id,
- result == MGMTD_SUCCESS, data_resp,
- error_if_any);
-}
-
-int mgmt_fe_send_get_data_reply(uint64_t session_id, uint64_t txn_id,
- Mgmtd__DatastoreId ds_id, uint64_t req_id,
- enum mgmt_result result,
- Mgmtd__YangDataReply *data_resp,
- const char *error_if_any)
-{
- struct mgmt_fe_session_ctx *session;
-
- session = mgmt_session_id2ctx(session_id);
- if (!session || session->txn_id != txn_id)
- return -1;
-
- return mgmt_fe_send_getdata_reply(session, ds_id, req_id,
- result == MGMTD_SUCCESS,
- data_resp, error_if_any);
-}
-
-int mgmt_fe_send_data_notify(Mgmtd__DatastoreId ds_id,
- Mgmtd__YangData * data_resp[], int num_data)
-{
- /* struct mgmt_fe_session_ctx *session; */
-
- return 0;
+ return fe_adapter_send_get_reply(session, ds_id, req_id,
+ result == MGMTD_SUCCESS, data_resp,
+ error_if_any);
}
struct mgmt_setcfg_stats *
diff --git a/mgmtd/mgmt_fe_adapter.h b/mgmtd/mgmt_fe_adapter.h
index fef205f36a..d2991ec1db 100644
--- a/mgmtd/mgmt_fe_adapter.h
+++ b/mgmtd/mgmt_fe_adapter.h
@@ -87,10 +87,6 @@ mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter);
extern struct msg_conn *mgmt_fe_create_adapter(int conn_fd,
union sockunion *su);
-/* Fetch frontend adapter given a name */
-extern struct mgmt_fe_client_adapter *
-mgmt_fe_get_adapter(const char *name);
-
/*
* Send set-config reply to the frontend client.
*
@@ -134,29 +130,13 @@ extern int mgmt_fe_send_commit_cfg_reply(
enum mgmt_result result, const char *error_if_any);
/*
- * Send get-config reply to the frontend client.
- */
-extern int mgmt_fe_send_get_cfg_reply(uint64_t session_id, uint64_t txn_id,
- Mgmtd__DatastoreId ds_id,
- uint64_t req_id,
- enum mgmt_result result,
- Mgmtd__YangDataReply *data_resp,
- const char *error_if_any);
-
-/*
- * Send get-data reply to the frontend client.
- */
-extern int mgmt_fe_send_get_data_reply(
- uint64_t session_id, uint64_t txn_id, Mgmtd__DatastoreId ds_id,
- uint64_t req_id, enum mgmt_result result,
- Mgmtd__YangDataReply *data_resp, const char *error_if_any);
-
-/*
- * Send data notify to the frontend client.
+ * Send get-config/get-data reply to the frontend client.
*/
-extern int mgmt_fe_send_data_notify(Mgmtd__DatastoreId ds_id,
- Mgmtd__YangData * data_resp[],
- int num_data);
+extern int mgmt_fe_send_get_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id, uint64_t req_id,
+ enum mgmt_result result,
+ Mgmtd__YangDataReply *data_resp,
+ const char *error_if_any);
/* Fetch frontend client session set-config stats */
extern struct mgmt_setcfg_stats *
diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c
index de1ffa1a1f..eff3b7e34c 100644
--- a/mgmtd/mgmt_txn.c
+++ b/mgmtd/mgmt_txn.c
@@ -19,7 +19,7 @@
#define MGMTD_TXN_ERR(fmt, ...) \
zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
-#define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
+#define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
#define MGMTD_TXN_UNLOCK(txn) mgmt_txn_unlock(txn, __FILE__, __LINE__)
enum mgmt_txn_event {
@@ -53,8 +53,7 @@ enum mgmt_commit_phase {
MGMTD_COMMIT_PHASE_MAX
};
-static inline const char *
-mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
+static inline const char *mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
{
switch (cmt_phase) {
case MGMTD_COMMIT_PHASE_PREPARE_CFG:
@@ -83,7 +82,7 @@ struct mgmt_txn_be_cfg_batch {
struct mgmt_be_client_adapter *be_adapter;
uint xp_subscr[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
- Mgmtd__YangCfgDataReq * cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangCfgDataReq *cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
Mgmtd__YangDataValue value[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
size_t num_cfg_data;
@@ -143,8 +142,7 @@ struct mgmt_commit_cfg_req {
* The last batch added for any backend client. This is always on
* 'curr_batches'
*/
- struct mgmt_txn_be_cfg_batch
- *last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];
+ struct mgmt_txn_be_cfg_batch *last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];
struct hash *batches;
uint64_t next_batch_id;
@@ -157,7 +155,7 @@ struct mgmt_get_data_reply {
int last_batch;
Mgmtd__YangDataReply data_reply;
Mgmtd__YangData reply_data[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
- Mgmtd__YangData * reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+ Mgmtd__YangData *reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
Mgmtd__YangDataValue reply_value[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
char *reply_xpathp[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
};
@@ -256,8 +254,8 @@ static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
enum mgmt_result result,
const char *error_if_any);
-static inline const char *
-mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn, bool curr)
+static inline const char *mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn,
+ bool curr)
{
if (!txn->commit_cfg_req)
return "None";
@@ -267,99 +265,90 @@ mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn, bool curr)
: txn->commit_cfg_req->req.commit_cfg.next_phase));
}
-static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
- int line);
+static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file, int line);
static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
- int line);
-static int
-mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
- struct mgmt_be_client_adapter *adapter);
+ int line);
+static int mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter);
static struct event_loop *mgmt_txn_tm;
static struct mgmt_master *mgmt_txn_mm;
static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
- enum mgmt_txn_event event);
+ enum mgmt_txn_event event);
static int
mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
- struct mgmt_be_client_adapter *adapter);
+ struct mgmt_be_client_adapter *adapter);
static struct mgmt_txn_be_cfg_batch *
-mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn,
- enum mgmt_be_client_id id,
- struct mgmt_be_client_adapter *be_adapter)
+mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn, enum mgmt_be_client_id id,
+ struct mgmt_be_client_adapter *be_adapter)
{
- struct mgmt_txn_be_cfg_batch *cfg_btch;
+ struct mgmt_txn_be_cfg_batch *batch;
- cfg_btch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
- sizeof(struct mgmt_txn_be_cfg_batch));
- assert(cfg_btch);
- cfg_btch->be_id = id;
+ batch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
+ sizeof(struct mgmt_txn_be_cfg_batch));
+ assert(batch);
+ batch->be_id = id;
- cfg_btch->txn = txn;
+ batch->txn = txn;
MGMTD_TXN_LOCK(txn);
assert(txn->commit_cfg_req);
- mgmt_txn_batches_add_tail(
- &txn->commit_cfg_req->req.commit_cfg.curr_batches[id],
- cfg_btch);
- cfg_btch->be_adapter = be_adapter;
- cfg_btch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
+ mgmt_txn_batches_add_tail(&txn->commit_cfg_req->req.commit_cfg
+ .curr_batches[id],
+ batch);
+ batch->be_adapter = be_adapter;
+ batch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
if (be_adapter)
mgmt_be_adapter_lock(be_adapter);
- txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] =
- cfg_btch;
+ txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = batch;
if (!txn->commit_cfg_req->req.commit_cfg.next_batch_id)
txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
- cfg_btch->batch_id =
- txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
- hash_get(txn->commit_cfg_req->req.commit_cfg.batches, cfg_btch,
+ batch->batch_id = txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
+ hash_get(txn->commit_cfg_req->req.commit_cfg.batches, batch,
hash_alloc_intern);
- return cfg_btch;
+ return batch;
}
-static void
-mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **cfg_btch)
+static void mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **batch)
{
size_t indx;
struct mgmt_commit_cfg_req *cmtcfg_req;
MGMTD_TXN_DBG(" freeing batch-id: %" PRIu64 " txn-id %" PRIu64,
- (*cfg_btch)->batch_id, (*cfg_btch)->txn->txn_id);
+ (*batch)->batch_id, (*batch)->txn->txn_id);
- assert((*cfg_btch)->txn
- && (*cfg_btch)->txn->type == MGMTD_TXN_TYPE_CONFIG);
+ assert((*batch)->txn && (*batch)->txn->type == MGMTD_TXN_TYPE_CONFIG);
- cmtcfg_req = &(*cfg_btch)->txn->commit_cfg_req->req.commit_cfg;
- hash_release(cmtcfg_req->batches, *cfg_btch);
- mgmt_txn_batches_del(&cmtcfg_req->curr_batches[(*cfg_btch)->be_id],
- *cfg_btch);
- mgmt_txn_batches_del(&cmtcfg_req->next_batches[(*cfg_btch)->be_id],
- *cfg_btch);
+ cmtcfg_req = &(*batch)->txn->commit_cfg_req->req.commit_cfg;
+ hash_release(cmtcfg_req->batches, *batch);
+ mgmt_txn_batches_del(&cmtcfg_req->curr_batches[(*batch)->be_id], *batch);
+ mgmt_txn_batches_del(&cmtcfg_req->next_batches[(*batch)->be_id], *batch);
- if ((*cfg_btch)->be_adapter)
- mgmt_be_adapter_unlock(&(*cfg_btch)->be_adapter);
+ if ((*batch)->be_adapter)
+ mgmt_be_adapter_unlock(&(*batch)->be_adapter);
- for (indx = 0; indx < (*cfg_btch)->num_cfg_data; indx++) {
- if ((*cfg_btch)->data[indx].xpath) {
- free((*cfg_btch)->data[indx].xpath);
- (*cfg_btch)->data[indx].xpath = NULL;
+ for (indx = 0; indx < (*batch)->num_cfg_data; indx++) {
+ if ((*batch)->data[indx].xpath) {
+ free((*batch)->data[indx].xpath);
+ (*batch)->data[indx].xpath = NULL;
}
}
- MGMTD_TXN_UNLOCK(&(*cfg_btch)->txn);
+ MGMTD_TXN_UNLOCK(&(*batch)->txn);
- XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *cfg_btch);
- *cfg_btch = NULL;
+ XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *batch);
+ *batch = NULL;
}
static unsigned int mgmt_txn_cfgbatch_hash_key(const void *data)
{
const struct mgmt_txn_be_cfg_batch *batch = data;
- return jhash2((uint32_t *) &batch->batch_id,
+ return jhash2((uint32_t *)&batch->batch_id,
sizeof(batch->batch_id) / sizeof(uint32_t), 0);
}
@@ -381,15 +370,14 @@ static void mgmt_txn_cfgbatch_hash_free(void *data)
static inline struct mgmt_txn_be_cfg_batch *
mgmt_txn_cfgbatch_id2ctx(struct mgmt_txn_ctx *txn, uint64_t batch_id)
{
- struct mgmt_txn_be_cfg_batch key = {0};
+ struct mgmt_txn_be_cfg_batch key = { 0 };
struct mgmt_txn_be_cfg_batch *batch;
if (!txn->commit_cfg_req)
return NULL;
key.batch_id = batch_id;
- batch = hash_lookup(txn->commit_cfg_req->req.commit_cfg.batches,
- &key);
+ batch = hash_lookup(txn->commit_cfg_req->req.commit_cfg.batches, &key);
return batch;
}
@@ -397,18 +385,18 @@ mgmt_txn_cfgbatch_id2ctx(struct mgmt_txn_ctx *txn, uint64_t batch_id)
static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
enum mgmt_be_client_id id)
{
- struct mgmt_txn_be_cfg_batch *cfg_btch;
+ struct mgmt_txn_be_cfg_batch *batch;
struct mgmt_txn_batches_head *list;
list = &txn->commit_cfg_req->req.commit_cfg.curr_batches[id];
- FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
- mgmt_txn_cfg_batch_free(&cfg_btch);
+ FOREACH_TXN_CFG_BATCH_IN_LIST (list, batch)
+ mgmt_txn_cfg_batch_free(&batch);
mgmt_txn_batches_fini(list);
list = &txn->commit_cfg_req->req.commit_cfg.next_batches[id];
- FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
- mgmt_txn_cfg_batch_free(&cfg_btch);
+ FOREACH_TXN_CFG_BATCH_IN_LIST (list, batch)
+ mgmt_txn_cfg_batch_free(&batch);
mgmt_txn_batches_fini(list);
@@ -416,8 +404,8 @@ static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
}
static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
- uint64_t req_id,
- enum mgmt_txn_event req_event)
+ uint64_t req_id,
+ enum mgmt_txn_event req_event)
{
struct mgmt_txn_req *txn_req;
enum mgmt_be_client_id id;
@@ -502,17 +490,19 @@ static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
for (indx = 0; indx < (*txn_req)->req.set_cfg->num_cfg_changes;
indx++) {
if ((*txn_req)->req.set_cfg->cfg_changes[indx].value) {
- MGMTD_TXN_DBG(
- "Freeing value for %s at %p ==> '%s'",
- (*txn_req)
- ->req.set_cfg->cfg_changes[indx]
- .xpath,
- (*txn_req)
- ->req.set_cfg->cfg_changes[indx]
- .value,
- (*txn_req)
- ->req.set_cfg->cfg_changes[indx]
- .value);
+ MGMTD_TXN_DBG("Freeing value for %s at %p ==> '%s'",
+ (*txn_req)
+ ->req.set_cfg
+ ->cfg_changes[indx]
+ .xpath,
+ (*txn_req)
+ ->req.set_cfg
+ ->cfg_changes[indx]
+ .value,
+ (*txn_req)
+ ->req.set_cfg
+ ->cfg_changes[indx]
+ .value);
free((void *)(*txn_req)
->req.set_cfg->cfg_changes[indx]
.value);
@@ -614,8 +604,7 @@ static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
mgmt_txn_reqs_del(req_list, *txn_req);
MGMTD_TXN_DBG("Removed req-id: %" PRIu64
" from request-list (left:%zu)",
- (*txn_req)->req_id,
- mgmt_txn_reqs_count(req_list));
+ (*txn_req)->req_id, mgmt_txn_reqs_count(req_list));
}
(*txn_req)->pending_be_proc = false;
@@ -650,36 +639,42 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
ds_ctx = txn_req->req.set_cfg->ds_ctx;
if (!ds_ctx) {
- mgmt_fe_send_set_cfg_reply(
- txn->session_id, txn->txn_id,
- txn_req->req.set_cfg->ds_id, txn_req->req_id,
- MGMTD_INTERNAL_ERROR, "No such datastore!",
- txn_req->req.set_cfg->implicit_commit);
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_INTERNAL_ERROR,
+ "No such datastore!",
+ txn_req->req.set_cfg
+ ->implicit_commit);
goto mgmt_txn_process_set_cfg_done;
}
nb_config = mgmt_ds_get_nb_config(ds_ctx);
if (!nb_config) {
- mgmt_fe_send_set_cfg_reply(
- txn->session_id, txn->txn_id,
- txn_req->req.set_cfg->ds_id, txn_req->req_id,
- MGMTD_INTERNAL_ERROR,
- "Unable to retrieve DS Config Tree!",
- txn_req->req.set_cfg->implicit_commit);
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_INTERNAL_ERROR,
+ "Unable to retrieve DS Config Tree!",
+ txn_req->req.set_cfg
+ ->implicit_commit);
goto mgmt_txn_process_set_cfg_done;
}
error = false;
- nb_candidate_edit_config_changes(
- nb_config, txn_req->req.set_cfg->cfg_changes,
- (size_t)txn_req->req.set_cfg->num_cfg_changes, NULL,
- NULL, 0, err_buf, sizeof(err_buf), &error);
+ nb_candidate_edit_config_changes(nb_config,
+ txn_req->req.set_cfg->cfg_changes,
+ (size_t)txn_req->req.set_cfg
+ ->num_cfg_changes,
+ NULL, NULL, 0, err_buf,
+ sizeof(err_buf), &error);
if (error) {
- mgmt_fe_send_set_cfg_reply(
- txn->session_id, txn->txn_id,
- txn_req->req.set_cfg->ds_id, txn_req->req_id,
- MGMTD_INTERNAL_ERROR, err_buf,
- txn_req->req.set_cfg->implicit_commit);
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_INTERNAL_ERROR, err_buf,
+ txn_req->req.set_cfg
+ ->implicit_commit);
goto mgmt_txn_process_set_cfg_done;
}
@@ -690,41 +685,44 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
/* We expect the user to have locked the DST DS */
if (!mgmt_ds_is_locked(txn_req->req.set_cfg->dst_ds_ctx,
txn->session_id)) {
- MGMTD_TXN_ERR(
- "DS %u not locked for implicit commit txn-id: %" PRIu64
- " session-id: %" PRIu64 " err: %s",
- txn_req->req.set_cfg->dst_ds_id,
- txn->txn_id, txn->session_id,
- strerror(ret));
+ MGMTD_TXN_ERR("DS %u not locked for implicit commit txn-id: %" PRIu64
+ " session-id: %" PRIu64 " err: %s",
+ txn_req->req.set_cfg->dst_ds_id,
+ txn->txn_id, txn->session_id,
+ strerror(ret));
mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_DS_LOCK_FAILED,
"running DS not locked for implicit commit");
goto mgmt_txn_process_set_cfg_done;
}
- mgmt_txn_send_commit_config_req(
- txn->txn_id, txn_req->req_id,
- txn_req->req.set_cfg->ds_id,
- txn_req->req.set_cfg->ds_ctx,
- txn_req->req.set_cfg->dst_ds_id,
- txn_req->req.set_cfg->dst_ds_ctx, false, false,
- true);
+ mgmt_txn_send_commit_config_req(txn->txn_id,
+ txn_req->req_id,
+ txn_req->req.set_cfg
+ ->ds_id,
+ txn_req->req.set_cfg
+ ->ds_ctx,
+ txn_req->req.set_cfg
+ ->dst_ds_id,
+ txn_req->req.set_cfg
+ ->dst_ds_ctx,
+ false, false, true);
if (mm->perf_stats_en)
gettimeofday(&cmt_stats->last_start, NULL);
cmt_stats->commit_cnt++;
- } else if (mgmt_fe_send_set_cfg_reply(
- txn->session_id, txn->txn_id,
- txn_req->req.set_cfg->ds_id,
- txn_req->req_id, MGMTD_SUCCESS, NULL, false)
- != 0) {
- MGMTD_TXN_ERR(
- "Failed to send SET_CONFIG_REPLY txn-id %" PRIu64
- " session-id: %" PRIu64,
- txn->txn_id, txn->session_id);
+ } else if (mgmt_fe_send_set_cfg_reply(txn->session_id,
+ txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_SUCCESS, NULL,
+ false) != 0) {
+ MGMTD_TXN_ERR("Failed to send SET_CONFIG_REPLY txn-id %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
}
- mgmt_txn_process_set_cfg_done:
+mgmt_txn_process_set_cfg_done:
/*
* Note: The following will remove it from the list as well.
@@ -738,17 +736,16 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
left = mgmt_txn_reqs_count(&txn->set_cfg_reqs);
if (left) {
- MGMTD_TXN_DBG(
- "Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
- num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC,
- (int)left);
+ MGMTD_TXN_DBG("Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
+ num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC,
+ (int)left);
mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
}
}
static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
- enum mgmt_result result,
- const char *error_if_any)
+ enum mgmt_result result,
+ const char *error_if_any)
{
bool success, create_cmt_info_rec;
@@ -761,31 +758,31 @@ static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
* b/c right now that is special cased.. that special casing should be
* removed; however...
*/
- if (!txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
- && !txn->commit_cfg_req->req.commit_cfg.rollback
- && mgmt_fe_send_commit_cfg_reply(
- txn->session_id, txn->txn_id,
- txn->commit_cfg_req->req.commit_cfg.src_ds_id,
- txn->commit_cfg_req->req.commit_cfg.dst_ds_id,
- txn->commit_cfg_req->req_id,
- txn->commit_cfg_req->req.commit_cfg.validate_only,
- result, error_if_any)
- != 0) {
- MGMTD_TXN_ERR(
- "Failed to send COMMIT-CONFIG-REPLY txn-id: %" PRIu64
- " session-id: %" PRIu64,
- txn->txn_id, txn->session_id);
+ if (!txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id &&
+ !txn->commit_cfg_req->req.commit_cfg.rollback &&
+ mgmt_fe_send_commit_cfg_reply(txn->session_id, txn->txn_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .src_ds_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_id,
+ txn->commit_cfg_req->req_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .validate_only,
+ result, error_if_any) != 0) {
+ MGMTD_TXN_ERR("Failed to send COMMIT-CONFIG-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
}
- if (txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
- && !txn->commit_cfg_req->req.commit_cfg.rollback
- && mgmt_fe_send_set_cfg_reply(
- txn->session_id, txn->txn_id,
- txn->commit_cfg_req->req.commit_cfg.src_ds_id,
- txn->commit_cfg_req->req_id,
- success ? MGMTD_SUCCESS : MGMTD_INTERNAL_ERROR,
- error_if_any, true)
- != 0) {
+ if (txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id &&
+ !txn->commit_cfg_req->req.commit_cfg.rollback &&
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .src_ds_id,
+ txn->commit_cfg_req->req_id,
+ success ? MGMTD_SUCCESS
+ : MGMTD_INTERNAL_ERROR,
+ error_if_any, true) != 0) {
MGMTD_TXN_ERR("Failed to send SET-CONFIG-REPLY txn-id: %" PRIu64
" session-id: %" PRIu64,
txn->txn_id, txn->session_id);
@@ -804,10 +801,10 @@ static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
* Successful commit: Merge Src DS into Dst DS if and only if
* this was not a validate-only or abort request.
*/
- if ((txn->session_id
- && !txn->commit_cfg_req->req.commit_cfg.validate_only
- && !txn->commit_cfg_req->req.commit_cfg.abort)
- || txn->commit_cfg_req->req.commit_cfg.rollback) {
+ if ((txn->session_id &&
+ !txn->commit_cfg_req->req.commit_cfg.validate_only &&
+ !txn->commit_cfg_req->req.commit_cfg.abort) ||
+ txn->commit_cfg_req->req.commit_cfg.rollback) {
mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
.src_ds_ctx,
txn->commit_cfg_req->req.commit_cfg
@@ -819,8 +816,7 @@ static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
* Restore Src DS back to Dest DS only through a commit abort
* request.
*/
- if (txn->session_id
- && txn->commit_cfg_req->req.commit_cfg.abort)
+ if (txn->session_id && txn->commit_cfg_req->req.commit_cfg.abort)
mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
.dst_ds_ctx,
txn->commit_cfg_req->req.commit_cfg
@@ -868,24 +864,24 @@ static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
static void
mgmt_move_txn_cfg_batch_to_next(struct mgmt_commit_cfg_req *cmtcfg_req,
- struct mgmt_txn_be_cfg_batch *cfg_btch,
+ struct mgmt_txn_be_cfg_batch *batch,
struct mgmt_txn_batches_head *src_list,
struct mgmt_txn_batches_head *dst_list,
bool update_commit_phase,
enum mgmt_commit_phase to_phase)
{
- mgmt_txn_batches_del(src_list, cfg_btch);
+ mgmt_txn_batches_del(src_list, batch);
if (update_commit_phase) {
MGMTD_TXN_DBG("Move txn-id %" PRIu64 " batch-id: %" PRIu64
" from '%s' --> '%s'",
- cfg_btch->txn->txn_id, cfg_btch->batch_id,
- mgmt_commit_phase2str(cfg_btch->comm_phase),
- mgmt_txn_commit_phase_str(cfg_btch->txn, false));
- cfg_btch->comm_phase = to_phase;
+ batch->txn->txn_id, batch->batch_id,
+ mgmt_commit_phase2str(batch->comm_phase),
+ mgmt_txn_commit_phase_str(batch->txn, false));
+ batch->comm_phase = to_phase;
}
- mgmt_txn_batches_add_tail(dst_list, cfg_btch);
+ mgmt_txn_batches_add_tail(dst_list, batch);
}
static void mgmt_move_txn_cfg_batches(struct mgmt_txn_ctx *txn,
@@ -895,12 +891,12 @@ static void mgmt_move_txn_cfg_batches(struct mgmt_txn_ctx *txn,
bool update_commit_phase,
enum mgmt_commit_phase to_phase)
{
- struct mgmt_txn_be_cfg_batch *cfg_btch;
+ struct mgmt_txn_be_cfg_batch *batch;
- FOREACH_TXN_CFG_BATCH_IN_LIST (src_list, cfg_btch) {
- mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, cfg_btch, src_list,
- dst_list, update_commit_phase,
- to_phase);
+ FOREACH_TXN_CFG_BATCH_IN_LIST (src_list, batch) {
+ mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, batch, src_list,
+ dst_list, update_commit_phase,
+ to_phase);
}
}
@@ -949,8 +945,8 @@ mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
FOREACH_MGMTD_BE_CLIENT_ID (id) {
curr_list = &cmtcfg_req->curr_batches[id];
next_list = &cmtcfg_req->next_batches[id];
- mgmt_move_txn_cfg_batches(txn, cmtcfg_req, next_list,
- curr_list, false, 0);
+ mgmt_move_txn_cfg_batches(txn, cmtcfg_req, next_list, curr_list,
+ false, 0);
}
mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
@@ -976,13 +972,12 @@ mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
mgmt_txn_commit_phase_str(txn, true),
mgmt_txn_commit_phase_str(txn, false));
- MGMTD_TXN_DBG(
- "Move all config batches for '%s' from current to next list",
- adapter->name);
+ MGMTD_TXN_DBG("Move all config batches for '%s' from current to next list",
+ adapter->name);
curr_list = &cmtcfg_req->curr_batches[adapter->id];
next_list = &cmtcfg_req->next_batches[adapter->id];
mgmt_move_txn_cfg_batches(txn, cmtcfg_req, curr_list, next_list, true,
- cmtcfg_req->next_phase);
+ cmtcfg_req->next_phase);
MGMTD_TXN_DBG("txn-id: %" PRIu64 ", Phase(current:'%s' next:'%s')",
txn->txn_id, mgmt_txn_commit_phase_str(txn, true),
@@ -997,11 +992,11 @@ mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
}
static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
- struct nb_config_cbs *changes)
+ struct nb_config_cbs *changes)
{
struct nb_config_cb *cb, *nxt;
struct nb_config_change *chg;
- struct mgmt_txn_be_cfg_batch *cfg_btch;
+ struct mgmt_txn_be_cfg_batch *batch;
struct mgmt_be_client_subscr_info subscr_info;
char *xpath = NULL, *value = NULL;
char err_buf[1024];
@@ -1035,7 +1030,7 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
value = (char *)MGMTD_BE_CONTAINER_NODE_VAL;
MGMTD_TXN_DBG("XPATH: %s, Value: '%s'", xpath,
- value ? value : "NIL");
+ value ? value : "NIL");
mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info);
@@ -1052,51 +1047,46 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
if (!adapter)
continue;
- cfg_btch = cmtcfg_req->last_be_cfg_batch[id];
- if (!cfg_btch
- || (cfg_btch->num_cfg_data
- == MGMTD_MAX_CFG_CHANGES_IN_BATCH)
- || (cfg_btch->buf_space_left
- < (xpath_len + value_len))) {
+ batch = cmtcfg_req->last_be_cfg_batch[id];
+ if (!batch ||
+ (batch->num_cfg_data ==
+ MGMTD_MAX_CFG_CHANGES_IN_BATCH) ||
+ (batch->buf_space_left < (xpath_len + value_len))) {
/* Allocate a new config batch */
- cfg_btch = mgmt_txn_cfg_batch_alloc(
- txn_req->txn, id, adapter);
+ batch = mgmt_txn_cfg_batch_alloc(txn_req->txn,
+ id, adapter);
}
- cfg_btch->buf_space_left -= (xpath_len + value_len);
- memcpy(&cfg_btch->xp_subscr[cfg_btch->num_cfg_data],
+ batch->buf_space_left -= (xpath_len + value_len);
+ memcpy(&batch->xp_subscr[batch->num_cfg_data],
&subscr_info.xpath_subscr[id],
- sizeof(cfg_btch->xp_subscr[0]));
+ sizeof(batch->xp_subscr[0]));
mgmt_yang_cfg_data_req_init(
- &cfg_btch->cfg_data[cfg_btch->num_cfg_data]);
- cfg_btch->cfg_datap[cfg_btch->num_cfg_data] =
- &cfg_btch->cfg_data[cfg_btch->num_cfg_data];
+ &batch->cfg_data[batch->num_cfg_data]);
+ batch->cfg_datap[batch->num_cfg_data] =
+ &batch->cfg_data[batch->num_cfg_data];
if (chg->cb.operation == NB_OP_DESTROY)
- cfg_btch->cfg_data[cfg_btch->num_cfg_data]
- .req_type =
+ batch->cfg_data[batch->num_cfg_data].req_type =
MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
else
- cfg_btch->cfg_data[cfg_btch->num_cfg_data]
- .req_type =
+ batch->cfg_data[batch->num_cfg_data].req_type =
MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
- mgmt_yang_data_init(
- &cfg_btch->data[cfg_btch->num_cfg_data]);
- cfg_btch->cfg_data[cfg_btch->num_cfg_data].data =
- &cfg_btch->data[cfg_btch->num_cfg_data];
- cfg_btch->data[cfg_btch->num_cfg_data].xpath =
- strdup(xpath);
+ mgmt_yang_data_init(&batch->data[batch->num_cfg_data]);
+ batch->cfg_data[batch->num_cfg_data].data =
+ &batch->data[batch->num_cfg_data];
+ batch->data[batch->num_cfg_data].xpath = strdup(xpath);
mgmt_yang_data_value_init(
- &cfg_btch->value[cfg_btch->num_cfg_data]);
- cfg_btch->data[cfg_btch->num_cfg_data].value =
- &cfg_btch->value[cfg_btch->num_cfg_data];
- cfg_btch->value[cfg_btch->num_cfg_data].value_case =
+ &batch->value[batch->num_cfg_data]);
+ batch->data[batch->num_cfg_data].value =
+ &batch->value[batch->num_cfg_data];
+ batch->value[batch->num_cfg_data].value_case =
MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
- cfg_btch->value[cfg_btch->num_cfg_data]
- .encoded_str_val = value;
+ batch->value[batch->num_cfg_data].encoded_str_val =
+ value;
value = NULL;
if (subscr_info.xpath_subscr[id] &
@@ -1105,17 +1095,11 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
cmtcfg_req->subscr_info.xpath_subscr[id] |=
subscr_info.xpath_subscr[id];
- MGMTD_TXN_DBG(" -- %s, {V:%d, N:%d}, batch-id: %" PRIu64
- " item:%d",
- adapter->name,
- (subscr_info.xpath_subscr[id] &
- MGMT_SUBSCR_VALIDATE_CFG) != 0,
- (subscr_info.xpath_subscr[id] &
- MGMT_SUBSCR_NOTIFY_CFG) != 0,
- cfg_btch->batch_id,
- (int)cfg_btch->num_cfg_data);
-
- cfg_btch->num_cfg_data++;
+ MGMTD_TXN_DBG(" -- %s, batch-id: %" PRIu64 " item:%d",
+ adapter->name, batch->batch_id,
+ (int)batch->num_cfg_data);
+
+ batch->num_cfg_data++;
num_chgs++;
}
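Reader-added illustration (not part of the patch): the hunk above keeps the batching rule intact while renaming cfg_btch to batch — a change is appended to the backend client's current batch unless no batch exists yet, it already holds MGMTD_MAX_CFG_CHANGES_IN_BATCH entries, or it lacks buffer space for the new xpath and value, in which case a fresh batch is allocated. A standalone sketch of that rollover pattern follows; all example_* names and constants are placeholders, not mgmtd code.

#include <stddef.h>
#include <stdlib.h>

#define EXAMPLE_MAX_ITEMS 8    /* stands in for MGMTD_MAX_CFG_CHANGES_IN_BATCH */
#define EXAMPLE_BUF_SIZE  1024 /* stands in for the batch buffer capacity */

struct example_batch {
	size_t num_items;      /* like batch->num_cfg_data */
	size_t buf_space_left; /* like batch->buf_space_left */
};

static struct example_batch *example_batch_alloc(void)
{
	/* mgmtd allocates and links a struct mgmt_txn_be_cfg_batch here;
	 * for illustration a bare calloc is enough. */
	struct example_batch *b = calloc(1, sizeof(*b));

	b->buf_space_left = EXAMPLE_BUF_SIZE;
	return b;
}

/*
 * Pick the batch the next change goes into: reuse the current one unless it
 * is missing, already full, or cannot fit the encoded xpath+value -- the same
 * rollover condition used in mgmt_txn_create_config_batches().
 */
static struct example_batch *example_pick_batch(struct example_batch *cur,
						size_t item_len)
{
	if (!cur || cur->num_items == EXAMPLE_MAX_ITEMS ||
	    cur->buf_space_left < item_len)
		cur = example_batch_alloc();

	cur->buf_space_left -= item_len;
	cur->num_items++;
	return cur;
}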
@@ -1131,9 +1115,9 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
if (!num_chgs) {
- (void)mgmt_txn_send_commit_cfg_reply(
- txn_req->txn, MGMTD_NO_CFG_CHANGES,
- "No changes found to commit!");
+ (void)mgmt_txn_send_commit_cfg_reply(txn_req->txn,
+ MGMTD_NO_CFG_CHANGES,
+ "No changes found to commit!");
return -1;
}
@@ -1159,8 +1143,7 @@ static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
goto mgmt_txn_prep_config_validation_done;
}
- if (txn->commit_cfg_req->req.commit_cfg.src_ds_id
- != MGMTD_DS_CANDIDATE) {
+ if (txn->commit_cfg_req->req.commit_cfg.src_ds_id != MGMTD_DS_CANDIDATE) {
(void)mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INVALID_PARAM,
"Source DS cannot be any other than CANDIDATE!");
@@ -1168,8 +1151,7 @@ static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
goto mgmt_txn_prepare_config_done;
}
- if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id
- != MGMTD_DS_RUNNING) {
+ if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id != MGMTD_DS_RUNNING) {
(void)mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INVALID_PARAM,
"Destination DS cannot be any other than RUNNING!");
@@ -1178,16 +1160,15 @@ static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
}
if (!txn->commit_cfg_req->req.commit_cfg.src_ds_ctx) {
- (void)mgmt_txn_send_commit_cfg_reply(
- txn, MGMTD_INVALID_PARAM, "No such source datastore!");
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+ "No such source datastore!");
ret = -1;
goto mgmt_txn_prepare_config_done;
}
if (!txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx) {
- (void)mgmt_txn_send_commit_cfg_reply(
- txn, MGMTD_INVALID_PARAM,
- "No such destination datastore!");
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+ "No such destination datastore!");
ret = -1;
goto mgmt_txn_prepare_config_done;
}
@@ -1198,8 +1179,7 @@ static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
* That should trigger a restore of Candidate datastore to
* Running.
*/
- (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
- NULL);
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
goto mgmt_txn_prepare_config_done;
}
@@ -1225,10 +1205,10 @@ static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
* diff from a full comparison of the candidate and
* running DSs.
*/
- nb_config_diff(
- mgmt_ds_get_nb_config(txn->commit_cfg_req->req
- .commit_cfg.dst_ds_ctx),
- nb_config, &changes);
+ nb_config_diff(mgmt_ds_get_nb_config(
+ txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_ctx),
+ nb_config, &changes);
cfg_chgs = &changes;
del_cfg_chgs = true;
}
@@ -1238,9 +1218,8 @@ static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
* This means there's no changes to commit whatsoever
* is the source of the changes in config.
*/
- (void)mgmt_txn_send_commit_cfg_reply(
- txn, MGMTD_NO_CFG_CHANGES,
- "No changes found to be committed!");
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_NO_CFG_CHANGES,
+ "No changes found to be committed!");
ret = -1;
goto mgmt_txn_prepare_config_done;
}
@@ -1254,7 +1233,7 @@ static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
* Validate YANG contents of the source DS and get the diff
* between source and destination DS contents.
*/
- char err_buf[1024] = {0};
+ char err_buf[1024] = { 0 };
nb_ctx.client = NB_CLIENT_MGMTD_SERVER;
nb_ctx.user = (void *)txn;
@@ -1264,7 +1243,7 @@ static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
strlcpy(err_buf, "Validation failed", sizeof(err_buf));
(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
- err_buf);
+ err_buf);
ret = -1;
goto mgmt_txn_prepare_config_done;
}
@@ -1279,7 +1258,7 @@ static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
strlcpy(err_buf, "Validation failed", sizeof(err_buf));
(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
- err_buf);
+ err_buf);
ret = -1;
goto mgmt_txn_prepare_config_done;
}
@@ -1288,8 +1267,7 @@ static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
/*
* This was a validate-only COMMIT request return success.
*/
- (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
- NULL);
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
goto mgmt_txn_prepare_config_done;
}
#endif /* ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED */
@@ -1334,7 +1312,7 @@ static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
enum mgmt_be_client_id id;
struct mgmt_be_client_adapter *adapter;
struct mgmt_commit_cfg_req *cmtcfg_req;
- struct mgmt_txn_be_cfg_batch *cfg_btch;
+ struct mgmt_txn_be_cfg_batch *batch;
assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
@@ -1349,11 +1327,11 @@ static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
return -1;
}
- FOREACH_TXN_CFG_BATCH_IN_LIST (
- &txn->commit_cfg_req->req.commit_cfg
- .curr_batches[id],
- cfg_btch)
- cfg_btch->comm_phase =
+ FOREACH_TXN_CFG_BATCH_IN_LIST (&txn->commit_cfg_req->req
+ .commit_cfg
+ .curr_batches[id],
+ batch)
+ batch->comm_phase =
MGMTD_COMMIT_PHASE_TXN_CREATE;
}
}
@@ -1379,8 +1357,8 @@ static int mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
struct mgmt_be_client_adapter *adapter)
{
struct mgmt_commit_cfg_req *cmtcfg_req;
- struct mgmt_txn_be_cfg_batch *cfg_btch;
- struct mgmt_be_cfgreq cfg_req = {0};
+ struct mgmt_txn_be_cfg_batch *batch;
+ struct mgmt_be_cfgreq cfg_req = { 0 };
size_t num_batches, indx;
assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
@@ -1392,29 +1370,31 @@ static int mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
num_batches =
mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id]);
FOREACH_TXN_CFG_BATCH_IN_LIST (&cmtcfg_req->curr_batches[adapter->id],
- cfg_btch) {
+ batch) {
assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_SEND_CFG);
- cfg_req.cfgdata_reqs = cfg_btch->cfg_datap;
- cfg_req.num_reqs = cfg_btch->num_cfg_data;
+ cfg_req.cfgdata_reqs = batch->cfg_datap;
+ cfg_req.num_reqs = batch->num_cfg_data;
indx++;
- if (mgmt_be_send_cfgdata_req(
- adapter, txn->txn_id, cfg_btch->batch_id,
- cfg_req.cfgdata_reqs, cfg_req.num_reqs,
- indx == num_batches ? true : false)) {
+ if (mgmt_be_send_cfgdata_req(adapter, txn->txn_id,
+ batch->batch_id,
+ cfg_req.cfgdata_reqs,
+ cfg_req.num_reqs,
+ indx == num_batches ? true
+ : false)) {
(void)mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
"Internal Error! Could not send config data to backend!");
- MGMTD_TXN_ERR(
- "Could not send CFGDATA_CREATE txn-id: %" PRIu64
- " batch-id: %" PRIu64 " to client '%s",
- txn->txn_id, cfg_btch->batch_id, adapter->name);
+ MGMTD_TXN_ERR("Could not send CFGDATA_CREATE txn-id: %" PRIu64
+ " batch-id: %" PRIu64 " to client '%s",
+ txn->txn_id, batch->batch_id,
+ adapter->name);
return -1;
}
cmtcfg_req->cmt_stats->last_num_cfgdata_reqs++;
mgmt_move_txn_cfg_batch_to_next(
- cmtcfg_req, cfg_btch,
+ cmtcfg_req, batch,
&cmtcfg_req->curr_batches[adapter->id],
&cmtcfg_req->next_batches[adapter->id], true,
MGMTD_COMMIT_PHASE_SEND_CFG);
@@ -1429,9 +1409,8 @@ static int mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
return 0;
}
-static int
-mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
- struct mgmt_be_client_adapter *adapter)
+static int mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter)
{
struct mgmt_commit_cfg_req *cmtcfg_req =
&txn->commit_cfg_req->req.commit_cfg;
@@ -1483,8 +1462,8 @@ static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
enum mgmt_be_client_id id;
struct mgmt_be_client_adapter *adapter;
struct mgmt_commit_cfg_req *cmtcfg_req;
- struct mgmt_txn_batches_head *btch_list;
- struct mgmt_txn_be_cfg_batch *cfg_btch;
+ struct mgmt_txn_batches_head *batch_list;
+ struct mgmt_txn_be_cfg_batch *batch;
assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
@@ -1493,8 +1472,7 @@ static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
/*
* If this was a validate-only COMMIT request return success.
*/
- (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
- NULL);
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
return 0;
}
@@ -1505,7 +1483,7 @@ static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
if (!adapter)
return -1;
- btch_list = &cmtcfg_req->curr_batches[id];
+ batch_list = &cmtcfg_req->curr_batches[id];
if (mgmt_be_send_cfgapply_req(adapter, txn->txn_id)) {
(void)mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
@@ -1517,9 +1495,8 @@ static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
UNSET_FLAG(adapter->flags,
MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
- FOREACH_TXN_CFG_BATCH_IN_LIST (btch_list, cfg_btch)
- cfg_btch->comm_phase =
- MGMTD_COMMIT_PHASE_APPLY_CFG;
+ FOREACH_TXN_CFG_BATCH_IN_LIST (batch_list, batch)
+ batch->comm_phase = MGMTD_COMMIT_PHASE_APPLY_CFG;
}
}
@@ -1543,8 +1520,7 @@ static void mgmt_txn_process_commit_cfg(struct event *thread)
assert(txn);
MGMTD_TXN_DBG("Processing COMMIT_CONFIG for txn-id: %" PRIu64
- " session-id: %" PRIu64
- " Phase(Current:'%s', Next: '%s')",
+ " session-id: %" PRIu64 " Phase(Current:'%s', Next: '%s')",
txn->txn_id, txn->session_id,
mgmt_txn_commit_phase_str(txn, true),
mgmt_txn_commit_phase_str(txn, false));
@@ -1574,16 +1550,14 @@ static void mgmt_txn_process_commit_cfg(struct event *thread)
*/
#ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED
assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
- MGMTD_TXN_DBG(
- "txn-id: %" PRIu64 " session-id: %" PRIu64
- " trigger sending CFG_VALIDATE_REQ to all backend clients",
- txn->txn_id, txn->session_id);
+ MGMTD_TXN_DBG("txn-id: %" PRIu64 " session-id: %" PRIu64
+ " trigger sending CFG_VALIDATE_REQ to all backend clients",
+ txn->txn_id, txn->session_id);
#else /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
- MGMTD_TXN_DBG(
- "txn-id: %" PRIu64 " session-id: %" PRIu64
- " trigger sending CFG_APPLY_REQ to all backend clients",
- txn->txn_id, txn->session_id);
+ MGMTD_TXN_DBG("txn-id: %" PRIu64 " session-id: %" PRIu64
+ " trigger sending CFG_APPLY_REQ to all backend clients",
+ txn->txn_id, txn->session_id);
#endif /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
break;
case MGMTD_COMMIT_PHASE_APPLY_CFG:
@@ -1664,7 +1638,7 @@ static void mgmt_reset_get_data_reply_buf(struct mgmt_get_data_req *get_data)
}
static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
- struct mgmt_get_data_req *get_req)
+ struct mgmt_get_data_req *get_req)
{
struct mgmt_get_data_reply *get_reply;
Mgmtd__YangDataReply *data_reply;
@@ -1677,45 +1651,42 @@ static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
mgmt_yang_data_reply_init(data_reply);
data_reply->n_data = get_reply->num_reply;
data_reply->data = get_reply->reply_datap;
- data_reply->next_indx =
- (!get_reply->last_batch ? get_req->total_reply : -1);
+ data_reply->next_indx = (!get_reply->last_batch ? get_req->total_reply
+ : -1);
MGMTD_TXN_DBG("Sending %zu Get-Config/Data replies next-index:%" PRId64,
data_reply->n_data, data_reply->next_indx);
switch (txn_req->req_event) {
case MGMTD_TXN_PROC_GETCFG:
- if (mgmt_fe_send_get_cfg_reply(
- txn_req->txn->session_id, txn_req->txn->txn_id,
- get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
- data_reply, NULL)
- != 0) {
- MGMTD_TXN_ERR(
- "Failed to send GET-CONFIG-REPLY txn-id: %" PRIu64
- " session-id: %" PRIu64 " req-id: %" PRIu64,
- txn_req->txn->txn_id, txn_req->txn->session_id,
- txn_req->req_id);
+ if (mgmt_fe_send_get_reply(txn_req->txn->session_id,
+ txn_req->txn->txn_id, get_req->ds_id,
+ txn_req->req_id, MGMTD_SUCCESS,
+ data_reply, NULL) != 0) {
+ MGMTD_TXN_ERR("Failed to send GET-CONFIG-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64
+ " req-id: %" PRIu64,
+ txn_req->txn->txn_id,
+ txn_req->txn->session_id, txn_req->req_id);
}
break;
case MGMTD_TXN_PROC_GETDATA:
- if (mgmt_fe_send_get_data_reply(
- txn_req->txn->session_id, txn_req->txn->txn_id,
- get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
- data_reply, NULL)
- != 0) {
- MGMTD_TXN_ERR(
- "Failed to send GET-DATA-REPLY txn-id: %" PRIu64
- " session-id: %" PRIu64 " req-id: %" PRIu64,
- txn_req->txn->txn_id, txn_req->txn->session_id,
- txn_req->req_id);
+ if (mgmt_fe_send_get_reply(txn_req->txn->session_id,
+ txn_req->txn->txn_id, get_req->ds_id,
+ txn_req->req_id, MGMTD_SUCCESS,
+ data_reply, NULL) != 0) {
+ MGMTD_TXN_ERR("Failed to send GET-DATA-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64
+ " req-id: %" PRIu64,
+ txn_req->txn->txn_id,
+ txn_req->txn->session_id, txn_req->req_id);
}
break;
case MGMTD_TXN_PROC_SETCFG:
case MGMTD_TXN_PROC_COMMITCFG:
case MGMTD_TXN_COMMITCFG_TIMEOUT:
case MGMTD_TXN_CLEANUP:
- MGMTD_TXN_ERR("Invalid Txn-Req-Event %u",
- txn_req->req_event);
+ MGMTD_TXN_ERR("Invalid Txn-Req-Event %u", txn_req->req_event);
break;
}
@@ -1743,8 +1714,8 @@ static void mgmt_txn_iter_and_send_get_cfg_reply(const char *xpath,
if (!(node->schema->nodetype & LYD_NODE_TERM))
return;
- assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG
- || txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
+ assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG ||
+ txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
get_req = txn_req->req.get_data;
assert(get_req);
@@ -1762,7 +1733,7 @@ static void mgmt_txn_iter_and_send_get_cfg_reply(const char *xpath,
get_reply->num_reply++;
get_req->total_reply++;
MGMTD_TXN_DBG(" [%d] XPATH: '%s', Value: '%s'", get_req->total_reply,
- data->xpath, data_value->encoded_str_val);
+ data->xpath, data_value->encoded_str_val);
if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
@@ -1782,10 +1753,9 @@ static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
get_data->reply = XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REPLY,
sizeof(struct mgmt_get_data_reply));
if (!get_data->reply) {
- mgmt_fe_send_get_cfg_reply(
- txn->session_id, txn->txn_id,
- get_data->ds_id, txn_req->req_id,
- MGMTD_INTERNAL_ERROR, NULL,
+ mgmt_fe_send_get_reply(
+ txn->session_id, txn->txn_id, get_data->ds_id,
+ txn_req->req_id, MGMTD_INTERNAL_ERROR, NULL,
"Internal error: Unable to allocate reply buffers!");
goto mgmt_txn_get_config_failed;
}
@@ -1798,7 +1768,7 @@ static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
get_reply = get_data->reply;
for (indx = 0; indx < get_data->num_xpaths; indx++) {
MGMTD_TXN_DBG("Trying to get all data under '%s'",
- get_data->xpaths[indx]);
+ get_data->xpaths[indx]);
mgmt_init_get_data_reply(get_reply);
/*
* mgmt_ds_iter_data works on path prefixes, but the user may
@@ -1810,15 +1780,15 @@ static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
mgmt_txn_iter_and_send_get_cfg_reply,
(void *)txn_req) == -1) {
MGMTD_TXN_DBG("Invalid Xpath '%s",
- get_data->xpaths[indx]);
- mgmt_fe_send_get_cfg_reply(
- txn->session_id, txn->txn_id,
- get_data->ds_id, txn_req->req_id,
- MGMTD_INTERNAL_ERROR, NULL, "Invalid xpath");
+ get_data->xpaths[indx]);
+ mgmt_fe_send_get_reply(txn->session_id, txn->txn_id,
+ get_data->ds_id, txn_req->req_id,
+ MGMTD_INTERNAL_ERROR, NULL,
+ "Invalid xpath");
goto mgmt_txn_get_config_failed;
}
MGMTD_TXN_DBG("Got %d remaining data-replies for xpath '%s'",
- get_reply->num_reply, get_data->xpaths[indx]);
+ get_reply->num_reply, get_data->xpaths[indx]);
get_reply->last_batch = true;
mgmt_txn_send_getcfg_reply_data(txn_req, get_data);
}
@@ -1857,11 +1827,11 @@ static void mgmt_txn_process_get_cfg(struct event *thread)
assert(cfg_root);
if (mgmt_txn_get_config(txn, txn_req, cfg_root) != 0) {
- MGMTD_TXN_ERR(
- "Unable to retrieve config from DS %d txn-id: %" PRIu64
- " session-id: %" PRIu64 " req-id: %" PRIu64,
- txn_req->req.get_data->ds_id, txn->txn_id,
- txn->session_id, txn_req->req_id);
+ MGMTD_TXN_ERR("Unable to retrieve config from DS %d txn-id: %" PRIu64
+ " session-id: %" PRIu64
+ " req-id: %" PRIu64,
+ txn_req->req.get_data->ds_id, txn->txn_id,
+ txn->session_id, txn_req->req_id);
error = true;
}
@@ -1884,9 +1854,8 @@ static void mgmt_txn_process_get_cfg(struct event *thread)
}
if (mgmt_txn_reqs_count(&txn->get_cfg_reqs)) {
- MGMTD_TXN_DBG(
- "Processed maximum number of Get-Config requests (%d/%d). Rescheduling for rest.",
- num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
+ MGMTD_TXN_DBG("Processed maximum number of Get-Config requests (%d/%d). Rescheduling for rest.",
+ num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
}
}
@@ -1912,11 +1881,10 @@ static void mgmt_txn_process_get_data(struct event *thread)
* TODO: Trigger GET procedures for Backend
* For now return back error.
*/
- mgmt_fe_send_get_data_reply(
- txn->session_id, txn->txn_id,
- txn_req->req.get_data->ds_id, txn_req->req_id,
- MGMTD_INTERNAL_ERROR, NULL,
- "GET-DATA on Oper DS is not supported yet!");
+ mgmt_fe_send_get_reply(txn->session_id, txn->txn_id,
+ txn_req->req.get_data->ds_id,
+ txn_req->req_id, MGMTD_INTERNAL_ERROR,
+ NULL, "GET-DATA is not supported yet!");
/*
* Delete the txn request.
* Note: The following will remove it from the list
@@ -1934,16 +1902,15 @@ static void mgmt_txn_process_get_data(struct event *thread)
}
if (mgmt_txn_reqs_count(&txn->get_data_reqs)) {
- MGMTD_TXN_DBG(
- "Processed maximum number of Get-Data requests (%d/%d). Rescheduling for rest.",
- num_processed, MGMTD_TXN_MAX_NUM_GETDATA_PROC);
+ MGMTD_TXN_DBG("Processed maximum number of Get-Data requests (%d/%d). Rescheduling for rest.",
+ num_processed, MGMTD_TXN_MAX_NUM_GETDATA_PROC);
mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
}
}
static struct mgmt_txn_ctx *
mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
- enum mgmt_txn_type type)
+ enum mgmt_txn_type type)
{
struct mgmt_txn_ctx *txn;
@@ -1956,7 +1923,7 @@ mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
}
static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
- enum mgmt_txn_type type)
+ enum mgmt_txn_type type)
{
struct mgmt_txn_ctx *txn = NULL;
@@ -1970,8 +1937,7 @@ static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
goto mgmt_create_txn_done;
}
- txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id,
- type);
+ txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id, type);
if (!txn) {
txn = XCALLOC(MTYPE_MGMTD_TXN, sizeof(struct mgmt_txn_ctx));
assert(txn);
@@ -2012,7 +1978,7 @@ static unsigned int mgmt_txn_hash_key(const void *data)
{
const struct mgmt_txn_ctx *txn = data;
- return jhash2((uint32_t *) &txn->txn_id,
+ return jhash2((uint32_t *)&txn->txn_id,
sizeof(txn->txn_id) / sizeof(uint32_t), 0);
}
@@ -2036,9 +2002,8 @@ static void mgmt_txn_hash_init(void)
if (!mgmt_txn_mm || mgmt_txn_mm->txn_hash)
return;
- mgmt_txn_mm->txn_hash = hash_create(mgmt_txn_hash_key,
- mgmt_txn_hash_cmp,
- "MGMT Transactions");
+ mgmt_txn_mm->txn_hash = hash_create(mgmt_txn_hash_key, mgmt_txn_hash_cmp,
+ "MGMT Transactions");
}
static void mgmt_txn_hash_destroy(void)
@@ -2046,16 +2011,14 @@ static void mgmt_txn_hash_destroy(void)
if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
return;
- hash_clean(mgmt_txn_mm->txn_hash,
- mgmt_txn_hash_free);
+ hash_clean(mgmt_txn_mm->txn_hash, mgmt_txn_hash_free);
hash_free(mgmt_txn_mm->txn_hash);
mgmt_txn_mm->txn_hash = NULL;
}
-static inline struct mgmt_txn_ctx *
-mgmt_txn_id2ctx(uint64_t txn_id)
+static inline struct mgmt_txn_ctx *mgmt_txn_id2ctx(uint64_t txn_id)
{
- struct mgmt_txn_ctx key = {0};
+ struct mgmt_txn_ctx key = { 0 };
struct mgmt_txn_ctx *txn;
if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
@@ -2067,8 +2030,7 @@ mgmt_txn_id2ctx(uint64_t txn_id)
return txn;
}
-static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
- int line)
+static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file, int line)
{
txn->refcount++;
MGMTD_TXN_DBG("%s:%d --> Lock %s txn-id: %" PRIu64 " refcnt: %d", file,
@@ -2077,7 +2039,7 @@ static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
}
static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
- int line)
+ int line)
{
assert(*txn && (*txn)->refcount);
@@ -2114,8 +2076,7 @@ static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn)
mgmt_txn_delete(txn);
}
-static void
-mgmt_txn_cleanup_all_txns(void)
+static void mgmt_txn_cleanup_all_txns(void)
{
struct mgmt_txn_ctx *txn;
@@ -2137,40 +2098,39 @@ static void mgmt_txn_cleanup(struct event *thread)
}
static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
- enum mgmt_txn_event event)
+ enum mgmt_txn_event event)
{
- struct timeval tv = {.tv_sec = 0,
- .tv_usec = MGMTD_TXN_PROC_DELAY_USEC};
+ struct timeval tv = { .tv_sec = 0,
+ .tv_usec = MGMTD_TXN_PROC_DELAY_USEC };
assert(mgmt_txn_mm && mgmt_txn_tm);
switch (event) {
case MGMTD_TXN_PROC_SETCFG:
- event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
- txn, &tv, &txn->proc_set_cfg);
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg, txn,
+ &tv, &txn->proc_set_cfg);
break;
case MGMTD_TXN_PROC_COMMITCFG:
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
- txn, &tv, &txn->proc_comm_cfg);
+ txn, &tv, &txn->proc_comm_cfg);
break;
case MGMTD_TXN_PROC_GETCFG:
- event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
- txn, &tv, &txn->proc_get_cfg);
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg, txn,
+ &tv, &txn->proc_get_cfg);
break;
case MGMTD_TXN_PROC_GETDATA:
- event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
- txn, &tv, &txn->proc_get_data);
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data, txn,
+ &tv, &txn->proc_get_data);
break;
case MGMTD_TXN_COMMITCFG_TIMEOUT:
- event_add_timer_msec(mgmt_txn_tm,
- mgmt_txn_cfg_commit_timedout, txn,
- MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
- &txn->comm_cfg_timeout);
+ event_add_timer_msec(mgmt_txn_tm, mgmt_txn_cfg_commit_timedout,
+ txn, MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
+ &txn->comm_cfg_timeout);
break;
case MGMTD_TXN_CLEANUP:
tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
- &txn->clnup);
+ &txn->clnup);
}
}
@@ -2224,12 +2184,12 @@ void mgmt_destroy_txn(uint64_t *txn_id)
}
int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- struct mgmt_ds_ctx *ds_ctx,
- Mgmtd__YangCfgDataReq **cfg_req,
- size_t num_req, bool implicit_commit,
- Mgmtd__DatastoreId dst_ds_id,
- struct mgmt_ds_ctx *dst_ds_ctx)
+ Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ Mgmtd__YangCfgDataReq **cfg_req,
+ size_t num_req, bool implicit_commit,
+ Mgmtd__DatastoreId dst_ds_id,
+ struct mgmt_ds_ctx *dst_ds_ctx)
{
struct mgmt_txn_ctx *txn;
struct mgmt_txn_req *txn_req;
@@ -2254,40 +2214,38 @@ int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
for (indx = 0; indx < num_req; indx++) {
cfg_chg = &txn_req->req.set_cfg->cfg_changes[*num_chgs];
- if (cfg_req[indx]->req_type
- == MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
+ if (cfg_req[indx]->req_type ==
+ MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
cfg_chg->operation = NB_OP_DESTROY;
- else if (cfg_req[indx]->req_type
- == MGMTD__CFG_DATA_REQ_TYPE__SET_DATA)
+ else if (cfg_req[indx]->req_type ==
+ MGMTD__CFG_DATA_REQ_TYPE__SET_DATA)
cfg_chg->operation =
- mgmt_ds_find_data_node_by_xpath(
- ds_ctx, cfg_req[indx]->data->xpath)
+ mgmt_ds_find_data_node_by_xpath(ds_ctx,
+ cfg_req[indx]
+ ->data
+ ->xpath)
? NB_OP_MODIFY
: NB_OP_CREATE;
else
continue;
- MGMTD_TXN_DBG(
- "XPath: '%s', Value: '%s'", cfg_req[indx]->data->xpath,
- (cfg_req[indx]->data->value
- && cfg_req[indx]
- ->data->value
- ->encoded_str_val
- ? cfg_req[indx]->data->value->encoded_str_val
- : "NULL"));
+ MGMTD_TXN_DBG("XPath: '%s', Value: '%s'",
+ cfg_req[indx]->data->xpath,
+ (cfg_req[indx]->data->value &&
+ cfg_req[indx]->data->value->encoded_str_val
+ ? cfg_req[indx]->data->value->encoded_str_val
+ : "NULL"));
strlcpy(cfg_chg->xpath, cfg_req[indx]->data->xpath,
sizeof(cfg_chg->xpath));
- cfg_chg->value = (cfg_req[indx]->data->value
- && cfg_req[indx]
- ->data->value
- ->encoded_str_val
- ? strdup(cfg_req[indx]
- ->data->value
- ->encoded_str_val)
- : NULL);
+ cfg_chg->value =
+ (cfg_req[indx]->data->value &&
+ cfg_req[indx]->data->value->encoded_str_val
+ ? strdup(cfg_req[indx]
+ ->data->value->encoded_str_val)
+ : NULL);
if (cfg_chg->value)
MGMTD_TXN_DBG("Allocated value at %p ==> '%s'",
- cfg_chg->value, cfg_chg->value);
+ cfg_chg->value, cfg_chg->value);
(*num_chgs)++;
}
@@ -2342,7 +2300,7 @@ int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
}
int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
- bool connect)
+ bool connect)
{
struct mgmt_txn_ctx *txn;
struct mgmt_txn_req *txn_req;
@@ -2367,9 +2325,8 @@ int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
*/
txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
if (!txn) {
- MGMTD_TXN_ERR(
- "Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
- adapter->name);
+ MGMTD_TXN_ERR("Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
+ adapter->name);
return -1;
}
@@ -2380,8 +2337,7 @@ int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
* Set the changeset for transaction to commit and trigger the
* commit request.
*/
- txn_req =
- mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
+ txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_NONE;
txn_req->req.commit_cfg.src_ds_ctx = 0;
txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_NONE;
@@ -2407,8 +2363,8 @@ int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
* completed */
if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
cmtcfg_req = txn->commit_cfg_req
- ? &txn->commit_cfg_req
- ->req.commit_cfg
+ ? &txn->commit_cfg_req->req
+ .commit_cfg
: NULL;
if (cmtcfg_req &&
cmtcfg_req->subscr_info
@@ -2424,9 +2380,8 @@ int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
return 0;
}
-int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create,
- bool success,
- struct mgmt_be_client_adapter *adapter)
+int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create, bool success,
+ struct mgmt_be_client_adapter *adapter)
{
struct mgmt_txn_ctx *txn;
struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
@@ -2446,8 +2401,8 @@ int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create,
* Done with TXN_CREATE. Move the backend client to
* next phase.
*/
- assert(cmtcfg_req->curr_phase
- == MGMTD_COMMIT_PHASE_TXN_CREATE);
+ assert(cmtcfg_req->curr_phase ==
+ MGMTD_COMMIT_PHASE_TXN_CREATE);
/*
* Send CFGDATA_CREATE-REQs to the backend immediately.
@@ -2469,12 +2424,12 @@ int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create,
return 0;
}
-int mgmt_txn_notify_be_cfgdata_reply(
- uint64_t txn_id, uint64_t batch_id, bool success, char *error_if_any,
- struct mgmt_be_client_adapter *adapter)
+int mgmt_txn_notify_be_cfgdata_reply(uint64_t txn_id, uint64_t batch_id,
+ bool success, char *error_if_any,
+ struct mgmt_be_client_adapter *adapter)
{
struct mgmt_txn_ctx *txn;
- struct mgmt_txn_be_cfg_batch *cfg_btch;
+ struct mgmt_txn_be_cfg_batch *batch;
struct mgmt_commit_cfg_req *cmtcfg_req;
txn = mgmt_txn_id2ctx(txn_id);
@@ -2485,32 +2440,31 @@ int mgmt_txn_notify_be_cfgdata_reply(
return -1;
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
- cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_id);
- if (!cfg_btch || cfg_btch->txn != txn)
+ batch = mgmt_txn_cfgbatch_id2ctx(txn, batch_id);
+ if (!batch || batch->txn != txn)
return -1;
if (!success) {
- MGMTD_TXN_ERR(
- "CFGDATA_CREATE_REQ sent to '%s' failed txn-id: %" PRIu64
- " batch-id %" PRIu64 " err: %s",
- adapter->name, txn->txn_id, cfg_btch->batch_id,
- error_if_any ? error_if_any : "None");
+ MGMTD_TXN_ERR("CFGDATA_CREATE_REQ sent to '%s' failed txn-id: %" PRIu64
+ " batch-id %" PRIu64 " err: %s",
+ adapter->name, txn->txn_id, batch->batch_id,
+ error_if_any ? error_if_any : "None");
mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
- error_if_any ? error_if_any :
- "Internal error! Failed to download config data to backend!");
+ error_if_any
+ ? error_if_any
+ : "Internal error! Failed to download config data to backend!");
return 0;
}
- MGMTD_TXN_DBG(
- "CFGDATA_CREATE_REQ sent to '%s' was successful txn-id: %" PRIu64
- " batch-id %" PRIu64 " err: %s",
- adapter->name, txn->txn_id, cfg_btch->batch_id,
- error_if_any ? error_if_any : "None");
- mgmt_move_txn_cfg_batch_to_next(
- cmtcfg_req, cfg_btch, &cmtcfg_req->curr_batches[adapter->id],
- &cmtcfg_req->next_batches[adapter->id], true,
- MGMTD_COMMIT_PHASE_APPLY_CFG);
+ MGMTD_TXN_DBG("CFGDATA_CREATE_REQ sent to '%s' was successful txn-id: %" PRIu64
+ " batch-id %" PRIu64 " err: %s",
+ adapter->name, txn->txn_id, batch->batch_id,
+ error_if_any ? error_if_any : "None");
+ mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, batch,
+ &cmtcfg_req->curr_batches[adapter->id],
+ &cmtcfg_req->next_batches[adapter->id],
+ true, MGMTD_COMMIT_PHASE_APPLY_CFG);
mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
@@ -2523,37 +2477,36 @@ int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
struct mgmt_be_client_adapter *adapter)
{
struct mgmt_txn_ctx *txn;
- struct mgmt_txn_be_cfg_batch *cfg_btch;
+ struct mgmt_txn_be_cfg_batch *batch;
struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
size_t indx;
txn = mgmt_txn_id2ctx(txn_id);
- if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG
- || !txn->commit_cfg_req)
+ if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
return -1;
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
if (!success) {
- MGMTD_TXN_ERR(
- "CFGDATA_APPLY_REQ sent to '%s' failed txn-id: %" PRIu64
- " batch ids %" PRIu64 " - %" PRIu64 " err: %s",
- adapter->name, txn->txn_id, batch_ids[0],
- batch_ids[num_batch_ids - 1],
- error_if_any ? error_if_any : "None");
+ MGMTD_TXN_ERR("CFGDATA_APPLY_REQ sent to '%s' failed txn-id: %" PRIu64
+ " batch ids %" PRIu64 " - %" PRIu64 " err: %s",
+ adapter->name, txn->txn_id, batch_ids[0],
+ batch_ids[num_batch_ids - 1],
+ error_if_any ? error_if_any : "None");
mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
- error_if_any ? error_if_any :
- "Internal error! Failed to apply config data on backend!");
+ error_if_any
+ ? error_if_any
+ : "Internal error! Failed to apply config data on backend!");
return 0;
}
for (indx = 0; indx < num_batch_ids; indx++) {
- cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_ids[indx]);
- if (cfg_btch->txn != txn)
+ batch = mgmt_txn_cfgbatch_id2ctx(txn, batch_ids[indx]);
+ if (batch->txn != txn)
return -1;
mgmt_move_txn_cfg_batch_to_next(
- cmtcfg_req, cfg_btch,
+ cmtcfg_req, batch,
&cmtcfg_req->curr_batches[adapter->id],
&cmtcfg_req->next_batches[adapter->id], true,
MGMTD_COMMIT_PHASE_TXN_DELETE);
@@ -2575,53 +2528,24 @@ int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
return 0;
}
-int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- struct nb_config *cfg_root,
- Mgmtd__YangGetDataReq **data_req,
- size_t num_reqs)
+int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, struct nb_config *cfg_root,
+ Mgmtd__YangGetDataReq **data_req, size_t num_reqs)
{
struct mgmt_txn_ctx *txn;
struct mgmt_txn_req *txn_req;
+ enum mgmt_txn_event req_event;
size_t indx;
txn = mgmt_txn_id2ctx(txn_id);
if (!txn)
return -1;
- txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETCFG);
- txn_req->req.get_data->ds_id = ds_id;
- txn_req->req.get_data->cfg_root = cfg_root;
- for (indx = 0;
- indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
- indx++) {
- MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
- txn_req->req.get_data->xpaths[indx] =
- strdup(data_req[indx]->data->xpath);
- txn_req->req.get_data->num_xpaths++;
- }
-
- mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
-
- return 0;
-}
-
-int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq **data_req,
- size_t num_reqs)
-{
- struct mgmt_txn_ctx *txn;
- struct mgmt_txn_req *txn_req;
- size_t indx;
-
- txn = mgmt_txn_id2ctx(txn_id);
- if (!txn)
- return -1;
+ req_event = cfg_root ? MGMTD_TXN_PROC_GETCFG : MGMTD_TXN_PROC_GETDATA;
- txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETDATA);
+ txn_req = mgmt_txn_req_alloc(txn, req_id, req_event);
txn_req->req.get_data->ds_id = ds_id;
- txn_req->req.get_data->cfg_root = NULL;
+ txn_req->req.get_data->cfg_root = cfg_root;
for (indx = 0;
indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
indx++) {
@@ -2631,7 +2555,7 @@ int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
txn_req->req.get_data->num_xpaths++;
}
- mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
+ mgmt_txn_register_event(txn, req_event);
return 0;
}
diff --git a/mgmtd/mgmt_txn.h b/mgmtd/mgmt_txn.h
index 69d75fed07..068f07a5ca 100644
--- a/mgmtd/mgmt_txn.h
+++ b/mgmtd/mgmt_txn.h
@@ -177,25 +177,16 @@ extern int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
bool implicit);
/*
- * Send get-config request to be processed later in transaction.
+ * Send get-{cfg,data} request to be processed later in transaction.
*
- * Similar to set-config request.
+ * Acts as get-config if cfg_root is provided (config is gathered locally);
+ * otherwise acts as get-data, with data fetched from backend clients.
*/
-extern int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- struct nb_config *cfg_root,
- Mgmtd__YangGetDataReq **data_req,
- size_t num_reqs);
-
-/*
- * Send get-data request to be processed later in transaction.
- *
- * Similar to get-config request, but here data is fetched from backedn client.
- */
-extern int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq **data_req,
- size_t num_reqs);
+extern int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ struct nb_config *cfg_root,
+ Mgmtd__YangGetDataReq **data_req,
+ size_t num_reqs);
/*
* Notifiy backend adapter on connection.
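Reader-added illustration for the consolidated API above (a sketch only — the wrapper name is hypothetical and the include path simply follows the tree layout shown in the diff headers): passing a non-NULL cfg_root makes the request behave as get-config, while NULL routes it through the get-data path.

#include "mgmtd/mgmt_txn.h" /* mgmt_txn_send_get_req() and the Mgmtd__* types */

/*
 * Illustrative caller of the single entry point; txn/req identifiers and
 * cfg_root are assumed to come from the frontend session handling code.
 */
static int example_issue_get(uint64_t txn_id, uint64_t req_id,
			     Mgmtd__DatastoreId ds_id,
			     struct nb_config *cfg_root, /* may be NULL */
			     Mgmtd__YangGetDataReq **data_reqs,
			     size_t num_reqs)
{
	/*
	 * Non-NULL cfg_root => handled as get-config, gathered locally.
	 * NULL cfg_root     => handled as get-data, fetched from backends.
	 */
	return mgmt_txn_send_get_req(txn_id, req_id, ds_id, cfg_root,
				     data_reqs, num_reqs);
}

The mgmt_vty.c hunk below makes the same choice one level up, where vty_mgmt_send_get_req() takes a boolean to pick between the two request kinds.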
diff --git a/mgmtd/mgmt_vty.c b/mgmtd/mgmt_vty.c
index 6a6f32353d..44c6c0097a 100644
--- a/mgmtd/mgmt_vty.c
+++ b/mgmtd/mgmt_vty.c
@@ -194,7 +194,7 @@ DEFPY(show_mgmt_get_config, show_mgmt_get_config_cmd,
datastore = mgmt_ds_name2id(dsname);
xpath_list[0] = path;
- vty_mgmt_send_get_config(vty, datastore, xpath_list, 1);
+ vty_mgmt_send_get_req(vty, true, datastore, xpath_list, 1);
return CMD_SUCCESS;
}
@@ -214,7 +214,7 @@ DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
datastore = mgmt_ds_name2id(dsname);
xpath_list[0] = path;
- vty_mgmt_send_get_data(vty, datastore, xpath_list, 1);
+ vty_mgmt_send_get_req(vty, false, datastore, xpath_list, 1);
return CMD_SUCCESS;
}
diff --git a/ospf6d/ospf6_main.c b/ospf6d/ospf6_main.c
index fdb93475d4..932304578a 100644
--- a/ospf6d/ospf6_main.c
+++ b/ospf6d/ospf6_main.c
@@ -173,6 +173,32 @@ FRR_DAEMON_INFO(ospf6d, OSPF6, .vty_port = OSPF6_VTY_PORT,
.n_yang_modules = array_size(ospf6d_yang_modules),
);
+/* Max wait time for config to load before accepting hellos */
+#define OSPF6_PRE_CONFIG_MAX_WAIT_SECONDS 600
+
+static void ospf6_config_finish(struct event *t)
+{
+ zlog_err("OSPF6 configuration end timer expired after %d seconds.",
+ OSPF6_PRE_CONFIG_MAX_WAIT_SECONDS);
+}
+
+static void ospf6_config_start(void)
+{
+ if (IS_OSPF6_DEBUG_EVENT)
+ zlog_debug("ospf6d config start received");
+ EVENT_OFF(t_ospf6_cfg);
+ event_add_timer(master, ospf6_config_finish, NULL,
+ OSPF6_PRE_CONFIG_MAX_WAIT_SECONDS, &t_ospf6_cfg);
+}
+
+static void ospf6_config_end(void)
+{
+ if (IS_OSPF6_DEBUG_EVENT)
+ zlog_debug("ospf6d config end received");
+
+ EVENT_OFF(t_ospf6_cfg);
+}
+
/* Main routine of ospf6d. Treatment of argument and starting ospf finite
state machine is handled here. */
int main(int argc, char *argv[], char *envp[])
@@ -217,6 +243,9 @@ int main(int argc, char *argv[], char *envp[])
/* initialize ospf6 */
ospf6_init(master);
+ /* Configuration processing callback initialization. */
+ cmd_init_config_callbacks(ospf6_config_start, ospf6_config_end);
+
frr_config_fork();
frr_run(master);
diff --git a/ospf6d/ospf6_message.c b/ospf6d/ospf6_message.c
index 032988a91f..29a68c5c3d 100644
--- a/ospf6d/ospf6_message.c
+++ b/ospf6d/ospf6_message.c
@@ -2248,6 +2248,17 @@ void ospf6_hello_send(struct event *thread)
if (oi->gr.hello_delay.t_grace_send)
return;
+ /* Check if config is still being processed */
+ if (event_is_scheduled(t_ospf6_cfg)) {
+ if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_HELLO, SEND))
+ zlog_debug(
+ "Suppressing Hello on interface %s during config load",
+ oi->interface->name);
+ event_add_timer(master, ospf6_hello_send, oi,
+ oi->hello_interval, &oi->thread_send_hello);
+ return;
+ }
+
if (oi->state <= OSPF6_INTERFACE_DOWN) {
if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_HELLO, SEND_HDR))
zlog_debug("Unable to send Hello on down interface %s",
diff --git a/ospf6d/ospf6d.c b/ospf6d/ospf6d.c
index 214007d041..d90a950d79 100644
--- a/ospf6d/ospf6d.c
+++ b/ospf6d/ospf6d.c
@@ -34,9 +34,16 @@
#include "lib/json.h"
#include "ospf6_nssa.h"
#include "ospf6_auth_trailer.h"
+#include "ospf6d/ospf6d_clippy.c"
DEFINE_MGROUP(OSPF6D, "ospf6d");
+/* OSPF6 config processing timer thread */
+struct event *t_ospf6_cfg;
+
+/* OSPF6 debug event state */
+unsigned char conf_debug_ospf6_event;
+
struct route_node *route_prev(struct route_node *node)
{
struct route_node *end;
@@ -62,6 +69,7 @@ struct route_node *route_prev(struct route_node *node)
}
static int config_write_ospf6_debug(struct vty *vty);
+static int config_write_ospf6_debug_event(struct vty *vty);
static struct cmd_node debug_node = {
.name = "debug",
.node = DEBUG_NODE,
@@ -85,6 +93,7 @@ static int config_write_ospf6_debug(struct vty *vty)
config_write_ospf6_debug_nssa(vty);
config_write_ospf6_debug_gr_helper(vty);
config_write_ospf6_debug_auth(vty);
+ config_write_ospf6_debug_event(vty);
return 0;
}
@@ -1374,6 +1383,29 @@ DEFUN(show_ipv6_ospf6_linkstate_detail, show_ipv6_ospf6_linkstate_detail_cmd,
return CMD_SUCCESS;
}
+DEFPY(debug_ospf6_event, debug_ospf6_event_cmd, "[no] debug ospf6 event",
+ NO_STR DEBUG_STR OSPF6_STR "Debug OSPFv3 event function\n")
+{
+ if (!no)
+ OSPF6_DEBUG_EVENT_ON();
+ else
+ OSPF6_DEBUG_EVENT_OFF();
+ return CMD_SUCCESS;
+}
+
+static int config_write_ospf6_debug_event(struct vty *vty)
+{
+ if (IS_OSPF6_DEBUG_EVENT)
+ vty_out(vty, "debug ospf6 event\n");
+ return 0;
+}
+
+static void install_element_ospf6_debug_event(void)
+{
+ install_element(ENABLE_NODE, &debug_ospf6_event_cmd);
+ install_element(CONFIG_NODE, &debug_ospf6_event_cmd);
+}
+
/* Install ospf related commands. */
void ospf6_init(struct event_loop *master)
{
@@ -1447,6 +1479,7 @@ void ospf6_init(struct event_loop *master)
VIEW_NODE,
&show_ipv6_ospf6_database_type_self_originated_linkstate_id_cmd);
install_element(VIEW_NODE, &show_ipv6_ospf6_database_aggr_router_cmd);
+ install_element_ospf6_debug_event();
install_element_ospf6_debug_auth();
ospf6_interface_auth_trailer_cmd_init();
install_element_ospf6_clear_intf_auth();
diff --git a/ospf6d/ospf6d.h b/ospf6d/ospf6d.h
index 980a365265..c927ee7566 100644
--- a/ospf6d/ospf6d.h
+++ b/ospf6d/ospf6d.h
@@ -15,6 +15,9 @@ DECLARE_MGROUP(OSPF6D);
/* global variables */
extern struct event_loop *master;
+/* OSPF config processing timer thread */
+extern struct event *t_ospf6_cfg;
+
/* Historical for KAME. */
#ifndef IPV6_JOIN_GROUP
#ifdef IPV6_ADD_MEMBERSHIP
@@ -105,6 +108,12 @@ extern struct event_loop *master;
extern struct zebra_privs_t ospf6d_privs;
+/* Event Debug option */
+extern unsigned char conf_debug_ospf6_event;
+#define OSPF6_DEBUG_EVENT_ON() (conf_debug_ospf6_event = 1)
+#define OSPF6_DEBUG_EVENT_OFF() (conf_debug_ospf6_event = 0)
+#define IS_OSPF6_DEBUG_EVENT (conf_debug_ospf6_event)
+
/* Function Prototypes */
extern struct route_node *route_prev(struct route_node *node);
diff --git a/ospf6d/subdir.am b/ospf6d/subdir.am
index c34db3012d..f6d27c84cd 100644
--- a/ospf6d/subdir.am
+++ b/ospf6d/subdir.am
@@ -75,6 +75,7 @@ ospf6d_ospf6d_snmp_la_LDFLAGS = $(MODULE_LDFLAGS)
ospf6d_ospf6d_snmp_la_LIBADD = lib/libfrrsnmp.la
clippy_scan += \
+ ospf6d/ospf6d.c \
ospf6d/ospf6_top.c \
ospf6d/ospf6_area.c \
ospf6d/ospf6_asbr.c \
diff --git a/ospfd/ospf_main.c b/ospfd/ospf_main.c
index 1f476a7e3d..536bd592d2 100644
--- a/ospfd/ospf_main.c
+++ b/ospfd/ospf_main.c
@@ -134,6 +134,32 @@ FRR_DAEMON_INFO(ospfd, OSPF, .vty_port = OSPF_VTY_PORT,
.n_yang_modules = array_size(ospfd_yang_modules),
);
+/** Max wait time for config to load before accepting hellos */
+#define OSPF_PRE_CONFIG_MAX_WAIT_SECONDS 600
+
+static void ospf_config_finish(struct event *t)
+{
+ zlog_err("OSPF configuration end timer expired after %d seconds.",
+ OSPF_PRE_CONFIG_MAX_WAIT_SECONDS);
+}
+
+static void ospf_config_start(void)
+{
+ EVENT_OFF(t_ospf_cfg);
+ if (IS_DEBUG_OSPF_EVENT)
+ zlog_debug("ospfd config start callback received.");
+ event_add_timer(master, ospf_config_finish, NULL,
+ OSPF_PRE_CONFIG_MAX_WAIT_SECONDS, &t_ospf_cfg);
+}
+
+static void ospf_config_end(void)
+{
+ if (IS_DEBUG_OSPF_EVENT)
+ zlog_debug("ospfd config end callback received.");
+
+ EVENT_OFF(t_ospf_cfg);
+}
+
/* OSPFd main routine. */
int main(int argc, char **argv)
{
@@ -193,6 +219,9 @@ int main(int argc, char **argv)
access_list_init();
prefix_list_init();
+ /* Configuration processing callback initialization. */
+ cmd_init_config_callbacks(ospf_config_start, ospf_config_end);
+
/* OSPFd inits. */
ospf_if_init();
ospf_zebra_init(master, ospf_instance);
diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c
index d010b8b6e6..105c04c7a1 100644
--- a/ospfd/ospf_packet.c
+++ b/ospfd/ospf_packet.c
@@ -3682,6 +3682,16 @@ static void ospf_hello_send_sub(struct ospf_interface *oi, in_addr_t addr)
struct ospf_packet *op;
uint16_t length = OSPF_HEADER_SIZE;
+ /* Check if config is still being processed */
+ if (event_is_scheduled(t_ospf_cfg)) {
+ if (IS_DEBUG_OSPF_PACKET(0, SEND))
+ zlog_debug(
+ "Suppressing hello to %pI4 on %s during config load",
+ &(addr), IF_NAME(oi));
+
+ return;
+ }
+
op = ospf_packet_new(oi->ifp->mtu);
/* Prepare OSPF common header. */
diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c
index 8aabfc914b..96d474fe86 100644
--- a/ospfd/ospf_zebra.c
+++ b/ospfd/ospf_zebra.c
@@ -2161,9 +2161,9 @@ static int ospf_opaque_msg_handler(ZAPI_CALLBACK_ARGS)
switch (info.type) {
case LINK_STATE_SYNC:
- STREAM_GETC(s, dst.proto);
- STREAM_GETW(s, dst.instance);
- STREAM_GETL(s, dst.session_id);
+ dst.proto = info.src_proto;
+ dst.instance = info.src_instance;
+ dst.session_id = info.src_session_id;
dst.type = LINK_STATE_SYNC;
ret = ospf_te_sync_ted(dst);
break;
diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c
index 0922aecc37..fc51c739e4 100644
--- a/ospfd/ospfd.c
+++ b/ospfd/ospfd.c
@@ -62,6 +62,8 @@ unsigned short ospf_instance;
extern struct zclient *zclient;
extern struct zclient *zclient_sync;
+/* OSPF config processing timer thread */
+struct event *t_ospf_cfg;
static void ospf_remove_vls_through_area(struct ospf *, struct ospf_area *);
static void ospf_network_free(struct ospf *, struct ospf_network *);
diff --git a/ospfd/ospfd.h b/ospfd/ospfd.h
index 36936b16f4..860140cb76 100644
--- a/ospfd/ospfd.h
+++ b/ospfd/ospfd.h
@@ -70,6 +70,9 @@
/* Default socket buffer size */
#define OSPF_DEFAULT_SOCK_BUFSIZE (8 * 1024 * 1024)
+/* OSPF config processing timer thread */
+extern struct event *t_ospf_cfg;
+
struct ospf_external {
unsigned short instance;
struct route_table *external_info;
diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c
index c4e36ebd46..ee9ee32f8b 100644
--- a/pbrd/pbr_vty.c
+++ b/pbrd/pbr_vty.c
@@ -199,6 +199,11 @@ DEFPY(pbr_map_match_ip_proto, pbr_map_match_ip_proto_cmd,
return CMD_WARNING_CONFIG_FAILED;
if (!no) {
+ if (!ip_proto) {
+ vty_out(vty, "Unable to convert (null) to proto id\n");
+ return CMD_WARNING;
+ }
+
p = getprotobyname(ip_proto);
if (!p) {
vty_out(vty, "Unable to convert %s to proto id\n",
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index f26fd818b5..7340eeaa60 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -40,9 +40,8 @@
#include "pim6_mld.h"
-#if PIM_IPV == 4
-static void pim_if_igmp_join_del_all(struct interface *ifp);
-#endif
+static void pim_if_gm_join_del_all(struct interface *ifp);
+
static int gm_join_sock(const char *ifname, ifindex_t ifindex,
pim_addr group_addr, pim_addr source_addr,
struct pim_interface *pim_ifp);
@@ -189,11 +188,9 @@ void pim_if_delete(struct interface *ifp)
assert(pim_ifp);
pim_ifp->pim->mcast_if_count--;
-#if PIM_IPV == 4
if (pim_ifp->gm_join_list) {
- pim_if_igmp_join_del_all(ifp);
+ pim_if_gm_join_del_all(ifp);
}
-#endif
pim_ifchannel_delete_all(ifp);
#if PIM_IPV == 4
@@ -1380,9 +1377,8 @@ int pim_if_gm_join_del(struct interface *ifp, pim_addr group_addr,
return 0;
}
-#if PIM_IPV == 4
__attribute__((unused))
-static void pim_if_igmp_join_del_all(struct interface *ifp)
+static void pim_if_gm_join_del_all(struct interface *ifp)
{
struct pim_interface *pim_ifp;
struct listnode *node;
@@ -1402,7 +1398,6 @@ static void pim_if_igmp_join_del_all(struct interface *ifp)
for (ALL_LIST_ELEMENTS(pim_ifp->gm_join_list, node, nextnode, ij))
pim_if_gm_join_del(ifp, ij->group_addr, ij->source_addr);
}
-#endif /* PIM_IPV == 4 */
/*
RFC 4601
diff --git a/ripngd/ripng_routemap.c b/ripngd/ripng_routemap.c
index 3e6880b4df..b5f74be3f6 100644
--- a/ripngd/ripng_routemap.c
+++ b/ripngd/ripng_routemap.c
@@ -112,6 +112,70 @@ static const struct route_map_rule_cmd route_match_interface_cmd = {
route_match_interface_free
};
+/* match ipv6 address WORD */
+
+static enum route_map_cmd_result_t
+route_match_ipv6_address(void *rule, const struct prefix *prefix, void *object)
+{
+ struct access_list *alist;
+
+ alist = access_list_lookup(AFI_IP6, (char *)rule);
+ if (access_list_apply(alist, prefix) != FILTER_DENY)
+ return RMAP_MATCH;
+
+ return RMAP_NOMATCH;
+}
+
+static void *route_match_ipv6_address_compile(const char *arg)
+{
+ return XSTRDUP(MTYPE_ROUTE_MAP_COMPILED, arg);
+}
+
+static void route_match_ipv6_address_free(void *rule)
+{
+ XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
+}
+
+static const struct route_map_rule_cmd route_match_ipv6_address_cmd = {
+ "ipv6 address",
+ route_match_ipv6_address,
+ route_match_ipv6_address_compile,
+ route_match_ipv6_address_free
+};
+
+/* match ipv6 address prefix-list PREFIX_LIST */
+
+static enum route_map_cmd_result_t
+route_match_ipv6_address_prefix_list(void *rule, const struct prefix *prefix,
+ void *object)
+{
+ struct prefix_list *plist;
+
+ plist = prefix_list_lookup(AFI_IP6, (char *)rule);
+ if (prefix_list_apply(plist, prefix) != PREFIX_DENY)
+ return RMAP_MATCH;
+
+ return RMAP_NOMATCH;
+}
+
+static void *route_match_ipv6_address_prefix_list_compile(const char *arg)
+{
+ return XSTRDUP(MTYPE_ROUTE_MAP_COMPILED, arg);
+}
+
+static void route_match_ipv6_address_prefix_list_free(void *rule)
+{
+ XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
+}
+
+static const struct route_map_rule_cmd
+ route_match_ipv6_address_prefix_list_cmd = {
+ "ipv6 address prefix-list",
+ route_match_ipv6_address_prefix_list,
+ route_match_ipv6_address_prefix_list_compile,
+ route_match_ipv6_address_prefix_list_free
+};
+
/* `match tag TAG' */
/* Match function return 1 if match is success else return zero. */
static enum route_map_cmd_result_t route_match_tag(void *rule,
@@ -327,6 +391,12 @@ void ripng_route_map_init(void)
route_map_match_interface_hook(generic_match_add);
route_map_no_match_interface_hook(generic_match_delete);
+ route_map_match_ipv6_address_hook(generic_match_add);
+ route_map_no_match_ipv6_address_hook(generic_match_delete);
+
+ route_map_match_ipv6_address_prefix_list_hook(generic_match_add);
+ route_map_no_match_ipv6_address_prefix_list_hook(generic_match_delete);
+
route_map_match_metric_hook(generic_match_add);
route_map_no_match_metric_hook(generic_match_delete);
@@ -344,6 +414,8 @@ void ripng_route_map_init(void)
route_map_install_match(&route_match_metric_cmd);
route_map_install_match(&route_match_interface_cmd);
+ route_map_install_match(&route_match_ipv6_address_cmd);
+ route_map_install_match(&route_match_ipv6_address_prefix_list_cmd);
route_map_install_match(&route_match_tag_cmd);
route_map_install_set(&route_set_metric_cmd);
route_map_install_set(&route_set_ipv6_nexthop_local_cmd);
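
The two new ripng match rules above follow the same three-callback contract every route-map rule in this file uses: the compile callback turns the CLI argument into stored state, the match callback evaluates it per prefix, and the free callback releases it. For illustration only, here is a hypothetical rule in the same shape; the "min-prefixlen" name and semantics are invented for this sketch, while the types, macros, and route_map_install_match() call are exactly the ones used above (the include names are assumptions).

    #include <zebra.h>
    #include "routemap.h"  /* struct route_map_rule_cmd, RMAP_MATCH/RMAP_NOMATCH */

    static enum route_map_cmd_result_t
    route_match_min_prefixlen(void *rule, const struct prefix *prefix, void *object)
    {
            /* "rule" is the string stored by the compile callback below */
            if (prefix->prefixlen >= atoi((const char *)rule))
                    return RMAP_MATCH;
            return RMAP_NOMATCH;
    }

    static void *route_match_min_prefixlen_compile(const char *arg)
    {
            return XSTRDUP(MTYPE_ROUTE_MAP_COMPILED, arg);
    }

    static void route_match_min_prefixlen_free(void *rule)
    {
            XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
    }

    static const struct route_map_rule_cmd route_match_min_prefixlen_cmd = {
            "min-prefixlen",  /* hypothetical rule name, not an FRR command */
            route_match_min_prefixlen,
            route_match_min_prefixlen_compile,
            route_match_min_prefixlen_free
    };

    /* Installed from the daemon's *_route_map_init(), exactly as above:
     *     route_map_install_match(&route_match_min_prefixlen_cmd);
     */
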
diff --git a/staticd/static_bfd.c b/staticd/static_bfd.c
index 78d8c05807..507c64e6a4 100644
--- a/staticd/static_bfd.c
+++ b/staticd/static_bfd.c
@@ -88,6 +88,7 @@ void static_next_hop_bfd_monitor_enable(struct static_nexthop *sn,
bool mhop;
int family;
struct ipaddr source;
+ struct vrf *vrf = NULL;
use_interface = false;
use_source = yang_dnode_exists(dnode, "./source");
@@ -95,7 +96,7 @@ void static_next_hop_bfd_monitor_enable(struct static_nexthop *sn,
onlink = yang_dnode_exists(dnode, "../onlink") &&
yang_dnode_get_bool(dnode, "../onlink");
mhop = yang_dnode_get_bool(dnode, "./multi-hop");
-
+ vrf = vrf_lookup_by_name(yang_dnode_get_string(dnode, "../vrf"));
family = static_next_hop_type_to_family(sn);
if (family == AF_UNSPEC)
@@ -133,6 +134,8 @@ void static_next_hop_bfd_monitor_enable(struct static_nexthop *sn,
bfd_sess_set_profile(sn->bsp, use_profile ? yang_dnode_get_string(
dnode, "./profile")
: NULL);
+ if (vrf && vrf->vrf_id != VRF_UNKNOWN)
+ bfd_sess_set_vrf(sn->bsp, vrf->vrf_id);
bfd_sess_set_hop_count(sn->bsp, (onlink || mhop == false) ? 1 : 254);
diff --git a/tests/lib/subdir.am b/tests/lib/subdir.am
index c3a1a3e2c0..6c1be50201 100644
--- a/tests/lib/subdir.am
+++ b/tests/lib/subdir.am
@@ -157,6 +157,13 @@ tests_lib_test_checksum_LDADD = $(ALL_TESTS_LDADD)
tests_lib_test_checksum_SOURCES = tests/lib/test_checksum.c tests/helpers/c/prng.c
+check_PROGRAMS += tests/lib/test_darr
+tests_lib_test_darr_CFLAGS = $(TESTS_CFLAGS)
+tests_lib_test_darr_CPPFLAGS = $(TESTS_CPPFLAGS)
+tests_lib_test_darr_LDADD = $(ALL_TESTS_LDADD)
+tests_lib_test_darr_SOURCES = tests/lib/test_darr.c
+
+
check_PROGRAMS += tests/lib/test_graph
tests_lib_test_graph_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_graph_CPPFLAGS = $(TESTS_CPPFLAGS)
diff --git a/tests/lib/test_darr.c b/tests/lib/test_darr.c
new file mode 100644
index 0000000000..9150aed09d
--- /dev/null
+++ b/tests/lib/test_darr.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * June 23 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ *
+ */
+#include <zebra.h>
+#include "darr.h"
+
+/*
+ * Public functions to test:
+ * [x] - darr_append
+ * [x] - darr_append_n
+ * [x] - darr_append_nz
+ * [x] - darr_cap
+ * [-] - darr_ensure_cap
+ * [x] - darr_ensure_i
+ * [x] - darr_foreach_i
+ * [x] - darr_foreach_p
+ * [x] - darr_free
+ * [x] - darr_insert
+ * [ ] - darr_insertz
+ * [x] - darr_insert_n
+ * [x] - darr_insert_nz
+ * [x] - darr_maxi
+ * [x] - darr_pop
+ * [x] - darr_push
+ * [ ] - darr_pushz
+ * [x] - darr_remove
+ * [x] - darr_remove_n
+ * [x] - darr_reset
+ * [x] - darr_setlen
+ */
+
+static void test_int(void)
+{
+ int z105[105] = {0};
+ int a1[] = {0, 1, 2, 3, 4};
+ int a2[] = {4, 3, 2, 1, 0};
+ int *da1 = NULL;
+ int *da2 = NULL;
+ int *dap;
+ uint i;
+
+ darr_ensure_i(da1, 0);
+ da1[0] = 0;
+ assert(darr_len(da1) == 1);
+ assert(darr_cap(da1) == 1);
+
+ *darr_ensure_i(da1, 1) = 1;
+ assert(darr_len(da1) == 2);
+ assert(darr_cap(da1) == 2);
+
+ darr_ensure_i(da1, 4);
+ darr_foreach_i (da1, i)
+ da1[i] = i;
+
+ assert(darr_len(da1) == 5);
+ /* minimum non-pow2 array size for long long and smaller */
+ assert(darr_cap(da1) == 8);
+ assert(!memcmp(da1, a1, sizeof(a1)));
+
+ /* reverse the numbers */
+ darr_foreach_p (da1, dap)
+ *dap = darr_end(da1) - dap - 1;
+ assert(!memcmp(da1, a2, sizeof(a2)));
+
+ darr_append_n(da1, 100);
+ darr_foreach_p (da1, dap)
+ *dap = darr_end(da1) - dap - 1;
+
+ darr_pop_n(da1, 100);
+ darr_append_nz(da1, 100);
+ assert(!memcmp(&da1[5], z105, _darr_esize(da1) * 100));
+
+ assert(darr_len(da1) == 105);
+ assert(darr_maxi(da1) == 127);
+ assert(darr_cap(da1) == 128);
+
+ darr_setlen(da1, 102);
+ assert(darr_len(da1) == 102);
+ assert(darr_maxi(da1) == 127);
+
+ int a3[] = { 0xdeadbeaf, 0x12345678 };
+
+ da1[0] = a3[0];
+ da1[101] = a3[1];
+ darr_remove_n(da1, 1, 100);
+ assert(darr_len(da1) == array_size(a3));
+ assert(!memcmp(da1, a3, sizeof(a3)));
+
+ da1[0] = a3[1];
+ da1[1] = a3[0];
+
+ darr_insert_n(da1, 1, 100);
+ assert(darr_len(da1) == 102);
+ assert(da1[0] == a3[1]);
+ assert(da1[101] == a3[0]);
+
+ darr_reset(da1);
+ assert(darr_len(da1) == 0);
+ assert(darr_maxi(da1) == 127);
+ assert(darr_cap(da1) == 128);
+
+ /* we touch the length field of the freed block here somehow */
+ darr_insert_n(da1, 100, 300);
+ assert(darr_len(da1) == 400);
+ assert(darr_cap(da1) == 512);
+
+ da1[400 - 1] = 0x0BAD;
+ *darr_insert(da1, 0) = 0xF00D;
+ assert(da1[0] == 0xF00D);
+ assert(da1[400] == 0x0BAD);
+ assert(darr_len(da1) == 401);
+ assert(darr_cap(da1) == 512);
+
+ darr_free(da1);
+ assert(da1 == NULL);
+ assert(darr_len(da1) == 0);
+ darr_setlen(da1, 0);
+ darr_reset(da1);
+ darr_free(da1);
+
+ *darr_append(da2) = 0;
+ *darr_append(da2) = 1;
+ darr_push(da2, 2);
+ darr_push(da2, 3);
+ darr_push(da2, 4);
+
+ assert(!memcmp(da2, a1, sizeof(a1)));
+
+ assert(darr_pop(da2) == 4);
+ assert(darr_pop(da2) == 3);
+ assert(darr_pop(da2) == 2);
+ assert(darr_len(da2) == 2);
+ assert(darr_pop(da2) == 1);
+ assert(darr_pop(da2) == 0);
+ assert(darr_len(da2) == 0);
+
+ darr_free(da2);
+}
+
+static void test_struct(void)
+{
+ /*
+	 * We would like to use different member sizes (with padding) here, but
+	 * then memcmp can't be used for the comparisons.
+ */
+ struct st {
+ long long a;
+ long long b;
+ };
+ struct st z102[102] = {{0, 0}};
+ struct st *da1 = NULL;
+ struct st *da2 = NULL;
+ struct st a1[] = {
+ {0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4},
+ };
+ uint i;
+
+ darr_ensure_i(da1, 0);
+ da1[0].a = 0;
+ da1[0].b = 0;
+ assert(darr_len(da1) == 1);
+ assert(darr_cap(da1) == 1);
+
+ darr_ensure_i(da1, 1)->a = 1;
+ darr_ensure_i(da1, 1)->b = 1;
+ assert(darr_len(da1) == 2);
+ assert(darr_cap(da1) == 2);
+
+ darr_ensure_i(da1, 4);
+ da1[2].a = 2;
+ da1[2].b = 2;
+
+ da1[3].a = 3;
+ da1[3].b = 3;
+
+ da1[4].a = 4;
+ da1[4].b = 4;
+
+ assert(darr_len(da1) == 5);
+ /* minimum non-pow2 array size for long long and smaller */
+ assert(darr_cap(da1) == 8);
+ assert(!memcmp(da1, a1, sizeof(a1)));
+
+ darr_append_n(da1, 100);
+
+ assert(darr_len(da1) == 105);
+ assert(darr_maxi(da1) == 127);
+ assert(darr_cap(da1) == 128);
+
+ darr_setlen(da1, 102);
+ assert(darr_len(da1) == 102);
+ assert(darr_maxi(da1) == 127);
+
+ struct st a2[] = {
+ {0xdeadbeaf, 0xdeadbeaf},
+ {0x12345678, 0x12345678},
+ };
+ da1[0] = a2[0];
+ da1[101] = a2[1];
+ darr_remove_n(da1, 1, 100);
+ assert(darr_len(da1) == array_size(a2));
+ assert(!memcmp(da1, a2, sizeof(a2)));
+
+ da1[0] = a2[1];
+ da1[1] = a2[0];
+
+ darr_insert_n(da1, 1, 100);
+ assert(darr_len(da1) == 102);
+ darr_foreach_i (da1, i) {
+ da1[i].a = i;
+ da1[i].b = i;
+ }
+ darr_remove_n(da1, 1, 100);
+ assert(darr_len(da1) == 2);
+ darr_insert_nz(da1, 1, 100);
+ assert(!memcmp(&da1[1], z102, 100 * sizeof(da1[0])));
+ /* assert(da1[0] == a2[1]); */
+ /* assert(da1[101] == a2[0]); */
+
+ darr_reset(da1);
+ assert(darr_len(da1) == 0);
+ assert(darr_maxi(da1) == 127);
+ assert(darr_cap(da1) == 128);
+
+ /* we touch the length field of the freed block here somehow */
+ darr_insert_n(da1, 100, 300);
+
+ assert(darr_len(da1) == 400);
+ assert(darr_cap(da1) == 512);
+
+ darr_free(da1);
+ assert(da1 == NULL);
+
+ assert(darr_len(da1) == 0);
+ darr_setlen(da1, 0);
+ darr_reset(da1);
+
+ darr_free(da1);
+
+ struct st i0 = {0, 0};
+ struct st i1 = {1, 1};
+ struct st i2 = {2, 2};
+ struct st i3 = {3, 3};
+ struct st i4 = {4, 4};
+
+ *darr_append(da2) = i0;
+ *darr_append(da2) = i1;
+ darr_push(da2, i2);
+ darr_push(da2, i3);
+ darr_push(da2, i4);
+
+ assert(!memcmp(da2, a1, sizeof(a1)));
+
+ struct st p0, p1, p2, p3, p4;
+
+ p4 = darr_pop(da2);
+ p3 = darr_pop(da2);
+ p2 = darr_pop(da2);
+ p1 = darr_pop(da2);
+ p0 = darr_pop(da2);
+ assert(darr_len(da2) == 0);
+ assert(p4.a == i4.a && p4.b == i4.b);
+ assert(p3.a == i3.a && p3.b == i3.b);
+ assert(p2.a == i2.a && p2.b == i2.b);
+ assert(p1.a == i1.a && p1.b == i1.b);
+ assert(p0.a == i0.a && p0.b == i0.b);
+
+ darr_free(da2);
+}
+
+int main(int argc, char **argv)
+{
+ test_int();
+ test_struct();
+}
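
For readers new to lib/darr.h, here is a condensed usage sketch assembled only from the calls the test above exercises; the darr_example() wrapper and the printf output are illustrative additions, not part of the test.

    #include <zebra.h>
    #include "darr.h"

    static void darr_example(void)
    {
            int *nums = NULL;  /* a dynamic array is just a typed pointer */
            int *p;
            uint i;

            darr_push(nums, 10);           /* grows the array as needed */
            darr_push(nums, 20);
            *darr_append(nums) = 30;       /* append returns a pointer to the new slot */

            darr_foreach_i (nums, i)       /* iterate by index ... */
                    printf("nums[%u] = %d\n", i, nums[i]);
            darr_foreach_p (nums, p)       /* ... or by element pointer */
                    *p += 1;

            assert(darr_len(nums) == 3);
            assert(darr_pop(nums) == 31);  /* pop returns the removed value */

            darr_free(nums);               /* frees storage and NULLs the pointer */
            assert(nums == NULL);
    }
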
diff --git a/tests/topotests/bgp_color_extcommunities/__init__.py b/tests/topotests/bgp_color_extcommunities/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_color_extcommunities/__init__.py
diff --git a/tests/topotests/bgp_color_extcommunities/r1/bgpd.conf b/tests/topotests/bgp_color_extcommunities/r1/bgpd.conf
new file mode 100644
index 0000000000..d4ca392b1a
--- /dev/null
+++ b/tests/topotests/bgp_color_extcommunities/r1/bgpd.conf
@@ -0,0 +1,17 @@
+!
+router bgp 65001
+ bgp router-id 192.168.1.1
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.1.2 remote-as external
+ address-family ipv4 unicast
+ network 10.10.10.10/24 route-map rmap
+ neighbor 192.168.1.2 route-map rmap out
+ neighbor 192.168.1.2 activate
+ exit-address-family
+!
+route-map rmap permit 10
+ set extcommunity color 1
+ set extcommunity rt 80:987
+ set extcommunity color 100 55555 200
+exit
diff --git a/tests/topotests/bgp_color_extcommunities/r1/zebra.conf b/tests/topotests/bgp_color_extcommunities/r1/zebra.conf
new file mode 100644
index 0000000000..42a830372f
--- /dev/null
+++ b/tests/topotests/bgp_color_extcommunities/r1/zebra.conf
@@ -0,0 +1,3 @@
+!
+int r1-eth0
+ ip address 192.168.1.1/24
diff --git a/tests/topotests/bgp_color_extcommunities/r2/bgpd.conf b/tests/topotests/bgp_color_extcommunities/r2/bgpd.conf
new file mode 100644
index 0000000000..2f83ada9d3
--- /dev/null
+++ b/tests/topotests/bgp_color_extcommunities/r2/bgpd.conf
@@ -0,0 +1,4 @@
+router bgp 65002
+ bgp router-id 192.168.1.2
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
diff --git a/tests/topotests/bgp_color_extcommunities/r2/zebra.conf b/tests/topotests/bgp_color_extcommunities/r2/zebra.conf
new file mode 100644
index 0000000000..cffe827363
--- /dev/null
+++ b/tests/topotests/bgp_color_extcommunities/r2/zebra.conf
@@ -0,0 +1,4 @@
+!
+int r2-eth0
+ ip address 192.168.1.2/24
+!
diff --git a/tests/topotests/bgp_color_extcommunities/test_bgp_color_extcommunities.py b/tests/topotests/bgp_color_extcommunities/test_bgp_color_extcommunities.py
new file mode 100644
index 0000000000..6d17cdb4d9
--- /dev/null
+++ b/tests/topotests/bgp_color_extcommunities/test_bgp_color_extcommunities.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+# Copyright 2022 6WIND S.A.
+# Copyright 2023 6WIND S.A.
+# François Dumontet <francois.dumontet@6wind.com>
+#
+
+
+"""
+test_bgp_color_extcommunities.py: Test the FRR BGP color extended
+community feature.
+"""
+
+import os
+import sys
+import json
+import functools
+from functools import partial
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+ logger.info("setup_module")
+
+ router_list = tgen.routers()
+
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_color_extended_communities():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+
+ def _bgp_converge():
+ output = json.loads(r1.vtysh_cmd("show bgp summary json"))
+ expected = {
+ "ipv4Unicast": {
+ "peers": {
+ "192.168.1.2": {
+ "pfxSnt": 1,
+ "state": "Established",
+ },
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+    assert result is None, "Failed announcing 10.10.10.0/24 to r2"
+
+ def _bgp_check_route(router, exists):
+ output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast 10.10.10.10 json"))
+ if exists:
+ expected = {
+ "prefix": "10.10.10.0/24",
+ "paths": [
+ {
+ "valid": True,
+ "extendedCommunity": {
+ "string": "RT:80:987 Color:100 Color:200 Color:55555"
+ },
+ }
+ ],
+ }
+ else:
+ expected = {}
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_route, r2, True)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+    assert (
+        result is None
+    ), "10.10.10.0/24 with the expected extended communities is not installed on r2, but SHOULD be"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_set_aspath_replace/r1/bgpd.conf b/tests/topotests/bgp_set_aspath_replace/r1/bgpd.conf
index 1e98f4e491..f586c1f99c 100644
--- a/tests/topotests/bgp_set_aspath_replace/r1/bgpd.conf
+++ b/tests/topotests/bgp_set_aspath_replace/r1/bgpd.conf
@@ -9,6 +9,7 @@ router bgp 65001
!
ip prefix-list p1 seq 5 permit 172.16.255.31/32
!
+bgp route-map delay-timer 1
route-map r2 permit 10
match ip address prefix-list p1
set as-path replace 65003
diff --git a/tests/topotests/bgp_set_aspath_replace/test_bgp_set_aspath_replace.py b/tests/topotests/bgp_set_aspath_replace/test_bgp_set_aspath_replace.py
index 463df2f2a6..0433c15e0a 100644
--- a/tests/topotests/bgp_set_aspath_replace/test_bgp_set_aspath_replace.py
+++ b/tests/topotests/bgp_set_aspath_replace/test_bgp_set_aspath_replace.py
@@ -24,6 +24,7 @@ sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
pytestmark = [pytest.mark.bgpd]
@@ -63,7 +64,7 @@ def teardown_module(mod):
tgen.stop_topology()
-def test_bgp_maximum_prefix_out():
+def test_bgp_set_aspath_replace_test1():
tgen = get_topogen()
if tgen.routers_have_failure():
@@ -85,6 +86,40 @@ def test_bgp_maximum_prefix_out():
assert result is None, "Failed overriding incoming AS-PATH with route-map"
+def test_bgp_set_aspath_replace_test2():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ logger.info("Configuring r1 to replace the matching AS with a configured ASN")
+ router = tgen.gears["r1"]
+ router.vtysh_cmd(
+ "configure terminal\nroute-map r2 permit 10\nset as-path replace 65003 65500\n",
+ isjson=False,
+ )
+ router.vtysh_cmd(
+ "configure terminal\nroute-map r2 permit 20\nset as-path replace any 65501\n",
+ isjson=False,
+ )
+
+ def _bgp_converge(router):
+ output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast json"))
+ expected = {
+ "routes": {
+ "172.16.255.31/32": [{"path": "65002 65500"}],
+ "172.16.255.32/32": [{"path": "65501 65501"}],
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge, router)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+
+ assert (
+ result is None
+ ), "Failed overriding incoming AS-PATH with route-map replace with configured ASN"
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_vpnv4_asbr/test_bgp_vpnv4_asbr.py b/tests/topotests/bgp_vpnv4_asbr/test_bgp_vpnv4_asbr.py
index 7b0dc1cff9..c47822614b 100644
--- a/tests/topotests/bgp_vpnv4_asbr/test_bgp_vpnv4_asbr.py
+++ b/tests/topotests/bgp_vpnv4_asbr/test_bgp_vpnv4_asbr.py
@@ -250,7 +250,8 @@ def check_ping(name, dest_addr, expect_connected):
tgen = get_topogen()
output = tgen.gears[name].run("ping {} -c 1 -w 1".format(dest_addr))
logger.info(output)
- assert match in output, "ping fail"
+ if match not in output:
+ return "ping fail"
match = ", {} packet loss".format("0%" if expect_connected else "100%")
logger.info("[+] check {} {} {}".format(name, dest_addr, match))
diff --git a/tests/topotests/multicast_pim_uplink_topo2/multicast_pim_uplink_topo2.json b/tests/topotests/multicast_pim_uplink_topo2/multicast_pim_uplink_topo2.json
new file mode 100644
index 0000000000..158e1135af
--- /dev/null
+++ b/tests/topotests/multicast_pim_uplink_topo2/multicast_pim_uplink_topo2.json
@@ -0,0 +1,288 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2-link1": {"ipv4": "auto", "pim": "enable"},
+ "r2-link2": {"ipv4": "auto", "pim": "enable"},
+ "r2-link3": {"ipv4": "auto", "pim": "enable"},
+ "r2-link4": {"ipv4": "auto", "pim": "enable"},
+ "r3-link1": {"ipv4": "auto", "pim": "enable"},
+ "r3-link2": {"ipv4": "auto", "pim": "enable"},
+ "r3-link3": {"ipv4": "auto", "pim": "enable"},
+ "r3-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4": {"ipv4": "auto", "pim": "enable"},
+ "r5": {"ipv4": "auto", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "i2": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {},
+ "r1-link2": {},
+ "r1-link3": {},
+ "r1-link4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1-link1": {},
+ "r1-link2": {},
+ "r1-link3": {},
+ "r1-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1-link1": {"ipv4": "auto", "pim": "enable"},
+ "r1-link2": {"ipv4": "auto", "pim": "enable"},
+ "r1-link3": {"ipv4": "auto", "pim": "enable"},
+ "r1-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4-link1": {"ipv4": "auto", "pim": "enable"},
+ "r4-link2": {"ipv4": "auto", "pim": "enable"},
+ "r4-link3": {"ipv4": "auto", "pim": "enable"},
+ "r4-link4": {"ipv4": "auto", "pim": "enable"},
+ "i3": {"ipv4": "auto", "pim": "enable"},
+ "i4": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {},
+ "r2-link2": {},
+ "r2-link3": {},
+ "r2-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r2-link1": {},
+ "r2-link2": {},
+ "r2-link3": {},
+ "r2-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1-link1": {"ipv4": "auto", "pim": "enable"},
+ "r1-link2": {"ipv4": "auto", "pim": "enable"},
+ "r1-link3": {"ipv4": "auto", "pim": "enable"},
+ "r1-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4-link1": {"ipv4": "auto", "pim": "enable"},
+ "r4-link2": {"ipv4": "auto", "pim": "enable"},
+ "r4-link3": {"ipv4": "auto", "pim": "enable"},
+ "r4-link4": {"ipv4": "auto", "pim": "enable"},
+ "i5": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2-link1": {"ipv4": "auto", "pim": "enable"},
+ "r2-link2": {"ipv4": "auto", "pim": "enable"},
+ "r2-link3": {"ipv4": "auto", "pim": "enable"},
+ "r2-link4": {"ipv4": "auto", "pim": "enable"},
+ "r3-link1": {"ipv4": "auto", "pim": "enable"},
+ "r3-link2": {"ipv4": "auto", "pim": "enable"},
+ "r3-link3": {"ipv4": "auto", "pim": "enable"},
+ "r3-link4": {"ipv4": "auto", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"},
+ "r5": {"ipv4": "auto", "pim": "enable"},
+ "i6": {"ipv4": "auto", "pim": "enable"},
+ "i7": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {}
+ }
+ },
+ "r1": {
+ "dest_link": {
+ "r4": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r5": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"},
+ "r4": {"ipv4": "auto", "pim": "enable"},
+ "i8": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "500",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r5": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "i1": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ },
+ "i2": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ },
+ "i3": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i4": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i5": {
+ "links": {
+ "r3": {"ipv4": "auto"}
+ }
+ },
+ "i6": {
+ "links": {
+ "r4": {"ipv4": "auto"}
+ }
+ },
+ "i7": {
+ "links": {
+ "r4": {"ipv4": "auto"}
+ }
+ },
+ "i8": {
+ "links": {
+ "r5": {"ipv4": "auto"}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/multicast_pim_uplink_topo2/test_multicast_pim_uplink_topo2.py b/tests/topotests/multicast_pim_uplink_topo2/test_multicast_pim_uplink_topo2.py
new file mode 100644
index 0000000000..eb3246b513
--- /dev/null
+++ b/tests/topotests/multicast_pim_uplink_topo2/test_multicast_pim_uplink_topo2.py
@@ -0,0 +1,1349 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2023 by VMware, Inc. ("VMware")
+#
+
+"""
+Following tests are covered to test multicast PIM SM:
+
+1. Verify that changing the RP address on DUT from static to BSR updates
+   IIF and OIF correctly
+2. Verify mroutes when the RPT and SPT paths are different
+3. Verify mroutes are updated with correct OIL and IIF after shut / no shut
+   of the upstream interface from DUT
+4. Verify mroutes are updated with correct OIL and IIF after shut / no shut
+   of the downstream interface from FHR
+
+
+"""
+
+import os
+import sys
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ reset_config_on_routers,
+ shutdown_bringup_interface,
+ required_linux_kernel_version,
+)
+from lib.pim import (
+ create_pim_config,
+ create_igmp_config,
+ verify_mroutes,
+ clear_pim_interface_traffic,
+ verify_upstream_iif,
+ clear_mroute,
+ verify_multicast_traffic,
+ verify_pim_rp_info,
+ verify_pim_interface_traffic,
+ McastTesterHelper,
+)
+from lib.topolog import logger
+from lib.topojson import build_config_from_json
+
+# Global variables
+GROUP_RANGE_1 = [
+ "225.1.1.1/32",
+ "225.1.1.2/32",
+ "225.1.1.3/32",
+ "225.1.1.4/32",
+ "225.1.1.5/32",
+]
+IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+GROUP_RANGE_2 = [
+ "226.1.1.1/32",
+ "226.1.1.2/32",
+ "226.1.1.3/32",
+ "226.1.1.4/32",
+ "226.1.1.5/32",
+]
+IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"]
+
+r1_r2_links = []
+r1_r3_links = []
+r2_r1_links = []
+r3_r1_links = []
+r2_r4_links = []
+r4_r2_links = []
+r4_r3_links = []
+
+pytestmark = [pytest.mark.pimd]
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.19")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ testdir = os.path.dirname(os.path.realpath(__file__))
+ json_file = "{}/multicast_pim_uplink_topo2.json".format(testdir)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, tgen.json_topo)
+
+ # Pre-requisite data
+ get_interfaces_names(topo)
+
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ app_helper.cleanup()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local APIs
+#
+#####################################################
+
+
+def get_interfaces_names(topo):
+ """
+    API to fetch interface names and build lists, which are used later
+    for verification
+
+ Parameters
+ ----------
+    * `topo` : input JSON data
+ """
+
+ for link in range(1, 5):
+
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(link)]["interface"]
+ r1_r2_links.append(intf)
+
+ intf = topo["routers"]["r1"]["links"]["r3-link{}".format(link)]["interface"]
+ r1_r3_links.append(intf)
+
+ intf = topo["routers"]["r2"]["links"]["r1-link{}".format(link)]["interface"]
+ r2_r1_links.append(intf)
+
+ intf = topo["routers"]["r3"]["links"]["r1-link{}".format(link)]["interface"]
+ r3_r1_links.append(intf)
+
+ intf = topo["routers"]["r2"]["links"]["r4-link{}".format(link)]["interface"]
+ r2_r4_links.append(intf)
+
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(link)]["interface"]
+ r4_r2_links.append(intf)
+
+ intf = topo["routers"]["r4"]["links"]["r3-link{}".format(link)]["interface"]
+ r4_r3_links.append(intf)
+
+
+def verify_state_incremented(state_before, state_after):
+ """
+    API to verify that interface traffic counters have incremented
+
+ Parameters
+ ----------
+ * `state_before` : State dictionary for any particular instance
+ * `state_after` : State dictionary for any particular instance
+ """
+
+ for router, state_data in state_before.items():
+ for state, value in state_data.items():
+ if state_before[router][state] > state_after[router][state]:
+ errormsg = (
+ "[DUT: %s]: state %s value has not"
+ " incremented, Initial value: %s, "
+ "Current value: %s [FAILED!!]"
+ % (
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is "
+ "incremented, Initial value: %s, Current value: %s"
+ " [PASSED!!]",
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+
+ return True
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_iif_oil_when_RP_address_changes_from_static_to_BSR_p1(request):
+ """
+    Verify that changing the RP address on DUT from static to BSR updates
+    IIF and OIF correctly
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Shutdown interfaces which are not required")
+ intf_r1_r4 = topo["routers"]["r1"]["links"]["r4"]["interface"]
+ intf_r1_r5 = topo["routers"]["r1"]["links"]["r5"]["interface"]
+ intf_r4_r1 = topo["routers"]["r4"]["links"]["r1"]["interface"]
+ intf_r5_r1 = topo["routers"]["r5"]["links"]["r1"]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r4, False)
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r5, False)
+ shutdown_bringup_interface(tgen, "r4", intf_r4_r1, False)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r1, False)
+
+ step("Enable IGMP on DUT and R4 interface")
+ intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r2", "r4"], [intf_r2_i3, intf_r4_i7]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Send IGMP joins from DUT and R4 for group range 225.1.1.1-5")
+ input_join = {
+ "i3": topo["routers"]["i3"]["links"]["r2"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP as R4 loopback interface for group range 225.1.1.1-5")
+
+ input_dict = {
+ "r4": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r4"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+    step("Send traffic from DUT and R4 for group range 225.1.1.1-5")
+
+ input_src = {
+ "i4": topo["routers"]["i4"]["links"]["r2"]["interface"],
+ "i6": topo["routers"]["i6"]["links"]["r4"]["interface"],
+ }
+
+ for src, src_intf in input_src.items():
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("(*,G) IIF and OIL updated on both the nodes")
+
+ step(
+ "(S,G) IIF updated towards shortest path to source on both the nodes "
+ ", verify using 'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ source_i4 = topo["routers"]["i4"]["links"]["r2"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {
+ "dut": "r2",
+ "src_address": "*",
+ "iif": r2_r4_links,
+ "oil": topo["routers"]["r2"]["links"]["i3"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif": "lo",
+ "oil": r4_r2_links + [topo["routers"]["r4"]["links"]["i7"]["interface"]],
+ },
+ {
+ "dut": "r2",
+ "src_address": source_i6,
+ "iif": r2_r4_links,
+ "oil": topo["routers"]["r2"]["links"]["i3"]["interface"],
+ },
+ {
+ "dut": "r2",
+ "src_address": source_i4,
+ "iif": topo["routers"]["r2"]["links"]["i4"]["interface"],
+ "oil": r2_r4_links + [topo["routers"]["r2"]["links"]["i3"]["interface"]],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r4"]["links"]["i6"]["interface"],
+ "oil": r4_r2_links + [topo["routers"]["r4"]["links"]["i7"]["interface"]],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i4,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "OIL is updated and traffic is received for all the groups on both "
+ "the nodes , verify using 'show ip multicast'; 'show ip multicast json'"
+ )
+
+ intf_r4_i6 = topo["routers"]["r4"]["links"]["i6"]["interface"]
+ intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
+ input_traffic = {
+ "r2": {"traffic_sent": [intf_r2_i3]},
+ "r4": {"traffic_received": [intf_r4_i6]},
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Change RP address for group range 225.1.1.1-5 to the R5 loopback interface"
+    )
+
+ input_dict = {
+ "r4": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r4"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ "delete": True,
+ }
+ ]
+ }
+ },
+ "r5": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r5"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1,
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Send one more traffic stream from R4 to group range 225.1.1.1-5")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("RP for group range 225.1.1.1-5 is now changed to R5 on DUT")
+
+ rp_addr = topo["routers"]["r5"]["links"]["lo"]["ipv4"].split("/")[0]
+
+ result = verify_pim_rp_info(
+ tgen, topo, "r5", GROUP_RANGE_1, "lo", rp_addr, "Static"
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "No impact seen on multicast data traffic for the group range, "
+        "verify using 'show ip multicast json' and 'show ip mroute json'"
+    )
+
+ for data in input_dict_star_sg:
+ if data["src_address"] != "*":
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "Stop traffic and do clear mroute on all the nodes (make "
+        "sure (S,G) entries time out)"
+    )
+
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+
+ step("Verify (S,G) got cleared after stop of traffic and 'clear mroute'")
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed " "Mroutes are still present \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_mroute_when_RPT_and_SPT_path_is_different_p1(request):
+ """
+    Verify mroutes when the RPT and SPT paths are different
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+    step("Shut links from R3 to R1 and no shut the R1 to R4 link to make a star topology")
+ for i in range(1, 5):
+ intf = topo["routers"]["r3"]["links"]["r1-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r3", intf, False)
+
+ intf = topo["routers"]["r1"]["links"]["r3-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+ intf_r4_r5 = topo["routers"]["r4"]["links"]["r5"]["interface"]
+ intf_r5_r4 = topo["routers"]["r5"]["links"]["r4"]["interface"]
+ intf_r1_r4 = topo["routers"]["r1"]["links"]["r4"]["interface"]
+ intf_r1_r5 = topo["routers"]["r1"]["links"]["r5"]["interface"]
+ intf_r4_r1 = topo["routers"]["r4"]["links"]["r1"]["interface"]
+ intf_r5_r1 = topo["routers"]["r5"]["links"]["r1"]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf_r4_r5, False)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r4, False)
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r4, True)
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r5, True)
+ shutdown_bringup_interface(tgen, "r4", intf_r4_r1, True)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r1, True)
+
+    step("Done in base config: Connected one more router R5 before R1 (R5-R1)")
+
+ step("Enable IGMP on R5 and R4 interface")
+ intf_r5_i8 = topo["routers"]["r5"]["links"]["i8"]["interface"]
+ intf_r4_i7 = topo["routers"]["r4"]["links"]["i7"]["interface"]
+ for dut, intf in zip(["r4", "r5"], [intf_r4_i7, intf_r5_i8]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Send IGMP joins from R5 and R4 for group range 226.1.1.1-5")
+ input_join = {
+ "i8": topo["routers"]["i8"]["links"]["r5"]["interface"],
+ "i7": topo["routers"]["i7"]["links"]["r4"]["interface"],
+ }
+
+ for recvr, recvr_intf in input_join.items():
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_2, join_intf=recvr_intf)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure RP as R2 for group range 226.1.1.1-5")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_2,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure EBGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R3 for group range 226.1.1.1-5")
+
+ result = app_helper.run_traffic("i5", IGMP_JOIN_RANGE_2, "r3")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("(*,G) IIF updated for 226.1.1.1-5 towards R2 and RP " "type is static on DUT")
+
+    step("(S,G) on R5 is updated for all the groups")
+
+ source_i5 = topo["routers"]["i5"]["links"]["r3"]["ipv4"].split("/")[0]
+ input_dict_star_sg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["r5"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": "*",
+ "iif": r4_r2_links + [intf_r4_r1],
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i5,
+ "iif": topo["routers"]["r1"]["links"]["r4"]["interface"],
+ "oil": topo["routers"]["r1"]["links"]["r5"]["interface"],
+ },
+ {
+ "dut": "r4",
+ "src_address": source_i5,
+ "iif": r4_r2_links + r4_r3_links,
+ "oil": topo["routers"]["r4"]["links"]["i7"]["interface"],
+ },
+ ]
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "(S,G) on R1 is updated with IIF toward R4 and OIL toward R5, "
+        "RP path OIL is removed"
+    )
+
+ source_i5 = topo["routers"]["i5"]["links"]["r3"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {"dut": "r1", "src_address": source_i5, "iif": r1_r2_links, "oil": r1_r2_links},
+ {"dut": "r4", "src_address": source_i5, "iif": r4_r2_links, "oil": r4_r2_links},
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed " "OIF and IIF are same \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Shut and no Shut of mroute OIL selected links from R1 towards R2 and R4")
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, True)
+
+ step(
+ "After shut and no shut of link verify mroute got populated as per "
+ "verification step 8"
+ )
+
+ for data in input_dict_star_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_2,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed " "OIF and IIF are same \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_updated_with_correct_oil_iif_after_shut_noshut_upstream_interface_p0(
+ request,
+):
+ """
+    Verify mroutes are updated with correct OIL and IIF after shut / no shut
+    of the upstream interface from DUT
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+    # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Shutdown interfaces which are not required")
+ intf_r1_r5 = topo["routers"]["r1"]["links"]["r5"]["interface"]
+ intf_r5_r1 = topo["routers"]["r5"]["links"]["r1"]["interface"]
+ intf_r4_r5 = topo["routers"]["r4"]["links"]["r5"]["interface"]
+ intf_r5_r4 = topo["routers"]["r5"]["links"]["r4"]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r5, False)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r1, False)
+ shutdown_bringup_interface(tgen, "r4", intf_r4_r5, False)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r4, False)
+
+ step("Enable IGMP on DUT receiver interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ for dut, intf in zip(["r1", "r1"], [intf_r1_i1, intf_r1_i2]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Verify pim interface traffic before sending join/traffic")
+
+ intf_traffic = topo["routers"]["r4"]["links"]["r3-link1"]["interface"]
+ state_dict = {"r4": {intf_traffic: ["registerStopRx"]}}
+ state_before = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(
+        state_before, dict
+    ), "Testcase {} : Failed \n state_before is not a dictionary \n Error: {}".format(
+        tc_name, state_before
+    )
+
+ step("Send IGMP joins from DUT for group range 225.1.1.1-5")
+ result = app_helper.run_join("i1", IGMP_JOIN_RANGE_1, "r1")
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Configure RP as R2 and R3 interface (225.1.1.1-3 on R2 and "
+ "225.1.1.4-5 on R3)"
+ )
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1[0:3],
+ }
+ ]
+ }
+ },
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r3"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1[3:5],
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure BGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step(
+        "(*,G) IIF is updated to any one DUT-R2 interface for groups 225.1.1.1-3 "
+        "and to any one DUT-R3 interface for groups 225.1.1.4-5"
+    )
+
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif_r1_r2": r1_r2_links,
+ "iif_r1_r3": r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[0:3],
+ data["iif_r1_r2"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif_r1_r2"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[0:3],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[3:5],
+ data["iif_r1_r3"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif_r1_r3"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[3:5],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(S,G) IIF updated towards shortest path to source verify using "
+ "'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r1"]["links"]["r4"]["interface"],
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(*,G) and (S,G) OIL is updated and traffic is received for all "
+ "the groups verify using 'show ip multicast' and"
+ "'show ip multicast count json'"
+ )
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r1_r4 = topo["routers"]["r1"]["links"]["r4"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1], "traffic_received": [intf_r1_r4]}
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Register packets sent/received count is incrementing verify "
+ "using 'show ip pim interface traffic json'"
+ )
+
+ state_after = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(
+        state_after, dict
+    ), "Testcase {} : Failed \n state_after is not a dictionary \n Error: {}".format(
+        tc_name, state_after
+    )
+
+ result = verify_state_incremented(state_before, state_after)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut interface connected from R4 to DUT")
+ intf_r1_r4 = topo["routers"]["r1"]["links"]["r4"]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r4, False)
+
+    step(
+        "After shut of the R4 to DUT interface verify (S,G) has taken a "
+        "different path (via any R2 or R3 link), uptime got reset "
+        "and OIL is updated accordingly. No impact seen on (*,G) routes, "
+        "verify uptime for (*,G) using 'show ip mroute json' and "
+        "'show ip pim state'"
+    )
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[0:3],
+ data["iif_r1_r2"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif_r1_r2"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[0:3],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[3:5],
+ data["iif_r1_r3"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif_r1_r3"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[3:5],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut the interface connected from DUT to R2 one by one")
+
+ for i in range(1, 5):
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(i)]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf, False)
+
+    step(
+        "After shut of all the DUT to R2 interfaces, (S,G) is created via R3, "
+        "(S,G) uptime gets reset and OIL is updated accordingly. No impact "
+        "seen on (*,G) routes, verify using 'show ip mroute json'"
+    )
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[0:3],
+ data["iif_r1_r3"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif_r1_r3"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[0:3],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[3:5],
+ data["iif_r1_r3"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif_r1_r3"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[3:5],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_mroutes_updated_with_correct_oil_iif_after_shut_noshut_downstream_interface_p0(
+ request,
+):
+ """
+    Verify mroutes are updated with correct OIL and IIF after shut / no shut
+    of the downstream interface from FHR
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Shutdown interfaces which are not required")
+ intf_r1_r5 = topo["routers"]["r1"]["links"]["r5"]["interface"]
+ intf_r5_r1 = topo["routers"]["r5"]["links"]["r1"]["interface"]
+ intf_r4_r5 = topo["routers"]["r4"]["links"]["r5"]["interface"]
+ intf_r5_r4 = topo["routers"]["r5"]["links"]["r4"]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r5, False)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r1, False)
+ shutdown_bringup_interface(tgen, "r4", intf_r4_r5, False)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r4, False)
+
+ step("Enable IGMP on DUT receiver interface")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ for dut, intf in zip(["r1", "r1"], [intf_r1_i1, intf_r1_i2]):
+ input_dict = {dut: {"igmp": {"interfaces": {intf: {"igmp": {"version": "2"}}}}}}
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Send IGMP joins from DUT for group range 225.1.1.1-5")
+ result = app_helper.run_join("i1", IGMP_JOIN_RANGE_1, "r1")
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Configure RP as R2 and R3 interface (225.1.1.1-3 on R2 and "
+ "225.1.1.4-5 on R3)"
+ )
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1[0:3],
+ }
+ ]
+ }
+ },
+ "r3": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r3"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_1[3:5],
+ }
+ ]
+ }
+ },
+ }
+
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Done in base config: " "Configure BGP peering between all the nodes")
+
+ step("Done in base config: " "Enable PIM on all the interfaces of all the nodes")
+
+ step("Send traffic from R4 for group range 225.1.1.1-5")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(*,G) IIF is updated DUT-R2 any one interface for groups 225.1.1.1-3 "
+ "and DUT to R3 any one interface for groups 225.1.1.1-3"
+ )
+
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif_r1_r2": r1_r2_links,
+ "iif_r1_r3": r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[0:3],
+ data["iif_r1_r2"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif_r1_r2"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[0:3],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[3:5],
+ data["iif_r1_r3"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif_r1_r3"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[3:5],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(S,G) IIF updated towards shortest path to source verify using "
+ "'show ip mroute' and 'show ip mroute json'"
+ )
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": topo["routers"]["r1"]["links"]["r4"]["interface"],
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "(*,G) and (S,G) OIL is updated and traffic is received for all "
+ "the groups verify using 'show ip multicast' and"
+ "'show ip multicast count json'"
+ )
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r1_r4 = topo["routers"]["r1"]["links"]["r4"]["interface"]
+ input_traffic = {
+ "r1": {"traffic_sent": [intf_r1_i1], "traffic_received": [intf_r1_r4]}
+ }
+ result = verify_multicast_traffic(tgen, input_traffic)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Shut interface connected from R4 to DUT")
+ intf_r4_r1 = topo["routers"]["r4"]["links"]["r1"]["interface"]
+ shutdown_bringup_interface(tgen, "r4", intf_r4_r1, False)
+
+ step(
+ "After shut of R4 to DUT interface verify (S,G) has taken "
+ "different path ( via R2 or R3 any link) , uptime got resetted "
+ "and OIL is updated accordingly No impact seen on (*,G) routes , "
+ "verify uptime for (*,G) using 'show ip mroute json' and "
+ "'show ip pim state'"
+ )
+
+ for data in input_dict_starg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[0:3],
+ data["iif_r1_r2"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif_r1_r2"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[0:3],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[3:5],
+ data["iif_r1_r3"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif_r1_r3"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1[3:5],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_links + r1_r3_links,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ for data in input_dict_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/multicast_pim_uplink_topo3/multicast_pim_uplink_topo3.json b/tests/topotests/multicast_pim_uplink_topo3/multicast_pim_uplink_topo3.json
new file mode 100644
index 0000000000..dc9e1ac49b
--- /dev/null
+++ b/tests/topotests/multicast_pim_uplink_topo3/multicast_pim_uplink_topo3.json
@@ -0,0 +1,295 @@
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2-link1": {"ipv4": "auto", "pim": "enable"},
+ "r2-link2": {"ipv4": "auto", "pim": "enable"},
+ "r2-link3": {"ipv4": "auto", "pim": "enable"},
+ "r2-link4": {"ipv4": "auto", "pim": "enable"},
+ "r3-link1": {"ipv4": "auto", "pim": "enable"},
+ "r3-link2": {"ipv4": "auto", "pim": "enable"},
+ "r3-link3": {"ipv4": "auto", "pim": "enable"},
+ "r3-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4": {"ipv4": "auto", "pim": "enable"},
+ "r5": {"ipv4": "auto", "pim": "enable"},
+ "i1": {"ipv4": "auto", "pim": "enable"},
+ "i2": {"ipv4": "auto", "pim": "enable"},
+ "i9": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {},
+ "r1-link2": {},
+ "r1-link3": {},
+ "r1-link4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1-link1": {},
+ "r1-link2": {},
+ "r1-link3": {},
+ "r1-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1-link1": {"ipv4": "auto", "pim": "enable"},
+ "r1-link2": {"ipv4": "auto", "pim": "enable"},
+ "r1-link3": {"ipv4": "auto", "pim": "enable"},
+ "r1-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4-link1": {"ipv4": "auto", "pim": "enable"},
+ "r4-link2": {"ipv4": "auto", "pim": "enable"},
+ "r4-link3": {"ipv4": "auto", "pim": "enable"},
+ "r4-link4": {"ipv4": "auto", "pim": "enable"},
+ "i3": {"ipv4": "auto", "pim": "enable"},
+ "i4": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {},
+ "r2-link2": {},
+ "r2-link3": {},
+ "r2-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r2-link1": {},
+ "r2-link2": {},
+ "r2-link3": {},
+ "r2-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1-link1": {"ipv4": "auto", "pim": "enable"},
+ "r1-link2": {"ipv4": "auto", "pim": "enable"},
+ "r1-link3": {"ipv4": "auto", "pim": "enable"},
+ "r1-link4": {"ipv4": "auto", "pim": "enable"},
+ "r4-link1": {"ipv4": "auto", "pim": "enable"},
+ "r4-link2": {"ipv4": "auto", "pim": "enable"},
+ "r4-link3": {"ipv4": "auto", "pim": "enable"},
+ "r4-link4": {"ipv4": "auto", "pim": "enable"},
+ "i5": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r2-link1": {"ipv4": "auto", "pim": "enable"},
+ "r2-link2": {"ipv4": "auto", "pim": "enable"},
+ "r2-link3": {"ipv4": "auto", "pim": "enable"},
+ "r2-link4": {"ipv4": "auto", "pim": "enable"},
+ "r3-link1": {"ipv4": "auto", "pim": "enable"},
+ "r3-link2": {"ipv4": "auto", "pim": "enable"},
+ "r3-link3": {"ipv4": "auto", "pim": "enable"},
+ "r3-link4": {"ipv4": "auto", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"},
+ "r5": {"ipv4": "auto", "pim": "enable"},
+ "i6": {"ipv4": "auto", "pim": "enable"},
+ "i7": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r4-link1": {},
+ "r4-link2": {},
+ "r4-link3": {},
+ "r4-link4": {}
+ }
+ },
+ "r1": {
+ "dest_link": {
+ "r4": {}
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r5": {
+ "links": {
+ "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"},
+ "r1": {"ipv4": "auto", "pim": "enable"},
+ "r4": {"ipv4": "auto", "pim": "enable"},
+ "i8": {"ipv4": "auto", "pim": "enable"}
+ },
+ "bgp": {
+ "local_as": "500",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"}
+ ],
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r5": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r5": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "i1": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ },
+ "i2": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ },
+ "i3": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i4": {
+ "links": {
+ "r2": {"ipv4": "auto"}
+ }
+ },
+ "i5": {
+ "links": {
+ "r3": {"ipv4": "auto"}
+ }
+ },
+ "i6": {
+ "links": {
+ "r4": {"ipv4": "auto"}
+ }
+ },
+ "i7": {
+ "links": {
+ "r4": {"ipv4": "auto"}
+ }
+ },
+ "i8": {
+ "links": {
+ "r5": {"ipv4": "auto"}
+ }
+ },
+ "i9": {
+ "links": {
+ "r1": {"ipv4": "auto"}
+ }
+ }
+
+ }
+}
diff --git a/tests/topotests/multicast_pim_uplink_topo3/test_multicast_pim_uplink_topo3.py b/tests/topotests/multicast_pim_uplink_topo3/test_multicast_pim_uplink_topo3.py
new file mode 100644
index 0000000000..19a8cd0c17
--- /dev/null
+++ b/tests/topotests/multicast_pim_uplink_topo3/test_multicast_pim_uplink_topo3.py
@@ -0,0 +1,916 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2023 by VMware, Inc. ("VMware")
+#
+
+"""
+Following tests are covered to test multicast pim sm:
+
+1. TC:1 Verify static IGMP group populated when static "ip igmp join <grp>" is configured
+2. TC:2 Verify mroute and upstream populated with correct OIL/IIF with static igmp join
+3. TC:3 Verify local IGMP join not allowed for "224.0.0.0/24" and non multicast group
+4. TC:4 Verify static IGMP group removed from DUT while removing "ip igmp join" CLI
+5. TC:5 Verify static IGMP groups after removing and adding IGMP config
+"""
+
+import os
+import sys
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ step,
+ addKernelRoute,
+ reset_config_on_routers,
+ shutdown_bringup_interface,
+ required_linux_kernel_version,
+)
+from lib.pim import (
+ create_pim_config,
+ create_igmp_config,
+ verify_igmp_groups,
+ verify_mroutes,
+ clear_pim_interface_traffic,
+ verify_upstream_iif,
+ clear_mroute,
+ verify_pim_rp_info,
+ verify_local_igmp_groups,
+ McastTesterHelper,
+)
+from lib.topolog import logger
+from lib.topojson import build_config_from_json
+
+# Global variables
+TOPOLOGY = """
+
+ i9 i3-+-i4 i6-+-i7
+ | | |
+ i1--- R1-------R2----------R4------R5---i8
+ | | |
+ i2 R3-------------------+
+ +
+ |
+ i5
+
+ Description:
+    i1, i2, i3, i4, i5, i6, i7, i8 - FRR running iperf to send IGMP
+ join and traffic
+ R1 - DUT (LHR/FHR)
+ R2 - RP
+ R3 - Transit
+ R4 - (LHR/FHR)
+ R5 - Transit
+"""
+# Global variables
+RP_RANGE1 = "226.0.0.1/32"
+RP_RANGE2 = "226.0.0.2/32"
+RP_RANGE3 = "226.0.0.3/32"
+RP_RANGE4 = "226.0.0.4/32"
+RP_RANGE5 = "226.0.0.5/32"
+RP_RANGE6 = "232.0.0.1/32"
+RP_RANGE7 = "232.0.0.2/32"
+RP_RANGE8 = "232.0.0.3/32"
+RP_RANGE9 = "232.0.0.4/32"
+RP_RANGE10 = "232.0.0.5/32"
+
+GROUP_RANGE = "224.0.0.0/4"
+IGMP_GROUP = "225.1.1.1/32"
+IGMP_JOIN = "225.1.1.1"
+GROUP_RANGE_1 = [
+ "225.1.1.1/32",
+ "225.1.1.2/32",
+ "225.1.1.3/32",
+ "225.1.1.4/32",
+ "225.1.1.5/32",
+]
+IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
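+# IGMP_JOIN_RANGE_2 deliberately mixes reserved 224.0.0.0/24 addresses and
+# non-multicast 192.0.0.x addresses; it is the negative input for the
+# local-join testcase (TC:3) below.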
+IGMP_JOIN_RANGE_2 = ["224.0.0.1", "224.0.0.2", "224.0.0.3", "192.0.0.4", "192.0.0.5"]
+IGMP_JOIN_RANGE_3 = [
+ "226.0.0.1",
+ "226.0.0.2",
+ "226.0.0.3",
+ "226.0.0.4",
+ "226.0.0.5",
+ "232.0.0.1",
+ "232.0.0.2",
+ "232.0.0.3",
+ "232.0.0.4",
+ "232.0.0.5",
+]
+GROUP_RANGE_3 = [
+ "226.0.0.1/32",
+ "226.0.0.2/32",
+ "226.0.0.3/32",
+ "226.0.0.4/32",
+ "226.0.0.5/32",
+ "232.0.0.1/32",
+ "232.0.0.2/32",
+ "232.0.0.3/32",
+ "232.0.0.4/32",
+ "232.0.0.5/32",
+]
+
+r1_r2_links = []
+r1_r3_links = []
+r2_r1_links = []
+r2_r4_links = []
+r3_r1_links = []
+r3_r4_links = []
+r4_r2_links = []
+r4_r3_links = []
+
+pytestmark = [pytest.mark.pimd]
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.19")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ testdir = os.path.dirname(os.path.realpath(__file__))
+ json_file = "{}/multicast_pim_uplink_topo3.json".format(testdir)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+    # ... and here the topology is initialized from the JSON file.
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, tgen.json_topo)
+
+ # Pre-requisite data
+ get_interfaces_names(topo)
+
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ app_helper.cleanup()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local APIs
+#
+#####################################################
+
+
+def get_interfaces_names(topo):
+ """
+ API to fetch interfaces names and create list, which further would be used
+ for verification
+
+ Parameters
+ ----------
+    * `topo`: input JSON data
+ """
+
+ for link in range(1, 5):
+
+ intf = topo["routers"]["r1"]["links"]["r2-link{}".format(link)]["interface"]
+ r1_r2_links.append(intf)
+
+ intf = topo["routers"]["r1"]["links"]["r3-link{}".format(link)]["interface"]
+ r1_r3_links.append(intf)
+
+ intf = topo["routers"]["r2"]["links"]["r1-link{}".format(link)]["interface"]
+ r2_r1_links.append(intf)
+
+ intf = topo["routers"]["r3"]["links"]["r1-link{}".format(link)]["interface"]
+ r3_r1_links.append(intf)
+
+ intf = topo["routers"]["r2"]["links"]["r4-link{}".format(link)]["interface"]
+ r2_r4_links.append(intf)
+
+ intf = topo["routers"]["r4"]["links"]["r2-link{}".format(link)]["interface"]
+ r4_r2_links.append(intf)
+
+ intf = topo["routers"]["r4"]["links"]["r3-link{}".format(link)]["interface"]
+ r4_r3_links.append(intf)
+
+
+def shutdown_interfaces(tgen):
+ """
+    API to shut down interfaces which are not
+    used in the testcases of this TDS
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+
+ """
+ logger.info("shutting down extra interfaces")
+ intf_r1_r4 = topo["routers"]["r1"]["links"]["r4"]["interface"]
+ intf_r1_r5 = topo["routers"]["r1"]["links"]["r5"]["interface"]
+ intf_r4_r1 = topo["routers"]["r4"]["links"]["r1"]["interface"]
+ intf_r5_r1 = topo["routers"]["r5"]["links"]["r1"]["interface"]
+ intf_r4_r5 = topo["routers"]["r4"]["links"]["r5"]["interface"]
+ intf_r5_r4 = topo["routers"]["r5"]["links"]["r4"]["interface"]
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r4, False)
+ shutdown_bringup_interface(tgen, "r1", intf_r1_r5, False)
+ shutdown_bringup_interface(tgen, "r4", intf_r4_r1, False)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r1, False)
+ shutdown_bringup_interface(tgen, "r4", intf_r4_r5, False)
+ shutdown_bringup_interface(tgen, "r5", intf_r5_r4, False)
+
+
+def config_to_send_igmp_join_and_traffic(
+ tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False
+):
+ """
+ API to do pre-configuration to send IGMP join and multicast
+ traffic
+
+ parameters:
+ -----------
+ * `tgen`: topogen object
+ * `topo`: input json data
+ * `tc_name`: caller test case name
+ * `iperf`: router running iperf
+    * `iperf_intf`: interface name of the router running iperf
+ * `GROUP_RANGE`: group range
+ * `join`: IGMP join, default False
+ * `traffic`: multicast traffic, default False
+ """
+
+ if join:
+        # Add route to kernel
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ if traffic:
+        # Add route to kernel
+ result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ router_list = tgen.routers()
+ for router in router_list.keys():
+ if router == iperf:
+ continue
+
+ rnode = router_list[router]
+ rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
+
+ return True
+
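+# Illustrative usage of the helper above (a sketch only; the testcases in
+# this file drive joins and traffic through McastTesterHelper instead):
+#
+#   result = config_to_send_igmp_join_and_traffic(
+#       tgen, topo, tc_name, "i1",
+#       topo["routers"]["i1"]["links"]["r1"]["interface"],
+#       GROUP_RANGE_1, join=True
+#   )
+#   assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)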
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_ip_igmp_local_joins_p0(request):
+ """
+ TC_1 Verify static IGMP group populated when static
+ "ip igmp join <grp>" in configured
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("shut down not required interfaces")
+ shutdown_interfaces(tgen)
+
+ step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
+ step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
+ step("Enable the IGMP on R11 interfac of R1 and configure local igmp groups")
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {"igmp": {"version": "2", "join": IGMP_JOIN_RANGE_1}},
+ intf_r1_i2: {"igmp": {"version": "2", "join": IGMP_JOIN_RANGE_1}},
+ }
+ }
+ }
+ }
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP for (225.1.1.1-5) as R2")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify static igmp join using show ip igmp join")
+ dut = "r1"
+ interfaces = [intf_r1_i1, intf_r1_i2]
+ for interface in interfaces:
+ result = verify_local_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify igmp groups using show ip igmp groups")
+ interfaces = [intf_r1_i1, intf_r1_i2]
+ for interface in interfaces:
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_mroute_with_igmp_local_joins_p0(request):
+ """
+ TC_2 Verify mroute and upstream populated with correct OIL/IIF with
+ static igmp join
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("shut down not required interfaces")
+ shutdown_interfaces(tgen)
+
+ step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
+ step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
+ step("Enable the IGMP on R11 interfac of R1 and configure local igmp groups")
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {"igmp": {"version": "2", "join": IGMP_JOIN_RANGE_1}},
+ intf_r1_i2: {"igmp": {"version": "2", "join": IGMP_JOIN_RANGE_1}},
+ }
+ }
+ }
+ }
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP for (225.1.1.1-5) as R2")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify static igmp join using show ip igmp join")
+ dut = "r1"
+ interfaces = [intf_r1_i1, intf_r1_i2]
+ for interface in interfaces:
+ result = verify_local_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify igmp groups using show ip igmp groups")
+ interfaces = [intf_r1_i1, intf_r1_i2]
+ for interface in interfaces:
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify RP-info populated in DUT")
+ dut = "r1"
+ rp_address = topo["routers"]["r2"]["links"]["lo"]["ipv4"].split("/")[0]
+ SOURCE = "Static"
+ oif = r1_r2_links
+ result = verify_pim_rp_info(tgen, topo, dut, GROUP_RANGE_1, oif, rp_address, SOURCE)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Send traffic from R4 to all the groups ( 225.1.1.1 to 225.1.1.5)")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+
+ r1_r2_r3 = r1_r2_links + r1_r3_links
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_links,
+ "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ },
+ ]
+
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ },
+ ]
+
+ step("Verify mroutes and iff upstream for local igmp groups")
+ for input_dict in [input_dict_starg, input_dict_sg]:
+ for data in input_dict:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify mroutes not created with local interface ip ")
+
+ input_dict_local_sg = [
+ {
+ "dut": "r1",
+ "src_address": intf_r1_i1,
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ },
+ {
+ "dut": "r1",
+ "src_address": intf_r1_i2,
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i2"]["interface"],
+ },
+ ]
+
+ for data in input_dict_local_sg:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed Error: {}"
+ "sg created with local interface ip".format(tc_name, result)
+ )
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ expected=False,
+ )
+ assert result is not True, (
+ "Testcase {} : Failed Error: {}"
+ "upstream created with local interface ip".format(tc_name, result)
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_igmp_local_join_with_reserved_address_p0(request):
+ """
+ TC_3 Verify local IGMP join not allowed for "224.0.0.0/24"
+    and non-multicast group
+ """
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("shut down not required interfaces")
+ shutdown_interfaces(tgen)
+
+ step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
+ step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
+ step("Enable the IGMP on R11 interface of R1 and configure local igmp groups")
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {"igmp": {"version": "2", "join": IGMP_JOIN_RANGE_2}}
+ }
+ }
+ }
+ }
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("verify static igmp join using show ip igmp join")
+ dut = "r1"
+ interface = intf_r1_i1
+ result = verify_local_igmp_groups(
+ tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Error: {}" "IGMP join still present".format(
+ tc_name, result
+ )
+
+ step("verify igmp groups using show ip igmp groups")
+ interface = intf_r1_i1
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Error: {}" "IGMP groups still present".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_remove_add_igmp_local_joins_p1(request):
+ """
+ TC_4 Verify static IGMP group removed from DUT while
+ removing "ip igmp join" CLI
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Creating configuration from JSON
+ app_helper.stop_all_hosts()
+ clear_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_pim_interface_traffic(tgen, topo)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("shut down not required interfaces")
+ shutdown_interfaces(tgen)
+
+ step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
+ step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
+ step("Enable the IGMP on R11 interfac of R1 and configure local igmp groups")
+
+ intf_r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
+ intf_r1_i2 = topo["routers"]["r1"]["links"]["i2"]["interface"]
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {"igmp": {"version": "2", "join": IGMP_JOIN_RANGE_1}}
+ }
+ }
+ }
+ }
+
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("Configure static RP for (225.1.1.1-5) as R2")
+
+ input_dict = {
+ "r2": {
+ "pim": {
+ "rp": [
+ {
+ "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE,
+ }
+ ]
+ }
+ }
+ }
+ result = create_pim_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify static igmp join using show ip igmp join")
+ dut = "r1"
+ interface = intf_r1_i1
+ result = verify_local_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("verify igmp groups using show ip igmp groups")
+
+ interface = intf_r1_i1
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("verify RP-info populated in DUT")
+ dut = "r1"
+ rp_address = topo["routers"]["r2"]["links"]["lo"]["ipv4"].split("/")[0]
+ SOURCE = "Static"
+ oif = r1_r2_links
+ result = verify_pim_rp_info(tgen, topo, dut, GROUP_RANGE_1, oif, rp_address, SOURCE)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Send traffic from R4 to all the groups ( 225.1.1.1 to 225.1.1.5)")
+
+ result = app_helper.run_traffic("i6", IGMP_JOIN_RANGE_1, "r4")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ source_i6 = topo["routers"]["i6"]["links"]["r4"]["ipv4"].split("/")[0]
+
+ logger.info("waiting 30 sec for SPT switchover")
+
+ r1_r2_r3 = r1_r2_links + r1_r3_links
+ input_dict_starg = [
+ {
+ "dut": "r1",
+ "src_address": "*",
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ input_dict_sg = [
+ {
+ "dut": "r1",
+ "src_address": source_i6,
+ "iif": r1_r2_r3,
+ "oil": topo["routers"]["r1"]["links"]["i1"]["interface"],
+ }
+ ]
+
+ step("Verify mroutes and iff upstream for local igmp groups")
+ for input_dict in [input_dict_starg, input_dict_sg]:
+ for data in input_dict:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove IGMP join from DUT")
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {
+ "igmp": {
+ "join": IGMP_JOIN_RANGE_1,
+ "delete_attr": True,
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("verify static igmp join removed using show ip igmp join")
+ dut = "r1"
+ interface = intf_r1_i1
+ result = verify_local_igmp_groups(
+ tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Error: {}" "IGMP join still present".format(
+ tc_name, result
+ )
+
+ step("verify igmp groups removed using show ip igmp groups")
+ interface = intf_r1_i1
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Error: {}" "IGMP groups still present".format(
+ tc_name, result
+ )
+
+ step("Verify mroutes and iff upstream for local igmp groups")
+ for input_dict in [input_dict_starg, input_dict_sg]:
+ for data in input_dict:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error: {}" "mroutes still present".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen,
+ data["dut"],
+ data["iif"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error: {}" "mroutes still present".format(
+ tc_name, result
+ )
+
+ step("Add IGMP join on DUT again")
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ intf_r1_i1: {
+ "igmp": {
+ "join": IGMP_JOIN_RANGE_1,
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("verify static igmp join using show ip igmp join")
+ dut = "r1"
+ interface = intf_r1_i1
+ result = verify_local_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("verify igmp groups using show ip igmp groups")
+
+ interface = intf_r1_i1
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify mroutes and iff upstream for local igmp groups")
+ for input_dict in [input_dict_starg, input_dict_sg]:
+ for data in input_dict:
+ result = verify_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN_RANGE_1,
+ data["iif"],
+ data["oil"],
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_upstream_iif(
+ tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ripng_route_map/__init__.py b/tests/topotests/ripng_route_map/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/ripng_route_map/__init__.py
diff --git a/tests/topotests/ripng_route_map/r1/frr.conf b/tests/topotests/ripng_route_map/r1/frr.conf
new file mode 100644
index 0000000000..6d0fb46d98
--- /dev/null
+++ b/tests/topotests/ripng_route_map/r1/frr.conf
@@ -0,0 +1,21 @@
+!
+int r1-eth0
+ ipv6 address 2001:db8::1/64
+!
+router ripng
+ network 2001:db8::/64
+ route-map rmap in r1-eth0
+ timers basic 5 15 10
+exit
+!
+ipv6 access-list r2 seq 5 permit 2001:db8:2::/64
+!
+ipv6 prefix-list r3 seq 5 permit 2001:db8:3::/64
+!
+route-map rmap permit 10
+ match ipv6 address r2
+ set metric 12
+route-map rmap permit 20
+ match ipv6 address prefix-list r3
+ set metric 13
+exit
diff --git a/tests/topotests/ripng_route_map/r2/frr.conf b/tests/topotests/ripng_route_map/r2/frr.conf
new file mode 100644
index 0000000000..fb9d6702c4
--- /dev/null
+++ b/tests/topotests/ripng_route_map/r2/frr.conf
@@ -0,0 +1,14 @@
+!
+int lo
+ ipv6 address 2001:db8:2::1/64
+!
+int r2-eth0
+ ipv6 address 2001:db8::2/64
+!
+router ripng
+ redistribute connected
+ network 2001:db8::/64
+ network 2001:db8:2::1/64
+ timers basic 5 15 10
+exit
+
diff --git a/tests/topotests/ripng_route_map/r3/frr.conf b/tests/topotests/ripng_route_map/r3/frr.conf
new file mode 100644
index 0000000000..a6d07789c3
--- /dev/null
+++ b/tests/topotests/ripng_route_map/r3/frr.conf
@@ -0,0 +1,14 @@
+!
+int lo
+ ipv6 address 2001:db8:3::1/64
+!
+int r3-eth0
+ ipv6 address 2001:db8::3/64
+!
+router ripng
+ redistribute connected
+ network 2001:db8::/64
+ network 2001:db8:3::1/64
+ timers basic 5 15 10
+exit
+
diff --git a/tests/topotests/ripng_route_map/test_ripng_route_map.py b/tests/topotests/ripng_route_map/test_ripng_route_map.py
new file mode 100644
index 0000000000..e1cc88e9b6
--- /dev/null
+++ b/tests/topotests/ripng_route_map/test_ripng_route_map.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+"""
+Test if route-map for ripng basic functionality works.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.ripngd]
+
+
+def setup_module(mod):
+ topodef = {"s1": ("r1", "r2", "r3")}
+ tgen = Topogen(topodef, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for _, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_ripng_route_map():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+
+ def _show_routes(nh_num):
+ output = json.loads(r1.vtysh_cmd("show ipv6 route ripng json"))
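+        # r1's inbound route-map sets metric 12 (access-list match on r2's
+        # prefix) and 13 (prefix-list match on r3's prefix); with the RIPng
+        # hop increment the installed metrics are expected to be 13 and 14.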
+ expected = {
+ "2001:db8:2::/64": [
+ {
+ "metric": 13,
+ }
+ ],
+ "2001:db8:3::/64": [
+ {
+ "metric": 14,
+ }
+ ],
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_show_routes, 2)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Got routes, but metric is not set as expected"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/yang/frr-bgp-route-map.yang b/yang/frr-bgp-route-map.yang
index b557cabb22..4b6619739d 100644
--- a/yang/frr-bgp-route-map.yang
+++ b/yang/frr-bgp-route-map.yang
@@ -214,6 +214,12 @@ module frr-bgp-route-map {
"Set BGP extended community attribute";
}
+identity set-extcommunity-color {
+ base frr-route-map:rmap-set-type;
+ description
+    "Set BGP extended community color attribute";
+ }
+
identity set-ipv4-nexthop {
base frr-route-map:rmap-set-type;
description
@@ -511,6 +517,22 @@ module frr-bgp-route-map {
}
}
+ typedef color-list {
+ type string {
+ pattern '((429496729[0-5]|42949672[0-8][0-9]|'
+ + '4294967[0-1][0-9]{2}|429496[0-6][0-9]{3}|'
+ + '42949[0-5][0-9]{4}|4294[0-8][0-9]{5}|'
+ + '429[0-3][0-9]{6}|42[0-8][0-9]{7}|'
+ + '4[0-1][0-9]{8}|[1-3][0-9]{9}|'
+ + '[1-9][0-9]{0,8})(\s*))+';
+ }
+ description
+      "The color-list type represents a set of color values (1..4294967295)
+       separated by white spaces";
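+    // Example values: "100" or "100 200 4294967295", i.e. decimal color
+    // values separated by white space.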
+ reference
+ "RFC 9012 - The BGP Tunnel Encapsulation Attribute";
+ }
+
augment "/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:match-condition/frr-route-map:rmap-match-condition/frr-route-map:match-condition" {
case local-preference {
when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:match-condition/frr-route-map:condition, 'frr-bgp-route-map:match-local-preference')";
@@ -852,6 +874,19 @@ module frr-bgp-route-map {
}
}
+ case extcommunity-color {
+ when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:set-extcommunity-color')";
+ description
+ "Value of the ext-community";
+ leaf extcommunity-color {
+ type color-list;
+ description
+ "Set BGP ext-community color attribute with a list of colors";
+ reference
+ "RFC9012";
+ }
+ }
+
case ipv4-address {
when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:ipv4-vpn-address')";
description
diff --git a/zebra/zebra_opaque.c b/zebra/zebra_opaque.c
index 9503c74697..90533955a4 100644
--- a/zebra/zebra_opaque.c
+++ b/zebra/zebra_opaque.c
@@ -546,7 +546,6 @@ static void opq_send_notifications(const struct opq_msg_reg *reg,
client->session_id, msg)) {
/* Error - need to free the message */
stream_free(msg);
- msg = NULL;
}
}
}
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 357f112821..431c6b0500 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -3978,6 +3978,10 @@ void rib_delnode(struct route_node *rn, struct route_entry *re)
if (IS_ZEBRA_DEBUG_RIB)
rnode_debug(rn, re->vrf_id, "rn %p, re %p, removing",
(void *)rn, (void *)re);
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ route_entry_dump(&rn->p, NULL, re);
+
SET_FLAG(re->status, ROUTE_ENTRY_REMOVED);
afi = (rn->p.family == AF_INET)
@@ -4123,8 +4127,8 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
zclient_dump_route_flags(re->flags, flags_buf,
sizeof(flags_buf)),
_dump_re_status(re, status_buf, sizeof(status_buf)));
- zlog_debug("%s: nexthop_num == %u, nexthop_active_num == %u", straddr,
- nexthop_group_nexthop_num(&(re->nhe->nhg)),
+ zlog_debug("%s: tag == %u, nexthop_num == %u, nexthop_active_num == %u",
+ straddr, re->tag, nexthop_group_nexthop_num(&(re->nhe->nhg)),
nexthop_group_active_nexthop_num(&(re->nhe->nhg)));
/* Dump nexthops */
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index babd93ab20..132862e690 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -194,7 +194,7 @@ static int l3vni_rmac_nh_list_cmp(void *p1, void *p2)
const struct ipaddr *vtep_ip1 = p1;
const struct ipaddr *vtep_ip2 = p2;
- return !ipaddr_cmp(vtep_ip1, vtep_ip2);
+ return ipaddr_cmp(vtep_ip1, vtep_ip2);
}
static void l3vni_rmac_nh_free(struct ipaddr *vtep_ip)
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 85e1edeca0..00a78140e3 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -582,30 +582,27 @@ static void zserv_client_free(struct zserv *client)
/* Close file descriptor. */
if (client->sock) {
- unsigned long nroutes;
- unsigned long nnhgs;
+ unsigned long nroutes = 0;
+ unsigned long nnhgs = 0;
close(client->sock);
- /* If this is a synchronous BGP Zebra client for label/table
- * manager, then ignore it. It's not GR-aware, and causes GR to
- * be skipped for the session_id == 0 (asynchronous).
- */
- if (client->proto == ZEBRA_ROUTE_BGP && client->session_id == 1)
- return;
-
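+		/* Synchronous sessions (e.g. the label/table manager) get no
+		 * RIB/NHG cleanup here; only regular asynchronous clients do.
+		 */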
if (DYNAMIC_CLIENT_GR_DISABLED(client)) {
- zebra_mpls_client_cleanup_vrf_label(client->proto);
+ if (!client->synchronous) {
+ zebra_mpls_client_cleanup_vrf_label(
+ client->proto);
- nroutes = rib_score_proto(client->proto,
- client->instance);
+ nroutes = rib_score_proto(client->proto,
+ client->instance);
+ }
zlog_notice(
"client %d disconnected %lu %s routes removed from the rib",
client->sock, nroutes,
zebra_route_string(client->proto));
/* Not worrying about instance for now */
- nnhgs = zebra_nhg_score_proto(client->proto);
+ if (!client->synchronous)
+ nnhgs = zebra_nhg_score_proto(client->proto);
zlog_notice(
"client %d disconnected %lu %s nhgs removed from the rib",
client->sock, nnhgs,