374 files changed, 16255 insertions, 5960 deletions
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 8983b39eb3..895e8ad0dc 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -50,8 +50,8 @@ A clear and concise description of what the bug is.  Put "x" in "[ ]" if you already tried following:  --> -[ ] Did you check if this is a duplicate issue? -[ ] Did you test it on the latest FRRouting/frr master branch? +- [ ] Did you check if this is a duplicate issue? +- [ ] Did you test it on the latest FRRouting/frr master branch?  **To Reproduce** diff --git a/bfdd/bfd.c b/bfdd/bfd.c index 455dd9fe85..d52eeeddba 100644 --- a/bfdd/bfd.c +++ b/bfdd/bfd.c @@ -769,7 +769,7 @@ static void _bfd_session_update(struct bfd_session *bs,  	 * Apply profile last: it also calls `bfd_set_shutdown`.  	 *  	 * There is no problem calling `shutdown` twice if the value doesn't -	 * change or if it is overriden by peer specific configuration. +	 * change or if it is overridden by peer specific configuration.  	 */  	if (bpc->bpc_has_profile)  		bfd_profile_apply(bpc->bpc_profile, bs); diff --git a/bfdd/bfd.h b/bfdd/bfd.h index 7ab5ef13b4..00cc431e10 100644 --- a/bfdd/bfd.h +++ b/bfdd/bfd.h @@ -724,7 +724,7 @@ void bfd_profile_free(struct bfd_profile *bp);  /**   * Apply a profile configuration to an existing BFD session. The non default - * values will not be overriden. + * values will not be overridden.   *   * NOTE: if the profile doesn't exist yet, then the profile will be applied   * once it begins to exist. diff --git a/bfdd/ptm_adapter.c b/bfdd/ptm_adapter.c index 0e5f701fc2..f6259b9c3b 100644 --- a/bfdd/ptm_adapter.c +++ b/bfdd/ptm_adapter.c @@ -678,7 +678,8 @@ static void bfdd_sessions_enable_interface(struct interface *ifp)  		/* If Interface matches vrfname, then bypass iface check */  		if (vrf_is_backend_netns() || strcmp(ifp->name, vrf->name)) {  			/* Interface name mismatch. */ -			if (strcmp(ifp->name, bs->key.ifname)) +			if (bs->key.ifname[0] && +			    strcmp(ifp->name, bs->key.ifname))  				continue;  		} diff --git a/bgpd/bgp_aspath.c b/bgpd/bgp_aspath.c index dd27c9f6a1..880e15fadb 100644 --- a/bgpd/bgp_aspath.c +++ b/bgpd/bgp_aspath.c @@ -544,7 +544,7 @@ static void aspath_make_str_count(struct aspath *as, bool make_json)  	seg = as->segments; -/* ASN takes 5 to 10 chars plus seperator, see below. +/* ASN takes 5 to 10 chars plus separator, see below.   * If there is one differing segment type, we need an additional   * 2 chars for segment delimiters, and the final '\0'.   * Hopefully this is large enough to avoid hitting the realloc @@ -560,17 +560,17 @@ static void aspath_make_str_count(struct aspath *as, bool make_json)  	while (seg) {  		int i; -		char seperator; +		char separator; -		/* Check AS type validity. Set seperator for segment */ +		/* Check AS type validity. Set separator for segment */  		switch (seg->type) {  		case AS_SET:  		case AS_CONFED_SET: -			seperator = ','; +			separator = ',';  			break;  		case AS_SEQUENCE:  		case AS_CONFED_SEQUENCE: -			seperator = ' '; +			separator = ' ';  			break;  		default:  			XFREE(MTYPE_AS_STR, str_buf); @@ -584,7 +584,7 @@ static void aspath_make_str_count(struct aspath *as, bool make_json)  /* We might need to increase str_buf, particularly if path has   * differing segments types, our initial guesstimate above will - * have been wrong. Need 10 chars for ASN, a seperator each and + * have been wrong. 
Need 10 chars for ASN, a separator each and   * potentially two segment delimiters, plus a space between each   * segment and trailing zero.   * @@ -607,7 +607,7 @@ static void aspath_make_str_count(struct aspath *as, bool make_json)  		if (make_json)  			jseg_list = json_object_new_array(); -		/* write out the ASNs, with their seperators, bar the last one*/ +		/* write out the ASNs, with their separators, bar the last one*/  		for (i = 0; i < seg->length; i++) {  			if (make_json)  				json_object_array_add( @@ -619,7 +619,7 @@ static void aspath_make_str_count(struct aspath *as, bool make_json)  			if (i < (seg->length - 1))  				len += snprintf(str_buf + len, str_size - len, -						"%c", seperator); +						"%c", separator);  		}  		if (make_json) { @@ -1258,6 +1258,28 @@ struct aspath *aspath_replace_specific_asn(struct aspath *aspath,  	return new;  } +/* Replace all ASNs with our own ASN */ +struct aspath *aspath_replace_all_asn(struct aspath *aspath, as_t our_asn) +{ +	struct aspath *new; +	struct assegment *seg; + +	new = aspath_dup(aspath); +	seg = new->segments; + +	while (seg) { +		int i; + +		for (i = 0; i < seg->length; i++) +			seg->as[i] = our_asn; + +		seg = seg->next; +	} + +	aspath_str_update(new, false); +	return new; +} +  /* Replace all private ASNs with our own ASN */  struct aspath *aspath_replace_private_asns(struct aspath *aspath, as_t asn,  					   as_t peer_asn) @@ -1917,7 +1939,7 @@ static const char *aspath_gettoken(const char *buf, enum as_token *token,  {  	const char *p = buf; -	/* Skip seperators (space for sequences, ',' for sets). */ +	/* Skip separators (space for sequences, ',' for sets). */  	while (isspace((unsigned char)*p) || *p == ',')  		p++; diff --git a/bgpd/bgp_aspath.h b/bgpd/bgp_aspath.h index 4b16818167..912db7b254 100644 --- a/bgpd/bgp_aspath.h +++ b/bgpd/bgp_aspath.h @@ -112,6 +112,8 @@ extern bool aspath_single_asn_check(struct aspath *, as_t asn);  extern struct aspath *aspath_replace_specific_asn(struct aspath *aspath,  						  as_t target_asn,  						  as_t our_asn); +extern struct aspath *aspath_replace_all_asn(struct aspath *aspath, +					     as_t our_asn);  extern struct aspath *aspath_replace_private_asns(struct aspath *aspath,  						  as_t asn, as_t peer_asn);  extern struct aspath *aspath_remove_private_asns(struct aspath *aspath, diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c index 2f246e61d8..f45362f811 100644 --- a/bgpd/bgp_attr.c +++ b/bgpd/bgp_attr.c @@ -847,7 +847,7 @@ struct attr *bgp_attr_intern(struct attr *attr)  	struct lcommunity *lcomm = NULL;  	struct community *comm = NULL; -	/* Intern referenced strucutre. */ +	/* Intern referenced structure. */  	if (attr->aspath) {  		if (!attr->aspath->refcnt)  			attr->aspath = aspath_intern(attr->aspath); @@ -986,6 +986,10 @@ struct attr *bgp_attr_aggregate_intern(  	attr.origin = origin;  	attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_ORIGIN); +	/* MED */ +	attr.med = 0; +	attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC); +  	/* AS path attribute. 
*/  	if (aspath)  		attr.aspath = aspath_intern(aspath); @@ -1008,18 +1012,13 @@ struct attr *bgp_attr_aggregate_intern(  		}  		bgp_attr_set_community(&attr, community); -		attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);  	} -	if (ecommunity) { +	if (ecommunity)  		bgp_attr_set_ecommunity(&attr, ecommunity); -		attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES); -	} -	if (lcommunity) { +	if (lcommunity)  		bgp_attr_set_lcommunity(&attr, lcommunity); -		attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES); -	}  	if (bgp_in_graceful_shutdown(bgp))  		bgp_attr_add_gshut_community(&attr); @@ -1096,22 +1095,18 @@ void bgp_attr_unintern_sub(struct attr *attr)  	comm = bgp_attr_get_community(attr);  	community_unintern(&comm); -	UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES));  	bgp_attr_set_community(attr, NULL);  	ecomm = bgp_attr_get_ecommunity(attr);  	ecommunity_unintern(&ecomm); -	UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES));  	bgp_attr_set_ecommunity(attr, NULL);  	ipv6_ecomm = bgp_attr_get_ipv6_ecommunity(attr);  	ecommunity_unintern(&ipv6_ecomm); -	UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_IPV6_EXT_COMMUNITIES));  	bgp_attr_set_ipv6_ecommunity(attr, NULL);  	lcomm = bgp_attr_get_lcommunity(attr);  	lcommunity_unintern(&lcomm); -	UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES));  	bgp_attr_set_lcommunity(attr, NULL);  	cluster = bgp_attr_get_cluster(attr); @@ -1243,7 +1238,7 @@ void bgp_attr_flush(struct attr *attr)   * are partial/optional and hence where the error likely was not   * introduced by the sending neighbour.   */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_malformed(struct bgp_attr_parser_args *args, uint8_t subcode,  		   bgp_size_t length)  { @@ -1450,7 +1445,8 @@ static bool bgp_attr_flag_invalid(struct bgp_attr_parser_args *args)  }  /* Get origin attribute of the update message. */ -static bgp_attr_parse_ret_t bgp_attr_origin(struct bgp_attr_parser_args *args) +static enum bgp_attr_parse_ret +bgp_attr_origin(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer;  	struct attr *const attr = args->attr; @@ -1520,8 +1516,8 @@ static int bgp_attr_aspath(struct bgp_attr_parser_args *args)  	return BGP_ATTR_PARSE_PROCEED;  } -static bgp_attr_parse_ret_t bgp_attr_aspath_check(struct peer *const peer, -						  struct attr *const attr) +static enum bgp_attr_parse_ret bgp_attr_aspath_check(struct peer *const peer, +						     struct attr *const attr)  {  	/* These checks were part of bgp_attr_aspath, but with  	 * as4 we should to check aspath things when @@ -1603,8 +1599,8 @@ static int bgp_attr_as4_path(struct bgp_attr_parser_args *args,  /*   * Check that the nexthop attribute is valid.   */ -bgp_attr_parse_ret_t -bgp_attr_nexthop_valid(struct peer *peer, struct attr *attr) +enum bgp_attr_parse_ret bgp_attr_nexthop_valid(struct peer *peer, +					       struct attr *attr)  {  	in_addr_t nexthop_h; @@ -1633,7 +1629,8 @@ bgp_attr_nexthop_valid(struct peer *peer, struct attr *attr)  }  /* Nexthop attribute. */ -static bgp_attr_parse_ret_t bgp_attr_nexthop(struct bgp_attr_parser_args *args) +static enum bgp_attr_parse_ret +bgp_attr_nexthop(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer;  	struct attr *const attr = args->attr; @@ -1655,7 +1652,7 @@ static bgp_attr_parse_ret_t bgp_attr_nexthop(struct bgp_attr_parser_args *args)  }  /* MED atrribute. 
*/ -static bgp_attr_parse_ret_t bgp_attr_med(struct bgp_attr_parser_args *args) +static enum bgp_attr_parse_ret bgp_attr_med(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer;  	struct attr *const attr = args->attr; @@ -1678,7 +1675,7 @@ static bgp_attr_parse_ret_t bgp_attr_med(struct bgp_attr_parser_args *args)  }  /* Local preference attribute. */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_local_pref(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer; @@ -1790,7 +1787,7 @@ static int bgp_attr_aggregator(struct bgp_attr_parser_args *args)  }  /* New Aggregator attribute */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_as4_aggregator(struct bgp_attr_parser_args *args,  			as_t *as4_aggregator_as,  			struct in_addr *as4_aggregator_addr) @@ -1834,7 +1831,7 @@ bgp_attr_as4_aggregator(struct bgp_attr_parser_args *args,  /* Munge Aggregator and New-Aggregator, AS_PATH and NEW_AS_PATH.   */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_munge_as4_attrs(struct peer *const peer, struct attr *const attr,  			 struct aspath *as4_path, as_t as4_aggregator,  			 struct in_addr *as4_aggregator_addr) @@ -1936,7 +1933,7 @@ bgp_attr_munge_as4_attrs(struct peer *const peer, struct attr *const attr,  }  /* Community attribute. */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_community(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer; @@ -1963,13 +1960,11 @@ bgp_attr_community(struct bgp_attr_parser_args *args)  		return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,  					  args->total); -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES); -  	return BGP_ATTR_PARSE_PROCEED;  }  /* Originator ID attribute. */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_originator_id(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer; @@ -1997,7 +1992,7 @@ bgp_attr_originator_id(struct bgp_attr_parser_args *args)  }  /* Cluster list attribute. */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_cluster_list(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer; @@ -2268,7 +2263,7 @@ int bgp_mp_unreach_parse(struct bgp_attr_parser_args *args,  }  /* Large Community attribute. */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_large_community(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer; @@ -2294,13 +2289,11 @@ bgp_attr_large_community(struct bgp_attr_parser_args *args)  		return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,  					  args->total); -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES); -  	return BGP_ATTR_PARSE_PROCEED;  }  /* Extended Community attribute. */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_ext_communities(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer; @@ -2332,8 +2325,6 @@ bgp_attr_ext_communities(struct bgp_attr_parser_args *args)  		return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,  					  args->total); -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES); -  	/* Extract DF election preference and  mobility sequence number */  	attr->df_pref = bgp_attr_df_pref_from_ec(attr, &attr->df_alg); @@ -2376,7 +2367,7 @@ bgp_attr_ext_communities(struct bgp_attr_parser_args *args)  }  /* IPv6 Extended Community attribute. 
*/ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_ipv6_ext_communities(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer; @@ -2403,8 +2394,6 @@ bgp_attr_ipv6_ext_communities(struct bgp_attr_parser_args *args)  		return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,  					  args->total); -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_IPV6_EXT_COMMUNITIES); -  	return BGP_ATTR_PARSE_PROCEED;  } @@ -2541,7 +2530,7 @@ static int bgp_attr_encap(uint8_t type, struct peer *peer, /* IN */  /* SRv6 Service Data Sub-Sub-TLV attribute   * draft-ietf-bess-srv6-services-07   */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_srv6_service_data(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer; @@ -2622,7 +2611,7 @@ bgp_attr_srv6_service_data(struct bgp_attr_parser_args *args)  /* SRv6 Service Sub-TLV attribute   * draft-ietf-bess-srv6-services-07   */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_srv6_service(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer; @@ -2631,7 +2620,7 @@ bgp_attr_srv6_service(struct bgp_attr_parser_args *args)  	uint8_t type, sid_flags;  	uint16_t length, endpoint_behavior;  	size_t headersz = sizeof(type) + sizeof(length); -	bgp_attr_parse_ret_t err; +	enum bgp_attr_parse_ret err;  	char buf[BUFSIZ];  	if (STREAM_READABLE(peer->curr) < headersz) { @@ -2717,8 +2706,9 @@ bgp_attr_srv6_service(struct bgp_attr_parser_args *args)   * Read an individual SID value returning how much data we have read   * Returns 0 if there was an error that needs to be passed up the stack   */ -static bgp_attr_parse_ret_t bgp_attr_psid_sub(uint8_t type, uint16_t length, -					      struct bgp_attr_parser_args *args) +static enum bgp_attr_parse_ret +bgp_attr_psid_sub(uint8_t type, uint16_t length, +		  struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer;  	struct attr *const attr = args->attr; @@ -2925,11 +2915,11 @@ static bgp_attr_parse_ret_t bgp_attr_psid_sub(uint8_t type, uint16_t length,  /* Prefix SID attribute   * draft-ietf-idr-bgp-prefix-sid-05   */ -bgp_attr_parse_ret_t bgp_attr_prefix_sid(struct bgp_attr_parser_args *args) +enum bgp_attr_parse_ret bgp_attr_prefix_sid(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer;  	struct attr *const attr = args->attr; -	bgp_attr_parse_ret_t ret; +	enum bgp_attr_parse_ret ret;  	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID); @@ -2988,7 +2978,7 @@ bgp_attr_parse_ret_t bgp_attr_prefix_sid(struct bgp_attr_parser_args *args)  /* PMSI tunnel attribute (RFC 6514)   * Basic validation checks done here.   */ -static bgp_attr_parse_ret_t +static enum bgp_attr_parse_ret  bgp_attr_pmsi_tunnel(struct bgp_attr_parser_args *args)  {  	struct peer *const peer = args->peer; @@ -3036,7 +3026,8 @@ bgp_attr_pmsi_tunnel(struct bgp_attr_parser_args *args)  }  /* BGP unknown attribute treatment. */ -static bgp_attr_parse_ret_t bgp_attr_unknown(struct bgp_attr_parser_args *args) +static enum bgp_attr_parse_ret +bgp_attr_unknown(struct bgp_attr_parser_args *args)  {  	bgp_size_t total = args->total;  	struct transit *transit; @@ -3141,11 +3132,12 @@ static int bgp_attr_check(struct peer *peer, struct attr *attr)  /* Read attribute of update packet.  This function is called from     bgp_update_receive() in bgp_packet.c.  
*/ -bgp_attr_parse_ret_t bgp_attr_parse(struct peer *peer, struct attr *attr, -				    bgp_size_t size, struct bgp_nlri *mp_update, -				    struct bgp_nlri *mp_withdraw) +enum bgp_attr_parse_ret bgp_attr_parse(struct peer *peer, struct attr *attr, +				       bgp_size_t size, +				       struct bgp_nlri *mp_update, +				       struct bgp_nlri *mp_withdraw)  { -	bgp_attr_parse_ret_t ret; +	enum bgp_attr_parse_ret ret;  	uint8_t flag = 0;  	uint8_t type = 0;  	bgp_size_t length; @@ -3604,8 +3596,8 @@ size_t bgp_packet_mpattr_start(struct stream *s, struct peer *peer, afi_t afi,  			       struct attr *attr)  {  	size_t sizep; -	iana_afi_t pkt_afi; -	iana_safi_t pkt_safi; +	iana_afi_t pkt_afi = IANA_AFI_IPV4; +	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;  	afi_t nh_afi;  	/* Set extended bit always to encode the attribute length as 2 bytes */ @@ -4185,9 +4177,14 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer,  	if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SEND_EXT_COMMUNITY)  	    && (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) {  		struct ecommunity *ecomm = bgp_attr_get_ecommunity(attr); - -		if (peer->sort == BGP_PEER_IBGP -		    || peer->sort == BGP_PEER_CONFED) { +		bool transparent = CHECK_FLAG(peer->af_flags[afi][safi], +					      PEER_FLAG_RSERVER_CLIENT) && +				   from && +				   CHECK_FLAG(from->af_flags[afi][safi], +					      PEER_FLAG_RSERVER_CLIENT); + +		if (peer->sort == BGP_PEER_IBGP || +		    peer->sort == BGP_PEER_CONFED || transparent) {  			if (ecomm->size * 8 > 255) {  				stream_putc(s,  					    BGP_ATTR_FLAG_OPTIONAL @@ -4407,8 +4404,8 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer,  size_t bgp_packet_mpunreach_start(struct stream *s, afi_t afi, safi_t safi)  {  	unsigned long attrlen_pnt; -	iana_afi_t pkt_afi; -	iana_safi_t pkt_safi; +	iana_afi_t pkt_afi = IANA_AFI_IPV4; +	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;  	/* Set extended bit always to encode the attribute length as 2 bytes */  	stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_EXTLEN); diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h index 1f199da161..ac5734ede6 100644 --- a/bgpd/bgp_attr.h +++ b/bgpd/bgp_attr.h @@ -370,25 +370,25 @@ struct transit {  		 ? bgp_attr_get_cluster((attr))->length                        \  		 : 0) -typedef enum { +enum bgp_attr_parse_ret {  	BGP_ATTR_PARSE_PROCEED = 0,  	BGP_ATTR_PARSE_ERROR = -1,  	BGP_ATTR_PARSE_WITHDRAW = -2,  	/* only used internally, send notify + convert to BGP_ATTR_PARSE_ERROR -	   */ +	 */  	BGP_ATTR_PARSE_ERROR_NOTIFYPLS = -3,  	BGP_ATTR_PARSE_EOR = -4, -} bgp_attr_parse_ret_t; +};  struct bpacket_attr_vec_arr;  /* Prototypes. 
*/  extern void bgp_attr_init(void);  extern void bgp_attr_finish(void); -extern bgp_attr_parse_ret_t bgp_attr_parse(struct peer *, struct attr *, -					   bgp_size_t, struct bgp_nlri *, -					   struct bgp_nlri *); +extern enum bgp_attr_parse_ret bgp_attr_parse(struct peer *, struct attr *, +					      bgp_size_t, struct bgp_nlri *, +					      struct bgp_nlri *);  extern struct attr *bgp_attr_intern(struct attr *attr);  extern void bgp_attr_unintern_sub(struct attr *);  extern void bgp_attr_unintern(struct attr **); @@ -432,7 +432,7 @@ extern int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,  			      struct bgp_nlri *);  extern int bgp_mp_unreach_parse(struct bgp_attr_parser_args *args,  				struct bgp_nlri *); -extern bgp_attr_parse_ret_t +extern enum bgp_attr_parse_ret  bgp_attr_prefix_sid(struct bgp_attr_parser_args *args);  extern struct bgp_attr_encap_subtlv * @@ -471,8 +471,8 @@ extern void bgp_packet_mpunreach_prefix(  	bool addpath_capable, uint32_t addpath_tx_id, struct attr *attr);  extern void bgp_packet_mpunreach_end(struct stream *s, size_t attrlen_pnt); -extern bgp_attr_parse_ret_t bgp_attr_nexthop_valid(struct peer *peer, -						   struct attr *attr); +extern enum bgp_attr_parse_ret bgp_attr_nexthop_valid(struct peer *peer, +						      struct attr *attr);  static inline int bgp_rmap_nhop_changed(uint32_t out_rmap_flags,  					uint32_t in_rmap_flags) @@ -516,6 +516,11 @@ static inline void bgp_attr_set_ecommunity(struct attr *attr,  					   struct ecommunity *ecomm)  {  	attr->ecommunity = ecomm; + +	if (ecomm) +		SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES)); +	else +		UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES));  }  static inline struct lcommunity * @@ -528,6 +533,12 @@ static inline void bgp_attr_set_lcommunity(struct attr *attr,  					   struct lcommunity *lcomm)  {  	attr->lcommunity = lcomm; + +	if (lcomm) +		SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES)); +	else +		UNSET_FLAG(attr->flag, +			   ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES));  }  static inline struct community *bgp_attr_get_community(const struct attr *attr) @@ -539,6 +550,11 @@ static inline void bgp_attr_set_community(struct attr *attr,  					  struct community *comm)  {  	attr->community = comm; + +	if (comm) +		SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES)); +	else +		UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES));  }  static inline struct ecommunity * @@ -551,6 +567,13 @@ static inline void bgp_attr_set_ipv6_ecommunity(struct attr *attr,  						struct ecommunity *ipv6_ecomm)  {  	attr->ipv6_ecommunity = ipv6_ecomm; + +	if (ipv6_ecomm) +		SET_FLAG(attr->flag, +			 ATTR_FLAG_BIT(BGP_ATTR_IPV6_EXT_COMMUNITIES)); +	else +		UNSET_FLAG(attr->flag, +			   ATTR_FLAG_BIT(BGP_ATTR_IPV6_EXT_COMMUNITIES));  }  static inline struct transit *bgp_attr_get_transit(const struct attr *attr) diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c index 48d3706ec5..47922985d9 100644 --- a/bgpd/bgp_bmp.c +++ b/bgpd/bgp_bmp.c @@ -761,8 +761,8 @@ static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags)  	struct peer *peer;  	struct listnode *node;  	struct stream *s, *s2; -	iana_afi_t pkt_afi; -	iana_safi_t pkt_safi; +	iana_afi_t pkt_afi = IANA_AFI_IPV4; +	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;  	frrtrace(3, frr_bgp, bmp_eor, afi, safi, flags); diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c index 0e590a463c..0631f8b95a 100644 --- a/bgpd/bgp_clist.c +++ b/bgpd/bgp_clist.c @@ -557,7 +557,7 @@ static bool 
community_regexp_match(struct community *com, regex_t *reg)  	if (com == NULL || com->size == 0)  		str = "";  	else -		str = community_str(com, false); +		str = community_str(com, false, true);  	regstr = bgp_alias2community_str(str); @@ -631,7 +631,7 @@ static bool lcommunity_regexp_match(struct lcommunity *com, regex_t *reg)  	if (com == NULL || com->size == 0)  		str = "";  	else -		str = lcommunity_str(com, false); +		str = lcommunity_str(com, false, true);  	regstr = bgp_alias2community_str(str); diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c index 6e6a3cd587..78cf9ea76c 100644 --- a/bgpd/bgp_community.c +++ b/bgpd/bgp_community.c @@ -200,7 +200,8 @@ struct community *community_uniq_sort(struct community *com)     0xFFFFFF04      "no-peer"     For other values, "AS:VAL" format is used.  */ -static void set_community_string(struct community *com, bool make_json) +static void set_community_string(struct community *com, bool make_json, +				 bool translate_alias)  {  	int i;  	char *str; @@ -447,7 +448,9 @@ static void set_community_string(struct community *com, bool make_json)  			val = comval & 0xFFFF;  			char buf[32];  			snprintf(buf, sizeof(buf), "%u:%d", as, val); -			const char *com2alias = bgp_community2alias(buf); +			const char *com2alias = +				translate_alias ? bgp_community2alias(buf) +						: buf;  			strlcat(str, com2alias, len);  			if (make_json) { @@ -487,7 +490,7 @@ struct community *community_intern(struct community *com)  	/* Make string.  */  	if (!find->str) -		set_community_string(find, false); +		set_community_string(find, false, true);  	return find;  } @@ -548,7 +551,7 @@ struct community *community_dup(struct community *com)  }  /* Return string representation of communities attribute. */ -char *community_str(struct community *com, bool make_json) +char *community_str(struct community *com, bool make_json, bool translate_alias)  {  	if (!com)  		return NULL; @@ -557,7 +560,7 @@ char *community_str(struct community *com, bool make_json)  		XFREE(MTYPE_COMMUNITY_STR, com->str);  	if (!com->str) -		set_community_string(com, make_json); +		set_community_string(com, make_json, translate_alias);  	return com->str;  } diff --git a/bgpd/bgp_community.h b/bgpd/bgp_community.h index 2a1fbf526a..6f0ae0235c 100644 --- a/bgpd/bgp_community.h +++ b/bgpd/bgp_community.h @@ -76,7 +76,8 @@ extern struct community *community_uniq_sort(struct community *);  extern struct community *community_parse(uint32_t *, unsigned short);  extern struct community *community_intern(struct community *);  extern void community_unintern(struct community **); -extern char *community_str(struct community *, bool make_json); +extern char *community_str(struct community *, bool make_json, +			   bool translate_alias);  extern unsigned int community_hash_make(const struct community *);  extern struct community *community_str2com(const char *);  extern bool community_match(const struct community *, const struct community *); diff --git a/bgpd/bgp_community_alias.c b/bgpd/bgp_community_alias.c index 2c86efb5a0..caf469c0f7 100644 --- a/bgpd/bgp_community_alias.c +++ b/bgpd/bgp_community_alias.c @@ -18,6 +18,8 @@   * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA   */ +#include "zebra.h" +  #include "memory.h"  #include "lib/jhash.h"  #include "frrstr.h" diff --git a/bgpd/bgp_conditional_adv.c b/bgpd/bgp_conditional_adv.c index f72a373a1c..dd1510a678 100644 --- a/bgpd/bgp_conditional_adv.c +++ b/bgpd/bgp_conditional_adv.c @@ -300,7 +300,7 @@ void 
bgp_conditional_adv_enable(struct peer *peer, afi_t afi, safi_t safi)  	 */  	peer->advmap_config_change[afi][safi] = true; -	/* advertise-map is already configured on atleast one of its +	/* advertise-map is already configured on at least one of its  	 * neighbors (AFI/SAFI). So just increment the counter.  	 */  	if (++bgp->condition_filter_count > 1) { diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c index 5d14ff0fa6..49003e9428 100644 --- a/bgpd/bgp_debug.c +++ b/bgpd/bgp_debug.c @@ -411,12 +411,14 @@ bool bgp_dump_attr(struct attr *attr, char *buf, size_t size)  	if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES)))  		snprintf(buf + strlen(buf), size - strlen(buf),  			 ", community %s", -			 community_str(bgp_attr_get_community(attr), false)); +			 community_str(bgp_attr_get_community(attr), false, +				       true));  	if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES)))  		snprintf(buf + strlen(buf), size - strlen(buf),  			 ", large-community %s", -			 lcommunity_str(bgp_attr_get_lcommunity(attr), false)); +			 lcommunity_str(bgp_attr_get_lcommunity(attr), false, +					true));  	if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES)))  		snprintf(buf + strlen(buf), size - strlen(buf), diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c index 5a053a7f34..4120524e63 100644 --- a/bgpd/bgp_ecommunity.c +++ b/bgpd/bgp_ecommunity.c @@ -63,7 +63,7 @@ void ecommunity_strfree(char **s)  	XFREE(MTYPE_ECOMMUNITY_STR, *s);  } -/* Allocate ecommunities.  */ +/* Free ecommunities.  */  void ecommunity_free(struct ecommunity **ecom)  {  	if (!(*ecom)) @@ -210,7 +210,7 @@ ecommunity_uniq_sort_internal(struct ecommunity *ecom,  	return new;  } -/* This function takes pointer to Extended Communites strucutre then +/* This function takes pointer to Extended Communites structure then   * create a new Extended Communities structure by uniq and sort each   * Extended Communities value.   
*/ diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c index 9f3f8389ad..78d7bf20eb 100644 --- a/bgpd/bgp_evpn.c +++ b/bgpd/bgp_evpn.c @@ -520,8 +520,10 @@ static void form_auto_rt(struct bgp *bgp, vni_t vni, struct list *rtl)  	ecomadd = ecommunity_new();  	ecommunity_add_val(ecomadd, &eval, false, false);  	for (ALL_LIST_ELEMENTS_RO(rtl, node, ecom)) -		if (ecommunity_cmp(ecomadd, ecom)) +		if (ecommunity_cmp(ecomadd, ecom)) {  			ecom_found = true; +			break; +		}  	if (!ecom_found)  		listnode_add_sort(rtl, ecomadd); @@ -759,8 +761,6 @@ static void build_evpn_type5_route_extcomm(struct bgp *bgp_vrf,  		ecommunity_add_val(bgp_attr_get_ecommunity(attr), &eval_rmac,  				   true, true);  	} - -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);  }  /* @@ -870,8 +870,6 @@ static void build_evpn_route_extcomm(struct bgpevpn *vpn, struct attr *attr,  			attr, ecommunity_merge(bgp_attr_get_ecommunity(attr),  					       &ecom_na));  	} - -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);  }  /* @@ -3370,7 +3368,9 @@ static int bgp_evpn_install_uninstall_table(struct bgp *bgp, afi_t afi,  	assert(attr); -	/* Only type-2, type-3, type-4 and type-5 are supported currently */ +	/* Only type-1, type-2, type-3, type-4 and type-5 +	 * are supported currently +	 */  	if (!(evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE  	      || evp->prefix.route_type == BGP_EVPN_IMET_ROUTE  	      || evp->prefix.route_type == BGP_EVPN_ES_ROUTE @@ -3475,7 +3475,7 @@ static int bgp_evpn_install_uninstall_table(struct bgp *bgp, afi_t afi,  		if (evp->prefix.route_type == BGP_EVPN_ES_ROUTE) {  			/* we will match based on the entire esi to avoid -			 * imoort of an es route for esi2 into esi1 +			 * import of an es route for esi2 into esi1  			 */  			es = bgp_evpn_es_find(&evp->prefix.es_addr.esi);  			if (es && bgp_evpn_is_es_local(es)) @@ -4558,6 +4558,7 @@ void evpn_rt_delete_auto(struct bgp *bgp, vni_t vni, struct list *rtl)  		if (ecommunity_match(ecom, ecom_auto)) {  			ecommunity_free(&ecom);  			node_to_del = node; +			break;  		}  	} @@ -5106,7 +5107,6 @@ int bgp_nlri_parse_evpn(struct peer *peer, struct attr *attr,  /*   * Map the RTs (configured or automatically derived) of a VRF to the VRF.   * The mapping will be used during route processing. - * bgp_def: default bgp instance   * bgp_vrf: specific bgp vrf instance on which RT is configured   */  void bgp_evpn_map_vrf_to_its_rts(struct bgp *bgp_vrf) @@ -6101,8 +6101,9 @@ bool bgp_evpn_is_prefix_nht_supported(const struct prefix *pfx)  	 * EVPN routes should be marked as valid only if the nexthop is  	 * reachable. Only if this happens, the route should be imported  	 * (into VNI or VRF routing tables) and/or advertised. -	 * Note: This is currently applied for EVPN type-2, type-3 and -	 * type-5 routes. It may be tweaked later on for other routes, or +	 * Note: This is currently applied for EVPN type-1, type-2, +	 * type-3, type-4 and type-5 routes. +	 * It may be tweaked later on for other routes, or  	 * even removed completely when all routes are handled.  	 */  	if (pfx && pfx->family == AF_EVPN diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c index ed3716f601..39b31c0c1a 100644 --- a/bgpd/bgp_evpn_mh.c +++ b/bgpd/bgp_evpn_mh.c @@ -196,9 +196,8 @@ static int bgp_evpn_es_route_install(struct bgp *bgp,  	/* Check if route entry is already present. 
*/  	for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) -		if (pi->extra -				&& (struct bgp_path_info *)pi->extra->parent == -				parent_pi) +		if (pi->extra && +		    (struct bgp_path_info *)pi->extra->parent == parent_pi)  			break;  	if (!pi) { @@ -287,7 +286,7 @@ static int bgp_evpn_es_route_uninstall(struct bgp *bgp, struct bgp_evpn_es *es,  	return ret;  } -/* Install or unistall a Tyoe-4 route in the per-ES routing table */ +/* Install or unistall a Type-4 route in the per-ES routing table */  int bgp_evpn_es_route_install_uninstall(struct bgp *bgp, struct bgp_evpn_es *es,  		afi_t afi, safi_t safi, struct prefix_evpn *evp,  		struct bgp_path_info *pi, int install) @@ -378,16 +377,16 @@ int bgp_evpn_mh_route_update(struct bgp *bgp, struct bgp_evpn_es *es,  			remote_pi = tmp_pi;  	} -	/* we don't expect to see a remote_ri at this point as +	/* we don't expect to see a remote_pi at this point as  	 * an ES route has {esi, vtep_ip} as the key in the ES-rt-table  	 * in the VNI-rt-table.  	 */  	if (remote_pi) {  		flog_err(  			EC_BGP_ES_INVALID, -			"%u ERROR: local es route for ESI: %s Vtep %pI4 also learnt from remote", +			"%u ERROR: local es route for ESI: %s vtep %pI4 also learnt from remote",  			bgp->vrf_id, es ? es->esi_str : "Null", -			&es->originator_ip); +			es ? &es->originator_ip : NULL);  		return -1;  	} @@ -622,8 +621,6 @@ static void bgp_evpn_type4_route_extcomm_build(struct bgp_evpn_es *es,  	bgp_attr_set_ecommunity(  		attr,  		ecommunity_merge(bgp_attr_get_ecommunity(attr), &ecom_df)); - -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);  }  /* Create or update local type-4 route */ @@ -904,8 +901,6 @@ bgp_evpn_type1_es_route_extcomm_build(struct bgp_evpn_es_frag *es_frag,  							       ecom));  		}  	} - -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);  }  /* Extended communities associated with EAD-per-EVI */ @@ -932,8 +927,6 @@ static void bgp_evpn_type1_evi_route_extcomm_build(struct bgp_evpn_es *es,  		bgp_attr_set_ecommunity(  			attr,  			ecommunity_merge(bgp_attr_get_ecommunity(attr), ecom)); - -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);  }  /* Update EVPN EAD (type-1) route - @@ -1273,9 +1266,9 @@ void bgp_evpn_mh_config_ead_export_rt(struct bgp *bgp,  				}  			} -			if (node_to_del) -				list_delete_node(bgp_mh_info->ead_es_export_rtl, -						 node_to_del); +			assert(node_to_del); +			list_delete_node(bgp_mh_info->ead_es_export_rtl, +					 node_to_del);  		}  	} else {  		listnode_add_sort(bgp_mh_info->ead_es_export_rtl, ecomcfg); @@ -1688,7 +1681,7 @@ static bool bgp_evpn_is_macip_path(struct bgp_path_info *pi)   * This is done indirectly by re-attempting an install of the   * route in the associated VRFs. As a part of the VRF install use   * of l3 NHG is evaluated and this results in the - * attr.es_flag ATTR_ES_USE_L3_NHG being set or cleared. + * attr.es_flag ATTR_ES_L3_NHG_USE being set or cleared.   
*/  static void  bgp_evpn_es_path_update_on_es_vrf_chg(struct bgp_evpn_es_vrf *es_vrf, @@ -1902,9 +1895,6 @@ static struct bgp_evpn_es *bgp_evpn_es_new(struct bgp *bgp, const esi_t *esi)  {  	struct bgp_evpn_es *es; -	if (!bgp) -		return NULL; -  	es = XCALLOC(MTYPE_BGP_EVPN_ES, sizeof(struct bgp_evpn_es));  	/* set the ESI */ @@ -2364,7 +2354,6 @@ int bgp_evpn_local_es_add(struct bgp *bgp, esi_t *esi,  			  struct in_addr originator_ip, bool oper_up,  			  uint16_t df_pref, bool bypass)  { -	char buf[ESI_STR_LEN];  	struct bgp_evpn_es *es;  	bool new_es = true;  	bool regen_esr = false; @@ -2374,15 +2363,8 @@ int bgp_evpn_local_es_add(struct bgp *bgp, esi_t *esi,  	if (es) {  		if (CHECK_FLAG(es->flags, BGP_EVPNES_LOCAL))  			new_es = false; -	} else { +	} else  		es = bgp_evpn_es_new(bgp, esi); -		if (!es) { -			flog_err(EC_BGP_ES_CREATE, -				"%u: Failed to allocate ES entry for ESI %s - at Local ES Add", -				bgp->vrf_id, esi_to_str(esi, buf, sizeof(buf))); -			return -1; -		} -	}  	if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))  		zlog_debug("add local es %s orig-ip %pI4 df_pref %u %s", @@ -2886,7 +2868,7 @@ static void bgp_evpn_l3nhg_zebra_add_v4_or_v6(struct bgp_evpn_es_vrf *es_vrf,  static bool bgp_evpn_l3nhg_zebra_ok(struct bgp_evpn_es_vrf *es_vrf)  { -	if (!bgp_mh_info->host_routes_use_l3nhg && !bgp_mh_info->install_l3nhg) +	if (!bgp_mh_info->host_routes_use_l3nhg)  		return false;  	/* Check socket. */ @@ -3460,14 +3442,11 @@ static void bgp_evpn_es_evi_vtep_re_eval_active(struct bgp *bgp,  			   new_active ? "active" : "inactive");  	/* add VTEP to parent es */ -	if (new_active) { -		struct bgp_evpn_es_vtep *es_vtep; - -		es_vtep = bgp_evpn_es_vtep_add(bgp, evi_vtep->es_evi->es, -					       evi_vtep->vtep_ip, false /*esr*/, -					       0, 0); -		evi_vtep->es_vtep = es_vtep; -	} else { +	if (new_active) +		evi_vtep->es_vtep = bgp_evpn_es_vtep_add( +			bgp, evi_vtep->es_evi->es, evi_vtep->vtep_ip, +			false /*esr*/, 0, 0); +	else {  		if (evi_vtep->es_vtep) {  			bgp_evpn_es_vtep_do_del(bgp, evi_vtep->es_vtep,  					false /*esr*/); @@ -3821,15 +3800,8 @@ int bgp_evpn_remote_es_evi_add(struct bgp *bgp, struct bgpevpn *vpn,  			   &p->prefix.ead_addr.ip.ipaddr_v4);  	es = bgp_evpn_es_find(esi); -	if (!es) { +	if (!es)  		es = bgp_evpn_es_new(bgp, esi); -		if (!es) { -			flog_err(EC_BGP_ES_CREATE, -				"%u: Failed to allocate ES entry for ESI %s - at remote ES Add", -				bgp->vrf_id, esi_to_str(esi, buf, sizeof(buf))); -			return -1; -		} -	}  	es_evi = bgp_evpn_es_evi_find(es, vpn);  	if (!es_evi) @@ -3868,13 +3840,13 @@ int bgp_evpn_remote_es_evi_del(struct bgp *bgp, struct bgpevpn *vpn,  	es = bgp_evpn_es_find(&p->prefix.ead_addr.esi);  	if (!es) {  		if (BGP_DEBUG(evpn_mh, EVPN_MH_ES)) -			zlog_debug("del remote %s es %s evi %u vtep %pI4, NO es", -				   p->prefix.ead_addr.eth_tag ? "ead-es" -							      : "ead-evi", -				   esi_to_str(&p->prefix.ead_addr.esi, buf, -					      sizeof(buf)), -				   vpn->vni, -    			   &p->prefix.ead_addr.ip.ipaddr_v4); +			zlog_debug( +				"del remote %s es %s evi %u vtep %pI4, NO es", +				p->prefix.ead_addr.eth_tag ? 
"ead-es" +							   : "ead-evi", +				esi_to_str(&p->prefix.ead_addr.esi, buf, +					   sizeof(buf)), +				vpn->vni, &p->prefix.ead_addr.ip.ipaddr_v4);  		return 0;  	}  	es_evi = bgp_evpn_es_evi_find(es, vpn); @@ -4419,14 +4391,12 @@ static uint32_t bgp_evpn_es_run_consistency_checks(struct bgp_evpn_es *es)  static void bgp_evpn_run_consistency_checks(struct thread *t)  {  	int proc_cnt = 0; -	int es_cnt = 0;  	struct listnode *node;  	struct listnode *nextnode;  	struct bgp_evpn_es *es;  	for (ALL_LIST_ELEMENTS(bgp_mh_info->pend_es_list,  				node, nextnode, es)) { -		++es_cnt;  		++proc_cnt;  		/* run consistency checks on the ES and remove it from the  		 * pending list @@ -4966,7 +4936,6 @@ void bgp_evpn_mh_init(void)  	/* config knobs - XXX add cli to control it */  	bgp_mh_info->ead_evi_adv_for_down_links = true;  	bgp_mh_info->consistency_checking = true; -	bgp_mh_info->install_l3nhg = false;  	bgp_mh_info->host_routes_use_l3nhg = BGP_EVPN_MH_USE_ES_L3NHG_DEF;  	bgp_mh_info->suppress_l3_ecomm_on_inactive_es = true;  	bgp_mh_info->bgp_evpn_nh_setup = true; diff --git a/bgpd/bgp_evpn_mh.h b/bgpd/bgp_evpn_mh.h index d9e2e72e4f..dc3fe44776 100644 --- a/bgpd/bgp_evpn_mh.h +++ b/bgpd/bgp_evpn_mh.h @@ -50,7 +50,9 @@ struct bgp_evpn_es_frag {  	/* RD for this ES fragment */  	struct prefix_rd prd; -	/* Memory used for linking bgp_evpn_es_rd to bgp_evpn_es->rd_list */ +	/* Memory used for linking bgp_evpn_es_frag to +	 * bgp_evpn_es->es_frag_list +	 */  	struct listnode es_listnode;  	/* List of ES-EVIs associated with this fragment */ @@ -59,11 +61,11 @@ struct bgp_evpn_es_frag {  /* Ethernet Segment entry -   * - Local and remote ESs are maintained in a global RB tree, - * bgp_mh_info->es_rb_tree using ESI as key + *   bgp_mh_info->es_rb_tree using ESI as key   * - Local ESs are received from zebra (BGP_EVPNES_LOCAL)   * - Remotes ESs are implicitly created (by reference) by a remote ES-EVI   *   (BGP_EVPNES_REMOTE) - * - An ES can be simulatenously LOCAL and REMOTE; infact all LOCAL ESs are + * - An ES can be simultaneously LOCAL and REMOTE; infact all LOCAL ESs are   *   expected to have REMOTE ES peers.   */  struct bgp_evpn_es { @@ -101,7 +103,7 @@ struct bgp_evpn_es {  	 */  	struct listnode pend_es_listnode; -	/* [EVPNES_LOCAL] List of RDs for this ES (bgp_evpn_es_rd) */ +	/* [EVPNES_LOCAL] List of RDs for this ES (bgp_evpn_es_frag) */  	struct list *es_frag_list;  	struct bgp_evpn_es_frag *es_base_frag; @@ -319,7 +321,6 @@ struct bgp_evpn_mh_info {  	/* Enable ES consistency checking */  	bool consistency_checking;  	/* Use L3 NHGs for host routes in symmetric IRB */ -	bool install_l3nhg;  	bool host_routes_use_l3nhg;  	/* Some vendors are not generating the EAD-per-EVI route. 
This knob  	 * can be turned off to activate a remote ES-PE when the EAD-per-ES diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c index 4da3fa8f3b..109de1efb2 100644 --- a/bgpd/bgp_evpn_vty.c +++ b/bgpd/bgp_evpn_vty.c @@ -3867,19 +3867,19 @@ DEFUN (bgp_evpn_advertise_type5,  	if (!(afi == AFI_IP || afi == AFI_IP6)) {  		vty_out(vty, -			"%%only ipv4 or ipv6 address families are supported\n"); +			"%% Only ipv4 or ipv6 address families are supported\n");  		return CMD_WARNING;  	}  	if (safi != SAFI_UNICAST) {  		vty_out(vty, -			"%%only ipv4 unicast or ipv6 unicast are supported\n"); +			"%% Only ipv4 unicast or ipv6 unicast are supported\n");  		return CMD_WARNING;  	}  	if ((oly != OVERLAY_INDEX_TYPE_NONE)  	    && (oly != OVERLAY_INDEX_GATEWAY_IP)) { -		vty_out(vty, "%%Unknown overlay-index type specified\n"); +		vty_out(vty, "%% Unknown overlay-index type specified\n");  		return CMD_WARNING;  	} @@ -4058,13 +4058,13 @@ DEFUN (no_bgp_evpn_advertise_type5,  	if (!(afi == AFI_IP || afi == AFI_IP6)) {  		vty_out(vty, -			"%%only ipv4 or ipv6 address families are supported\n"); +			"%% Only ipv4 or ipv6 address families are supported\n");  		return CMD_WARNING;  	}  	if (safi != SAFI_UNICAST) {  		vty_out(vty, -			"%%only ipv4 unicast or ipv6 unicast are supported\n"); +			"%% Only ipv4 unicast or ipv6 unicast are supported\n");  		return CMD_WARNING;  	} @@ -4495,7 +4495,7 @@ DEFPY(show_bgp_l2vpn_evpn_es,  	if (esi_str) {  		if (!str_to_esi(esi_str, &esi)) { -			vty_out(vty, "%%Malformed ESI\n"); +			vty_out(vty, "%% Malformed ESI\n");  			return CMD_WARNING;  		}  		bgp_evpn_es_show_esi(vty, &esi, uj); @@ -4517,7 +4517,7 @@ DEFPY(show_bgp_l2vpn_evpn_es_vrf, show_bgp_l2vpn_evpn_es_vrf_cmd,  	if (esi_str) {  		if (!str_to_esi(esi_str, &esi)) { -			vty_out(vty, "%%Malformed ESI\n"); +			vty_out(vty, "%% Malformed ESI\n");  			return CMD_WARNING;  		}  		bgp_evpn_es_vrf_show_esi(vty, &esi, uj); @@ -5132,7 +5132,7 @@ DEFPY_HIDDEN(  	if (esi_str) {  		if (!str_to_esi(esi_str, &esi)) { -			vty_out(vty, "%%Malformed ESI\n"); +			vty_out(vty, "%% Malformed ESI\n");  			return CMD_WARNING;  		}  		esi_p = &esi; @@ -5165,7 +5165,7 @@ DEFPY_HIDDEN(  	if (esi_str) {  		if (!str_to_esi(esi_str, &esi)) { -			vty_out(vty, "%%Malformed ESI\n"); +			vty_out(vty, "%% Malformed ESI\n");  			return CMD_WARNING;  		}  		esi_p = &esi; @@ -5267,19 +5267,19 @@ DEFPY_HIDDEN(test_es_add,  	bgp = bgp_get_evpn();  	if (!bgp) { -		vty_out(vty, "%%EVPN BGP instance not yet created\n"); +		vty_out(vty, "%% EVPN BGP instance not yet created\n");  		return CMD_WARNING;  	}  	if (!str_to_esi(esi_str, &esi)) { -		vty_out(vty, "%%Malformed ESI\n"); +		vty_out(vty, "%% Malformed ESI\n");  		return CMD_WARNING;  	}  	if (no) {  		ret = bgp_evpn_local_es_del(bgp, &esi);  		if (ret == -1) { -			vty_out(vty, "%%Failed to delete ES\n"); +			vty_out(vty, "%% Failed to delete ES\n");  			return CMD_WARNING;  		}  	} else { @@ -5292,7 +5292,7 @@ DEFPY_HIDDEN(test_es_add,  		ret = bgp_evpn_local_es_add(bgp, &esi, vtep_ip, oper_up,  					    EVPN_MH_DF_PREF_MIN, false);  		if (ret == -1) { -			vty_out(vty, "%%Failed to add ES\n"); +			vty_out(vty, "%% Failed to add ES\n");  			return CMD_WARNING;  		}  	} @@ -5316,25 +5316,25 @@ DEFPY_HIDDEN(test_es_vni_add,  	bgp = bgp_get_evpn();  	if (!bgp) { -		vty_out(vty, "%%EVPN BGP instance not yet created\n"); +		vty_out(vty, "%% EVPN BGP instance not yet created\n");  		return CMD_WARNING;  	}  	if (!str_to_esi(esi_str, &esi)) { -		vty_out(vty, "%%Malformed ESI\n"); +		vty_out(vty, "%% 
Malformed ESI\n");  		return CMD_WARNING;  	}  	if (no) {  		ret = bgp_evpn_local_es_evi_del(bgp, &esi, vni);  		if (ret == -1) { -			vty_out(vty, "%%Failed to deref ES VNI\n"); +			vty_out(vty, "%% Failed to deref ES VNI\n");  			return CMD_WARNING;  		}  	} else {  		ret = bgp_evpn_local_es_evi_add(bgp, &esi, vni);  		if (ret == -1) { -			vty_out(vty, "%%Failed to ref ES VNI\n"); +			vty_out(vty, "%% Failed to ref ES VNI\n");  			return CMD_WARNING;  		}  	} @@ -5747,9 +5747,13 @@ DEFUN (show_bgp_vrf_l3vni_info,  	name = argv[idx_vrf]->arg;  	bgp = bgp_lookup_by_name(name); +	if (strmatch(name, VRF_DEFAULT_NAME)) +		bgp = bgp_get_default(); +  	if (!bgp) {  		if (!uj) -			vty_out(vty, "BGP instance for VRF %s not found", name); +			vty_out(vty, "BGP instance for VRF %s not found\n", +				name);  		else {  			json_object_string_add(json, "warning",  					       "BGP instance not found"); @@ -5858,16 +5862,15 @@ DEFUN (bgp_evpn_vrf_rt,  		return CMD_WARNING;  	} +	ecomadd = ecommunity_str2com(argv[2]->arg, ECOMMUNITY_ROUTE_TARGET, 0); +	if (!ecomadd) { +		vty_out(vty, "%% Malformed Route Target list\n"); +		return CMD_WARNING; +	} +	ecommunity_str(ecomadd); +  	/* Add/update the import route-target */  	if (rt_type == RT_TYPE_BOTH || rt_type == RT_TYPE_IMPORT) { -		ecomadd = ecommunity_str2com(argv[2]->arg, -					     ECOMMUNITY_ROUTE_TARGET, 0); -		if (!ecomadd) { -			vty_out(vty, "%% Malformed Route Target list\n"); -			return CMD_WARNING; -		} -		ecommunity_str(ecomadd); -  		/* Do nothing if we already have this import route-target */  		if (!bgp_evpn_rt_matches_existing(bgp->vrf_import_rtl, ecomadd))  			bgp_evpn_configure_import_rt_for_vrf(bgp, ecomadd); @@ -5875,14 +5878,6 @@ DEFUN (bgp_evpn_vrf_rt,  	/* Add/update the export route-target */  	if (rt_type == RT_TYPE_BOTH || rt_type == RT_TYPE_EXPORT) { -		ecomadd = ecommunity_str2com(argv[2]->arg, -					     ECOMMUNITY_ROUTE_TARGET, 0); -		if (!ecomadd) { -			vty_out(vty, "%% Malformed Route Target list\n"); -			return CMD_WARNING; -		} -		ecommunity_str(ecomadd); -  		/* Do nothing if we already have this export route-target */  		if (!bgp_evpn_rt_matches_existing(bgp->vrf_export_rtl, ecomadd))  			bgp_evpn_configure_export_rt_for_vrf(bgp, ecomadd); @@ -6107,16 +6102,15 @@ DEFUN (bgp_evpn_vni_rt,  		return CMD_WARNING;  	} +	ecomadd = ecommunity_str2com(argv[2]->arg, ECOMMUNITY_ROUTE_TARGET, 0); +	if (!ecomadd) { +		vty_out(vty, "%% Malformed Route Target list\n"); +		return CMD_WARNING; +	} +	ecommunity_str(ecomadd); +  	/* Add/update the import route-target */  	if (rt_type == RT_TYPE_BOTH || rt_type == RT_TYPE_IMPORT) { -		ecomadd = ecommunity_str2com(argv[2]->arg, -					     ECOMMUNITY_ROUTE_TARGET, 0); -		if (!ecomadd) { -			vty_out(vty, "%% Malformed Route Target list\n"); -			return CMD_WARNING; -		} -		ecommunity_str(ecomadd); -  		/* Do nothing if we already have this import route-target */  		if (!bgp_evpn_rt_matches_existing(vpn->import_rtl, ecomadd))  			evpn_configure_import_rt(bgp, vpn, ecomadd); @@ -6124,14 +6118,6 @@ DEFUN (bgp_evpn_vni_rt,  	/* Add/update the export route-target */  	if (rt_type == RT_TYPE_BOTH || rt_type == RT_TYPE_EXPORT) { -		ecomadd = ecommunity_str2com(argv[2]->arg, -					     ECOMMUNITY_ROUTE_TARGET, 0); -		if (!ecomadd) { -			vty_out(vty, "%% Malformed Route Target list\n"); -			return CMD_WARNING; -		} -		ecommunity_str(ecomadd); -  		/* Do nothing if we already have this export route-target */  		if (!bgp_evpn_rt_matches_existing(vpn->export_rtl, ecomadd))  			evpn_configure_export_rt(bgp, 
vpn, ecomadd); diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index f8de3b8dc4..6854a6501a 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -677,7 +677,7 @@ static void bgp_llgr_stale_timer_expire(struct thread *thread)  	 * stale routes from the neighbor that it is retaining.  	 */  	if (bgp_debug_neighbor_events(peer)) -		zlog_debug("%s Long-lived stale timer (%s) expired", peer->host, +		zlog_debug("%pBP Long-lived stale timer (%s) expired", peer,  			   get_afi_safi_str(afi, safi, false));  	UNSET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_LLGR_WAIT); @@ -719,8 +719,8 @@ static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi)  					if (bgp_debug_neighbor_events(peer))  						zlog_debug( -							"%s Long-lived set stale community (LLGR_STALE) for: %pFX", -							peer->host, &dest->p); +							"%pBP Long-lived set stale community (LLGR_STALE) for: %pFX", +							peer, &dest->p);  					attr = *pi->attr;  					bgp_attr_add_llgr_community(&attr); @@ -747,8 +747,8 @@ static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi)  				if (bgp_debug_neighbor_events(peer))  					zlog_debug( -						"%s Long-lived set stale community (LLGR_STALE) for: %pFX", -						peer->host, &dest->p); +						"%pBP Long-lived set stale community (LLGR_STALE) for: %pFX", +						peer, &dest->p);  				attr = *pi->attr;  				bgp_attr_add_llgr_community(&attr); @@ -772,9 +772,9 @@ static void bgp_graceful_restart_timer_expire(struct thread *thread)  	peer = THREAD_ARG(thread);  	if (bgp_debug_neighbor_events(peer)) { -		zlog_debug("%s graceful restart timer expired", peer->host); -		zlog_debug("%s graceful restart stalepath timer stopped", -			   peer->host); +		zlog_debug("%pBP graceful restart timer expired", peer); +		zlog_debug("%pBP graceful restart stalepath timer stopped", +			   peer);  	}  	FOREACH_AFI_SAFI (afi, safi) { @@ -800,8 +800,8 @@ static void bgp_graceful_restart_timer_expire(struct thread *thread)  			if (bgp_debug_neighbor_events(peer))  				zlog_debug( -					"%s Long-lived stale timer (%s) started for %d sec", -					peer->host, +					"%pBP Long-lived stale timer (%s) started for %d sec", +					peer,  					get_afi_safi_str(afi, safi, false),  					peer->llgr[afi][safi].stale_time); @@ -836,8 +836,8 @@ static void bgp_graceful_stale_timer_expire(struct thread *thread)  	peer = THREAD_ARG(thread);  	if (bgp_debug_neighbor_events(peer)) -		zlog_debug("%s graceful restart stalepath timer expired", -			   peer->host); +		zlog_debug("%pBP graceful restart stalepath timer expired", +			   peer);  	/* NSF delete stale route */  	FOREACH_AFI_SAFI_NSF (afi, safi) @@ -1389,12 +1389,11 @@ int bgp_stop(struct peer *peer)  			struct vrf *vrf = vrf_lookup_by_id(peer->bgp->vrf_id);  			zlog_info( -				"%%ADJCHANGE: neighbor %s(%s) in vrf %s Down %s", -				peer->host, -				(peer->hostname) ? peer->hostname : "Unknown", +				"%%ADJCHANGE: neighbor %pBP in vrf %s Down %s", +				peer,  				vrf ? ((vrf->vrf_id != VRF_DEFAULT) -						? vrf->name -						: VRF_DEFAULT_NAME) +					       ? 
vrf->name +					       : VRF_DEFAULT_NAME)  				    : "",  				peer_down_str[(int)peer->last_reset]);  		} @@ -1404,17 +1403,17 @@ int bgp_stop(struct peer *peer)  			BGP_TIMER_OFF(peer->t_gr_stale);  			if (bgp_debug_neighbor_events(peer))  				zlog_debug( -					"%s graceful restart stalepath timer stopped", -					peer->host); +					"%pBP graceful restart stalepath timer stopped", +					peer);  		}  		if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) {  			if (bgp_debug_neighbor_events(peer)) {  				zlog_debug( -					"%s graceful restart timer started for %d sec", -					peer->host, peer->v_gr_restart); +					"%pBP graceful restart timer started for %d sec", +					peer, peer->v_gr_restart);  				zlog_debug( -					"%s graceful restart stalepath timer started for %d sec", -					peer->host, peer->bgp->stalepath_time); +					"%pBP graceful restart stalepath timer started for %d sec", +					peer, peer->bgp->stalepath_time);  			}  			BGP_TIMER_ON(peer->t_gr_restart,  				     bgp_graceful_restart_timer_expire, @@ -1435,8 +1434,8 @@ int bgp_stop(struct peer *peer)  			if (bgp_debug_neighbor_events(peer))  				zlog_debug( -					"%s: route-refresh restart stalepath timer stopped", -					peer->host); +					"%pBP route-refresh restart stalepath timer stopped", +					peer);  		}  		/* If peer reset before receiving EOR, decrement EOR count and @@ -2092,12 +2091,11 @@ static int bgp_establish(struct peer *peer)  	/* bgp log-neighbor-changes of neighbor Up */  	if (CHECK_FLAG(peer->bgp->flags, BGP_FLAG_LOG_NEIGHBOR_CHANGES)) {  		struct vrf *vrf = vrf_lookup_by_id(peer->bgp->vrf_id); -		zlog_info( -			"%%ADJCHANGE: neighbor %s(%s) in vrf %s Up", peer->host, -			(peer->hostname) ? peer->hostname : "Unknown", -			vrf ? ((vrf->vrf_id != VRF_DEFAULT) ? vrf->name -							    : VRF_DEFAULT_NAME) -			    : ""); +		zlog_info("%%ADJCHANGE: neighbor %pBP in vrf %s Up", peer, +			  vrf ? ((vrf->vrf_id != VRF_DEFAULT) +					 ? vrf->name +					 : VRF_DEFAULT_NAME) +			      : "");  	}  	/* assign update-group/subgroup */  	update_group_adjust_peer_afs(peer); @@ -2106,9 +2104,9 @@ static int bgp_establish(struct peer *peer)  	UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);  	if (bgp_debug_neighbor_events(peer)) {  		if (BGP_PEER_RESTARTING_MODE(peer)) -			zlog_debug("peer %s BGP_RESTARTING_MODE", peer->host); +			zlog_debug("%pBP BGP_RESTARTING_MODE", peer);  		else if (BGP_PEER_HELPER_MODE(peer)) -			zlog_debug("peer %s BGP_HELPER_MODE", peer->host); +			zlog_debug("%pBP BGP_HELPER_MODE", peer);  	}  	FOREACH_AFI_SAFI_NSF (afi, safi) { @@ -2182,16 +2180,15 @@ static int bgp_establish(struct peer *peer)  			BGP_TIMER_OFF(peer->t_gr_stale);  			if (bgp_debug_neighbor_events(peer))  				zlog_debug( -					"%s graceful restart stalepath timer stopped", -					peer->host); +					"%pBP graceful restart stalepath timer stopped", +					peer);  		}  	}  	if (peer->t_gr_restart) {  		BGP_TIMER_OFF(peer->t_gr_restart);  		if (bgp_debug_neighbor_events(peer)) -			zlog_debug("%s graceful restart timer stopped", -				   peer->host); +			zlog_debug("%pBP graceful restart timer stopped", peer);  	}  	/* Reset uptime, turn on keepalives, send current table. */ @@ -2200,8 +2197,18 @@ static int bgp_establish(struct peer *peer)  	peer->uptime = bgp_clock(); -	/* Send route-refresh when ORF is enabled */ +	/* Send route-refresh when ORF is enabled. +	 * Stop Long-lived Graceful Restart timers. 
+	 */  	FOREACH_AFI_SAFI (afi, safi) { +		if (peer->t_llgr_stale[afi][safi]) { +			BGP_TIMER_OFF(peer->t_llgr_stale[afi][safi]); +			if (bgp_debug_neighbor_events(peer)) +				zlog_debug( +					"%pBP Long-lived stale timer stopped for afi/safi: %d/%d", +					peer, afi, safi); +		} +  		if (CHECK_FLAG(peer->af_cap[afi][safi],  			       PEER_CAP_ORF_PREFIX_SM_ADV)) {  			if (CHECK_FLAG(peer->af_cap[afi][safi], @@ -2748,10 +2755,10 @@ const char *print_peer_gr_cmd(enum peer_gr_command pr_gr_cmd)  		peer_gr_cmd = "NO_PEER_GR_CMD";  		break;  	case PEER_DISABLE_CMD: -		peer_gr_cmd = "PEER_GR_CMD"; +		peer_gr_cmd = "PEER_DISABLE_GR_CMD";  		break;  	case NO_PEER_DISABLE_CMD: -		peer_gr_cmd = "NO_PEER_GR_CMD"; +		peer_gr_cmd = "NO_PEER_DISABLE_GR_CMD";  		break;  	case PEER_HELPER_CMD:  		peer_gr_cmd = "PEER_HELPER_CMD"; diff --git a/bgpd/bgp_label.c b/bgpd/bgp_label.c index 4a20f2c090..f53deb63b3 100644 --- a/bgpd/bgp_label.c +++ b/bgpd/bgp_label.c @@ -313,14 +313,14 @@ static int bgp_nlri_get_labels(struct peer *peer, uint8_t *pnt, uint8_t plen,  	/* If we RX multiple labels we will end up keeping only the last  	 * one. We do not yet support a label stack greater than 1. */  	if (label_depth > 1) -		zlog_info("%s rcvd UPDATE with label stack %d deep", peer->host, +		zlog_info("%pBP rcvd UPDATE with label stack %d deep", peer,  			  label_depth);  	if (!(bgp_is_withdraw_label(label) || label_bos(label)))  		flog_warn(  			EC_BGP_INVALID_LABEL_STACK, -			"%s rcvd UPDATE with invalid label stack - no bottom of stack", -			peer->host); +			"%pBP rcvd UPDATE with invalid label stack - no bottom of stack", +			peer);  	return llen;  } diff --git a/bgpd/bgp_lcommunity.c b/bgpd/bgp_lcommunity.c index 60ad75c73b..e0cca50d89 100644 --- a/bgpd/bgp_lcommunity.c +++ b/bgpd/bgp_lcommunity.c @@ -103,7 +103,7 @@ static bool lcommunity_add_val(struct lcommunity *lcom,  	return true;  } -/* This function takes pointer to Large Communites strucutre then +/* This function takes pointer to Large Communites structure then     create a new Large Communities structure by uniq and sort each     Large Communities value.  */  struct lcommunity *lcommunity_uniq_sort(struct lcommunity *lcom) @@ -175,7 +175,8 @@ struct lcommunity *lcommunity_merge(struct lcommunity *lcom1,  	return lcom1;  } -static void set_lcommunity_string(struct lcommunity *lcom, bool make_json) +static void set_lcommunity_string(struct lcommunity *lcom, bool make_json, +				  bool translate_alias)  {  	int i;  	int len; @@ -228,7 +229,8 @@ static void set_lcommunity_string(struct lcommunity *lcom, bool make_json)  		snprintf(lcsb, sizeof(lcsb), "%u:%u:%u", global, local1,  			 local2); -		const char *com2alias = bgp_community2alias(lcsb); +		const char *com2alias = +			translate_alias ? bgp_community2alias(lcsb) : lcsb;  		len = strlcat(str_buf, com2alias, str_buf_sz);  		assert((unsigned int)len < str_buf_sz); @@ -264,7 +266,7 @@ struct lcommunity *lcommunity_intern(struct lcommunity *lcom)  	find->refcnt++;  	if (!find->str) -		set_lcommunity_string(find, false); +		set_lcommunity_string(find, false, true);  	return find;  } @@ -291,7 +293,8 @@ void lcommunity_unintern(struct lcommunity **lcom)  }  /* Return string representation of lcommunities attribute. 
*/ -char *lcommunity_str(struct lcommunity *lcom, bool make_json) +char *lcommunity_str(struct lcommunity *lcom, bool make_json, +		     bool translate_alias)  {  	if (!lcom)  		return NULL; @@ -300,7 +303,7 @@ char *lcommunity_str(struct lcommunity *lcom, bool make_json)  		XFREE(MTYPE_LCOMMUNITY_STR, lcom->str);  	if (!lcom->str) -		set_lcommunity_string(lcom, make_json); +		set_lcommunity_string(lcom, make_json, translate_alias);  	return lcom->str;  } diff --git a/bgpd/bgp_lcommunity.h b/bgpd/bgp_lcommunity.h index 6ccb6b7879..b9b5fe35d5 100644 --- a/bgpd/bgp_lcommunity.h +++ b/bgpd/bgp_lcommunity.h @@ -69,7 +69,8 @@ extern struct hash *lcommunity_hash(void);  extern struct lcommunity *lcommunity_str2com(const char *);  extern bool lcommunity_match(const struct lcommunity *,  			     const struct lcommunity *); -extern char *lcommunity_str(struct lcommunity *, bool make_json); +extern char *lcommunity_str(struct lcommunity *, bool make_json, +			    bool translate_alias);  extern bool lcommunity_include(struct lcommunity *lcom, uint8_t *ptr);  extern void lcommunity_del_val(struct lcommunity *lcom, uint8_t *ptr); diff --git a/bgpd/bgp_mpath.c b/bgpd/bgp_mpath.c index 6e695d0301..6cd6ddd9dd 100644 --- a/bgpd/bgp_mpath.c +++ b/bgpd/bgp_mpath.c @@ -905,18 +905,12 @@ void bgp_path_info_mpath_aggregate_update(struct bgp_path_info *new_best,  		attr.aspath = aspath;  		attr.origin = origin; -		if (community) { +		if (community)  			bgp_attr_set_community(&attr, community); -			attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES); -		} -		if (ecomm) { +		if (ecomm)  			bgp_attr_set_ecommunity(&attr, ecomm); -			attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES); -		} -		if (lcomm) { +		if (lcomm)  			bgp_attr_set_lcommunity(&attr, lcomm); -			attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES); -		}  		/* Zap multipath attr nexthop so we set nexthop to self */  		attr.nexthop.s_addr = INADDR_ANY; diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index 32a1d9a152..4d8c4ac2ac 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -516,28 +516,37 @@ static bool sid_exist(struct bgp *bgp, const struct in6_addr *sid)  }  /* + * This function generates a new SID based on bgp->srv6_locator_chunks and + * index. The locator and generated SID are stored in arguments sid_locator + * and sid, respectively. 
+ *   * if index != 0: try to allocate as index-mode   * else: try to allocate as auto-mode   */  static uint32_t alloc_new_sid(struct bgp *bgp, uint32_t index, -			      struct in6_addr *sid_locator) +			      struct in6_addr *sid_locator, +			      struct in6_addr *sid)  {  	struct listnode *node; -	struct prefix_ipv6 *chunk; -	struct in6_addr sid_buf; +	struct srv6_locator_chunk *chunk;  	bool alloced = false;  	int label = 0; +	uint8_t offset = 0; +	uint8_t len = 0; -	if (!bgp || !sid_locator) +	if (!bgp || !sid_locator || !sid)  		return false;  	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) { -		*sid_locator = chunk->prefix; -		sid_buf = chunk->prefix; +		*sid_locator = chunk->prefix.prefix; +		*sid = chunk->prefix.prefix; +		offset = chunk->block_bits_length + chunk->node_bits_length; +		len = chunk->function_bits_length ?: 16; +  		if (index != 0) {  			label = index << 12; -			transpose_sid(&sid_buf, label, 64, 16); -			if (sid_exist(bgp, &sid_buf)) +			transpose_sid(sid, label, offset, len); +			if (sid_exist(bgp, sid))  				return false;  			alloced = true;  			break; @@ -545,8 +554,8 @@ static uint32_t alloc_new_sid(struct bgp *bgp, uint32_t index,  		for (size_t i = 1; i < 255; i++) {  			label = i << 12; -			transpose_sid(&sid_buf, label, 64, 16); -			if (sid_exist(bgp, &sid_buf)) +			transpose_sid(sid, label, offset, len); +			if (sid_exist(bgp, sid))  				continue;  			alloced = true;  			break; @@ -556,7 +565,7 @@ static uint32_t alloc_new_sid(struct bgp *bgp, uint32_t index,  	if (!alloced)  		return 0; -	sid_register(bgp, &sid_buf, bgp->srv6_locator_name); +	sid_register(bgp, sid, bgp->srv6_locator_name);  	return label;  } @@ -600,20 +609,19 @@ void ensure_vrf_tovpn_sid(struct bgp *bgp_vpn, struct bgp *bgp_vrf, afi_t afi)  	tovpn_sid_locator =  		XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); -	tovpn_sid_transpose_label = -		alloc_new_sid(bgp_vpn, tovpn_sid_index, tovpn_sid_locator); +	tovpn_sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); + +	tovpn_sid_transpose_label = alloc_new_sid(bgp_vpn, tovpn_sid_index, +						  tovpn_sid_locator, tovpn_sid); +  	if (tovpn_sid_transpose_label == 0) {  		zlog_debug("%s: not allocated new sid for vrf %s: afi %s",  			   __func__, bgp_vrf->name_pretty, afi2str(afi)); +		XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid_locator); +		XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid);  		return;  	} -	tovpn_sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); -	*tovpn_sid = *tovpn_sid_locator; -	transpose_sid(tovpn_sid, tovpn_sid_transpose_label, -		      BGP_PREFIX_SID_SRV6_TRANSPOSITION_OFFSET, -		      BGP_PREFIX_SID_SRV6_TRANSPOSITION_LENGTH); -  	if (debug) {  		inet_ntop(AF_INET6, tovpn_sid, buf, sizeof(buf));  		zlog_debug("%s: new sid %s allocated for vrf %s: afi %s", @@ -627,13 +635,29 @@ void ensure_vrf_tovpn_sid(struct bgp *bgp_vpn, struct bgp *bgp_vrf, afi_t afi)  		tovpn_sid_transpose_label;  } +/* + * This function shifts "label" 4 bits to the right and + * embeds it by length "len", starting at offset "offset" + * as seen from the MSB (Most Significant Bit) of "sid". + * + * e.g. if "label" is 0x1000 and "len" is 16, "label" is + * embedded in "sid" as follows: + * + *                 <----   len  -----> + *         label:  0000 0001 0000 0000 0000 + *         sid:    .... 
0000 0001 0000 0000 + *                      <----   len  -----> + *                    ^ + *                    | + *                 offset from MSB + */  void transpose_sid(struct in6_addr *sid, uint32_t label, uint8_t offset,  		   uint8_t len)  {  	for (uint8_t idx = 0; idx < len; idx++) {  		uint8_t tidx = offset + idx;  		sid->s6_addr[tidx / 8] &= ~(0x1 << (7 - tidx % 8)); -		if (label >> (19 - idx) & 0x1) +		if (label >> (len + 3 - idx) & 0x1)  			sid->s6_addr[tidx / 8] |= 0x1 << (7 - tidx % 8);  	}  } @@ -1147,7 +1171,6 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn,	    /* to */  				.rtlist[BGP_VPN_POLICY_DIR_TOVPN]);  	}  	bgp_attr_set_ecommunity(&static_attr, new_ecom); -	SET_FLAG(static_attr.flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES));  	if (debug && bgp_attr_get_ecommunity(&static_attr)) {  		char *s = ecommunity_ecom2str( @@ -1511,8 +1534,6 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf,	    /* to */  		bgp_attr_set_ecommunity(&static_attr, new_ecom);  		if (new_ecom->size == 0) { -			UNSET_FLAG(static_attr.flag, -				   ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES));  			ecommunity_free(&new_ecom);  			bgp_attr_set_ecommunity(&static_attr, NULL);  		} @@ -1924,7 +1945,7 @@ void vpn_handle_router_id_update(struct bgp *bgp, bool withdraw,  	struct bgp *bgp_import;  	struct listnode *node;  	struct ecommunity *ecom; -	vpn_policy_direction_t idir, edir; +	enum vpn_policy_direction idir, edir;  	/*  	 * Router-id change that is not explicitly configured @@ -2037,7 +2058,7 @@ void vrf_import_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,  			 afi_t afi, safi_t safi)  {  	const char *export_name; -	vpn_policy_direction_t idir, edir; +	enum vpn_policy_direction idir, edir;  	char *vname, *tmp_name;  	char buf[RD_ADDRSTRLEN];  	struct ecommunity *ecom; @@ -2166,7 +2187,7 @@ void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,  			   afi_t afi, safi_t safi)  {  	const char *export_name, *tmp_name; -	vpn_policy_direction_t idir, edir; +	enum vpn_policy_direction idir, edir;  	char *vname;  	struct ecommunity *ecom = NULL;  	struct listnode *node; @@ -3089,7 +3110,7 @@ void bgp_vpn_leak_export(struct bgp *from_bgp)  	char *vname;  	struct listnode *node, *next;  	struct ecommunity *ecom; -	vpn_policy_direction_t idir, edir; +	enum vpn_policy_direction idir, edir;  	safi_t safi = SAFI_UNICAST;  	struct bgp *to_bgp;  	int debug; diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h index 5bf772fefe..8c2eae279c 100644 --- a/bgpd/bgp_mplsvpn.h +++ b/bgpd/bgp_mplsvpn.h @@ -205,7 +205,7 @@ static inline int vpn_leak_from_vpn_active(struct bgp *bgp_vrf, afi_t afi,  	return 1;  } -static inline void vpn_leak_prechange(vpn_policy_direction_t direction, +static inline void vpn_leak_prechange(enum vpn_policy_direction direction,  				      afi_t afi, struct bgp *bgp_vpn,  				      struct bgp *bgp_vrf)  { @@ -225,7 +225,7 @@ static inline void vpn_leak_prechange(vpn_policy_direction_t direction,  	}  } -static inline void vpn_leak_postchange(vpn_policy_direction_t direction, +static inline void vpn_leak_postchange(enum vpn_policy_direction direction,  				       afi_t afi, struct bgp *bgp_vpn,  				       struct bgp *bgp_vrf)  { diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c index d3ebc0e6a2..3433e1471c 100644 --- a/bgpd/bgp_nht.c +++ b/bgpd/bgp_nht.c @@ -323,7 +323,7 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,  			pi->extra->igpmetric = 0;  	} else if (peer) {  		/* -		 * Let's not accidently save the peer data for a peer +		 * Let's 
not accidentally save the peer data for a peer  		 * we are going to throw away in a second or so.  		 * When we come back around we'll fix up this  		 * data properly in replace_nexthop_by_peer @@ -390,7 +390,8 @@ void bgp_delete_connected_nexthop(afi_t afi, struct peer *peer)  }  static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc, -				       struct zapi_route *nhr) +				       struct zapi_route *nhr, +				       bool import_check)  {  	struct nexthop *nexthop;  	struct nexthop *oldnh; @@ -421,7 +422,21 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,  	if (nhr->nexthop_num != bnc->nexthop_num)  		bnc->change_flags |= BGP_NEXTHOP_CHANGED; -	if (nhr->nexthop_num) { +	if (import_check && (nhr->type == ZEBRA_ROUTE_BGP || +			     !prefix_same(&bnc->prefix, &nhr->prefix))) { +		SET_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED); +		UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID); +		UNSET_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID); +		UNSET_FLAG(bnc->flags, BGP_NEXTHOP_EVPN_INCOMPLETE); + +		bnc_nexthop_free(bnc); +		bnc->nexthop = NULL; + +		if (BGP_DEBUG(nht, NHT)) +			zlog_debug( +				"%s: Import Check does not resolve to the same prefix for %pFX received %pFX or matching route is BGP", +				__func__, &bnc->prefix, &nhr->prefix); +	} else if (nhr->nexthop_num) {  		struct peer *peer = bnc->nht_info;  		/* notify bgp fsm if nbr ip goes from invalid->valid */ @@ -589,6 +604,10 @@ static void bgp_nht_ifp_handle(struct interface *ifp, bool up)  	if (!bgp)  		return; +	bgp_nht_ifp_table_handle(bgp, &bgp->nexthop_cache_table[AFI_IP], ifp, +				 up); +	bgp_nht_ifp_table_handle(bgp, &bgp->import_check_table[AFI_IP], ifp, +				 up);  	bgp_nht_ifp_table_handle(bgp, &bgp->nexthop_cache_table[AFI_IP6], ifp,  				 up);  	bgp_nht_ifp_table_handle(bgp, &bgp->import_check_table[AFI_IP6], ifp, @@ -691,7 +710,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)  				"parse nexthop update(%pFX(%u)(%s)): bnc info not found for nexthop cache",  				&nhr.prefix, nhr.srte_color, bgp->name_pretty);  	} else -		bgp_process_nexthop_update(bnc_nhc, &nhr); +		bgp_process_nexthop_update(bnc_nhc, &nhr, false);  	tree = &bgp->import_check_table[afi]; @@ -702,17 +721,8 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)  				"parse nexthop update(%pFX(%u)(%s)): bnc info not found for import check",  				&nhr.prefix, nhr.srte_color, bgp->name_pretty);  		return; -	} else { -		if (nhr.type == ZEBRA_ROUTE_BGP -		    || !prefix_same(&bnc_import->prefix, &nhr.prefix)) { -			if (BGP_DEBUG(nht, NHT)) -				zlog_debug( -					"%s: Import Check does not resolve to the same prefix for %pFX received %pFX", -					__func__, &bnc_import->prefix, &nhr.prefix); -			return; -		} -		bgp_process_nexthop_update(bnc_import, &nhr);  	} +	bgp_process_nexthop_update(bnc_import, &nhr, true);  	/*  	 * HACK: if any BGP route is dependant on an SR-policy that doesn't @@ -735,7 +745,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)  			    || CHECK_FLAG(bnc_iter->flags, BGP_NEXTHOP_VALID))  				continue; -			bgp_process_nexthop_update(bnc_iter, &nhr); +			bgp_process_nexthop_update(bnc_iter, &nhr, false);  		}  	}  } diff --git a/bgpd/bgp_open.c b/bgpd/bgp_open.c index 6bdefd0e9b..c2562c75d3 100644 --- a/bgpd/bgp_open.c +++ b/bgpd/bgp_open.c @@ -1324,8 +1324,8 @@ static void bgp_open_capability_orf(struct stream *s, struct peer *peer,  	unsigned long orfp;  	unsigned long numberp;  	int number_of_orfs = 0; -	iana_afi_t pkt_afi; -	iana_safi_t pkt_safi; +	iana_afi_t pkt_afi = 
IANA_AFI_IPV4; +	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;  	/* Convert AFI, SAFI to values for packet. */  	bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi, &pkt_safi); @@ -1389,10 +1389,10 @@ static void bgp_peer_send_gr_capability(struct stream *s, struct peer *peer,  					bool ext_opt_params)  {  	int len; -	iana_afi_t pkt_afi; +	iana_afi_t pkt_afi = IANA_AFI_IPV4;  	afi_t afi;  	safi_t safi; -	iana_safi_t pkt_safi; +	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;  	uint32_t restart_time;  	unsigned long capp = 0;  	unsigned long rcapp = 0; @@ -1472,10 +1472,10 @@ static void bgp_peer_send_llgr_capability(struct stream *s, struct peer *peer,  					  bool ext_opt_params)  {  	int len; -	iana_afi_t pkt_afi; +	iana_afi_t pkt_afi = IANA_AFI_IPV4;  	afi_t afi;  	safi_t safi; -	iana_safi_t pkt_safi; +	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;  	unsigned long capp = 0;  	unsigned long rcapp = 0; @@ -1523,10 +1523,10 @@ uint16_t bgp_open_capability(struct stream *s, struct peer *peer,  {  	uint16_t len;  	unsigned long cp, capp, rcapp, eopl = 0; -	iana_afi_t pkt_afi; +	iana_afi_t pkt_afi = IANA_AFI_IPV4;  	afi_t afi;  	safi_t safi; -	iana_safi_t pkt_safi; +	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;  	as_t local_as;  	uint8_t afi_safi_count = 0;  	int adv_addpath_tx = 0; diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index 09db041780..5ed888f486 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -131,8 +131,8 @@ static struct stream *bgp_update_packet_eor(struct peer *peer, afi_t afi,  					    safi_t safi)  {  	struct stream *s; -	iana_afi_t pkt_afi; -	iana_safi_t pkt_safi; +	iana_afi_t pkt_afi = IANA_AFI_IPV4; +	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;  	if (DISABLE_BGP_ANNOUNCE)  		return NULL; @@ -483,8 +483,8 @@ void bgp_generate_updgrp_packets(struct thread *thread)  						if (bgp_debug_neighbor_events(  							    peer))  							zlog_debug( -								"%s sending route-refresh (EoRR) for %s/%s", -								peer->host, +								"%pBP sending route-refresh (EoRR) for %s/%s", +								peer,  								afi2str(afi),  								safi2str(safi));  					} @@ -869,8 +869,8 @@ void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi,  	struct stream *s;  	struct bgp_filter *filter;  	int orf_refresh = 0; -	iana_afi_t pkt_afi; -	iana_safi_t pkt_safi; +	iana_afi_t pkt_afi = IANA_AFI_IPV4; +	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;  	if (DISABLE_BGP_ANNOUNCE)  		return; @@ -913,9 +913,10 @@ void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi,  				stream_putc(s, ORF_COMMON_PART_REMOVE_ALL);  				if (bgp_debug_neighbor_events(peer))  					zlog_debug( -						"%s sending REFRESH_REQ to remove ORF(%d) (%s) for afi/safi: %s/%s", -						peer->host, orf_type, -						(when_to_refresh == REFRESH_DEFER +						"%pBP sending REFRESH_REQ to remove ORF(%d) (%s) for afi/safi: %s/%s", +						peer, orf_type, +						(when_to_refresh == +								 REFRESH_DEFER  							 ? "defer"  							 : "immediate"),  						iana_afi2str(pkt_afi), @@ -930,9 +931,10 @@ void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi,  					ORF_COMMON_PART_DENY);  				if (bgp_debug_neighbor_events(peer))  					zlog_debug( -						"%s sending REFRESH_REQ with pfxlist ORF(%d) (%s) for afi/safi: %s/%s", -						peer->host, orf_type, -						(when_to_refresh == REFRESH_DEFER +						"%pBP sending REFRESH_REQ with pfxlist ORF(%d) (%s) for afi/safi: %s/%s", +						peer, orf_type, +						(when_to_refresh == +								 REFRESH_DEFER  							 ? 
"defer"  							 : "immediate"),  						iana_afi2str(pkt_afi), @@ -949,9 +951,10 @@ void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi,  	if (bgp_debug_neighbor_events(peer)) {  		if (!orf_refresh) -			zlog_debug("%s sending REFRESH_REQ for afi/safi: %s/%s", -				   peer->host, iana_afi2str(pkt_afi), -				   iana_safi2str(pkt_safi)); +			zlog_debug( +				"%pBP sending REFRESH_REQ for afi/safi: %s/%s", +				peer, iana_afi2str(pkt_afi), +				iana_safi2str(pkt_safi));  	}  	/* Add packet to the peer. */ @@ -973,8 +976,8 @@ void bgp_capability_send(struct peer *peer, afi_t afi, safi_t safi,  			 int capability_code, int action)  {  	struct stream *s; -	iana_afi_t pkt_afi; -	iana_safi_t pkt_safi; +	iana_afi_t pkt_afi = IANA_AFI_IPV4; +	iana_safi_t pkt_safi = IANA_SAFI_UNICAST;  	/* Convert AFI, SAFI to values for packet. */  	bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi, &pkt_safi); @@ -995,8 +998,8 @@ void bgp_capability_send(struct peer *peer, afi_t afi, safi_t safi,  		if (bgp_debug_neighbor_events(peer))  			zlog_debug( -				"%s sending CAPABILITY has %s MP_EXT CAP for afi/safi: %s/%s", -				peer->host, +				"%pBP sending CAPABILITY has %s MP_EXT CAP for afi/safi: %s/%s", +				peer,  				action == CAPABILITY_ACTION_SET ? "Advertising"  								: "Removing",  				iana_afi2str(pkt_afi), iana_safi2str(pkt_safi)); @@ -1551,8 +1554,9 @@ static void bgp_refresh_stalepath_timer_expire(struct thread *thread)  		bgp_clear_stale_route(peer, afi, safi);  	if (bgp_debug_neighbor_events(peer)) -		zlog_debug("%s: route-refresh (BoRR) timer for %s/%s expired", -			   peer->host, afi2str(afi), safi2str(safi)); +		zlog_debug( +			"%pBP route-refresh (BoRR) timer expired for afi/safi: %d/%d", +			peer, afi, safi);  	bgp_timer_set(peer);  } @@ -1675,7 +1679,7 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)  	 *  	 * Complicates the flow a little though..  	 */ -	bgp_attr_parse_ret_t attr_parse_ret = BGP_ATTR_PARSE_PROCEED; +	enum bgp_attr_parse_ret attr_parse_ret = BGP_ATTR_PARSE_PROCEED;  /* This define morphs the update case into a withdraw when lower levels   * have signalled an error condition where this is best.   */ @@ -1704,11 +1708,11 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)  		if (attr_parse_ret == BGP_ATTR_PARSE_WITHDRAW)  			flog_err(  				EC_BGP_UPDATE_RCV, -				"%s rcvd UPDATE with errors in attr(s)!! Withdrawing route.", -				peer->host); +				"%pBP rcvd UPDATE with errors in attr(s)!! 
Withdrawing route.", +				peer);  		if (ret && bgp_debug_update(peer, NULL, NULL, 1)) { -			zlog_debug("%s rcvd UPDATE w/ attr: %s", peer->host, +			zlog_debug("%pBP rcvd UPDATE w/ attr: %s", peer,  				   peer->rcvd_attr_str);  			peer->rcvd_attr_printed = 1;  		} @@ -1738,8 +1742,8 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)  	}  	if (BGP_DEBUG(update, UPDATE_IN)) -		zlog_debug("%s rcvd UPDATE wlen %d attrlen %d alen %d", -			   peer->host, withdraw_len, attribute_len, update_len); +		zlog_debug("%pBP rcvd UPDATE wlen %d attrlen %d alen %d", peer, +			   withdraw_len, attribute_len, update_len);  	/* Parse any given NLRIs */  	for (int i = NLRI_UPDATE; i < NLRI_TYPE_MAX; i++) { @@ -2092,8 +2096,8 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)  				if (bgp_debug_neighbor_events(peer)) {  					zlog_debug( -						"%s rcvd Prefixlist ORF(%d) length %d", -						peer->host, orf_type, orf_len); +						"%pBP rcvd Prefixlist ORF(%d) length %d", +						peer, orf_type, orf_len);  				}  				/* we're going to read at least 1 byte of common @@ -2125,8 +2129,8 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)  						if (bgp_debug_neighbor_events(  							    peer))  							zlog_debug( -								"%s rcvd Remove-All pfxlist ORF request", -								peer->host); +								"%pBP rcvd Remove-All pfxlist ORF request", +								peer);  						prefix_bgp_orf_remove_all(afi,  									  name);  						break; @@ -2177,8 +2181,8 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)  						char buf[INET6_BUFSIZ];  						zlog_debug( -							"%s rcvd %s %s seq %u %s/%d ge %d le %d%s", -							peer->host, +							"%pBP rcvd %s %s seq %u %s/%d ge %d le %d%s", +							peer,  							(common & ORF_COMMON_PART_REMOVE  								 ? "Remove"  								 : "Add"), @@ -2208,8 +2212,8 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)  					if (!ok || (ok && ret != CMD_SUCCESS)) {  						zlog_info( -							"%s Received misformatted prefixlist ORF. Remove All pfxlist", -							peer->host); +							"%pBP Received misformatted prefixlist ORF. Remove All pfxlist", +							peer);  						prefix_bgp_orf_remove_all(afi,  									  name);  						break; @@ -2222,7 +2226,7 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)  			stream_forward_getp(s, orf_len);  		}  		if (bgp_debug_neighbor_events(peer)) -			zlog_debug("%s rcvd Refresh %s ORF request", peer->host, +			zlog_debug("%pBP rcvd Refresh %s ORF request", peer,  				   when_to_refresh == REFRESH_DEFER  					   ? 
"Defer"  					   : "Immediate"); @@ -2273,18 +2277,16 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)  				   PEER_STATUS_EOR_RECEIVED)) {  			if (bgp_debug_neighbor_events(peer))  				zlog_debug( -					"%s rcvd route-refresh (BoRR) for %s/%s before EoR", -					peer->host, afi2str(afi), -					safi2str(safi)); +					"%pBP rcvd route-refresh (BoRR) for %s/%s before EoR", +					peer, afi2str(afi), safi2str(safi));  			return BGP_PACKET_NOOP;  		}  		if (peer->t_refresh_stalepath) {  			if (bgp_debug_neighbor_events(peer))  				zlog_debug( -					"%s rcvd route-refresh (BoRR) for %s/%s, whereas BoRR already received", -					peer->host, afi2str(afi), -					safi2str(safi)); +					"%pBP rcvd route-refresh (BoRR) for %s/%s, whereas BoRR already received", +					peer, afi2str(afi), safi2str(safi));  			return BGP_PACKET_NOOP;  		} @@ -2312,14 +2314,14 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)  		if (bgp_debug_neighbor_events(peer))  			zlog_debug( -				"%s rcvd route-refresh (BoRR) for %s/%s, triggering timer for %u seconds", -				peer->host, afi2str(afi), safi2str(safi), +				"%pBP rcvd route-refresh (BoRR) for %s/%s, triggering timer for %u seconds", +				peer, afi2str(afi), safi2str(safi),  				peer->bgp->stalepath_time);  	} else if (subtype == BGP_ROUTE_REFRESH_EORR) {  		if (!peer->t_refresh_stalepath) {  			zlog_err( -				"%s rcvd route-refresh (EoRR) for %s/%s, whereas no BoRR received", -				peer->host, afi2str(afi), safi2str(safi)); +				"%pBP rcvd route-refresh (EoRR) for %s/%s, whereas no BoRR received", +				peer, afi2str(afi), safi2str(safi));  			return BGP_PACKET_NOOP;  		} @@ -2331,15 +2333,16 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)  		if (bgp_debug_neighbor_events(peer))  			zlog_debug( -				"%s rcvd route-refresh (EoRR) for %s/%s, stopping BoRR timer", -				peer->host, afi2str(afi), safi2str(safi)); +				"%pBP rcvd route-refresh (EoRR) for %s/%s, stopping BoRR timer", +				peer, afi2str(afi), safi2str(safi));  		if (peer->nsf[afi][safi])  			bgp_clear_stale_route(peer, afi, safi);  	} else {  		if (bgp_debug_neighbor_events(peer)) -			zlog_debug("%s rcvd route-refresh (REQUEST) for %s/%s", -				   peer->host, afi2str(afi), safi2str(safi)); +			zlog_debug( +				"%pBP rcvd route-refresh (REQUEST) for %s/%s", +				peer, afi2str(afi), safi2str(safi));  		/* In response to a "normal route refresh request" from the  		 * peer, the speaker MUST send a BoRR message. @@ -2354,8 +2357,8 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)  					PEER_STATUS_EOR_SEND)) {  				if (bgp_debug_neighbor_events(peer))  					zlog_debug( -						"%s rcvd route-refresh (REQUEST) for %s/%s before EoR", -						peer->host, afi2str(afi), +						"%pBP rcvd route-refresh (REQUEST) for %s/%s before EoR", +						peer, afi2str(afi),  						safi2str(safi));  				return BGP_PACKET_NOOP;  			} @@ -2365,9 +2368,8 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)  			if (bgp_debug_neighbor_events(peer))  				zlog_debug( -					"%s sending route-refresh (BoRR) for %s/%s", -					peer->host, afi2str(afi), -					safi2str(safi)); +					"%pBP sending route-refresh (BoRR) for %s/%s", +					peer, afi2str(afi), safi2str(safi));  			/* Set flag Ready-To-Send to know when we can send EoRR  			 * message. 
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 5255eb5800..2544ea5208 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -565,8 +565,8 @@ static int bgp_path_info_cmp(struct bgp *bgp, struct bgp_path_info *new,  {  	const struct prefix *new_p;  	struct attr *newattr, *existattr; -	bgp_peer_sort_t new_sort; -	bgp_peer_sort_t exist_sort; +	enum bgp_peer_sort new_sort; +	enum bgp_peer_sort exist_sort;  	uint32_t new_pref;  	uint32_t exist_pref;  	uint32_t new_med; @@ -1253,10 +1253,10 @@ static int bgp_path_info_cmp(struct bgp *bgp, struct bgp_path_info *new,  		}  	} -	/* 13. Router-ID comparision. */ +	/* 13. Router-ID comparison. */  	/* If one of the paths is "stale", the corresponding peer router-id will  	 * be 0 and would always win over the other path. If originator id is -	 * used for the comparision, it will decide which path is better. +	 * used for the comparison, it will decide which path is better.  	 */  	if (newattr->flag & ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID))  		new_id.s_addr = newattr->originator_id.s_addr; @@ -1285,7 +1285,7 @@ static int bgp_path_info_cmp(struct bgp *bgp, struct bgp_path_info *new,  		return 0;  	} -	/* 14. Cluster length comparision. */ +	/* 14. Cluster length comparison. */  	new_cluster = BGP_CLUSTER_LIST_LENGTH(new->attr);  	exist_cluster = BGP_CLUSTER_LIST_LENGTH(exist->attr); @@ -1309,7 +1309,7 @@ static int bgp_path_info_cmp(struct bgp *bgp, struct bgp_path_info *new,  		return 0;  	} -	/* 15. Neighbor address comparision. */ +	/* 15. Neighbor address comparison. */  	/* Do this only if neither path is "stale" as stale paths do not have  	 * valid peer information (as the connection may or may not be up).  	 */ @@ -1598,7 +1598,7 @@ static int bgp_input_modifier(struct peer *peer, const struct prefix *p,  	/* Route map apply. */  	if (rmap) {  		memset(&rmap_path, 0, sizeof(struct bgp_path_info)); -		/* Duplicate current value to new strucutre for modification. */ +		/* Duplicate current value to new structure for modification. */  		rmap_path.peer = peer;  		rmap_path.attr = attr;  		rmap_path.extra = &extra; @@ -1655,7 +1655,7 @@ static int bgp_output_modifier(struct peer *peer, const struct prefix *p,  	memset(&rmap_path, 0, sizeof(struct bgp_path_info));  	/* Route map apply. */ -	/* Duplicate current value to new strucutre for modification. */ +	/* Duplicate current value to new structure for modification. 
*/  	rmap_path.peer = peer;  	rmap_path.attr = attr; @@ -1769,7 +1769,6 @@ void bgp_attr_add_llgr_community(struct attr *attr)  	community_free(&llgr);  	bgp_attr_set_community(attr, new); -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);  }  void bgp_attr_add_gshut_community(struct attr *attr) @@ -1798,7 +1797,6 @@ void bgp_attr_add_gshut_community(struct attr *attr)  	community_free(&gshut);  	bgp_attr_set_community(attr, new); -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);  	/* When we add the graceful-shutdown community we must also  	 * lower the local-preference */ @@ -2207,6 +2205,7 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,  			ret = route_map_apply(ROUTE_MAP_OUT(filter), p,  					      &rmap_path); +		bgp_attr_flush(&dummy_attr);  		peer->rmap_type = 0;  		if (ret == RMAP_DENYMATCH) { @@ -2216,7 +2215,6 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,  					peer->host, p,  					ROUTE_MAP_OUT_NAME(filter)); -			bgp_attr_flush(&dummy_attr);  			return false;  		}  	} @@ -3173,7 +3171,8 @@ int bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)  	/* Process the route list */  	for (dest = bgp_table_top(bgp->rib[afi][safi]); -	     dest && bgp->gr_info[afi][safi].gr_deferred != 0; +	     dest && bgp->gr_info[afi][safi].gr_deferred != 0 && +	     cnt < BGP_MAX_BEST_ROUTE_SELECT;  	     dest = bgp_route_next(dest)) {  		if (!CHECK_FLAG(dest->flags, BGP_NODE_SELECT_DEFER))  			continue; @@ -3182,10 +3181,13 @@ int bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)  		bgp->gr_info[afi][safi].gr_deferred--;  		bgp_process_main_one(bgp, dest, afi, safi);  		cnt++; -		if (cnt >= BGP_MAX_BEST_ROUTE_SELECT) { -			bgp_dest_unlock_node(dest); -			break; -		} +	} +	/* If iteration stopped before the entire table was traversed then the +	 * node needs to be unlocked. +	 */ +	if (dest) { +		bgp_dest_unlock_node(dest); +		dest = NULL;  	}  	/* Send EOR message when all routes are processed */ @@ -3433,8 +3435,8 @@ bool bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi,  			return false;  		zlog_info( -			"%%MAXPFXEXCEED: No. of %s prefix received from %s %u exceed, limit %u", -			get_afi_safi_str(afi, safi, false), peer->host, pcount, +			"%%MAXPFXEXCEED: No. of %s prefix received from %pBP %u exceed, limit %u", +			get_afi_safi_str(afi, safi, false), peer, pcount,  			peer->pmax[afi][safi]);  		SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_PREFIX_LIMIT); @@ -3473,8 +3475,8 @@ bool bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi,  			if (bgp_debug_neighbor_events(peer))  				zlog_debug( -					"%s Maximum-prefix restart timer started for %d secs", -					peer->host, peer->v_pmax_restart); +					"%pBP Maximum-prefix restart timer started for %d secs", +					peer, peer->v_pmax_restart);  			BGP_TIMER_ON(peer->t_pmax_restart,  				     bgp_maximum_prefix_restart_timer, @@ -3494,8 +3496,8 @@ bool bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi,  			return false;  		zlog_info( -			"%%MAXPFX: No. of %s prefix received from %s reaches %u, max %u", -			get_afi_safi_str(afi, safi, false), peer->host, pcount, +			"%%MAXPFX: No. 
of %s prefix received from %pBP reaches %u, max %u", +			get_afi_safi_str(afi, safi, false), peer, pcount,  			peer->pmax[afi][safi]);  		SET_FLAG(peer->af_sflags[afi][safi],  			 PEER_STATUS_PREFIX_THRESHOLD); @@ -3711,7 +3713,6 @@ static void bgp_attr_add_no_export_community(struct attr *attr)  	community_free(&no_export);  	bgp_attr_set_community(attr, new); -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);  }  int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, @@ -4008,7 +4009,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,  						num_labels, addpath_id ? 1 : 0,  						addpath_id, evpn, pfx_buf,  						sizeof(pfx_buf)); -					zlog_debug("%s rcvd %s", peer->host, +					zlog_debug("%pBP rcvd %s", peer,  						   pfx_buf);  				} @@ -4023,8 +4024,8 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,  				if (bgp_debug_update(peer, p, NULL, 1)) {  					if (!peer->rcvd_attr_printed) {  						zlog_debug( -							"%s rcvd UPDATE w/ attr: %s", -							peer->host, +							"%pBP rcvd UPDATE w/ attr: %s", +							peer,  							peer->rcvd_attr_str);  						peer->rcvd_attr_printed = 1;  					} @@ -4035,8 +4036,8 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,  						addpath_id, evpn, pfx_buf,  						sizeof(pfx_buf));  					zlog_debug( -						"%s rcvd %s...duplicate ignored", -						peer->host, pfx_buf); +						"%pBP rcvd %s...duplicate ignored", +						peer, pfx_buf);  				}  				/* graceful restart STALE flag unset. */ @@ -4062,8 +4063,8 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,  					addpath_id ? 1 : 0, addpath_id, evpn,  					pfx_buf, sizeof(pfx_buf));  				zlog_debug( -					"%s rcvd %s, flapped quicker than processing", -					peer->host, pfx_buf); +					"%pBP rcvd %s, flapped quicker than processing", +					peer, pfx_buf);  			}  			bgp_path_info_restore(dest, pi); @@ -4083,7 +4084,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,  						num_labels, addpath_id ? 1 : 0,  						addpath_id, evpn, pfx_buf,  						sizeof(pfx_buf)); -			zlog_debug("%s rcvd %s", peer->host, pfx_buf); +			zlog_debug("%pBP rcvd %s", peer, pfx_buf);  		}  		/* graceful restart STALE flag unset. */ @@ -4380,7 +4381,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,  	/* Received Logging. */  	if (bgp_debug_update(peer, p, NULL, 1)) {  		if (!peer->rcvd_attr_printed) { -			zlog_debug("%s rcvd UPDATE w/ attr: %s", peer->host, +			zlog_debug("%pBP rcvd UPDATE w/ attr: %s", peer,  				   peer->rcvd_attr_str);  			peer->rcvd_attr_printed = 1;  		} @@ -4388,7 +4389,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,  		bgp_debug_rdpfxpath2str(afi, safi, prd, p, label, num_labels,  					addpath_id ? 1 : 0, addpath_id, evpn,  					pfx_buf, sizeof(pfx_buf)); -		zlog_debug("%s rcvd %s", peer->host, pfx_buf); +		zlog_debug("%pBP rcvd %s", peer, pfx_buf);  	}  	/* Make new BGP info. */ @@ -4532,7 +4533,7 @@ filtered:  	if (bgp_debug_update(peer, p, NULL, 1)) {  		if (!peer->rcvd_attr_printed) { -			zlog_debug("%s rcvd UPDATE w/ attr: %s", peer->host, +			zlog_debug("%pBP rcvd UPDATE w/ attr: %s", peer,  				   peer->rcvd_attr_str);  			peer->rcvd_attr_printed = 1;  		} @@ -4540,8 +4541,8 @@ filtered:  		bgp_debug_rdpfxpath2str(afi, safi, prd, p, label, num_labels,  					addpath_id ? 
1 : 0, addpath_id, evpn,  					pfx_buf, sizeof(pfx_buf)); -		zlog_debug("%s rcvd UPDATE about %s -- DENIED due to: %s", -			   peer->host, pfx_buf, reason); +		zlog_debug("%pBP rcvd UPDATE about %s -- DENIED due to: %s", +			   peer, pfx_buf, reason);  	}  	if (pi) { @@ -4645,7 +4646,7 @@ int bgp_withdraw(struct peer *peer, const struct prefix *p, uint32_t addpath_id,  		bgp_debug_rdpfxpath2str(afi, safi, prd, p, label, num_labels,  					addpath_id ? 1 : 0, addpath_id, NULL,  					pfx_buf, sizeof(pfx_buf)); -		zlog_debug("%s rcvd UPDATE about %s -- withdrawn", peer->host, +		zlog_debug("%pBP rcvd UPDATE about %s -- withdrawn", peer,  			   pfx_buf);  	} @@ -8189,14 +8190,16 @@ DEFPY(aggregate_addressv4, aggregate_addressv4_cmd,        "[no] aggregate-address <A.B.C.D/M$prefix|A.B.C.D$addr A.B.C.D$mask> [{"        "as-set$as_set_s"        "|summary-only$summary_only" -      "|route-map WORD$rmap_name" +      "|route-map RMAP_NAME$rmap_name"        "|origin <egp|igp|incomplete>$origin_s"        "|matching-MED-only$match_med" -      "|suppress-map WORD$suppress_map" +      "|suppress-map RMAP_NAME$suppress_map"        "}]",        NO_STR        "Configure BGP aggregate entries\n" -      "Aggregate prefix\n" "Aggregate address\n" "Aggregate mask\n" +      "Aggregate prefix\n" +      "Aggregate address\n" +      "Aggregate mask\n"        "Generate AS set path information\n"        "Filter more specific routes from updates\n"        "Apply route map to aggregate network\n" @@ -8251,10 +8254,10 @@ DEFPY(aggregate_addressv6, aggregate_addressv6_cmd,        "[no] aggregate-address X:X::X:X/M$prefix [{"        "as-set$as_set_s"        "|summary-only$summary_only" -      "|route-map WORD$rmap_name" +      "|route-map RMAP_NAME$rmap_name"        "|origin <egp|igp|incomplete>$origin_s"        "|matching-MED-only$match_med" -      "|suppress-map WORD$suppress_map" +      "|suppress-map RMAP_NAME$suppress_map"        "}]",        NO_STR        "Configure BGP aggregate entries\n" @@ -9464,7 +9467,6 @@ void route_vty_out_overlay(struct vty *vty, const struct prefix *p,  			   json_object *json_paths)  {  	struct attr *attr; -	char buf[BUFSIZ] = {0};  	json_object *json_path = NULL;  	json_object *json_nexthop = NULL;  	json_object *json_overlay = NULL; @@ -9489,16 +9491,15 @@ void route_vty_out_overlay(struct vty *vty, const struct prefix *p,  	/* Print attribute */  	attr = path->attr; -	char buf1[BUFSIZ];  	int af = NEXTHOP_FAMILY(attr->mp_nexthop_len);  	switch (af) {  	case AF_INET: -		inet_ntop(af, &attr->mp_nexthop_global_in, buf, BUFSIZ);  		if (!json_path) { -			vty_out(vty, "%-16s", buf); +			vty_out(vty, "%-16pI4", &attr->mp_nexthop_global_in);  		} else { -			json_object_string_add(json_nexthop, "ip", buf); +			json_object_string_addf(json_nexthop, "ip", "%pI4", +						&attr->mp_nexthop_global_in);  			json_object_string_add(json_nexthop, "afi", "ipv4"); @@ -9507,15 +9508,17 @@ void route_vty_out_overlay(struct vty *vty, const struct prefix *p,  		}  		break;  	case AF_INET6: -		inet_ntop(af, &attr->mp_nexthop_global, buf, BUFSIZ); -		inet_ntop(af, &attr->mp_nexthop_local, buf1, BUFSIZ);  		if (!json_path) { -			vty_out(vty, "%s(%s)", buf, buf1); +			vty_out(vty, "%pI6(%pI6)", &attr->mp_nexthop_global, +				&attr->mp_nexthop_local);  		} else { -			json_object_string_add(json_nexthop, "ipv6Global", buf); +			json_object_string_addf(json_nexthop, "ipv6Global", +						"%pI6", +						&attr->mp_nexthop_global); -			json_object_string_add(json_nexthop, "ipv6LinkLocal", -					       buf1); +			
json_object_string_addf(json_nexthop, "ipv6LinkLocal", +						"%pI6", +						&attr->mp_nexthop_local);  			json_object_string_add(json_nexthop, "afi", "ipv6"); @@ -9536,12 +9539,10 @@ void route_vty_out_overlay(struct vty *vty, const struct prefix *p,  	const struct bgp_route_evpn *eo = bgp_attr_get_evpn_overlay(attr); -	ipaddr2str(&eo->gw_ip, buf, BUFSIZ); -  	if (!json_path) -		vty_out(vty, "/%s", buf); +		vty_out(vty, "/%pIA", &eo->gw_ip);  	else -		json_object_string_add(json_overlay, "gw", buf); +		json_object_string_addf(json_overlay, "gw", "%pIA", &eo->gw_ip);  	if (bgp_attr_get_ecommunity(attr)) {  		char *mac = NULL; @@ -10506,7 +10507,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,  		if (json_paths) {  			if (!bgp_attr_get_community(attr)->json)  				community_str(bgp_attr_get_community(attr), -					      true); +					      true, true);  			json_object_lock(bgp_attr_get_community(attr)->json);  			json_object_object_add(  				json_path, "community", @@ -10537,7 +10538,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,  		if (json_paths) {  			if (!bgp_attr_get_lcommunity(attr)->json)  				lcommunity_str(bgp_attr_get_lcommunity(attr), -					       true); +					       true, true);  			json_object_lock(bgp_attr_get_lcommunity(attr)->json);  			json_object_object_add(  				json_path, "largeCommunity", @@ -15011,7 +15012,7 @@ static void bgp_config_write_network_evpn(struct vty *vty, struct bgp *bgp,  	char buf[PREFIX_STRLEN * 2];  	char buf2[SU_ADDRSTRLEN];  	char rdbuf[RD_ADDRSTRLEN]; -	char esi_buf[ESI_BYTES]; +	char esi_buf[ESI_STR_LEN];  	/* Network configuration. */  	for (pdest = bgp_table_top(bgp->route[afi][safi]); pdest; diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index 6fcc083e33..20ee2e4d49 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -1841,7 +1841,7 @@ struct route_map_rule_cmd route_set_srte_color_cmd = {  	"sr-te color", route_set_srte_color, route_set_srte_color_compile,  	route_set_srte_color_free}; -/* Set nexthop to object.  ojbect must be pointer to struct attr. */ +/* Set nexthop to object.  object must be pointer to struct attr. */  struct rmap_ip_nexthop_set {  	struct in_addr *address;  	int peer_address; @@ -2174,6 +2174,57 @@ static const struct route_map_rule_cmd route_set_aspath_exclude_cmd = {  	route_aspath_free,  }; +/* `set as-path replace AS-PATH` */ +static void *route_aspath_replace_compile(const char *arg) +{ +	return XSTRDUP(MTYPE_ROUTE_MAP_COMPILED, arg); +} + +static void route_aspath_replace_free(void *rule) +{ +	XFREE(MTYPE_ROUTE_MAP_COMPILED, rule); +} + +static enum route_map_cmd_result_t +route_set_aspath_replace(void *rule, const struct prefix *dummy, void *object) +{ +	struct aspath *aspath_new; +	const char *replace = rule; +	struct bgp_path_info *path = object; +	as_t own_asn = path->peer->change_local_as ? 
path->peer->change_local_as +						   : path->peer->local_as; + +	if (path->peer->sort != BGP_PEER_EBGP) { +		zlog_warn( +			"`set as-path replace` is supported only for EBGP peers"); +		return RMAP_NOOP; +	} + +	if (path->attr->aspath->refcnt) +		aspath_new = aspath_dup(path->attr->aspath); +	else +		aspath_new = path->attr->aspath; + +	if (strmatch(replace, "any")) { +		path->attr->aspath = +			aspath_replace_all_asn(aspath_new, own_asn); +	} else { +		as_t replace_asn = strtoul(replace, NULL, 10); + +		path->attr->aspath = aspath_replace_specific_asn( +			aspath_new, replace_asn, own_asn); +	} + +	return RMAP_OKAY; +} + +static const struct route_map_rule_cmd route_set_aspath_replace_cmd = { +	"as-path replace", +	route_set_aspath_replace, +	route_aspath_replace_compile, +	route_aspath_replace_free, +}; +  /* `set community COMMUNITY' */  struct rmap_com_set {  	struct community *com; @@ -2199,7 +2250,6 @@ route_set_community(void *rule, const struct prefix *prefix, void *object)  	/* "none" case.  */  	if (rcs->none) { -		attr->flag &= ~(ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES));  		bgp_attr_set_community(attr, NULL);  		/* See the longer comment down below. */  		if (old && old->refcnt == 0) @@ -2227,8 +2277,6 @@ route_set_community(void *rule, const struct prefix *prefix, void *object)  	/* will be interned by caller if required */  	bgp_attr_set_community(attr, new); -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES); -  	return RMAP_OKAY;  } @@ -2313,7 +2361,6 @@ route_set_lcommunity(void *rule, const struct prefix *prefix, void *object)  	/* "none" case.  */  	if (rcs->none) { -		attr->flag &= ~(ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES));  		bgp_attr_set_lcommunity(attr, NULL);  		/* See the longer comment down below. */ @@ -2341,8 +2388,6 @@ route_set_lcommunity(void *rule, const struct prefix *prefix, void *object)  	/* will be intern()'d or attr_flush()'d by bgp_update_main() */  	bgp_attr_set_lcommunity(attr, new); -	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES); -  	return RMAP_OKAY;  } @@ -2438,13 +2483,9 @@ route_set_lcommunity_delete(void *rule, const struct prefix *pfx, void *object)  		if (new->size == 0) {  			bgp_attr_set_lcommunity(path->attr, NULL); -			path->attr->flag &= -				~ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES);  			lcommunity_free(&new);  		} else {  			bgp_attr_set_lcommunity(path->attr, new); -			path->attr->flag |= -				ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES);  		}  	} @@ -2526,12 +2567,9 @@ route_set_community_delete(void *rule, const struct prefix *prefix,  		if (new->size == 0) {  			bgp_attr_set_community(path->attr, NULL); -			path->attr->flag &= -				~ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);  			community_free(&new);  		} else {  			bgp_attr_set_community(path->attr, new); -			path->attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);  		}  	} @@ -2597,7 +2635,6 @@ route_set_ecommunity(void *rule, const struct prefix *prefix, void *object)  	attr = path->attr;  	if (rcs->none) { -		attr->flag &= ~(ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES));  		bgp_attr_set_ecommunity(attr, NULL);  		return RMAP_OKAY;  	} @@ -2624,8 +2661,6 @@ route_set_ecommunity(void *rule, const struct prefix *prefix, void *object)  	/* will be intern()'d or attr_flush()'d by bgp_update_main() */  	bgp_attr_set_ecommunity(path->attr, new_ecom); -	path->attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES); -  	return RMAP_OKAY;  } @@ -2787,7 +2822,6 @@ route_set_ecommunity_lb(void *rule, const struct prefix *prefix, void *object)  	/* new_ecom will be intern()'d or attr_flush()'d 
in call stack */  	bgp_attr_set_ecommunity(path->attr, new_ecom); -	path->attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);  	/* Mark that route-map has set link bandwidth; used in attribute  	 * setting decisions. @@ -3306,7 +3340,7 @@ static const struct route_map_rule_cmd  /* `set ipv6 nexthop global IP_ADDRESS' */ -/* Set nexthop to object.  ojbect must be pointer to struct attr. */ +/* Set nexthop to object.  object must be pointer to struct attr. */  static enum route_map_cmd_result_t  route_set_ipv6_nexthop_global(void *rule, const struct prefix *p, void *object)  { @@ -3418,7 +3452,7 @@ static const struct route_map_rule_cmd  /* `set ipv6 nexthop local IP_ADDRESS' */ -/* Set nexthop to object.  ojbect must be pointer to struct attr. */ +/* Set nexthop to object.  object must be pointer to struct attr. */  static enum route_map_cmd_result_t  route_set_ipv6_nexthop_local(void *rule, const struct prefix *p, void *object)  { @@ -3478,7 +3512,7 @@ static const struct route_map_rule_cmd  /* `set ipv6 nexthop peer-address' */ -/* Set nexthop to object.  ojbect must be pointer to struct attr. */ +/* Set nexthop to object.  object must be pointer to struct attr. */  static enum route_map_cmd_result_t  route_set_ipv6_nexthop_peer(void *rule, const struct prefix *pfx, void *object)  { @@ -3809,6 +3843,14 @@ static void bgp_route_map_update_peer_group(const char *rmap_name,  			if (filter->usmap.name  			    && (strcmp(rmap_name, filter->usmap.name) == 0))  				filter->usmap.map = map; + +			if (filter->advmap.aname && +			    (strcmp(rmap_name, filter->advmap.aname) == 0)) +				filter->advmap.amap = map; + +			if (filter->advmap.cname && +			    (strcmp(rmap_name, filter->advmap.cname) == 0)) +				filter->advmap.cmap = map;  		}  	}  } @@ -5398,6 +5440,43 @@ DEFUN_YANG (set_aspath_prepend_lastas,  	return nb_cli_apply_changes(vty, NULL);  } +DEFPY_YANG (set_aspath_replace_asn, +	    set_aspath_replace_asn_cmd, +	    "set as-path replace <any|(1-4294967295)>$replace", +	    SET_STR +	    "Transform BGP AS_PATH attribute\n" +	    "Replace AS number to local AS number\n" +	    "Replace any AS number to local AS number\n" +	    "Replace a specific AS number to local AS number\n") +{ +	const char *xpath = +		"./set-action[action='frr-bgp-route-map:as-path-replace']"; +	char xpath_value[XPATH_MAXLEN]; + +	nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); +	snprintf(xpath_value, sizeof(xpath_value), +		 "%s/rmap-set-action/frr-bgp-route-map:replace-as-path", xpath); +	nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, replace); +	return nb_cli_apply_changes(vty, NULL); +} + +DEFPY_YANG (no_set_aspath_replace_asn, +	    no_set_aspath_replace_asn_cmd, +	    "no set as-path replace [<any|(1-4294967295)>]", +	    NO_STR +	    SET_STR +	    "Transform BGP AS_PATH attribute\n" +	    "Replace AS number to local AS number\n" +	    "Replace any AS number to local AS number\n" +	    "Replace a specific AS number to local AS number\n") +{ +	const char *xpath = +		"./set-action[action='frr-bgp-route-map:as-path-replace']"; + +	nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); +	return nb_cli_apply_changes(vty, NULL); +} +  DEFUN_YANG (no_set_aspath_prepend,  	    no_set_aspath_prepend_cmd,  	    "no set as-path prepend [(1-4294967295)]", @@ -5562,19 +5641,19 @@ DEFUN_YANG (set_community,  	str = buffer_getstr(b);  	buffer_free(b); -	if (str) { +	if (str)  		com = community_str2com(str); -		XFREE(MTYPE_TMP, str); -	}  	/* Can't compile user input into communities attribute.  
*/  	if (!com) { -		vty_out(vty, "%% Malformed communities attribute\n"); +		vty_out(vty, "%% Malformed communities attribute '%s'\n", str); +		XFREE(MTYPE_TMP, str);  		return CMD_WARNING_CONFIG_FAILED;  	} +	XFREE(MTYPE_TMP, str);  	/* Set communites attribute string.  */ -	str = community_str(com, false); +	str = community_str(com, false, false);  	if (additive) {  		size_t argstr_sz = strlen(str) + strlen(" additive") + 1; @@ -6736,6 +6815,7 @@ void bgp_route_map_init(void)  	route_map_install_set(&route_set_distance_cmd);  	route_map_install_set(&route_set_aspath_prepend_cmd);  	route_map_install_set(&route_set_aspath_exclude_cmd); +	route_map_install_set(&route_set_aspath_replace_cmd);  	route_map_install_set(&route_set_origin_cmd);  	route_map_install_set(&route_set_atomic_aggregate_cmd);  	route_map_install_set(&route_set_aggregator_as_cmd); @@ -6809,10 +6889,12 @@ void bgp_route_map_init(void)  	install_element(RMAP_NODE, &set_aspath_prepend_asn_cmd);  	install_element(RMAP_NODE, &set_aspath_prepend_lastas_cmd);  	install_element(RMAP_NODE, &set_aspath_exclude_cmd); +	install_element(RMAP_NODE, &set_aspath_replace_asn_cmd);  	install_element(RMAP_NODE, &no_set_aspath_prepend_cmd);  	install_element(RMAP_NODE, &no_set_aspath_prepend_lastas_cmd);  	install_element(RMAP_NODE, &no_set_aspath_exclude_cmd);  	install_element(RMAP_NODE, &no_set_aspath_exclude_all_cmd); +	install_element(RMAP_NODE, &no_set_aspath_replace_asn_cmd);  	install_element(RMAP_NODE, &set_origin_cmd);  	install_element(RMAP_NODE, &no_set_origin_cmd);  	install_element(RMAP_NODE, &set_atomic_aggregate_cmd); diff --git a/bgpd/bgp_routemap_nb.c b/bgpd/bgp_routemap_nb.c index caf1553ec1..585596e1aa 100644 --- a/bgpd/bgp_routemap_nb.c +++ b/bgpd/bgp_routemap_nb.c @@ -297,6 +297,13 @@ const struct frr_yang_module_info frr_bgp_route_map_info = {  			}  		},  		{ +			.xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:replace-as-path", +			.cbs = { +				.modify = lib_route_map_entry_set_action_rmap_set_action_replace_as_path_modify, +				.destroy = lib_route_map_entry_set_action_rmap_set_action_replace_as_path_destroy, +			} +		}, +		{  			.xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:community-none",  			.cbs = {  				.modify = lib_route_map_entry_set_action_rmap_set_action_community_none_modify, diff --git a/bgpd/bgp_routemap_nb.h b/bgpd/bgp_routemap_nb.h index e0b3a6926f..a01adf7d5d 100644 --- a/bgpd/bgp_routemap_nb.h +++ b/bgpd/bgp_routemap_nb.h @@ -108,6 +108,10 @@ int lib_route_map_entry_set_action_rmap_set_action_last_as_modify(struct nb_cb_m  int lib_route_map_entry_set_action_rmap_set_action_last_as_destroy(struct nb_cb_destroy_args *args);  int lib_route_map_entry_set_action_rmap_set_action_exclude_as_path_modify(struct nb_cb_modify_args *args);  int lib_route_map_entry_set_action_rmap_set_action_exclude_as_path_destroy(struct nb_cb_destroy_args *args); +int lib_route_map_entry_set_action_rmap_set_action_replace_as_path_modify( +	struct nb_cb_modify_args *args); +int lib_route_map_entry_set_action_rmap_set_action_replace_as_path_destroy( +	struct nb_cb_destroy_args *args);  int lib_route_map_entry_set_action_rmap_set_action_community_none_modify(struct nb_cb_modify_args *args);  int lib_route_map_entry_set_action_rmap_set_action_community_none_destroy(struct nb_cb_destroy_args *args);  int lib_route_map_entry_set_action_rmap_set_action_community_string_modify(struct nb_cb_modify_args *args); diff --git 
a/bgpd/bgp_routemap_nb_config.c b/bgpd/bgp_routemap_nb_config.c index 773538ee41..b87877b1e0 100644 --- a/bgpd/bgp_routemap_nb_config.c +++ b/bgpd/bgp_routemap_nb_config.c @@ -2209,6 +2209,58 @@ lib_route_map_entry_set_action_rmap_set_action_exclude_as_path_destroy(  /*   * XPath: + * /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:replace-as-path + */ +int lib_route_map_entry_set_action_rmap_set_action_replace_as_path_modify( +	struct nb_cb_modify_args *args) +{ +	struct routemap_hook_context *rhc; +	const char *type; +	int rv; + +	switch (args->event) { +	case NB_EV_VALIDATE: +	case NB_EV_PREPARE: +	case NB_EV_ABORT: +		break; +	case NB_EV_APPLY: +		/* Add configuration. */ +		rhc = nb_running_get_entry(args->dnode, NULL, true); +		type = yang_dnode_get_string(args->dnode, NULL); + +		/* Set destroy information. */ +		rhc->rhc_shook = generic_set_delete; +		rhc->rhc_rule = "as-path replace"; +		rhc->rhc_event = RMAP_EVENT_SET_DELETED; + +		rv = generic_set_add(rhc->rhc_rmi, "as-path replace", type, +				     args->errmsg, args->errmsg_len); +		if (rv != CMD_SUCCESS) { +			rhc->rhc_shook = NULL; +			return NB_ERR_INCONSISTENCY; +		} +	} + +	return NB_OK; +} + +int lib_route_map_entry_set_action_rmap_set_action_replace_as_path_destroy( +	struct nb_cb_destroy_args *args) +{ +	switch (args->event) { +	case NB_EV_VALIDATE: +	case NB_EV_PREPARE: +	case NB_EV_ABORT: +		break; +	case NB_EV_APPLY: +		return lib_route_map_entry_set_destroy(args); +	} + +	return NB_OK; +} + +/* + * XPath:   * /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:community-none   */  int lib_route_map_entry_set_action_rmap_set_action_community_none_modify( diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c index c724b938d1..eb9d5f3f73 100644 --- a/bgpd/bgp_rpki.c +++ b/bgpd/bgp_rpki.c @@ -64,13 +64,16 @@  #endif  static struct thread *t_rpki; +static struct thread *t_rpki_start;  DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE, "BGP RPKI Cache server");  DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE_GROUP, "BGP RPKI Cache server group"); +DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_RTRLIB, "BGP RPKI RTRLib");  #define POLLING_PERIOD_DEFAULT 3600  #define EXPIRE_INTERVAL_DEFAULT 7200  #define RETRY_INTERVAL_DEFAULT 600 +#define BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT 3  #define RPKI_DEBUG(...)                                                        
\  	if (rpki_debug) {                                                      \ @@ -122,6 +125,7 @@ static int add_tcp_cache(const char *host, const char *port,  static void print_record(const struct pfx_record *record, struct vty *vty);  static int is_synchronized(void);  static int is_running(void); +static int is_stopping(void);  static void route_match_free(void *rule);  static enum route_map_cmd_result_t route_match(void *rule,  					       const struct prefix *prefix, @@ -156,17 +160,17 @@ static const struct route_map_rule_cmd route_match_rpki_cmd = {  static void *malloc_wrapper(size_t size)  { -	return XMALLOC(MTYPE_BGP_RPKI_CACHE, size); +	return XMALLOC(MTYPE_BGP_RPKI_RTRLIB, size);  }  static void *realloc_wrapper(void *ptr, size_t size)  { -	return XREALLOC(MTYPE_BGP_RPKI_CACHE, ptr, size); +	return XREALLOC(MTYPE_BGP_RPKI_RTRLIB, ptr, size);  }  static void free_wrapper(void *ptr)  { -	XFREE(MTYPE_BGP_RPKI_CACHE, ptr); +	XFREE(MTYPE_BGP_RPKI_RTRLIB, ptr);  }  static void init_tr_socket(struct cache *cache) @@ -331,7 +335,7 @@ static struct rtr_mgr_group *get_groups(void)  inline int is_synchronized(void)  { -	return rtr_is_running && rtr_mgr_conf_in_sync(rtr_config); +	return is_running() && rtr_mgr_conf_in_sync(rtr_config);  }  inline int is_running(void) @@ -339,6 +343,11 @@ inline int is_running(void)  	return rtr_is_running;  } +inline int is_stopping(void) +{ +	return rtr_is_stopping; +} +  static struct prefix *pfx_record_to_prefix(struct pfx_record *record)  {  	struct prefix *prefix = prefix_new(); @@ -480,7 +489,7 @@ static void rpki_connection_status_cb(const struct rtr_mgr_group *group  	struct pfx_record rec = {0};  	int retval; -	if (rtr_is_stopping || +	if (is_stopping() ||  	    atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst))  		return; @@ -500,8 +509,8 @@ static void rpki_update_cb_sync_rtr(struct pfx_table *p __attribute__((unused)),  				    const struct pfx_record rec,  				    const bool added __attribute__((unused)))  { -	if (rtr_is_stopping -	    || atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst)) +	if (is_stopping() || +	    atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst))  		return;  	int retval = @@ -587,6 +596,18 @@ static int bgp_rpki_module_init(void)  	return 0;  } +static void start_expired(struct thread *thread) +{ +	if (!rtr_mgr_conf_in_sync(rtr_config)) { +		thread_add_timer(bm->master, start_expired, NULL, +				 BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT, +				 &t_rpki_start); +		return; +	} + +	rtr_is_running = 1; +} +  static int start(void)  {  	int ret; @@ -620,7 +641,8 @@ static int start(void)  		rtr_mgr_free(rtr_config);  		return ERROR;  	} -	rtr_is_running = 1; + +	thread_add_timer(bm->master, start_expired, NULL, 0, &t_rpki_start);  	XFREE(MTYPE_BGP_RPKI_CACHE_GROUP, groups); @@ -630,7 +652,8 @@ static int start(void)  static void stop(void)  {  	rtr_is_stopping = 1; -	if (rtr_is_running) { +	if (is_running()) { +		THREAD_OFF(t_rpki_start);  		rtr_mgr_stop(rtr_config);  		rtr_mgr_free(rtr_config);  		rtr_is_running = 0; @@ -639,7 +662,10 @@ static void stop(void)  static int reset(bool force)  { -	if (rtr_is_running && !force) +	if (is_running() && !force) +		return SUCCESS; + +	if (thread_is_scheduled(t_rpki_start))  		return SUCCESS;  	RPKI_DEBUG("Resetting RPKI Session"); @@ -722,7 +748,7 @@ static int rpki_validate_prefix(struct peer *peer, struct attr *attr,  	enum pfxv_state result;  	if (!is_synchronized()) -		return 0; +		return RPKI_NOT_BEING_USED;  	// No aspath means route 
comes from iBGP  	if (!attr->aspath || !attr->aspath->segments) { @@ -762,7 +788,7 @@ static int rpki_validate_prefix(struct peer *peer, struct attr *attr,  		break;  	default: -		return 0; +		return RPKI_NOT_BEING_USED;  	}  	// Do the actual validation @@ -792,7 +818,7 @@ static int rpki_validate_prefix(struct peer *peer, struct attr *attr,  			prefix, as_number);  		break;  	} -	return 0; +	return RPKI_NOT_BEING_USED;  }  static int add_cache(struct cache *cache) @@ -804,7 +830,7 @@ static int add_cache(struct cache *cache)  	group.sockets_len = 1;  	group.sockets = &cache->rtr_socket; -	if (rtr_is_running) { +	if (is_running()) {  		init_tr_socket(cache);  		if (rtr_mgr_add_group(rtr_config, &group) != RTR_SUCCESS) { @@ -902,9 +928,8 @@ static void free_cache(struct cache *cache)  	if (cache->type == TCP) {  		XFREE(MTYPE_BGP_RPKI_CACHE, cache->tr_config.tcp_config->host);  		XFREE(MTYPE_BGP_RPKI_CACHE, cache->tr_config.tcp_config->port); -		if (cache->tr_config.tcp_config->bindaddr) -			XFREE(MTYPE_BGP_RPKI_CACHE, -			      cache->tr_config.tcp_config->bindaddr); +		XFREE(MTYPE_BGP_RPKI_CACHE, +		      cache->tr_config.tcp_config->bindaddr);  		XFREE(MTYPE_BGP_RPKI_CACHE, cache->tr_config.tcp_config);  	}  #if defined(FOUND_SSH) @@ -916,9 +941,8 @@ static void free_cache(struct cache *cache)  		      cache->tr_config.ssh_config->client_privkey_path);  		XFREE(MTYPE_BGP_RPKI_CACHE,  		      cache->tr_config.ssh_config->server_hostkey_path); -		if (cache->tr_config.ssh_config->bindaddr) -			XFREE(MTYPE_BGP_RPKI_CACHE, -			      cache->tr_config.ssh_config->bindaddr); +		XFREE(MTYPE_BGP_RPKI_CACHE, +		      cache->tr_config.ssh_config->bindaddr);  		XFREE(MTYPE_BGP_RPKI_CACHE, cache->tr_config.ssh_config);  	}  #endif @@ -1171,9 +1195,9 @@ DEFPY (no_rpki_cache,  		return CMD_WARNING;  	} -	if (rtr_is_running && listcount(cache_list) == 1) { +	if (is_running() && listcount(cache_list) == 1) {  		stop(); -	} else if (rtr_is_running) { +	} else if (is_running()) {  		if (rtr_mgr_remove_group(rtr_config, preference) == RTR_ERROR) {  			vty_out(vty, "Could not remove cache %ld", preference); diff --git a/bgpd/bgp_script.c b/bgpd/bgp_script.c index 9446a25a05..bf3e612bfd 100644 --- a/bgpd/bgp_script.c +++ b/bgpd/bgp_script.c @@ -156,18 +156,19 @@ void lua_pushattr(lua_State *L, const struct attr *attr)  void lua_decode_attr(lua_State *L, int idx, struct attr *attr)  { -	lua_getfield(L, -1, "metric"); +	lua_getfield(L, idx, "metric");  	attr->med = lua_tointeger(L, -1);  	lua_pop(L, 1); -	lua_getfield(L, -1, "ifindex"); +	lua_getfield(L, idx, "ifindex");  	attr->nh_ifindex = lua_tointeger(L, -1);  	lua_pop(L, 1); -	lua_getfield(L, -1, "aspath"); +	lua_getfield(L, idx, "aspath");  	attr->aspath = aspath_str2aspath(lua_tostring(L, -1));  	lua_pop(L, 1); -	lua_getfield(L, -1, "localpref"); +	lua_getfield(L, idx, "localpref");  	attr->local_pref = lua_tointeger(L, -1);  	lua_pop(L, 1); +	lua_pop(L, 1);  }  void *lua_toattr(lua_State *L, int idx) diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c index aa3e44318a..e85118e588 100644 --- a/bgpd/bgp_updgrp.c +++ b/bgpd/bgp_updgrp.c @@ -1463,7 +1463,7 @@ static int update_group_periodic_merge_walkcb(struct update_group *updgrp,   *             over multiple statements. Useful to set dirty flag on   *             update groups.   
*/ -void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype, +void update_group_policy_update(struct bgp *bgp, enum bgp_policy_type ptype,  				const char *pname, int route_update,  				int start_event)  { diff --git a/bgpd/bgp_updgrp.h b/bgpd/bgp_updgrp.h index 0e10341bc4..e3309ab7c5 100644 --- a/bgpd/bgp_updgrp.h +++ b/bgpd/bgp_updgrp.h @@ -74,7 +74,7 @@  	 | PEER_CAP_ADDPATH_AF_TX_ADV | PEER_CAP_ADDPATH_AF_RX_RCV             \  	 | PEER_CAP_ENHE_AF_NEGO) -typedef enum { BGP_ATTR_VEC_NH = 0, BGP_ATTR_VEC_MAX } bpacket_attr_vec_type; +enum bpacket_attr_vec_type { BGP_ATTR_VEC_NH = 0, BGP_ATTR_VEC_MAX };  typedef struct {  	uint32_t flags; @@ -288,7 +288,7 @@ struct updwalk_context {  	struct bgp_path_info *pi;  	uint64_t updgrp_id;  	uint64_t subgrp_id; -	bgp_policy_type_e policy_type; +	enum bgp_policy_type policy_type;  	const char *policy_name;  	int policy_event_start_flag;  	int policy_route_update; @@ -368,7 +368,8 @@ extern void update_subgroup_split_peer(struct peer_af *, struct update_group *);  extern bool update_subgroup_check_merge(struct update_subgroup *, const char *);  extern bool update_subgroup_trigger_merge_check(struct update_subgroup *,  						int force); -extern void update_group_policy_update(struct bgp *bgp, bgp_policy_type_e ptype, +extern void update_group_policy_update(struct bgp *bgp, +				       enum bgp_policy_type ptype,  				       const char *pname, int route_update,  				       int start_event);  extern void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi, @@ -409,7 +410,7 @@ extern struct stream *bpacket_reformat_for_peer(struct bpacket *pkt,  						struct peer_af *paf);  extern void bpacket_attr_vec_arr_reset(struct bpacket_attr_vec_arr *vecarr);  extern void bpacket_attr_vec_arr_set_vec(struct bpacket_attr_vec_arr *vecarr, -					 bpacket_attr_vec_type type, +					 enum bpacket_attr_vec_type type,  					 struct stream *s, struct attr *attr);  extern void subgroup_default_update_packet(struct update_subgroup *subgrp,  					   struct attr *attr, diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c index 0d8ee79ae5..87558f9c35 100644 --- a/bgpd/bgp_updgrp_adv.c +++ b/bgpd/bgp_updgrp_adv.c @@ -818,6 +818,8 @@ void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)  	assert(attr.aspath);  	attr.local_pref = bgp->default_local_pref; +	attr.med = 0; +	attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC);  	if ((afi == AFI_IP6) || peer_cap_enhe(peer, afi, safi)) {  		/* IPv6 global nexthop must be included. 
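Note on the bgpd/bgp_updgrp_adv.c hunk above: subgroup_default_originate() now zeroes the MED and also sets the corresponding bit in attr.flag, because in bgpd an attribute value only counts as present when its flag bit is set. A small sketch of the matching check-before-use idiom (the consumer function is hypothetical; CHECK_FLAG and ATTR_FLAG_BIT are existing FRR/bgpd macros):

    /* Hypothetical consumer: only trust attr->med when the MED attribute
     * was actually attached to the route. */
    static uint32_t effective_med(const struct attr *attr)
    {
        if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)))
            return attr->med;

        return 0;  /* assumption: treat a missing MED as 0 for this sketch */
    }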
*/ diff --git a/bgpd/bgp_updgrp_packet.c b/bgpd/bgp_updgrp_packet.c index cf24e1d689..c4a3ca7500 100644 --- a/bgpd/bgp_updgrp_packet.c +++ b/bgpd/bgp_updgrp_packet.c @@ -1240,7 +1240,7 @@ void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)  static void  bpacket_vec_arr_inherit_attr_flags(struct bpacket_attr_vec_arr *vecarr, -				   bpacket_attr_vec_type type, +				   enum bpacket_attr_vec_type type,  				   struct attr *attr)  {  	if (CHECK_FLAG(attr->rmap_change_flags, @@ -1291,8 +1291,8 @@ void bpacket_attr_vec_arr_reset(struct bpacket_attr_vec_arr *vecarr)  /* Setup a particular node entry in the vecarr */  void bpacket_attr_vec_arr_set_vec(struct bpacket_attr_vec_arr *vecarr, -				  bpacket_attr_vec_type type, struct stream *s, -				  struct attr *attr) +				  enum bpacket_attr_vec_type type, +				  struct stream *s, struct attr *attr)  {  	if (!vecarr)  		return; diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index dea1433f6d..2aa77576a1 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -286,7 +286,7 @@ static int bgp_srv6_locator_unset(struct bgp *bgp)  {  	int ret;  	struct listnode *node, *nnode; -	struct prefix_ipv6 *chunk; +	struct srv6_locator_chunk *chunk;  	struct bgp_srv6_function *func;  	struct bgp *bgp_vrf;  	struct in6_addr *tovpn_sid; @@ -934,12 +934,12 @@ static void bgp_clear_vty_error(struct vty *vty, struct peer *peer, afi_t afi,  	switch (error) {  	case BGP_ERR_AF_UNCONFIGURED:  		vty_out(vty, -			"%%BGP: Enable %s address family for the neighbor %s\n", +			"%% BGP: Enable %s address family for the neighbor %s\n",  			get_afi_safi_str(afi, safi, false), peer->host);  		break;  	case BGP_ERR_SOFT_RECONFIG_UNCONFIGURED:  		vty_out(vty, -			"%%BGP: Inbound soft reconfig for %s not possible as it\n      has neither refresh capability, nor inbound soft reconfig\n", +			"%% BGP: Inbound soft reconfig for %s not possible as it\n      has neither refresh capability, nor inbound soft reconfig\n",  			peer->host);  		break;  	default: @@ -1086,7 +1086,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,  			peer = peer_lookup(bgp, &su);  			if (!peer) {  				vty_out(vty, -					"%%BGP: Unknown neighbor - \"%s\"\n", +					"%% BGP: Unknown neighbor - \"%s\"\n",  					arg);  				return CMD_WARNING;  			} @@ -1113,7 +1113,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,  		group = peer_group_lookup(bgp, arg);  		if (!group) { -			vty_out(vty, "%%BGP: No such peer-group %s\n", arg); +			vty_out(vty, "%% BGP: No such peer-group %s\n", arg);  			return CMD_WARNING;  		} @@ -1128,7 +1128,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,  		if (!found)  			vty_out(vty, -				"%%BGP: No %s peer belonging to peer-group %s is configured\n", +				"%% BGP: No %s peer belonging to peer-group %s is configured\n",  				get_afi_safi_str(afi, safi, false), arg);  		return CMD_SUCCESS; @@ -1163,7 +1163,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,  		if (!found)  			vty_out(vty, -				"%%BGP: No external %s peer is configured\n", +				"%% BGP: No external %s peer is configured\n",  				get_afi_safi_str(afi, safi, false));  		return CMD_SUCCESS; @@ -1200,7 +1200,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,  		if (!found)  			vty_out(vty, -				"%%BGP: No %s peer is configured with AS %s\n", +				"%% BGP: No %s peer is configured with AS %s\n",  				get_afi_safi_str(afi, safi, false), arg);  		return 
CMD_SUCCESS; @@ -7434,7 +7434,7 @@ DEFPY (bgp_condadv_period,  DEFPY (neighbor_advertise_map,         neighbor_advertise_map_cmd, -       "[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor advertise-map WORD$advertise_str <exist-map|non-exist-map>$exist WORD$condition_str", +       "[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor advertise-map RMAP_NAME$advertise_str <exist-map|non-exist-map>$exist RMAP_NAME$condition_str",         NO_STR         NEIGHBOR_STR         NEIGHBOR_ADDR_STR2 @@ -7455,7 +7455,7 @@ DEFPY (neighbor_advertise_map,  }  ALIAS_HIDDEN(neighbor_advertise_map, neighbor_advertise_map_hidden_cmd, -	     "[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor advertise-map WORD$advertise_str <exist-map|non-exist-map>$exist WORD$condition_str", +	     "[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor advertise-map RMAP_NAME$advertise_str <exist-map|non-exist-map>$exist RMAP_NAME$condition_str",  	     NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2  	     "Route-map to conditionally advertise routes\n"  	     "Name of advertise map\n" @@ -8700,7 +8700,7 @@ DEFPY (af_rt_vpn_imexport,  	int ret;  	struct ecommunity *ecom = NULL;  	int dodir[BGP_VPN_POLICY_DIR_MAX] = {0}; -	vpn_policy_direction_t dir; +	enum vpn_policy_direction dir;  	afi_t afi;  	int idx = 0;  	bool yes = true; @@ -8780,7 +8780,7 @@ DEFPY (af_route_map_vpn_imexport,  	VTY_DECLVAR_CONTEXT(bgp, bgp);  	int ret;  	int dodir[BGP_VPN_POLICY_DIR_MAX] = {0}; -	vpn_policy_direction_t dir; +	enum vpn_policy_direction dir;  	afi_t afi;  	int idx = 0;  	bool yes = true; @@ -8843,7 +8843,7 @@ DEFPY(af_import_vrf_route_map, af_import_vrf_route_map_cmd,        "name of route-map\n")  {  	VTY_DECLVAR_CONTEXT(bgp, bgp); -	vpn_policy_direction_t dir = BGP_VPN_POLICY_DIR_FROMVPN; +	enum vpn_policy_direction dir = BGP_VPN_POLICY_DIR_FROMVPN;  	afi_t afi;  	struct bgp *bgp_default; @@ -8896,7 +8896,7 @@ DEFPY(af_no_import_vrf_route_map, af_no_import_vrf_route_map_cmd,        "name of route-map\n")  {  	VTY_DECLVAR_CONTEXT(bgp, bgp); -	vpn_policy_direction_t dir = BGP_VPN_POLICY_DIR_FROMVPN; +	enum vpn_policy_direction dir = BGP_VPN_POLICY_DIR_FROMVPN;  	afi_t afi;  	afi = vpn_policy_getafi(vty, bgp, true); @@ -9027,7 +9027,7 @@ DEFPY (bgp_imexport_vpn,  	int idx = 0;  	bool yes = true;  	int flag; -	vpn_policy_direction_t dir; +	enum vpn_policy_direction dir;  	if (argv_find(argv, argc, "no", &idx))  		yes = false; @@ -9316,7 +9316,7 @@ DEFPY (show_bgp_srv6,  {  	struct bgp *bgp;  	struct listnode *node; -	struct prefix_ipv6 *chunk; +	struct srv6_locator_chunk *chunk;  	struct bgp_srv6_function *func;  	struct in6_addr *tovpn4_sid;  	struct in6_addr *tovpn6_sid; @@ -9331,7 +9331,7 @@ DEFPY (show_bgp_srv6,  	vty_out(vty, "locator_name: %s\n", bgp->srv6_locator_name);  	vty_out(vty, "locator_chunks:\n");  	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) { -		prefix2str(chunk, buf, sizeof(buf)); +		prefix2str(&chunk->prefix, buf, sizeof(buf));  		vty_out(vty, "- %s\n", buf);  	} @@ -12559,6 +12559,18 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,  		else  			json_object_boolean_false_add(  				json_neigh, "extendedOptionalParametersLength"); + +		/* Conditional advertisements */ +		json_object_int_add( +			json_neigh, +			"bgpTimerConfiguredConditionalAdvertisementsSec", +			bgp->condition_check_period); +		if (thread_is_scheduled(bgp->t_condition_check)) +			json_object_int_add( +				json_neigh, +				"bgpTimerUntilConditionalAdvertisementsSec", +				thread_timer_remain_second( +					
bgp->t_condition_check));  	} else {  		/* Administrative shutdown. */  		if (CHECK_FLAG(p->flags, PEER_FLAG_SHUTDOWN) @@ -12636,6 +12648,16 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,  		if (BGP_OPEN_EXT_OPT_PARAMS_CAPABLE(p))  			vty_out(vty,  				"  Extended Optional Parameters Length is enabled\n"); + +		/* Conditional advertisements */ +		vty_out(vty, +			"  Configured conditional advertisements interval is %d seconds\n", +			bgp->condition_check_period); +		if (thread_is_scheduled(bgp->t_condition_check)) +			vty_out(vty, +				"  Time until conditional advertisements begin is %lu seconds\n", +				thread_timer_remain_second( +					bgp->t_condition_check));  	}  	/* Capability. */  	if (peer_established(p) && @@ -13042,7 +13064,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,  			json_object_object_add(json_cap, "hostName",  					       json_hname); -			/* Gracefull Restart */ +			/* Graceful Restart */  			if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV) ||  			    CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV)) {  				if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV) && @@ -13452,7 +13474,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,  						vty_out(vty, "none");  					vty_out(vty, "\n");  				} -			} /* Gracefull Restart */ +			} /* Graceful Restart */  		}  	} @@ -14473,7 +14495,7 @@ static void community_show_all_iterator(struct hash_bucket *bucket,  	com = (struct community *)bucket->data;  	vty_out(vty, "[%p] (%ld) %s\n", (void *)com, com->refcnt, -		community_str(com, false)); +		community_str(com, false, false));  }  /* Show BGP's community internal data. */ @@ -14502,7 +14524,7 @@ static void lcommunity_show_all_iterator(struct hash_bucket *bucket,  	lcom = (struct lcommunity *)bucket->data;  	vty_out(vty, "[%p] (%ld) %s\n", (void *)lcom, lcom->refcnt, -		lcommunity_str(lcom, false)); +		lcommunity_str(lcom, false, false));  }  /* Show BGP's community internal data. */ @@ -14604,7 +14626,7 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,  	char *vname;  	char buf1[INET6_ADDRSTRLEN];  	char *ecom_str; -	vpn_policy_direction_t dir; +	enum vpn_policy_direction dir;  	if (json) {  		json_object *json_import_vrfs = NULL; @@ -17599,11 +17621,67 @@ static const struct cmd_variable_handler bgp_var_peergroup[] = {  	{.tokenname = "PGNAME", .completions = bgp_ac_peergroup},  	{.completions = NULL} }; +DEFINE_HOOK(bgp_config_end, (struct bgp *bgp), (bgp)); + +static struct thread *t_bgp_cfg; + +bool bgp_config_inprocess(void) +{ +	return thread_is_scheduled(t_bgp_cfg); +} + +static void bgp_config_finish(struct thread *t) +{ +	struct listnode *node; +	struct bgp *bgp; + +	for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) +		hook_call(bgp_config_end, bgp); +} + +static void bgp_config_start(void) +{ +#define BGP_PRE_CONFIG_MAX_WAIT_SECONDS 600 +	THREAD_OFF(t_bgp_cfg); +	thread_add_timer(bm->master, bgp_config_finish, NULL, +			 BGP_PRE_CONFIG_MAX_WAIT_SECONDS, &t_bgp_cfg); +} + +/* When we receive a hook the configuration is read, + * we start a timer to make sure we postpone sending + * EoR before route-maps are processed. + * This is especially valid if using `bgp route-map delay-timer`. + */ +static void bgp_config_end(void) +{ +#define BGP_POST_CONFIG_DELAY_SECONDS 1 +	uint32_t bgp_post_config_delay = +		thread_is_scheduled(bm->t_rmap_update) +			? 
thread_timer_remain_second(bm->t_rmap_update) +			: BGP_POST_CONFIG_DELAY_SECONDS; + +	/* If BGP config processing thread isn't running, then +	 * we can return and rely it's properly handled. +	 */ +	if (!bgp_config_inprocess()) +		return; + +	THREAD_OFF(t_bgp_cfg); + +	/* Start a new timer to make sure we don't send EoR +	 * before route-maps are processed. +	 */ +	thread_add_timer(bm->master, bgp_config_finish, NULL, +			 bgp_post_config_delay, &t_bgp_cfg); +} +  void bgp_vty_init(void)  {  	cmd_variable_handler_register(bgp_var_neighbor);  	cmd_variable_handler_register(bgp_var_peergroup); +	cmd_init_config_callbacks(bgp_config_start, bgp_config_end); +  	/* Install bgp top node. */  	install_node(&bgp_node);  	install_node(&bgp_ipv4_unicast_node); @@ -19298,9 +19376,9 @@ static const char *community_list_config_str(struct community_entry *entry)  		str = "";  	else {  		if (entry->style == COMMUNITY_LIST_STANDARD) -			str = community_str(entry->u.com, false); +			str = community_str(entry->u.com, false, false);  		else if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) -			str = lcommunity_str(entry->u.lcom, false); +			str = lcommunity_str(entry->u.lcom, false, false);  		else  			str = entry->config;  	} diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h index 93026c663a..4b393275d6 100644 --- a/bgpd/bgp_vty.h +++ b/bgpd/bgp_vty.h @@ -164,6 +164,7 @@ extern void bgp_config_write_rpkt_quanta(struct vty *vty, struct bgp *bgp);  extern void bgp_config_write_listen(struct vty *vty, struct bgp *bgp);  extern void bgp_config_write_coalesce_time(struct vty *vty, struct bgp *bgp);  extern int bgp_vty_return(struct vty *vty, int ret); +extern bool bgp_config_inprocess(void);  extern struct peer *peer_and_group_lookup_vty(struct vty *vty,  					      const char *peer_str); diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 78eaac7806..77b8a8ab96 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -3155,26 +3155,26 @@ static int bgp_zebra_process_srv6_locator_chunk(ZAPI_CALLBACK_ARGS)  	struct stream *s = NULL;  	struct bgp *bgp = bgp_get_default();  	struct listnode *node; -	struct prefix_ipv6 *c; -	struct srv6_locator_chunk s6c = {}; -	struct prefix_ipv6 *chunk = NULL; +	struct srv6_locator_chunk *c; +	struct srv6_locator_chunk *chunk = srv6_locator_chunk_alloc();  	s = zclient->ibuf; -	zapi_srv6_locator_chunk_decode(s, &s6c); +	zapi_srv6_locator_chunk_decode(s, chunk); -	if (strcmp(bgp->srv6_locator_name, s6c.locator_name) != 0) { +	if (strcmp(bgp->srv6_locator_name, chunk->locator_name) != 0) {  		zlog_err("%s: Locator name unmatch %s:%s", __func__, -			 bgp->srv6_locator_name, s6c.locator_name); +			 bgp->srv6_locator_name, chunk->locator_name); +		srv6_locator_chunk_free(chunk);  		return 0;  	}  	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, c)) { -		if (!prefix_cmp(c, &s6c.prefix)) +		if (!prefix_cmp(&c->prefix, &chunk->prefix)) { +			srv6_locator_chunk_free(chunk);  			return 0; +		}  	} -	chunk = prefix_ipv6_new(); -	*chunk = s6c.prefix;  	listnode_add(bgp->srv6_locator_chunks, chunk);  	vpn_leak_postchange_all();  	return 0; @@ -3203,7 +3203,7 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS)  	struct srv6_locator loc = {};  	struct bgp *bgp = bgp_get_default();  	struct listnode *node, *nnode; -	struct prefix_ipv6 *chunk; +	struct srv6_locator_chunk *chunk;  	struct bgp_srv6_function *func;  	struct bgp *bgp_vrf;  	struct in6_addr *tovpn_sid; @@ -3215,7 +3215,7 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS)  	// refresh 
chunks  	for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk))  		if (prefix_match((struct prefix *)&loc.prefix, -				 (struct prefix *)chunk)) +				 (struct prefix *)&chunk->prefix))  			listnode_delete(bgp->srv6_locator_chunks, chunk);  	// refresh functions diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index 38a106359e..a2361758dc 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -565,7 +565,7 @@ void bgp_confederation_id_set(struct bgp *bgp, as_t as)  	   AS change.  Just Reset EBGP sessions, not CONFED sessions.  If we  	   were not doing confederation before, reset all EBGP sessions.  */  	for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { -		bgp_peer_sort_t ptype = peer_sort(peer); +		enum bgp_peer_sort ptype = peer_sort(peer);  		/* We're looking for peers who's AS is not local or part of our  		   confederation.  */ @@ -1004,7 +1004,7 @@ void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi,  }  /* Check peer's AS number and determines if this peer is IBGP or EBGP */ -static inline bgp_peer_sort_t peer_calc_sort(struct peer *peer) +static inline enum bgp_peer_sort peer_calc_sort(struct peer *peer)  {  	struct bgp *bgp; @@ -1091,13 +1091,13 @@ static inline bgp_peer_sort_t peer_calc_sort(struct peer *peer)  }  /* Calculate and cache the peer "sort" */ -bgp_peer_sort_t peer_sort(struct peer *peer) +enum bgp_peer_sort peer_sort(struct peer *peer)  {  	peer->sort = peer_calc_sort(peer);  	return peer->sort;  } -bgp_peer_sort_t peer_sort_lookup(struct peer *peer) +enum bgp_peer_sort peer_sort_lookup(struct peer *peer)  {  	return peer->sort;  } @@ -1731,6 +1731,8 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,  	peer->v_routeadv = (peer_sort(peer) == BGP_PEER_IBGP)  				   ? BGP_DEFAULT_IBGP_ROUTEADV  				   : BGP_DEFAULT_EBGP_ROUTEADV; +	if (bgp_config_inprocess()) +		peer->shut_during_cfg = true;  	peer = peer_lock(peer); /* bgp peer list reference */  	peer->group = group; @@ -1824,7 +1826,7 @@ int bgp_afi_safi_peer_exists(struct bgp *bgp, afi_t afi, safi_t safi)  /* Change peer's AS number.  */  void peer_as_change(struct peer *peer, as_t as, int as_specified)  { -	bgp_peer_sort_t origtype, newtype; +	enum bgp_peer_sort origtype, newtype;  	/* Stop peer. 
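Note on the bgpd/bgpd.c peer_create() hunk above: a peer created while the configuration is still being read gets shut_during_cfg set, and the BGP_PEER_START_SUPPRESSED change later in this diff keeps such sessions from starting until the bgp_config_end hook fires and peer_unshut_after_cfg() clears the flag. Other code can subscribe to the same hook; a sketch of a hypothetical callback, registered the same way the in-tree one is (the callback itself is illustrative only):

    /* Hypothetical subscriber to the new bgp_config_end hook. */
    static int my_config_end_cb(struct bgp *bgp)
    {
        zlog_debug("config load finished for instance %s",
                   bgp->name ? bgp->name : "default");
        return 0;
    }

    static void my_module_init(void)
    {
        /* hook_register() and the hook itself are real; see bgp_init(). */
        hook_register(bgp_config_end, my_config_end_cb);
    }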
*/  	if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { @@ -1929,8 +1931,8 @@ int peer_remote_as(struct bgp *bgp, union sockunion *su, const char *conf_if,  				return BGP_ERR_PEER_GROUP_MEMBER;  			} -			bgp_peer_sort_t peer_sort_type = -						peer_sort(peer->group->conf); +			enum bgp_peer_sort peer_sort_type = +				peer_sort(peer->group->conf);  			/* Explicit AS numbers used, compare AS numbers */  			if (as_type == AS_SPECIFIED) { @@ -2101,6 +2103,20 @@ static void peer_group2peer_config_copy_af(struct peer_group *group,  		PEER_ATTR_INHERIT(peer, group, filter[afi][safi].usmap.map);  	} +	/* Conditional Advertisements */ +	if (!CHECK_FLAG(pfilter_ovrd[RMAP_OUT], PEER_FT_ADVERTISE_MAP)) { +		PEER_STR_ATTR_INHERIT(peer, group, +				      filter[afi][safi].advmap.aname, +				      MTYPE_BGP_FILTER_NAME); +		PEER_ATTR_INHERIT(peer, group, filter[afi][safi].advmap.amap); +		PEER_STR_ATTR_INHERIT(peer, group, +				      filter[afi][safi].advmap.cname, +				      MTYPE_BGP_FILTER_NAME); +		PEER_ATTR_INHERIT(peer, group, filter[afi][safi].advmap.cmap); +		PEER_ATTR_INHERIT(peer, group, +				  filter[afi][safi].advmap.condition); +	} +  	if (peer->addpath_type[afi][safi] == BGP_ADDPATH_NONE) {  		peer->addpath_type[afi][safi] = conf->addpath_type[afi][safi];  		bgp_addpath_type_changed(conf->bgp); @@ -2347,15 +2363,14 @@ void peer_nsf_stop(struct peer *peer)  	if (peer->t_gr_restart) {  		BGP_TIMER_OFF(peer->t_gr_restart);  		if (bgp_debug_neighbor_events(peer)) -			zlog_debug("%s graceful restart timer stopped", -				   peer->host); +			zlog_debug("%pBP graceful restart timer stopped", peer);  	}  	if (peer->t_gr_stale) {  		BGP_TIMER_OFF(peer->t_gr_stale);  		if (bgp_debug_neighbor_events(peer))  			zlog_debug( -				"%s graceful restart stalepath timer stopped", -				peer->host); +				"%pBP graceful restart stalepath timer stopped", +				peer);  	}  	bgp_clear_route_all(peer);  } @@ -2897,7 +2912,7 @@ int peer_group_bind(struct bgp *bgp, union sockunion *su, struct peer *peer,  	int first_member = 0;  	afi_t afi;  	safi_t safi; -	bgp_peer_sort_t ptype, gtype; +	enum bgp_peer_sort ptype, gtype;  	/* Lookup the peer.  */  	if (!peer) @@ -3765,7 +3780,7 @@ void bgp_free(struct bgp *bgp)  	XFREE(MTYPE_BGP_EVPN_INFO, bgp->evpn_info);  	for (afi = AFI_IP; afi < AFI_MAX; afi++) { -		vpn_policy_direction_t dir; +		enum vpn_policy_direction dir;  		if (bgp->vpn_policy[afi].import_vrf)  			list_delete(&bgp->vpn_policy[afi].import_vrf); @@ -4294,8 +4309,8 @@ static void peer_flag_modify_action(struct peer *peer, uint32_t flag)  				BGP_TIMER_OFF(peer->t_pmax_restart);  				if (bgp_debug_neighbor_events(peer))  					zlog_debug( -						"%s Maximum-prefix restart timer canceled", -						peer->host); +						"%pBP Maximum-prefix restart timer canceled", +						peer);  			}  			if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->status)) { @@ -4488,7 +4503,7 @@ static int peer_flag_modify(struct peer *peer, uint32_t flag, int set)  	}  	/* -	 * Update peer-group members, unless they are explicitely overriding +	 * Update peer-group members, unless they are explicitly overriding  	 * peer-group configuration.  	 
*/  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) { @@ -4541,7 +4556,7 @@ static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi,  	struct peer *member;  	struct listnode *node, *nnode;  	struct peer_flag_action action; -	bgp_peer_sort_t ptype; +	enum bgp_peer_sort ptype;  	memset(&action, 0, sizeof(struct peer_flag_action));  	size = sizeof(peer_af_flag_action_list) @@ -4656,7 +4671,7 @@ static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi,  			  set != invert);  	} else {  		/* -		 * Update peer-group members, unless they are explicitely +		 * Update peer-group members, unless they are explicitly  		 * overriding peer-group configuration.  		 */  		for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, @@ -4918,7 +4933,7 @@ int peer_update_source_if_set(struct peer *peer, const char *ifname)  	/*  	 * Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -4989,7 +5004,7 @@ int peer_update_source_addr_set(struct peer *peer, const union sockunion *su)  	/*  	 * Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5066,7 +5081,7 @@ int peer_update_source_unset(struct peer *peer)  	/*  	 * Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5146,7 +5161,7 @@ int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi,  	/*  	 * Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5222,7 +5237,7 @@ int peer_default_originate_unset(struct peer *peer, afi_t afi, safi_t safi)  	/*  	 * Remove flag and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5327,7 +5342,7 @@ int peer_weight_set(struct peer *peer, afi_t afi, safi_t safi, uint16_t weight)  	/*  	 * Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5374,7 +5389,7 @@ int peer_weight_unset(struct peer *peer, afi_t afi, safi_t safi)  	/*  	 * Remove flag and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 
*/  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5420,7 +5435,7 @@ int peer_timers_set(struct peer *peer, uint32_t keepalive, uint32_t holdtime)  	/*  	 * Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5459,7 +5474,7 @@ int peer_timers_unset(struct peer *peer)  	/*  	 * Remove flag and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5499,7 +5514,7 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect)  	}  	/*  	 * Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5553,7 +5568,7 @@ int peer_timers_connect_unset(struct peer *peer)  	}  	/*  	 * Remove flag and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5601,7 +5616,7 @@ int peer_advertise_interval_set(struct peer *peer, uint32_t routeadv)  	/*  	 * Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5658,7 +5673,7 @@ int peer_advertise_interval_unset(struct peer *peer)  	/*  	 * Remove flag and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5699,7 +5714,7 @@ int peer_timers_delayopen_set(struct peer *peer, uint32_t delayopen)  		return 0;  	/* Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS_RO(peer->group->peer, node, member)) {  		/* Skip peers with overridden configuration. */ @@ -5746,7 +5761,7 @@ int peer_timers_delayopen_unset(struct peer *peer)  		return 0;  	/* Remove flag and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS_RO(peer->group->peer, node, member)) {  		/* Skip peers with overridden configuration. 
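Note on the recurring comment fix in this file (explicitely becomes explicitly): all of these loops implement the same idiom, in which a setting applied at the peer-group level is pushed down to each member peer unless that member has explicitly overridden it. A rough fragment showing the shape of the loop (the override check and the copy step are placeholders, not actual bgpd fields; node, nnode and member are the usual list-walk locals):

    for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
        /* Skip peers with overridden configuration. */
        if (member_overrides_setting(member))  /* placeholder check */
            continue;

        apply_group_setting(member);  /* placeholder copy of the value */
    }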
*/ @@ -5816,7 +5831,7 @@ int peer_allowas_in_set(struct peer *peer, afi_t afi, safi_t safi,  	/*  	 * Set flag and configuration on all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5882,7 +5897,7 @@ int peer_allowas_in_unset(struct peer *peer, afi_t afi, safi_t safi)  	/*  	 * Remove flags and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -5908,7 +5923,7 @@ int peer_local_as_set(struct peer *peer, as_t as, bool no_prepend,  	struct bgp *bgp = peer->bgp;  	struct peer *member;  	struct listnode *node, *nnode; -	bgp_peer_sort_t ptype = peer_sort(peer); +	enum bgp_peer_sort ptype = peer_sort(peer);  	if (ptype != BGP_PEER_EBGP && ptype != BGP_PEER_INTERNAL)  		return BGP_ERR_LOCAL_AS_ALLOWED_ONLY_FOR_EBGP; @@ -5951,7 +5966,7 @@ int peer_local_as_set(struct peer *peer, as_t as, bool no_prepend,  	/*  	 * Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6027,7 +6042,7 @@ int peer_local_as_unset(struct peer *peer)  	/*  	 * Remove flag and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6091,7 +6106,7 @@ int peer_password_set(struct peer *peer, const char *password)  	/*  	 * Set flag and configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6169,7 +6184,7 @@ int peer_password_unset(struct peer *peer)  	/*  	 * Remove flag and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6239,7 +6254,7 @@ int peer_distribute_set(struct peer *peer, afi_t afi, safi_t safi, int direct,  	/*  	 * Set configuration on all peer-group members, un less they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6306,7 +6321,7 @@ int peer_distribute_unset(struct peer *peer, afi_t afi, safi_t safi, int direct)  	/*  	 * Remove configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 
*/  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6424,7 +6439,7 @@ int peer_prefix_list_set(struct peer *peer, afi_t afi, safi_t safi, int direct,  	/*  	 * Set configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6492,7 +6507,7 @@ int peer_prefix_list_unset(struct peer *peer, afi_t afi, safi_t safi,  	/*  	 * Remove configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6608,7 +6623,7 @@ int peer_aslist_set(struct peer *peer, afi_t afi, safi_t safi, int direct,  	/*  	 * Set configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6675,7 +6690,7 @@ int peer_aslist_unset(struct peer *peer, afi_t afi, safi_t safi, int direct)  	/*  	 * Remove configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6805,7 +6820,7 @@ int peer_route_map_set(struct peer *peer, afi_t afi, safi_t safi, int direct,  	/*  	 * Set configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6871,7 +6886,7 @@ int peer_route_map_unset(struct peer *peer, afi_t afi, safi_t safi, int direct)  	/*  	 * Remove configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6925,7 +6940,7 @@ int peer_unsuppress_map_set(struct peer *peer, afi_t afi, safi_t safi,  	/*  	 * Set configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -6987,7 +7002,7 @@ int peer_unsuppress_map_unset(struct peer *peer, afi_t afi, safi_t safi)  	/*  	 * Remove configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -7091,7 +7106,7 @@ int peer_advertise_map_set(struct peer *peer, afi_t afi, safi_t safi,  	/*  	 * Set configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. 
+	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -7152,7 +7167,7 @@ int peer_advertise_map_unset(struct peer *peer, afi_t afi, safi_t safi,  	/*  	 * Remove configuration on all peer-group members, unless they are -	 * explicitely overriding peer-group configuration. +	 * explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -7185,8 +7200,9 @@ static bool peer_maximum_prefix_clear_overflow(struct peer *peer)  	if (peer->t_pmax_restart) {  		BGP_TIMER_OFF(peer->t_pmax_restart);  		if (bgp_debug_neighbor_events(peer)) -			zlog_debug("%s Maximum-prefix restart timer cancelled", -				   peer->host); +			zlog_debug( +				"%pBP Maximum-prefix restart timer cancelled", +				peer);  	}  	BGP_EVENT_ADD(peer, BGP_Start);  	return true; @@ -7229,7 +7245,7 @@ int peer_maximum_prefix_set(struct peer *peer, afi_t afi, safi_t safi,  	/*  	 * Set flags and configuration on all peer-group members, unless they -	 * are explicitely overriding peer-group configuration. +	 * are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -7290,7 +7306,7 @@ int peer_maximum_prefix_unset(struct peer *peer, afi_t afi, safi_t safi)  	/*  	 * Remove flags and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {  		struct peer *member; @@ -7352,7 +7368,7 @@ int peer_maximum_prefix_out_set(struct peer *peer, afi_t afi, safi_t safi,  	/*  	 * Set flag and configuration on all peer-group members, unless they -	 * are explicitely overriding peer-group configuration. +	 * are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {  		/* Skip peers with overridden configuration. */ @@ -7394,7 +7410,7 @@ int peer_maximum_prefix_out_unset(struct peer *peer, afi_t afi, safi_t safi)  	/*  	 * Remove flag and configuration from all peer-group members, unless -	 * they are explicitely overriding peer-group configuration. +	 * they are explicitly overriding peer-group configuration.  	 */  	for (ALL_LIST_ELEMENTS_RO(peer->group->peer, node, member)) {  		/* Skip peers with overridden configuration. 
*/ @@ -7938,8 +7954,33 @@ void bgp_pthreads_finish(void)  	frr_pthread_stop_all();  } +static int peer_unshut_after_cfg(struct bgp *bgp) +{ +	struct listnode *node; +	struct peer *peer; + +	for (ALL_LIST_ELEMENTS_RO(bgp->peer, node, peer)) { +		if (!peer->shut_during_cfg) +			continue; + +		if (bgp_debug_neighbor_events(peer)) +			zlog_debug("%s: released from config-pending hold", +				   peer->host); + +		peer->shut_during_cfg = false; +		if (peer_active(peer) && peer->status != Established) { +			if (peer->status != Idle) +				BGP_EVENT_ADD(peer, BGP_Stop); +			BGP_EVENT_ADD(peer, BGP_Start); +		} +	} + +	return 0; +} +  void bgp_init(unsigned short instance)  { +	hook_register(bgp_config_end, peer_unshut_after_cfg);  	/* allocates some vital data structures used by peer commands in  	 * vty_init */ @@ -8109,3 +8150,16 @@ void bgp_gr_apply_running_config(void)  		gr_router_detected = false;  	}  } + +printfrr_ext_autoreg_p("BP", printfrr_bp); +static ssize_t printfrr_bp(struct fbuf *buf, struct printfrr_eargs *ea, +			   const void *ptr) +{ +	const struct peer *peer = ptr; + +	if (!peer) +		return bputs(buf, "(null)"); + +	return bprintfrr(buf, "%s(%s)", peer->host, +			 peer->hostname ? peer->hostname : "Unknown"); +} diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index a9475f39a7..dfed9f2ae9 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -192,11 +192,11 @@ struct bgp_redist {  	struct bgp_rmap rmap;  }; -typedef enum { +enum vpn_policy_direction {  	BGP_VPN_POLICY_DIR_FROMVPN = 0,  	BGP_VPN_POLICY_DIR_TOVPN = 1,  	BGP_VPN_POLICY_DIR_MAX = 2 -} vpn_policy_direction_t; +};  struct vpn_policy {  	struct bgp *bgp; /* parent */ @@ -771,6 +771,7 @@ DECLARE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp));  DECLARE_HOOK(bgp_inst_config_write,  		(struct bgp *bgp, struct vty *vty),  		(bgp, vty)); +DECLARE_HOOK(bgp_config_end, (struct bgp *bgp), (bgp));  /* Thread callback information */  struct afi_safi_info { @@ -900,13 +901,13 @@ struct bgp_filter {  /* IBGP/EBGP identifier.  We also have a CONFED peer, which is to say,     a peer who's AS is part of our Confederation.  */ -typedef enum { +enum bgp_peer_sort {  	BGP_PEER_UNSPECIFIED,  	BGP_PEER_IBGP,  	BGP_PEER_EBGP,  	BGP_PEER_INTERNAL,  	BGP_PEER_CONFED, -} bgp_peer_sort_t; +};  /* BGP message header and packet size.  */  #define BGP_MARKER_SIZE		                16 @@ -1083,7 +1084,7 @@ struct peer {  	/* Peer's local AS number. */  	as_t local_as; -	bgp_peer_sort_t sort; +	enum bgp_peer_sort sort;  	/* Peer's Change local AS number. */  	as_t change_local_as; @@ -1676,6 +1677,8 @@ struct peer {  	/* Long-lived Graceful Restart */  	struct llgr_info llgr[AFI_MAX][SAFI_MAX]; +	bool shut_during_cfg; +  	QOBJ_FIELDS;  };  DECLARE_QOBJ_TYPE(peer); @@ -1703,9 +1706,10 @@ DECLARE_QOBJ_TYPE(peer);  /* Check if suppress start/restart of sessions to peer. 
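Note on the printfrr_bp() hunk above: registering the "BP" extension is what backs the %pBP conversions used in the zlog_debug() calls earlier in this diff and documented in doc/developer/logging.rst further down; it prints the peer's address and hostname together and tolerates a NULL peer. A usage sketch, assuming the usual bgpd headers are included:

    /* Pass the peer itself instead of peer->host. */
    static void log_restart_cancel(struct peer *peer)
    {
        /* prints e.g. "192.168.1.1(leaf1.frrouting.org) Maximum-prefix ..." */
        zlog_debug("%pBP Maximum-prefix restart timer canceled", peer);
    }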
*/  #define BGP_PEER_START_SUPPRESSED(P)                                           \ -	(CHECK_FLAG((P)->flags, PEER_FLAG_SHUTDOWN)                            \ -	 || CHECK_FLAG((P)->sflags, PEER_STATUS_PREFIX_OVERFLOW)               \ -	 || CHECK_FLAG((P)->bgp->flags, BGP_FLAG_SHUTDOWN)) +	(CHECK_FLAG((P)->flags, PEER_FLAG_SHUTDOWN) ||                         \ +	 CHECK_FLAG((P)->sflags, PEER_STATUS_PREFIX_OVERFLOW) ||               \ +	 CHECK_FLAG((P)->bgp->flags, BGP_FLAG_SHUTDOWN) ||                     \ +	 (P)->shut_during_cfg)  #define PEER_ROUTE_ADV_DELAY(peer)					       \  	(CHECK_FLAG(peer->thread_flags, PEER_THREAD_SUBGRP_ADV_DELAY)) @@ -1964,12 +1968,12 @@ enum bgp_create_error_code {  /*   * Enumeration of different policy kinds a peer can be configured with.   */ -typedef enum { +enum bgp_policy_type {  	BGP_POLICY_ROUTE_MAP,  	BGP_POLICY_FILTER_LIST,  	BGP_POLICY_PREFIX_LIST,  	BGP_POLICY_DISTRIBUTE_LIST, -} bgp_policy_type_e; +};  /* peer_flag_change_type. */  enum peer_change_type { @@ -2021,8 +2025,8 @@ extern struct peer *peer_unlock_with_caller(const char *, struct peer *);  #define peer_unlock(A) peer_unlock_with_caller(__FUNCTION__, (A))  #define peer_lock(B) peer_lock_with_caller(__FUNCTION__, (B)) -extern bgp_peer_sort_t peer_sort(struct peer *peer); -extern bgp_peer_sort_t peer_sort_lookup(struct peer *peer); +extern enum bgp_peer_sort peer_sort(struct peer *peer); +extern enum bgp_peer_sort peer_sort_lookup(struct peer *peer);  extern bool peer_active(struct peer *);  extern bool peer_active_nego(struct peer *); @@ -2499,4 +2503,11 @@ void peer_tcp_mss_unset(struct peer *peer);  extern void bgp_recalculate_afi_safi_bestpaths(struct bgp *bgp, afi_t afi,  					       safi_t safi); + +#ifdef _FRR_ATTRIBUTE_PRINTFRR +/* clang-format off */ +#pragma FRR printfrr_ext "%pBP" (struct peer *) +/* clang-format on */ +#endif +  #endif /* _QUAGGA_BGPD_H */ diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c index 1c8497843e..3aa8868374 100644 --- a/bgpd/rfapi/rfapi.c +++ b/bgpd/rfapi/rfapi.c @@ -832,9 +832,7 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */  	struct ecommunity *ecomm = bgp_attr_get_ecommunity(&attr); -	if (ecomm->size) { -		attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES); -	} else { +	if (!ecomm->size) {  		ecommunity_free(&ecomm);  		bgp_attr_set_ecommunity(&attr, NULL);  	} @@ -3980,7 +3978,7 @@ rfapi_rfp_get_group_config_name_l2(struct rfapi_cfg *rfc, const char *name,   *    rfp_start_val     value returned by rfp_start   *    type              group type   *    name              group name - *    criteria          RFAPI caller provided serach criteria + *    criteria          RFAPI caller provided search criteria   *    search_cb         optional rfp_group_config_search_cb_t   *   * output: @@ -4037,7 +4035,7 @@ void *rfapi_rfp_get_group_config_ptr_name(   *    rfp_start_val     value returned by rfp_start   *    type              group type   *    logical_net_id    group logical network identifier - *    criteria          RFAPI caller provided serach criteria + *    criteria          RFAPI caller provided search criteria   *    search_cb         optional rfp_group_config_search_cb_t   *   * output: diff --git a/bgpd/rfapi/vnc_export_bgp.c b/bgpd/rfapi/vnc_export_bgp.c index f4f2e11391..c479b4d65a 100644 --- a/bgpd/rfapi/vnc_export_bgp.c +++ b/bgpd/rfapi/vnc_export_bgp.c @@ -650,9 +650,6 @@ encap_attr_export(struct attr *new, struct attr *orig,  	} else {  		bgp_attr_set_ecommunity(new, ecom_ro);  	} -	if (ecom_ro) 
{ -		new->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES); -	}  	/*  	 * Set MED diff --git a/configure.ac b/configure.ac index 170d16ca6a..0a6bdd1d73 100644 --- a/configure.ac +++ b/configure.ac @@ -58,7 +58,7 @@ elif test "$host" != "$build"; then    AC_MSG_NOTICE([...])    AC_MSG_NOTICE([... cross-compilation: creating hosttools directory and self-configuring for build platform tools]) -  AC_MSG_NOTICE([... use HOST_CPPFLAGS / HOST_CFLAGS / HOST_LDFLAGS if neccessary]) +  AC_MSG_NOTICE([... use HOST_CPPFLAGS / HOST_CFLAGS / HOST_LDFLAGS if necessary])    AC_MSG_NOTICE([...])    ( @@ -325,9 +325,26 @@ else     fi  fi +dnl just stick -g into LDFLAGS, if we don't have it in CFLAGS it won't do much +LDFLAGS="$LDFLAGS -g" +  AM_CONDITIONAL([DEV_BUILD], [test "$enable_dev_build" = "yes"]) +dnl -fms-extensions causes clang to have a built-in __wchar_t on OpenBSD, +dnl which just straight up breaks compiling any code. +dnl (2022-04-04 / OpenBSD 7 / clang 11.1.0) +AH_VERBATIM([OpenBSD], [ +#ifdef __OpenBSD__ +#define __wchar_t __wchar_t_ignore +#include <stdint.h> +#undef __wchar_t +#endif +]) +  dnl always want these CFLAGS +AC_C_FLAG([-fms-extensions], [ +  AC_MSG_ERROR([$CC does not support unnamed struct fields (-fms-extensions)]) +])  AC_C_FLAG([-fno-omit-frame-pointer])  AC_C_FLAG([-funwind-tables])  AC_C_FLAG([-Wall]) diff --git a/debian/changelog b/debian/changelog index 2b28c4c6dc..f3e42199de 100644 --- a/debian/changelog +++ b/debian/changelog @@ -989,7 +989,7 @@ quagga (0.98.3-6) testing-proposed-updates; urgency=high  quagga (0.98.3-5) unstable; urgency=high    * The patch which tried to remove the OpenSSL dependency, which is -    not only unneccessary but also a violation of the licence and thus RC, +    not only unnecessary but also a violation of the licence and thus RC,      stopped working a while ago, since autoreconf is no longer run before      building the binaries. So now ./configure is patched directly (thanks      to Faidon Liambotis for reporting). Closes: #306840 @@ -1370,7 +1370,7 @@ quagga (0.96.4x-3) unstable; urgency=low    * Made the directory (but not the config/log files!) world accessible      again on user request (thanks to Anand Kumria)). Closes: #213129    * No longer providing sample configuration in /etc/quagga/. They are -    now only available in /usr/share/doc/quagga/ to avoid accidently +    now only available in /usr/share/doc/quagga/ to avoid accidentally      using them without changing the adresses (thanks to Marc Haber).      Closes: #215918 @@ -1430,7 +1430,7 @@ quagga (0.96.3-1) unstable; urgency=medium  quagga (0.96.2-9) unstable; urgency=medium -  * Removed /usr/share/info/dir.* which were accidently there and prevented +  * Removed /usr/share/info/dir.* which were accidentally there and prevented      the installation by dpkg (thanks to Simon Raven). Closes: #212614    * Reworded package description (thanks to Anand Kumria). Closes: #213125    * Added french debconf translation (thanks to Christian Perrier). diff --git a/debian/frr.logrotate b/debian/frr.logrotate index a56a908bdf..1e0e726cb4 100644 --- a/debian/frr.logrotate +++ b/debian/frr.logrotate @@ -4,7 +4,7 @@          missingok          compress          rotate 14 -        create 640 frr frrvty +        create 0640 frr frr          postrotate              pid=$(lsof -t -a -c /syslog/ /var/log/frr/* 2>/dev/null) @@ -17,7 +17,7 @@              # open, as well as the daemons, so always signal the daemons.              # It's safe, a NOP if (only) syslog is being used.              
for i in babeld bgpd eigrpd isisd ldpd nhrpd ospf6d ospfd sharpd \ -                pimd ripd ripngd zebra pbrd staticd bfdd fabricd vrrpd; do +                pimd ripd ripngd zebra pathd pbrd staticd bfdd fabricd vrrpd; do                  if [ -e /var/run/frr/$i.pid ] ; then                      pids="$pids $(cat /var/run/frr/$i.pid)"                  fi diff --git a/debian/frr.postinst b/debian/frr.postinst index 505ff8eaf8..4e23cd3cec 100644 --- a/debian/frr.postinst +++ b/debian/frr.postinst @@ -16,7 +16,7 @@ adduser \  	frr  usermod -a -G frrvty frr -mkdir -p /var/log/frr +mkdir -m 0755 -p /var/log/frr  mkdir -p /etc/frr diff --git a/debian/frr.preinst b/debian/frr.preinst index 0e10e39247..2af5a4ed8f 100644 --- a/debian/frr.preinst +++ b/debian/frr.preinst @@ -73,6 +73,7 @@ EOF  		-o -f /etc/frr/eigrpd.conf \  		-o -f /etc/frr/babeld.conf \  		-o -f /etc/frr/pbrd.conf \ +		-o -f /etc/frr/pathd.conf \  		-o -f /etc/frr/bfdd.conf; then  		# no explicit statement, but some split config file exists  		# => need to fix vtysh.conf & frr.conf in postinst diff --git a/doc/developer/building-docker.rst b/doc/developer/building-docker.rst index 35b51cd9c0..4cf356049e 100644 --- a/doc/developer/building-docker.rst +++ b/doc/developer/building-docker.rst @@ -109,6 +109,27 @@ No script, multi-arch (ex. amd64, arm64):: +Building ubi 8 Image +----------------------- + +Script:: + +   ./docker/ubi-8/build.sh + +Script with params, an example could be this (all that info will go to docker label) :: + +   ./docker/ubi-8/build.sh  frr:ubi-8-my-test "$(git rev-parse --short=10 HEAD)" my_release my_name my_vendor + +No script:: + +   docker build -f docker/ubi-8/Dockerfile . + +No script, multi-arch (ex. amd64, arm64):: + +   docker buildx build --platform linux/amd64,linux/arm64 -f docker/ubi-8/Dockerfile -t frr-ubi-8:latest . + + +  Building Ubuntu 18.04 Image  --------------------------- diff --git a/doc/developer/building-frr-for-fedora.rst b/doc/developer/building-frr-for-fedora.rst index dc869ece10..aa10f1118d 100644 --- a/doc/developer/building-frr-for-fedora.rst +++ b/doc/developer/building-frr-for-fedora.rst @@ -81,7 +81,7 @@ content:     MPLS must be invidividually enabled on each interface that requires it. See     the example in the config block above. -Load the modifed sysctls on the system: +Load the modified sysctls on the system:  .. code-block:: console diff --git a/doc/developer/building-frr-for-opensuse.rst b/doc/developer/building-frr-for-opensuse.rst index d9800a1638..38346fe881 100644 --- a/doc/developer/building-frr-for-opensuse.rst +++ b/doc/developer/building-frr-for-opensuse.rst @@ -85,7 +85,7 @@ content:     MPLS must be invidividually enabled on each interface that requires it. See     the example in the config block above. -Load the modifed sysctls on the system: +Load the modified sysctls on the system:  .. code-block:: console diff --git a/doc/developer/draft-zebra-00.ms b/doc/developer/draft-zebra-00.ms index 25994727a0..b5d6924613 100644 --- a/doc/developer/draft-zebra-00.ms +++ b/doc/developer/draft-zebra-00.ms @@ -179,13 +179,13 @@ Interface information message format.  
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+  |   Index (1)   |  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -|                       Inteface flag (4)                       | +|                       Interface flag (4)                      |  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -|                      Inteface metric (4)                      | +|                      Interface metric (4)                     |  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -|                        Inteface MTU (4)                       | +|                        Interface MTU (4)                      |  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -|                    Inteface Address count (4)                 | +|                    Interface Address count (4)                |  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+  .DE  .sp diff --git a/doc/developer/lists.rst b/doc/developer/lists.rst index 4eaa85115e..ccac10aab9 100644 --- a/doc/developer/lists.rst +++ b/doc/developer/lists.rst @@ -62,7 +62,7 @@ in the future:  The APIs are all designed to be as type-safe as possible.  This means that  there will be a compiler warning when an item doesn't match the container, or  the return value has a different type, or other similar situations.  **You -should never use casts with these APIs.**  If a cast is neccessary in relation +should never use casts with these APIs.**  If a cast is necessary in relation  to these APIs, there is probably something wrong with the overall design.  Only the following pieces use dynamically allocated memory: @@ -143,7 +143,7 @@ Each of the data structures has a ``PREDECL_*`` and a ``DECLARE_*`` macro to  set up an "instantiation" of the container.  This works somewhat similar to C++  templating, though much simpler. -**In all following text, the Z prefix is replaced with a name choosen +**In all following text, the Z prefix is replaced with a name chosen  for the instance of the datastructure.**  The common setup pattern will look like this: @@ -650,7 +650,7 @@ Atomic lists  `atomlist.h` provides an unsorted and a sorted atomic single-linked list.  Since atomic memory accesses can be considerably slower than plain memory  accessses (depending on the CPU type), these lists should only be used where -neccessary. +necessary.  The following guarantees are provided regarding concurrent access: diff --git a/doc/developer/logging.rst b/doc/developer/logging.rst index 7046361204..e262f6af94 100644 --- a/doc/developer/logging.rst +++ b/doc/developer/logging.rst @@ -158,7 +158,6 @@ Networking data types     - :c:struct:`prefix_ls`     - :c:struct:`prefix_rd` -   - :c:struct:`prefix_ptr`     - :c:struct:`prefix_sg` (use :frrfmt:`%pPSG4`)     - :c:union:`prefixptr` (dereference to get :c:struct:`prefix`)     - :c:union:`prefixconstptr` (dereference to get :c:struct:`prefix`) @@ -171,7 +170,7 @@ Networking data types     :frrfmtout:`(*,1.2.3.4)` -   This is *(S,G)* output for use in pimd.  (Note prefix_sg is not a prefix +   This is *(S,G)* output for use in zebra.  (Note prefix_sg is not a prefix     "subclass" like the other prefix_* structs.)  .. frrfmt:: %pSU (union sockunion *) @@ -205,12 +204,6 @@ Networking data types     ``%pNHci``: :frrfmtout:`eth0` — compact interface only -.. frrfmt:: %pBD (struct bgp_dest *) - -   :frrfmtout:`fe80::1234/64` - -   (only available in bgpd.) -  .. 
frrfmt:: %dPF (int)     :frrfmtout:`AF_INET` @@ -361,6 +354,57 @@ FRR library helper formats     (The output is aligned to some degree.) +FRR daemon specific formats +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following formats are only available in specific daemons, as the code +implementing them is part of the daemon, not the library. + +zebra +""""" + +.. frrfmt:: %pZN (struct route_node *) + +   Print information for a RIB node, including zebra-specific data. + +   :frrfmtout:`::/0 src fe80::/64 (MRIB)` (``%pZN``) + +   :frrfmtout:`1234` (``%pZNt`` - table number) + +bgpd +"""" + +.. frrfmt:: %pBD (struct bgp_dest *) + +   Print prefix for a BGP destination. + +   :frrfmtout:`fe80::1234/64` + +.. frrfmt:: %pBP (struct peer *) + +   :frrfmtout:`192.168.1.1(leaf1.frrouting.org)` + +   Print BGP peer's IP and hostname together. + +pimd/pim6d +"""""""""" + +.. frrfmt:: %pPA (pim_addr *) + +   Format IP address according to IP version (pimd vs. pim6d) being compiled. + +   :frrfmtout:`fe80::1234` / :frrfmtout:`10.0.0.1` + +   :frrfmtout:`*` (``%pPAs`` - replace 0.0.0.0/:: with star) + +.. frrfmt:: %pSG (pim_sgaddr *) + +   Format S,G pair according to IP version (pimd vs. pim6d) being compiled. +   Braces are included. + +   :frrfmtout:`(*,224.0.0.0)` + +  General utility formats  ^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/developer/static-linking.rst b/doc/developer/static-linking.rst index 1e45c48dc3..5342fbfbf6 100644 --- a/doc/developer/static-linking.rst +++ b/doc/developer/static-linking.rst @@ -64,7 +64,7 @@ like this:  Hopefully you get a nice, usable, PIC ``libpcre.a``.  So now we have to link all these static libraries into FRR. Rather than modify -FRR to accomodate this, the best option is to create an archive with all of +FRR to accommodate this, the best option is to create an archive with all of  libyang's dependencies. Then to avoid making any changes to FRR build foo,  rename this ``libyang.a`` and copy it over the usual static library location.  Ugly but it works. To do this, go into your libyang build directory, which diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst index af8756a909..adab9725d9 100644 --- a/doc/developer/workflow.rst +++ b/doc/developer/workflow.rst @@ -623,6 +623,8 @@ Please copy-paste this header verbatim. In particular:  - Do not replace "This program" with "FRR"  - Do not change the address of the FSF +- keep ``#include <zebra.h>``.  The absolute first header included in any C +  file **must** be either ``zebra.h`` or ``config.h`` (with HAVE_CONFIG_H guard)  Adding Copyright Claims to Existing Files  ----------------------------------------- @@ -895,6 +897,26 @@ necessary replacements.  | u_long    | unsigned long            |  +-----------+--------------------------+ +FRR also uses unnamed struct fields, enabled with ``-fms-extensions`` (cf. +https://gcc.gnu.org/onlinedocs/gcc/Unnamed-Fields.html).  The following two +patterns can/should be used where contextually appropriate: + +.. code-block:: c + +   struct outer { +           struct inner; +   }; + +.. code-block:: c + +   struct outer { +           union { +                   struct inner; +                   struct inner inner_name; +           }; +   }; + +  .. 
_style-exceptions:  Exceptions diff --git a/doc/developer/zebra.rst b/doc/developer/zebra.rst index d51cbc9a14..cef53f1cbe 100644 --- a/doc/developer/zebra.rst +++ b/doc/developer/zebra.rst @@ -250,7 +250,7 @@ Zebra Protocol Commands  +------------------------------------+-------+  | ZEBRA_INTERFACE_DISABLE_RADV       | 43    |  +------------------------------------+-------+ -| ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB     | 44    | +| ZEBRA_NEXTHOP_LOOKUP_MRIB          | 44    |  +------------------------------------+-------+  | ZEBRA_INTERFACE_LINK_PARAMS        | 45    |  +------------------------------------+-------+ diff --git a/doc/user/basic.rst b/doc/user/basic.rst index 4c196cfcfe..42faefd10b 100644 --- a/doc/user/basic.rst +++ b/doc/user/basic.rst @@ -471,7 +471,7 @@ recommendations apply in regards to upgrades:     for differences against your old configuration.  If any defaults changed     that affect your setup, lines may appear or disappear.  If a new line     appears, it was previously the default (or not supported) and is now -   neccessary to retain previous behavior.  If a line disappears, it +   necessary to retain previous behavior.  If a line disappears, it     previously wasn't the default, but now is, so it is no longer necessary.  3. Check the log files for deprecation warnings by using ``grep -i deprecat``. @@ -678,6 +678,20 @@ Terminal Mode Commands     This command displays FRR's timer data for timers that will pop in     the future. +.. clicmd:: show yang operational-data XPATH [{format <json|xml>|translate TRANSLATOR|with-config}] DAEMON + +   Display the YANG operational data starting from XPATH. The default +   format is JSON, but can be displayed in XML as well. + +   Normally YANG operational data are located inside containers marked +   as `read-only`. + +   Optionally it is also possible to display configuration leaves in +   addition to operational data with the option `with-config`. This +   option enables the display of configuration leaves with their +   currently configured value (if the leaf is optional it will only show +   if it was created or has a default value). +  .. _common-invocation-options:  Common Invocation Options diff --git a/doc/user/bfd.rst b/doc/user/bfd.rst index 14aacc0f6b..c47ed04f63 100644 --- a/doc/user/bfd.rst +++ b/doc/user/bfd.rst @@ -236,7 +236,7 @@ BFD Peer Specific Commands     Notes: -   - Profile configurations can be overriden on a peer basis by specifying +   - Profile configurations can be overridden on a peer basis by specifying       non-default parameters in peer configuration node.     - Non existing profiles can be configured and they will only be applied       once they start to exist. diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index b9733cd522..6f99b41140 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -1983,6 +1983,11 @@ Using AS Path in Route Map     Prepend the existing last AS number (the leftmost ASN) to the AS_PATH.     The no form of this command removes this set operation from the route-map. +.. clicmd:: set as-path replace <any|ASN> + +   Replace a specific AS number to local AS number. ``any`` replaces each +   AS number in the AS-PATH with the local AS number. +  .. _bgp-communities-attribute:  Communities Attribute @@ -2947,16 +2952,18 @@ This group of server links is referred to as an Ethernet Segment.  
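The new ``set as-path replace <any|ASN>`` route-map action documented above rewrites the AS_PATH so that the replaced entries carry the local AS number. As a rough, self-contained sketch of the underlying operation (the types and function below are simplified stand-ins, not FRR's actual ``struct aspath`` API), replacing every ASN amounts to walking each path segment and overwriting its AS array in place:

.. code-block:: c

   #include <stddef.h>
   #include <stdint.h>

   typedef uint32_t as_t;

   /* Illustrative segment layout only; real AS_PATH segments also carry a
    * segment type (AS_SEQUENCE, AS_SET, ...) and are heap-allocated. */
   struct as_segment {
           struct as_segment *next;
           size_t length;      /* number of ASNs in this segment */
           as_t as[8];         /* ASN storage, fixed-size here for brevity */
   };

   /* Overwrite every ASN in every segment with our own AS number,
    * i.e. the effect of "set as-path replace any". */
   static void path_replace_all_asn(struct as_segment *seg, as_t our_asn)
   {
           for (; seg; seg = seg->next)
                   for (size_t i = 0; i < seg->length; i++)
                           seg->as[i] = our_asn;
   }

With a specific ASN argument instead of ``any``, the inner assignment simply becomes conditional on the entry matching the requested AS number before it is overwritten with the local one.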
Ethernet Segments  """""""""""""""""  An Ethernet Segment can be configured by specifying a system-MAC and a -local discriminator against the bond interface on the PE (via zebra) - +local discriminator or a complete ESINAME against the bond interface on the +PE (via zebra) - -.. clicmd:: evpn mh es-id (1-16777215) +.. clicmd:: evpn mh es-id <(1-16777215)|ESINAME>  .. clicmd:: evpn mh es-sys-mac X:X:X:X:X:X  The sys-mac and local discriminator are used for generating a 10-byte, -Type-3 Ethernet Segment ID. +Type-3 Ethernet Segment ID. ESINAME is a 10-byte, Type-0 Ethernet Segment ID - +"00:AA:BB:CC:DD:EE:FF:GG:HH:II". -Type-1 (EAS-per-ES and EAD-per-EVI) routes are used to advertise the locally +Type-1 (EAD-per-ES and EAD-per-EVI) routes are used to advertise the locally  attached ESs and to learn off remote ESs in the network. Local Type-2/MAC-IP  routes are also advertised with a destination ESI allowing for MAC-IP syncing  between Ethernet Segment peers. @@ -3056,8 +3063,7 @@ route maybe fragmented.  The number of EVIs per-EAD route can be configured via the following  BGP command - -.. index:: [no] ead-es-frag evi-limit(1-1000) -.. clicmd:: [no] ead-es-frag evi-limit(1-1000) +.. clicmd:: [no] ead-es-frag evi-limit (1-1000)  Sample Configuration  ^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/user/pimv6.rst b/doc/user/pimv6.rst index e71cf4631c..0456e77b21 100644 --- a/doc/user/pimv6.rst +++ b/doc/user/pimv6.rst @@ -113,6 +113,14 @@ Certain signals have special meanings to *pim6d*.     notifications to the kernel. This command is vrf aware, to configure for a     vrf, enter the vrf submode. +.. clicmd:: ipv6 ssmpingd [X:X::X:X] + +   Enable ipv6 ssmpingd configuration. A network level management tool +   to check whether one can receive multicast packets via SSM from host. +   The host target given to ssmping must run the ssmpingd daemon which listens +   for IPv4 and IPv6 unicast requests. When it receives one, it responds to a +   well known SSM multicast group which ssmping just have joined. +  .. _pimv6-interface-configuration:  PIMv6 Interface Configuration @@ -277,6 +285,47 @@ cause great confusion.     Display upstream information for S,G's and the RPF data associated with them. +.. clicmd:: show ipv6 multicast + +   Display various information about the interfaces used in this pim instance. + +.. clicmd:: show ipv6 multicast count [vrf NAME] [json] + +   Display multicast data packets count per interface for a vrf. + +.. clicmd:: show ipv6 multicast count vrf all [json] + +   Display multicast data packets count per interface for all vrf. + +.. clicmd:: show ipv6 mroute [vrf NAME] [X:X::X:X [X:X::X:X]] [fill] [json] + +   Display information about installed into the kernel S,G mroutes.  If +   one address is specified we assume it is the Group we are interested +   in displaying data on.  If the second address is specified then it is +   Source Group.  The keyword ``fill`` says to fill in all assumed data +   for test/data gathering purposes. + +.. clicmd:: show ipv6 mroute [vrf NAME] count [json] + +   Display information about installed into the kernel S,G mroutes and in +   addition display data about packet flow for the mroutes for a specific +   vrf. + +.. clicmd:: show ipv6 mroute vrf all count [json] + +   Display information about installed into the kernel S,G mroutes and in +   addition display data about packet flow for the mroutes for all vrfs. + +.. 
clicmd:: show ipv6 mroute [vrf NAME] summary [json] + +   Display total number of S,G mroutes and number of S,G mroutes installed +   into the kernel for a specific vrf. + +.. clicmd:: show ipv6 mroute vrf all summary [json] + +   Display total number of S,G mroutes and number of S,G mroutes +   installed into the kernel for all vrfs. +  PIMv6 Debug Commands  ==================== diff --git a/doc/user/scripting.rst b/doc/user/scripting.rst index badc82c500..42855de1ab 100644 --- a/doc/user/scripting.rst +++ b/doc/user/scripting.rst @@ -58,7 +58,7 @@ The documentation for :ref:`on-rib-process-dplane-results` tells us its  arguments. Here, the destination prefix for a route is being logged out.  Scripts live in :file:`/etc/frr/scripts/` by default. This is configurable at -compile time via ``--with-scriptdir``. It may be overriden at runtime with the +compile time via ``--with-scriptdir``. It may be overridden at runtime with the  ``--scriptdir`` daemon option.  The documentation for :ref:`on-rib-process-dplane-results` indicates that the diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst index 0244f7c583..29f305520a 100644 --- a/doc/user/zebra.rst +++ b/doc/user/zebra.rst @@ -283,6 +283,17 @@ the default route.     Allow IPv6 nexthop tracking to resolve via the default route. This parameter     is configured per-VRF, so the command is also available in the VRF subnode. +.. clicmd:: show ip nht [vrf NAME] [A.B.C.D|X:X::X:X] [mrib] + +   Show nexthop tracking status for address resolution.  If vrf is not specified +   then display the default vrf.  If ``all`` is specified show all vrf address +   resolution output.  If an ipv4 or ipv6 address is not specified then display +   all addresses tracked, else display the requested address.  The mrib keyword +   indicates that the operator wants to see the multicast rib address resolution +   table.  An alternative form of the command is ``show ip import-check`` and this +   form of the command is deprecated at this point in time. + +  Administrative Distance  ======================= @@ -290,7 +301,7 @@ Administrative distance allows FRR to make decisions about what routes  should be installed in the rib based upon the originating protocol.  The lowest Admin Distance is the route selected.  This is purely a  subjective decision about ordering and care has been taken to choose -the same distances that other routing suites have choosen. +the same distances that other routing suites have chosen.  +------------+-----------+  | Protocol   | Distance  | @@ -350,7 +361,7 @@ has multiple routes for the same prefix from multiple sources.  An example  here would be if someone else was running another routing suite besides  FRR at the same time, the kernel must choose what route to use to forward  on.  FRR choose the value of 20 because of two reasons.  FRR wanted a -value small enough to be choosen but large enough that the operator could +value small enough to be chosen but large enough that the operator could  allow route prioritization by the kernel when multiple routing suites are  being run and FRR wanted to take advantage of Route Replace semantics that  the linux kernel offers.  In order for Route Replacement semantics to @@ -541,7 +552,7 @@ via a ``ip route show X`` command:        nexthop via 192.168.161.9 dev enp39s0 weight 1  Once installed into the FIB, FRR currently has little control over what -nexthops are choosen to forward packets on.  Currently the Linux kernel +nexthops are chosen to forward packets on.  
Currently the Linux kernel  has a ``fib_multipath_hash_policy`` sysctl which dictates how the hashing  algorithm is used to forward packets. @@ -712,7 +723,7 @@ and this section also helps that case.     Create a new locator. If the name of an existing locator is specified,     move to specified locator's configuration node to change the settings it. -.. clicmd:: prefix X:X::X:X/M [function-bits-length 32] +.. clicmd:: prefix X:X::X:X/M [func-bits 32]     Set the ipv6 prefix block of the locator. SRv6 locator is defined by     RFC8986. The actual routing protocol specifies the locator and allocates a @@ -732,7 +743,7 @@ and this section also helps that case.     will be ``2001:db8:1:1:1::``)     The function bits range is 16bits by default.  If operator want to change -   function bits range, they can configure with ``function-bits-length`` +   function bits range, they can configure with ``func-bits``     option.  :: @@ -1182,7 +1193,7 @@ zebra Terminal Mode Commands     Display statistics about clients that are connected to zebra.  This is     useful for debugging and seeing how much data is being passed between -   zebra and it's clients.  If the summary form of the command is choosen +   zebra and it's clients.  If the summary form of the command is chosen     a table is displayed with shortened information.  .. clicmd:: show zebra router table summary diff --git a/docker/centos-8/Dockerfile b/docker/centos-8/Dockerfile index 71378c2451..df095edcde 100644 --- a/docker/centos-8/Dockerfile +++ b/docker/centos-8/Dockerfile @@ -1,7 +1,11 @@  # This stage builds an rpm from the source  FROM centos:centos8 as centos-8-builder +RUN sed -i -e "s|mirrorlist=|#mirrorlist=|g" /etc/yum.repos.d/CentOS-* +RUN sed -i -e "s|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g" /etc/yum.repos.d/CentOS-* +  RUN dnf install --enablerepo=powertools -y rpm-build git autoconf pcre-devel \ +        systemd-devel \          automake libtool make readline-devel texinfo net-snmp-devel pkgconfig \          groff pkgconfig json-c-devel pam-devel bison flex python3-pytest \          c-ares-devel python3-devel python3-sphinx libcap-devel platform-python-devel \ @@ -32,6 +36,10 @@ RUN echo '%_smp_mflags %( echo "-j$(/usr/bin/getconf _NPROCESSORS_ONLN)"; )' >>  # This stage installs frr from the rpm  FROM centos:centos8 + +RUN sed -i -e "s|mirrorlist=|#mirrorlist=|g" /etc/yum.repos.d/CentOS-*  \ +    && sed -i -e "s|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g" /etc/yum.repos.d/CentOS-* +  RUN mkdir -p /pkgs/rpm \      && yum install -y https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-8-x86_64-Packages/libyang2-2.0.0.10.g2eb910e4-1.el8.x86_64.rpm \          https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm diff --git a/docker/ubi-8/Dockerfile b/docker/ubi-8/Dockerfile new file mode 100644 index 0000000000..7b2db66ede --- /dev/null +++ b/docker/ubi-8/Dockerfile @@ -0,0 +1,83 @@ +# This stage builds an rpm from the source +FROM registry.access.redhat.com/ubi8/ubi:8.5 as ubi-8-builder + +RUN dnf -y update-minimal --security --sec-severity=Important --sec-severity=Critical + +RUN rpm --import https://www.centos.org/keys/RPM-GPG-KEY-CentOS-Official \ +    && dnf config-manager --disableplugin subscription-manager --add-repo http://mirror.centos.org/centos/8-stream/BaseOS/x86_64/os \ +    && dnf config-manager --disableplugin subscription-manager --add-repo 
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os \ +    && dnf config-manager --disableplugin subscription-manager --add-repo http://mirror.centos.org/centos/8-stream/PowerTools/x86_64/os + +RUN dnf install -qy https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \ +    && dnf install --enablerepo=* -qy rpm-build git autoconf pcre-devel \ +    systemd-devel automake libtool make  readline-devel  texinfo  \ +    net-snmp-devel  pkgconfig  groff pkgconfig  json-c-devel pam-devel  \ +    bison  flex  python3-pytest  c-ares-devel python3-devel python3-sphinx \ +    libcap-devel  platform-python-devel \ +    https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-8-x86_64-Packages/libyang2-2.0.0.10.g2eb910e4-1.el8.x86_64.rpm \ +    https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-8-x86_64-Packages/libyang2-devel-2.0.0.10.g2eb910e4-1.el8.x86_64.rpm \ +    https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm \ +    https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-devel-0.7.0-1.el7.centos.x86_64.rpm + + +COPY . /src + +ARG PKGVER + +RUN echo '%_smp_mflags %( echo "-j$(/usr/bin/getconf _NPROCESSORS_ONLN)"; )' >> /root/.rpmmacros \ +    && cd /src \ +    && ./bootstrap.sh \ +    && ./configure \ +        --enable-rpki \ +        --enable-snmp=agentx \ +        --enable-numeric-version \ +        --with-pkg-extra-version="_palmetto_git$PKGVER" \ +    && make dist \ +    && cd / \ +    && mkdir -p /rpmbuild/{SOURCES,SPECS} \ +    && cp /src/frr*.tar.gz /rpmbuild/SOURCES \ +    && cp /src/redhat/frr.spec /rpmbuild/SPECS \ +    && rpmbuild \ +        --define "_topdir /rpmbuild" \ +        -ba /rpmbuild/SPECS/frr.spec + +# This stage installs frr from the rpm +FROM registry.access.redhat.com/ubi8/ubi:8.5 +RUN dnf -y update-minimal --security --sec-severity=Important --sec-severity=Critical +ARG FRR_IMAGE_TAG +ARG FRR_RELEASE +ARG FRR_NAME +ARG FRR_VENDOR +LABEL name=$FRR_NAME \ +      vendor=$FRR_VENDOR \ +      version=$FRR_IMAGE_TAG \ +      release=$FRR_RELEASE + +RUN rpm --import https://www.centos.org/keys/RPM-GPG-KEY-CentOS-Official \ +    && dnf config-manager --disableplugin subscription-manager --add-repo http://mirror.centos.org/centos/8-stream/BaseOS/x86_64/os \ +    && dnf config-manager --disableplugin subscription-manager --add-repo http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os + +RUN dnf install -qy https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \ +    && mkdir -p /pkgs/rpm \ +    && dnf install --enablerepo=* -qy https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-8-x86_64-Packages/libyang2-2.0.0.10.g2eb910e4-1.el8.x86_64.rpm \ +    https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm + +COPY --from=ubi-8-builder /rpmbuild/RPMS/ /pkgs/rpm/ + +RUN dnf install -qy /pkgs/rpm/*/*.rpm \ +    && rm -rf /pkgs \ +# Own the config / PID files +    && mkdir -p /var/run/frr \ +    && chown -R frr:frr /etc/frr /var/run/frr + +# Add tini because no CentOS8 package +ENV TINI_VERSION v0.19.0 +ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /sbin/tini +RUN chmod +x /sbin/tini + +# Simple init manager for reaping processes and forwarding signals +ENTRYPOINT ["/sbin/tini", "--"] + +# Default CMD starts watchfrr +COPY docker/ubi-8/docker-start /usr/lib/frr/docker-start +CMD 
["/usr/lib/frr/docker-start"] diff --git a/docker/ubi-8/build.sh b/docker/ubi-8/build.sh new file mode 100755 index 0000000000..0216636893 --- /dev/null +++ b/docker/ubi-8/build.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +set -e + +## +# Package version needs to be decimal +## +DISTRO=ubi-8 + +GITREV="$2" +if [ -z "$GITREV" ];then +	GITREV="$(git rev-parse --short=10 HEAD)" +fi + +FRR_IMAGE_TAG="$1" +if [ -z $FRR_IMAGE_TAG ];then +	FRR_IMAGE_TAG="frr:ubi-8-$GITREV" +fi +PKGVER="$(printf '%u\n' 0x$GITREV)" + +FRR_RELEASE="$3" +if [ -z $FRR_RELEASE ];then +	FRR_RELEASE=$(git describe --tags --abbrev=0) +fi + +FRR_NAME=$4 +if [ -z $FRR_NAME ];then +	FRR_NAME=frr +fi + +FRR_VENDOR=$5 +if [ -z $FRR_VENDOR ];then +	FRR_VENDOR=frr +fi + +docker build \ +	--cache-from="frr:$DISTRO-builder-$GITREV" \ +	--file=docker/$DISTRO/Dockerfile \ +	--build-arg="PKGVER=$PKGVER" \ +	--build-arg="FRR_IMAGE_TAG=$FRR_IMAGE_TAG" \ +	--build-arg="FRR_RELEASE=$FRR_RELEASE" \ +	--build-arg="FRR_NAME=$FRR_NAME" \ +	--build-arg="FRR_VENDOR=$FRR_VENDOR" \ +	--tag="$FRR_IMAGE_TAG" \ +	. + diff --git a/docker/ubi-8/docker-start b/docker/ubi-8/docker-start new file mode 100755 index 0000000000..d954142ab9 --- /dev/null +++ b/docker/ubi-8/docker-start @@ -0,0 +1,4 @@ +#!/bin/bash + +source /usr/lib/frr/frrcommon.sh +/usr/lib/frr/watchfrr $(daemon_list) diff --git a/eigrpd/eigrp_routemap.c b/eigrpd/eigrp_routemap.c index d9b500a8fd..218cea7fa3 100644 --- a/eigrpd/eigrp_routemap.c +++ b/eigrpd/eigrp_routemap.c @@ -690,7 +690,7 @@ static const struct route_map_rule_cmd route_set_metric_cmd = {  /* `set ip next-hop IP_ADDRESS' */ -/* Set nexthop to object.  ojbect must be pointer to struct attr. */ +/* Set nexthop to object.  object must be pointer to struct attr. */  static enum route_map_cmd_result_t  route_set_ip_nexthop(void *rule, struct prefix *prefix,  		     route_map_object_t type, void *object) @@ -748,7 +748,7 @@ static const struct route_map_rule_cmd route_set_ip_nexthop_cmd = {  /* `set tag TAG' */ -/* Set tag to object.  ojbect must be pointer to struct attr. */ +/* Set tag to object.  object must be pointer to struct attr. */  static enum route_map_cmd_result_t  route_set_tag(void *rule, struct prefix *prefix,  	      route_map_object_t type, void *object) diff --git a/isisd/isis_adjacency.c b/isisd/isis_adjacency.c index 2729dce382..11f17ec7bf 100644 --- a/isisd/isis_adjacency.c +++ b/isisd/isis_adjacency.c @@ -31,6 +31,7 @@  #include "thread.h"  #include "if.h"  #include "stream.h" +#include "bfd.h"  #include "isisd/isis_constants.h"  #include "isisd/isis_common.h" @@ -814,6 +815,15 @@ void isis_adj_print_vty(struct isis_adjacency *adj, struct vty *vty,  				vty_out(vty, "      %s\n", buf);  			}  		} +		if (adj->circuit && adj->circuit->bfd_config.enabled) { +			vty_out(vty, "    BFD is %s%s\n", +				adj->bfd_session ? "active, status " +						 : "configured", +				!adj->bfd_session +					? 
"" +					: bfd_get_status_str(bfd_sess_status( +						  adj->bfd_session))); +		}  		for (ALL_LIST_ELEMENTS_RO(adj->adj_sids, anode, sra)) {  			const char *adj_type;  			const char *backup; diff --git a/isisd/isis_bfd.c b/isisd/isis_bfd.c index 1f50fb9342..5311a384e7 100644 --- a/isisd/isis_bfd.c +++ b/isisd/isis_bfd.c @@ -168,6 +168,8 @@ void isis_bfd_circuit_cmd(struct isis_circuit *circuit)  			struct listnode *node;  			struct isis_adjacency *adj; +			if (!adjdb) +				continue;  			for (ALL_LIST_ELEMENTS_RO(adjdb, node, adj))  				bfd_adj_cmd(adj);  		} diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c index c7bf1e2012..fedceed3bb 100644 --- a/isisd/isis_circuit.c +++ b/isisd/isis_circuit.c @@ -661,8 +661,11 @@ int isis_circuit_up(struct isis_circuit *circuit)  			"Interface MTU %zu on %s is too low to support area lsp mtu %u!",  			isis_circuit_pdu_size(circuit),  			circuit->interface->name, circuit->area->lsp_mtu); -		isis_circuit_update_all_srmflags(circuit, 0); -		return ISIS_ERROR; + +		/* Allow ISIS to continue configuration.   With this +		 * configuration failure ISIS will attempt to send lsp +		 * packets but will fail until the mtu is configured properly +		 */  	}  	if (circuit->circ_type == CIRCUIT_T_BROADCAST) { diff --git a/isisd/isis_dr.c b/isisd/isis_dr.c index 78197974d7..27b7388072 100644 --- a/isisd/isis_dr.c +++ b/isisd/isis_dr.c @@ -217,7 +217,8 @@ int isis_dr_resign(struct isis_circuit *circuit, int level)  {  	uint8_t id[ISIS_SYS_ID_LEN + 2]; -	zlog_debug("isis_dr_resign l%d", level); +	if (IS_DEBUG_EVENTS) +		zlog_debug("isis_dr_resign l%d", level);  	circuit->u.bc.is_dr[level - 1] = 0;  	circuit->u.bc.run_dr_elect[level - 1] = 0; diff --git a/isisd/isis_ldp_sync.c b/isisd/isis_ldp_sync.c index 3c68b8d15f..fb605eb07a 100644 --- a/isisd/isis_ldp_sync.c +++ b/isisd/isis_ldp_sync.c @@ -479,9 +479,9 @@ void isis_if_ldp_sync_enable(struct isis_circuit *circuit)  	struct isis_area *area = circuit->area;  	/* called when setting LDP-SYNC at the global level: -	 *  specifed on interface overrides global config +	 *  specified on interface overrides global config  	 *  if ptop link send msg to LDP indicating ldp-sync enabled - 	 */ +	 */  	if (if_is_loopback(circuit->interface))  		return; @@ -541,7 +541,7 @@ void isis_if_set_ldp_sync_holddown(struct isis_circuit *circuit)  	struct isis_area *area = circuit->area;  	/* called when setting LDP-SYNC at the global level: -	 *  specifed on interface overrides global config. +	 *  specified on interface overrides global config.  	 
*/  	if (if_is_loopback(circuit->interface))  		return; diff --git a/isisd/isis_lfa.c b/isisd/isis_lfa.c index d515873ec1..348381ee74 100644 --- a/isisd/isis_lfa.c +++ b/isisd/isis_lfa.c @@ -1646,6 +1646,11 @@ void isis_ldp_rlfa_handle_client_close(struct zapi_client_close_info *info)  			     level++) {  				struct isis_spftree *spftree; +				if (!(area->is_type & level)) +					continue; +				if (!area->spftree[tree][level - 1]) +					continue; +  				spftree = area->spftree[tree][level - 1];  				isis_rlfa_list_clear(spftree);  			} diff --git a/isisd/isis_nb_config.c b/isisd/isis_nb_config.c index 019c26687b..cf4c2aea0a 100644 --- a/isisd/isis_nb_config.c +++ b/isisd/isis_nb_config.c @@ -393,30 +393,11 @@ int isis_instance_purge_originator_modify(struct nb_cb_modify_args *args)   */  int isis_instance_lsp_mtu_modify(struct nb_cb_modify_args *args)  { -	struct listnode *node; -	struct isis_circuit *circuit;  	uint16_t lsp_mtu = yang_dnode_get_uint16(args->dnode, NULL);  	struct isis_area *area;  	switch (args->event) {  	case NB_EV_VALIDATE: -		area = nb_running_get_entry(args->dnode, NULL, false); -		if (!area) -			break; -		for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) { -			if (circuit->state != C_STATE_INIT -			    && circuit->state != C_STATE_UP) -				continue; -			if (lsp_mtu > isis_circuit_pdu_size(circuit)) { -				snprintf( -					args->errmsg, args->errmsg_len, -					"ISIS area contains circuit %s, which has a maximum PDU size of %zu", -					circuit->interface->name, -					isis_circuit_pdu_size(circuit)); -				return NB_ERR_VALIDATION; -			} -		} -		break;  	case NB_EV_PREPARE:  	case NB_EV_ABORT:  		break; @@ -1868,7 +1849,7 @@ int isis_instance_mpls_te_destroy(struct nb_cb_destroy_args *args)  		return NB_OK;  	/* Remove Link State Database */ -	ls_ted_del_all(area->mta->ted); +	ls_ted_del_all(&area->mta->ted);  	/* Flush LSP if circuit engage */  	for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) { @@ -2552,43 +2533,14 @@ int isis_instance_mpls_ldp_sync_holddown_modify(struct nb_cb_modify_args *args)   */  int lib_interface_isis_create(struct nb_cb_create_args *args)  { -	struct isis_area *area = NULL;  	struct interface *ifp;  	struct isis_circuit *circuit = NULL;  	const char *area_tag = yang_dnode_get_string(args->dnode, "./area-tag"); -	uint32_t min_mtu, actual_mtu;  	switch (args->event) {  	case NB_EV_PREPARE:  	case NB_EV_ABORT: -		break;  	case NB_EV_VALIDATE: -		/* check if interface mtu is sufficient. If the area has not -		 * been created yet, assume default MTU for the area -		 */ -		ifp = nb_running_get_entry(args->dnode, NULL, false); -		/* zebra might not know yet about the MTU - nothing we can do */ -		if (!ifp || ifp->mtu == 0) -			break; -		actual_mtu = -			if_is_broadcast(ifp) ? 
ifp->mtu - LLC_LEN : ifp->mtu; - -		area = isis_area_lookup(area_tag, ifp->vrf->vrf_id); -		if (area) -			min_mtu = area->lsp_mtu; -		else -#ifndef FABRICD -			min_mtu = yang_get_default_uint16( -				"/frr-isisd:isis/instance/lsp/mtu"); -#else -			min_mtu = DEFAULT_LSP_MTU; -#endif /* ifndef FABRICD */ -		if (actual_mtu < min_mtu) { -			snprintf(args->errmsg, args->errmsg_len, -				 "Interface %s has MTU %u, minimum MTU for the area is %u", -				 ifp->name, actual_mtu, min_mtu); -			return NB_ERR_VALIDATION; -		}  		break;  	case NB_EV_APPLY:  		ifp = nb_running_get_entry(args->dnode, NULL, true); diff --git a/isisd/isis_pdu.c b/isisd/isis_pdu.c index 1a54d47f3c..016efd5cd7 100644 --- a/isisd/isis_pdu.c +++ b/isisd/isis_pdu.c @@ -2103,7 +2103,7 @@ void send_hello_sched(struct isis_circuit *circuit, int level, long delay)  /* - * Count the maximum number of lsps that can be accomodated by a given size. + * Count the maximum number of lsps that can be accommodated by a given size.   */  #define LSP_ENTRIES_LEN (10 + ISIS_SYS_ID_LEN)  static uint16_t get_max_lsp_count(uint16_t size) diff --git a/isisd/isis_pdu.h b/isisd/isis_pdu.h index b2e43781f6..b4018995d5 100644 --- a/isisd/isis_pdu.h +++ b/isisd/isis_pdu.h @@ -142,7 +142,7 @@ struct isis_lsp_hdr {  /*   * Since the length field of LSP Entries TLV is one byte long, and each LSP   * entry is LSP_ENTRIES_LEN (16) bytes long, the maximum number of LSP entries - * can be accomodated in a TLV is + * can be accommodated in a TLV is   * 255 / 16 = 15.   *   * Therefore, the maximum length of the LSP Entries TLV is diff --git a/isisd/isis_route.c b/isisd/isis_route.c index 764a0b0cd3..9f8f639e5d 100644 --- a/isisd/isis_route.c +++ b/isisd/isis_route.c @@ -443,7 +443,7 @@ void isis_route_delete(struct isis_area *area, struct route_node *rode,  	if (rinfo == NULL) {  		if (IS_DEBUG_RTE_EVENTS)  			zlog_debug( -				"ISIS-Rte: tried to delete non-existant route %s", +				"ISIS-Rte: tried to delete non-existent route %s",  				buff);  		return;  	} diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c index fd05fb94df..b5fce35b1e 100644 --- a/isisd/isis_spf.c +++ b/isisd/isis_spf.c @@ -1400,14 +1400,13 @@ static void spf_adj_list_parse_tlv(struct isis_spftree *spftree,  		spf_adj_list_parse_lsp(spftree, adj_list, lsp, id, metric);  } -static void spf_adj_list_parse_lsp(struct isis_spftree *spftree, -				   struct list *adj_list, struct isis_lsp *lsp, -				   const uint8_t *pseudo_nodeid, -				   uint32_t pseudo_metric) +static void spf_adj_list_parse_lsp_frag(struct isis_spftree *spftree, +					struct list *adj_list, +					struct isis_lsp *lsp, +					const uint8_t *pseudo_nodeid, +					uint32_t pseudo_metric)  {  	bool pseudo_lsp = LSP_PSEUDO_ID(lsp->hdr.lsp_id); -	struct isis_lsp *frag; -	struct listnode *node;  	struct isis_item *head;  	struct isis_item_list *te_neighs; @@ -1445,14 +1444,27 @@ static void spf_adj_list_parse_lsp(struct isis_spftree *spftree,  			}  		}  	} +} + + +static void spf_adj_list_parse_lsp(struct isis_spftree *spftree, +				   struct list *adj_list, struct isis_lsp *lsp, +				   const uint8_t *pseudo_nodeid, +				   uint32_t pseudo_metric) +{ +	struct isis_lsp *frag; +	struct listnode *node; + +	spf_adj_list_parse_lsp_frag(spftree, adj_list, lsp, pseudo_nodeid, +				    pseudo_metric);  	/* Parse LSP fragments. 
*/  	for (ALL_LIST_ELEMENTS_RO(lsp->lspu.frags, node, frag)) {  		if (!frag->tlvs)  			continue; -		spf_adj_list_parse_lsp(spftree, adj_list, frag, pseudo_nodeid, -				       pseudo_metric); +		spf_adj_list_parse_lsp_frag(spftree, adj_list, frag, +					    pseudo_nodeid, pseudo_metric);  	}  } diff --git a/isisd/isis_te.c b/isisd/isis_te.c index 95fbca17a8..1a1e0dc294 100644 --- a/isisd/isis_te.c +++ b/isisd/isis_te.c @@ -65,7 +65,7 @@  #include "isisd/isis_zebra.h"  /*------------------------------------------------------------------------* - * Followings are control functions for MPLS-TE parameters management. + * Following are control functions for MPLS-TE parameters management.   *------------------------------------------------------------------------*/  /* Main initialization / update function of the MPLS TE Circuit context */ @@ -1231,7 +1231,7 @@ void isis_te_init_ted(struct isis_area *area)  			isis_te_parse_lsp(area->mta, lsp);  } -/* Followings are vty command functions */ +/* Following are vty command functions */  #ifndef FABRICD  static void show_router_id(struct vty *vty, struct isis_area *area) diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c index d3d59fb435..3ba5c6ccfa 100644 --- a/isisd/isis_tlvs.c +++ b/isisd/isis_tlvs.c @@ -22,8 +22,9 @@   * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA   * 02111-1307, USA.   */ -#include <json-c/json_object.h> +  #include <zebra.h> +#include <json-c/json_object.h>  #ifdef CRYPTO_INTERNAL  #include "md5.h" diff --git a/isisd/isisd.c b/isisd/isisd.c index 369b83396a..85307f448b 100644 --- a/isisd/isisd.c +++ b/isisd/isisd.c @@ -1628,7 +1628,7 @@ void print_debug(struct vty *vty, int flags, int onoff)  		vty_out(vty, "IS-IS Update related packet debugging is %s\n",  			onoffs);  	if (flags & DEBUG_RTE_EVENTS) -		vty_out(vty, "IS-IS Route related debuggin is %s\n", onoffs); +		vty_out(vty, "IS-IS Route related debugging is %s\n", onoffs);  	if (flags & DEBUG_EVENTS)  		vty_out(vty, "IS-IS Event debugging is %s\n", onoffs);  	if (flags & DEBUG_PACKET_DUMP) @@ -2158,6 +2158,7 @@ DEFUN (debug_isis_bfd,         PROTO_NAME " interaction with BFD\n")  {  	debug_bfd |= DEBUG_BFD; +	bfd_protocol_integration_set_debug(true);  	print_debug(vty, DEBUG_BFD, 1);  	return CMD_SUCCESS; @@ -2172,6 +2173,7 @@ DEFUN (no_debug_isis_bfd,         PROTO_NAME " interaction with BFD\n")  {  	debug_bfd &= ~DEBUG_BFD; +	bfd_protocol_integration_set_debug(false);  	print_debug(vty, DEBUG_BFD, 0);  	return CMD_SUCCESS; diff --git a/ldpd/ldpd.c b/ldpd/ldpd.c index a78d2b25d6..796cf11798 100644 --- a/ldpd/ldpd.c +++ b/ldpd/ldpd.c @@ -1790,7 +1790,7 @@ merge_l2vpn(struct ldpd_conf *xconf, struct l2vpn *l2vpn, struct l2vpn *xl)  	previous_pw_type = l2vpn->pw_type;  	previous_mtu = l2vpn->mtu; -	/* merge intefaces */ +	/* merge interfaces */  	RB_FOREACH_SAFE(lif, l2vpn_if_head, &l2vpn->if_tree, ftmp) {  		/* find deleted interfaces */  		if (l2vpn_if_find(xl, lif->ifname) == NULL) { diff --git a/lib/atomlist.c b/lib/atomlist.c index b7c9516a00..2631d4fa78 100644 --- a/lib/atomlist.c +++ b/lib/atomlist.c @@ -267,7 +267,7 @@ static void atomsort_del_core(struct atomsort_head *h,  					memory_order_consume);  			/* track the beginning of a chain of deleted items -			 * this is neccessary to make this lock-free; we can +			 * this is necessary to make this lock-free; we can  			 * complete deletions started by other threads.  			 
*/  			if (!atomptr_l(prevval)) { diff --git a/lib/base64.c b/lib/base64.c index e3f238969b..6f0be039f1 100644 --- a/lib/base64.c +++ b/lib/base64.c @@ -3,6 +3,10 @@   * For details, see http://sourceforge.net/projects/libb64   */ +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif +  #include "base64.h"  static const int CHARS_PER_LINE = 72; diff --git a/lib/command.c b/lib/command.c index 1989668bf0..a429510059 100644 --- a/lib/command.c +++ b/lib/command.c @@ -445,11 +445,15 @@ static bool full_cli;  /* This function write configuration of this host. */  static int config_write_host(struct vty *vty)  { -	if (cmd_hostname_get()) -		vty_out(vty, "hostname %s\n", cmd_hostname_get()); +	const char *name; -	if (cmd_domainname_get()) -		vty_out(vty, "domainname %s\n", cmd_domainname_get()); +	name = cmd_hostname_get(); +	if (name && name[0] != '\0') +		vty_out(vty, "hostname %s\n", name); + +	name = cmd_domainname_get(); +	if (name && name[0] != '\0') +		vty_out(vty, "domainname %s\n", name);  	/* The following are all configuration commands that are not sent to  	 * watchfrr.  For instance watchfrr is hardcoded to log to syslog so diff --git a/lib/command_py.c b/lib/command_py.c index 90344ae1e5..6301eec5e8 100644 --- a/lib/command_py.c +++ b/lib/command_py.c @@ -28,6 +28,9 @@   * setup & these trample over each other.   */ +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif  #include <Python.h>  #include "structmember.h"  #include <string.h> diff --git a/lib/elf_py.c b/lib/elf_py.c index 5289faece4..75d2d6007f 100644 --- a/lib/elf_py.c +++ b/lib/elf_py.c @@ -50,10 +50,10 @@  #define PY_SSIZE_T_CLEAN -#include <Python.h>  #ifdef HAVE_CONFIG_H  #include "config.h"  #endif +#include <Python.h>  #include "structmember.h"  #include <string.h>  #include <stdlib.h> diff --git a/lib/filter_cli.c b/lib/filter_cli.c index fb40c527dd..9a877a5704 100644 --- a/lib/filter_cli.c +++ b/lib/filter_cli.c @@ -151,28 +151,25 @@ DEFPY_YANG(  	 * Backward compatibility: don't complain about duplicated values,  	 * just silently accept.  	 */ -	if (seq_str == NULL) { -		ada.ada_type = "ipv4"; -		ada.ada_name = name; -		ada.ada_action = action; -		if (host_str && mask_str == NULL) { -			ada.ada_xpath[0] = "./host"; -			ada.ada_value[0] = host_str; -		} else if (host_str && mask_str) { -			ada.ada_xpath[0] = "./network/address"; -			ada.ada_value[0] = host_str; -			ada.ada_xpath[1] = "./network/mask"; -			ada.ada_value[1] = mask_str; -		} else { -			ada.ada_xpath[0] = "./source-any"; -			ada.ada_value[0] = ""; -		} - -		/* Duplicated entry without sequence, just quit. */ -		if (acl_is_dup(vty->candidate_config->dnode, &ada)) -			return CMD_SUCCESS; +	ada.ada_type = "ipv4"; +	ada.ada_name = name; +	ada.ada_action = action; +	if (host_str && mask_str == NULL) { +		ada.ada_xpath[0] = "./host"; +		ada.ada_value[0] = host_str; +	} else if (host_str && mask_str) { +		ada.ada_xpath[0] = "./network/address"; +		ada.ada_value[0] = host_str; +		ada.ada_xpath[1] = "./network/mask"; +		ada.ada_value[1] = mask_str; +	} else { +		ada.ada_xpath[0] = "./source-any"; +		ada.ada_value[0] = "";  	} +	if (acl_is_dup(vty->candidate_config->dnode, &ada)) +		return CMD_SUCCESS; +  	/*  	 * Create the access-list first, so we can generate sequence if  	 * none given (backward compatibility). @@ -280,49 +277,46 @@ DEFPY_YANG(  	 * Backward compatibility: don't complain about duplicated values,  	 * just silently accept.  	 
*/ -	if (seq_str == NULL) { -		ada.ada_type = "ipv4"; -		ada.ada_name = name; -		ada.ada_action = action; -		if (src_str && src_mask_str == NULL) { -			ada.ada_xpath[idx] = "./host"; -			ada.ada_value[idx] = src_str; -			idx++; -		} else if (src_str && src_mask_str) { -			ada.ada_xpath[idx] = "./network/address"; -			ada.ada_value[idx] = src_str; -			idx++; -			ada.ada_xpath[idx] = "./network/mask"; -			ada.ada_value[idx] = src_mask_str; -			idx++; -		} else { -			ada.ada_xpath[idx] = "./source-any"; -			ada.ada_value[idx] = ""; -			idx++; -		} - -		if (dst_str && dst_mask_str == NULL) { -			ada.ada_xpath[idx] = "./destination-host"; -			ada.ada_value[idx] = dst_str; -			idx++; -		} else if (dst_str && dst_mask_str) { -			ada.ada_xpath[idx] = "./destination-network/address"; -			ada.ada_value[idx] = dst_str; -			idx++; -			ada.ada_xpath[idx] = "./destination-network/mask"; -			ada.ada_value[idx] = dst_mask_str; -			idx++; -		} else { -			ada.ada_xpath[idx] = "./destination-any"; -			ada.ada_value[idx] = ""; -			idx++; -		} +	ada.ada_type = "ipv4"; +	ada.ada_name = name; +	ada.ada_action = action; +	if (src_str && src_mask_str == NULL) { +		ada.ada_xpath[idx] = "./host"; +		ada.ada_value[idx] = src_str; +		idx++; +	} else if (src_str && src_mask_str) { +		ada.ada_xpath[idx] = "./network/address"; +		ada.ada_value[idx] = src_str; +		idx++; +		ada.ada_xpath[idx] = "./network/mask"; +		ada.ada_value[idx] = src_mask_str; +		idx++; +	} else { +		ada.ada_xpath[idx] = "./source-any"; +		ada.ada_value[idx] = ""; +		idx++; +	} -		/* Duplicated entry without sequence, just quit. */ -		if (acl_is_dup(vty->candidate_config->dnode, &ada)) -			return CMD_SUCCESS; +	if (dst_str && dst_mask_str == NULL) { +		ada.ada_xpath[idx] = "./destination-host"; +		ada.ada_value[idx] = dst_str; +		idx++; +	} else if (dst_str && dst_mask_str) { +		ada.ada_xpath[idx] = "./destination-network/address"; +		ada.ada_value[idx] = dst_str; +		idx++; +		ada.ada_xpath[idx] = "./destination-network/mask"; +		ada.ada_value[idx] = dst_mask_str; +		idx++; +	} else { +		ada.ada_xpath[idx] = "./destination-any"; +		ada.ada_value[idx] = ""; +		idx++;  	} +	if (acl_is_dup(vty->candidate_config->dnode, &ada)) +		return CMD_SUCCESS; +  	/*  	 * Create the access-list first, so we can generate sequence if  	 * none given (backward compatibility). @@ -466,28 +460,25 @@ DEFPY_YANG(  	 * Backward compatibility: don't complain about duplicated values,  	 * just silently accept.  	 */ -	if (seq_str == NULL) { -		ada.ada_type = "ipv4"; -		ada.ada_name = name; -		ada.ada_action = action; - -		if (prefix_str) { -			ada.ada_xpath[0] = "./ipv4-prefix"; -			ada.ada_value[0] = prefix_str; -			if (exact) { -				ada.ada_xpath[1] = "./ipv4-exact-match"; -				ada.ada_value[1] = "true"; -			} -		} else { -			ada.ada_xpath[0] = "./any"; -			ada.ada_value[0] = ""; -		} +	ada.ada_type = "ipv4"; +	ada.ada_name = name; +	ada.ada_action = action; -		/* Duplicated entry without sequence, just quit. */ -		if (acl_is_dup(vty->candidate_config->dnode, &ada)) -			return CMD_SUCCESS; +	if (prefix_str) { +		ada.ada_xpath[0] = "./ipv4-prefix"; +		ada.ada_value[0] = prefix_str; +		if (exact) { +			ada.ada_xpath[1] = "./ipv4-exact-match"; +			ada.ada_value[1] = "true"; +		} +	} else { +		ada.ada_xpath[0] = "./any"; +		ada.ada_value[0] = "";  	} +	if (acl_is_dup(vty->candidate_config->dnode, &ada)) +		return CMD_SUCCESS; +  	/*  	 * Create the access-list first, so we can generate sequence if  	 * none given (backward compatibility). 
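The ``filter_cli.c`` hunks before and after this point all apply the same refactor: the duplicate-argument descriptor is now filled in and checked unconditionally, instead of only when no sequence number was given, so an equivalent access-list or prefix-list entry is silently accepted in either case. A minimal sketch of that check-before-create pattern follows (the descriptor and lookup are illustrative only and do not use FRR's real ``acl_is_dup()``/``plist_is_dup()`` signatures):

.. code-block:: c

   #include <stdbool.h>
   #include <string.h>

   /* Hypothetical flattened descriptor; the real code records xpath/value
    * pairs to compare against the candidate configuration tree. */
   struct dup_args {
           const char *type;   /* e.g. "ipv4", "ipv6", "mac" */
           const char *name;   /* list name */
           const char *action; /* "permit" or "deny" */
           const char *match;  /* host/network/prefix/any, encoded as text */
   };

   /* Return true if an existing entry already describes the same filter. */
   static bool entry_is_dup(const struct dup_args *existing, size_t n,
                            const struct dup_args *candidate)
   {
           for (size_t i = 0; i < n; i++) {
                   if (strcmp(existing[i].type, candidate->type) == 0 &&
                       strcmp(existing[i].name, candidate->name) == 0 &&
                       strcmp(existing[i].action, candidate->action) == 0 &&
                       strcmp(existing[i].match, candidate->match) == 0)
                           return true;
           }
           return false;
   }

In the CLI handlers the caller then returns ``CMD_SUCCESS`` early on a duplicate and otherwise proceeds to create the entry, generating a sequence number if none was supplied (backward compatibility).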
@@ -656,28 +647,25 @@ DEFPY_YANG(  	 * Backward compatibility: don't complain about duplicated values,  	 * just silently accept.  	 */ -	if (seq_str == NULL) { -		ada.ada_type = "ipv6"; -		ada.ada_name = name; -		ada.ada_action = action; - -		if (prefix_str) { -			ada.ada_xpath[0] = "./ipv6-prefix"; -			ada.ada_value[0] = prefix_str; -			if (exact) { -				ada.ada_xpath[1] = "./ipv6-exact-match"; -				ada.ada_value[1] = "true"; -			} -		} else { -			ada.ada_xpath[0] = "./any"; -			ada.ada_value[0] = ""; -		} +	ada.ada_type = "ipv6"; +	ada.ada_name = name; +	ada.ada_action = action; -		/* Duplicated entry without sequence, just quit. */ -		if (acl_is_dup(vty->candidate_config->dnode, &ada)) -			return CMD_SUCCESS; +	if (prefix_str) { +		ada.ada_xpath[0] = "./ipv6-prefix"; +		ada.ada_value[0] = prefix_str; +		if (exact) { +			ada.ada_xpath[1] = "./ipv6-exact-match"; +			ada.ada_value[1] = "true"; +		} +	} else { +		ada.ada_xpath[0] = "./any"; +		ada.ada_value[0] = "";  	} +	if (acl_is_dup(vty->candidate_config->dnode, &ada)) +		return CMD_SUCCESS; +  	/*  	 * Create the access-list first, so we can generate sequence if  	 * none given (backward compatibility). @@ -850,24 +838,21 @@ DEFPY_YANG(  	 * Backward compatibility: don't complain about duplicated values,  	 * just silently accept.  	 */ -	if (seq_str == NULL) { -		ada.ada_type = "mac"; -		ada.ada_name = name; -		ada.ada_action = action; - -		if (mac_str) { -			ada.ada_xpath[0] = "./mac"; -			ada.ada_value[0] = mac_str; -		} else { -			ada.ada_xpath[0] = "./any"; -			ada.ada_value[0] = ""; -		} +	ada.ada_type = "mac"; +	ada.ada_name = name; +	ada.ada_action = action; -		/* Duplicated entry without sequence, just quit. */ -		if (acl_is_dup(vty->candidate_config->dnode, &ada)) -			return CMD_SUCCESS; +	if (mac_str) { +		ada.ada_xpath[0] = "./mac"; +		ada.ada_value[0] = mac_str; +	} else { +		ada.ada_xpath[0] = "./any"; +		ada.ada_value[0] = "";  	} +	if (acl_is_dup(vty->candidate_config->dnode, &ada)) +		return CMD_SUCCESS; +  	/*  	 * Create the access-list first, so we can generate sequence if  	 * none given (backward compatibility). @@ -1272,23 +1257,20 @@ DEFPY_YANG(  	 * Backward compatibility: don't complain about duplicated values,  	 * just silently accept.  	 */ -	if (seq_str == NULL) { -		pda.pda_type = "ipv4"; -		pda.pda_name = name; -		pda.pda_action = action; -		if (prefix_str) { -			prefix_copy(&pda.prefix, prefix); -			pda.ge = ge; -			pda.le = le; -		} else { -			pda.any = true; -		} - -		/* Duplicated entry without sequence, just quit. */ -		if (plist_is_dup(vty->candidate_config->dnode, &pda)) -			return CMD_SUCCESS; +	pda.pda_type = "ipv4"; +	pda.pda_name = name; +	pda.pda_action = action; +	if (prefix_str) { +		prefix_copy(&pda.prefix, prefix); +		pda.ge = ge; +		pda.le = le; +	} else { +		pda.any = true;  	} +	if (plist_is_dup(vty->candidate_config->dnode, &pda)) +		return CMD_SUCCESS; +  	/*  	 * Create the prefix-list first, so we can generate sequence if  	 * none given (backward compatibility). @@ -1476,23 +1458,20 @@ DEFPY_YANG(  	 * Backward compatibility: don't complain about duplicated values,  	 * just silently accept.  	 */ -	if (seq_str == NULL) { -		pda.pda_type = "ipv6"; -		pda.pda_name = name; -		pda.pda_action = action; -		if (prefix_str) { -			prefix_copy(&pda.prefix, prefix); -			pda.ge = ge; -			pda.le = le; -		} else { -			pda.any = true; -		} - -		/* Duplicated entry without sequence, just quit. 
*/ -		if (plist_is_dup(vty->candidate_config->dnode, &pda)) -			return CMD_SUCCESS; +	pda.pda_type = "ipv6"; +	pda.pda_name = name; +	pda.pda_action = action; +	if (prefix_str) { +		prefix_copy(&pda.prefix, prefix); +		pda.ge = ge; +		pda.le = le; +	} else { +		pda.any = true;  	} +	if (plist_is_dup(vty->candidate_config->dnode, &pda)) +		return CMD_SUCCESS; +  	/*  	 * Create the prefix-list first, so we can generate sequence if  	 * none given (backward compatibility). diff --git a/lib/frrscript.c b/lib/frrscript.c index 4fee79991a..8add44c19e 100644 --- a/lib/frrscript.c +++ b/lib/frrscript.c @@ -373,7 +373,7 @@ int frrscript_load(struct frrscript *fs, const char *function_name,  	}  	if (luaL_dofile(L, script_name) != 0) { -		zlog_err("frrscript: failed loading script '%s.lua': error: %s", +		zlog_err("frrscript: failed loading script '%s': error: %s",  			 script_name, lua_tostring(L, -1));  		goto fail;  	} @@ -381,7 +381,7 @@ int frrscript_load(struct frrscript *fs, const char *function_name,  	/* To check the Lua function, we get it from the global table */  	lua_getglobal(L, function_name);  	if (lua_isfunction(L, lua_gettop(L)) == 0) { -		zlog_err("frrscript: loaded script '%s.lua' but %s not found", +		zlog_err("frrscript: loaded script '%s' but %s not found",  			 script_name, function_name);  		goto fail;  	} @@ -391,7 +391,7 @@ int frrscript_load(struct frrscript *fs, const char *function_name,  	if (load_cb && (*load_cb)(fs) != 0) {  		zlog_err( -			"frrscript: '%s.lua': %s: loaded but callback returned non-zero exit code", +			"frrscript: '%s': %s: loaded but callback returned non-zero exit code",  			script_name, function_name);  		goto fail;  	} diff --git a/lib/link_state.c b/lib/link_state.c index e4ccd0fb65..639a1d37d8 100644 --- a/lib/link_state.c +++ b/lib/link_state.c @@ -997,25 +997,26 @@ void ls_ted_del(struct ls_ted *ted)  	XFREE(MTYPE_LS_DB, ted);  } -void ls_ted_del_all(struct ls_ted *ted) +void ls_ted_del_all(struct ls_ted **ted)  {  	struct ls_vertex *vertex;  	struct ls_edge *edge;  	struct ls_subnet *subnet; -	if (ted == NULL) +	if (*ted == NULL)  		return;  	/* First remove Vertices, Edges and Subnets and associated Link State */ -	frr_each_safe (vertices, &ted->vertices, vertex) -		ls_vertex_del_all(ted, vertex); -	frr_each_safe (edges, &ted->edges, edge) -		ls_edge_del_all(ted, edge); -	frr_each_safe (subnets, &ted->subnets, subnet) -		ls_subnet_del_all(ted, subnet); +	frr_each_safe (vertices, &(*ted)->vertices, vertex) +		ls_vertex_del_all(*ted, vertex); +	frr_each_safe (edges, &(*ted)->edges, edge) +		ls_edge_del_all(*ted, edge); +	frr_each_safe (subnets, &(*ted)->subnets, subnet) +		ls_subnet_del_all(*ted, subnet);  	/* then remove TED itself */ -	ls_ted_del(ted); +	ls_ted_del(*ted); +	*ted = NULL;  }  void ls_ted_clean(struct ls_ted *ted) diff --git a/lib/link_state.h b/lib/link_state.h index 761e8b6a27..f46a2068a1 100644 --- a/lib/link_state.h +++ b/lib/link_state.h @@ -746,7 +746,7 @@ extern void ls_ted_del(struct ls_ted *ted);   *   * @param ted	Link State Data Base   */ -extern void ls_ted_del_all(struct ls_ted *ted); +extern void ls_ted_del_all(struct ls_ted **ted);  /**   * Clean Link State Data Base by removing all Vertices, Edges and SubNets marked @@ -370,7 +370,7 @@ static const struct zebra_desc_table command_types[] = {  	DESC_ENTRY(ZEBRA_BFD_CLIENT_DEREGISTER),  	DESC_ENTRY(ZEBRA_INTERFACE_ENABLE_RADV),  	DESC_ENTRY(ZEBRA_INTERFACE_DISABLE_RADV), -	DESC_ENTRY(ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB), +	DESC_ENTRY(ZEBRA_NEXTHOP_LOOKUP_MRIB),  	
DESC_ENTRY(ZEBRA_INTERFACE_LINK_PARAMS),  	DESC_ENTRY(ZEBRA_MPLS_LABELS_ADD),  	DESC_ENTRY(ZEBRA_MPLS_LABELS_DELETE), diff --git a/lib/northbound.c b/lib/northbound.c index 49adea6d53..2cc7ac6ea1 100644 --- a/lib/northbound.c +++ b/lib/northbound.c @@ -1649,10 +1649,12 @@ static int nb_oper_data_iter_container(const struct nb_node *nb_node,  				       uint32_t flags, nb_oper_data_cb cb,  				       void *arg)  { +	const struct lysc_node *snode = nb_node->snode; +  	if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))  		return NB_OK; -	/* Presence containers. */ +	/* Read-only presence containers. */  	if (nb_node->cbs.get_elem) {  		struct yang_data *data;  		int ret; @@ -1662,15 +1664,24 @@ static int nb_oper_data_iter_container(const struct nb_node *nb_node,  			/* Presence container is not present. */  			return NB_OK; -		ret = (*cb)(nb_node->snode, translator, data, arg); +		ret = (*cb)(snode, translator, data, arg);  		if (ret != NB_OK)  			return ret;  	} +	/* Read-write presence containers. */ +	if (CHECK_FLAG(snode->flags, LYS_CONFIG_W)) { +		struct lysc_node_container *scontainer; + +		scontainer = (struct lysc_node_container *)snode; +		if (CHECK_FLAG(scontainer->flags, LYS_PRESENCE) +		    && !yang_dnode_get(running_config->dnode, xpath)) +			return NB_OK; +	} +  	/* Iterate over the child nodes. */ -	return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry, -					  list_keys, translator, false, flags, -					  cb, arg); +	return nb_oper_data_iter_children(snode, xpath, list_entry, list_keys, +					  translator, false, flags, cb, arg);  }  static int diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c index 1e25f6a1e2..56eac9dc32 100644 --- a/lib/northbound_cli.c +++ b/lib/northbound_cli.c @@ -1464,6 +1464,7 @@ DEFPY (show_yang_operational_data,           [{\  	   format <json$json|xml$xml>\  	   |translate WORD$translator_family\ +	   |with-config$with_config\  	 }]",         SHOW_STR         "YANG information\n" @@ -1473,13 +1474,15 @@ DEFPY (show_yang_operational_data,         "JavaScript Object Notation\n"         "Extensible Markup Language\n"         "Translate operational data\n" -       "YANG module translator\n") +       "YANG module translator\n" +       "Merge configuration data\n")  {  	LYD_FORMAT format;  	struct yang_translator *translator = NULL;  	struct ly_ctx *ly_ctx;  	struct lyd_node *dnode;  	char *strp; +	uint32_t print_options = LYD_PRINT_WITHSIBLINGS;  	if (xml)  		format = LYD_XML; @@ -1507,13 +1510,21 @@ DEFPY (show_yang_operational_data,  		yang_dnode_free(dnode);  		return CMD_WARNING;  	} + +	if (with_config && yang_dnode_exists(running_config->dnode, xpath)) { +		struct lyd_node *config_dnode = +			yang_dnode_get(running_config->dnode, xpath); +		if (config_dnode != NULL) { +			lyd_merge_tree(&dnode, yang_dnode_dup(config_dnode), +				       LYD_MERGE_DESTRUCT); +			print_options |= LYD_PRINT_WD_ALL; +		} +	} +  	(void)lyd_validate_all(&dnode, ly_ctx, 0, NULL);  	/* Display the data. 
*/ -	if (lyd_print_mem(&strp, dnode, format, -			  LYD_PRINT_WITHSIBLINGS | LYD_PRINT_WD_ALL) -		    != 0 -	    || !strp) { +	if (lyd_print_mem(&strp, dnode, format, print_options) != 0 || !strp) {  		vty_out(vty, "%% Failed to display operational data.\n");  		yang_dnode_free(dnode);  		return CMD_WARNING; diff --git a/lib/plist.c b/lib/plist.c index e7647fb2a7..d8ef9dcbd5 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -1568,8 +1568,10 @@ static void prefix_list_reset_afi(afi_t afi, int orf)  	if (master == NULL)  		return; -	while ((plist = plist_pop(&master->str))) +	while ((plist = plist_first(&master->str))) {  		prefix_list_delete(plist); +		plist_pop(&master->str); +	}  	master->recent = NULL;  } diff --git a/lib/prefix.c b/lib/prefix.c index 4db0c2478b..1a3efd32b1 100644 --- a/lib/prefix.c +++ b/lib/prefix.c @@ -177,8 +177,10 @@ const char *safi2str(safi_t safi)  }  /* If n includes p prefix then return 1 else return 0. */ -int prefix_match(const struct prefix *n, const struct prefix *p) +int prefix_match(union prefixconstptr unet, union prefixconstptr upfx)  { +	const struct prefix *n = unet.p; +	const struct prefix *p = upfx.p;  	int offset;  	int shift;  	const uint8_t *np, *pp; @@ -274,9 +276,11 @@ int evpn_type5_prefix_match(const struct prefix *n, const struct prefix *p)  }  /* If n includes p then return 1 else return 0. Prefix mask is not considered */ -int prefix_match_network_statement(const struct prefix *n, -				   const struct prefix *p) +int prefix_match_network_statement(union prefixconstptr unet, +				   union prefixconstptr upfx)  { +	const struct prefix *n = unet.p; +	const struct prefix *p = upfx.p;  	int offset;  	int shift;  	const uint8_t *np, *pp; @@ -472,8 +476,10 @@ int prefix_cmp(union prefixconstptr up1, union prefixconstptr up2)   * address families don't match, return -1; otherwise the return value is   * in range 0 ... maximum prefix length for the address family.   */ -int prefix_common_bits(const struct prefix *p1, const struct prefix *p2) +int prefix_common_bits(union prefixconstptr ua, union prefixconstptr ub)  { +	const struct prefix *p1 = ua.p; +	const struct prefix *p2 = ub.p;  	int pos, bit;  	int length = 0;  	uint8_t xor ; @@ -509,8 +515,10 @@ int prefix_common_bits(const struct prefix *p1, const struct prefix *p2)  }  /* Return prefix family type string. 
*/ -const char *prefix_family_str(const struct prefix *p) +const char *prefix_family_str(union prefixconstptr pu)  { +	const struct prefix *p = pu.p; +  	if (p->family == AF_INET)  		return "inet";  	if (p->family == AF_INET6) @@ -815,14 +823,16 @@ void apply_mask_ipv6(struct prefix_ipv6 *p)  	}  } -void apply_mask(struct prefix *p) +void apply_mask(union prefixptr pu)  { +	struct prefix *p = pu.p; +  	switch (p->family) {  	case AF_INET: -		apply_mask_ipv4((struct prefix_ipv4 *)p); +		apply_mask_ipv4(pu.p4);  		break;  	case AF_INET6: -		apply_mask_ipv6((struct prefix_ipv6 *)p); +		apply_mask_ipv6(pu.p6);  		break;  	default:  		break; @@ -868,8 +878,10 @@ void prefix2sockunion(const struct prefix *p, union sockunion *su)  		       sizeof(struct in6_addr));  } -int prefix_blen(const struct prefix *p) +int prefix_blen(union prefixconstptr pu)  { +	const struct prefix *p = pu.p; +  	switch (p->family) {  	case AF_INET:  		return IPV4_MAX_BYTELEN; diff --git a/lib/prefix.h b/lib/prefix.h index 816a1517e1..e043d41d30 100644 --- a/lib/prefix.h +++ b/lib/prefix.h @@ -287,13 +287,6 @@ static inline int is_evpn_prefix_ipaddr_v6(const struct prefix_evpn *evp)  	return 0;  } -/* Prefix for a generic pointer */ -struct prefix_ptr { -	uint8_t family; -	uint16_t prefixlen; -	uintptr_t prefix __attribute__((aligned(8))); -}; -  /* Prefix for a Flowspec entry */  struct prefix_fs {  	uint8_t family; @@ -420,6 +413,11 @@ extern const char *family2str(int family);  extern const char *safi2str(safi_t safi);  extern const char *afi2str(afi_t afi); +static inline afi_t prefix_afi(union prefixconstptr pu) +{ +	return family2afi(pu.p->family); +} +  /*   * Check bit of the prefix.   * @@ -437,8 +435,8 @@ extern void prefix_free(struct prefix **p);   * Function to handle prefix_free being used as a del function.   
*/  extern void prefix_free_lists(void *arg); -extern const char *prefix_family_str(const struct prefix *); -extern int prefix_blen(const struct prefix *); +extern const char *prefix_family_str(union prefixconstptr pu); +extern int prefix_blen(union prefixconstptr pu);  extern int str2prefix(const char *, struct prefix *);  #define PREFIX2STR_BUFFER  PREFIX_STRLEN @@ -449,14 +447,14 @@ extern const char *prefix_sg2str(const struct prefix_sg *sg, char *str);  extern const char *prefix2str(union prefixconstptr, char *, int);  extern int evpn_type5_prefix_match(const struct prefix *evpn_pfx,  				   const struct prefix *match_pfx); -extern int prefix_match(const struct prefix *, const struct prefix *); -extern int prefix_match_network_statement(const struct prefix *, -					  const struct prefix *); -extern int prefix_same(union prefixconstptr, union prefixconstptr); -extern int prefix_cmp(union prefixconstptr, union prefixconstptr); -extern int prefix_common_bits(const struct prefix *, const struct prefix *); -extern void prefix_copy(union prefixptr, union prefixconstptr); -extern void apply_mask(struct prefix *); +extern int prefix_match(union prefixconstptr unet, union prefixconstptr upfx); +extern int prefix_match_network_statement(union prefixconstptr unet, +					  union prefixconstptr upfx); +extern int prefix_same(union prefixconstptr ua, union prefixconstptr ub); +extern int prefix_cmp(union prefixconstptr ua, union prefixconstptr ub); +extern int prefix_common_bits(union prefixconstptr ua, union prefixconstptr ub); +extern void prefix_copy(union prefixptr udst, union prefixconstptr usrc); +extern void apply_mask(union prefixptr pu);  #ifdef __clang_analyzer__  /* clang-SA doesn't understand transparent unions, making it think that the @@ -585,6 +583,71 @@ static inline int is_default_host_route(const struct prefix *p)  	return 0;  } +/* IPv6 scope values, usable for IPv4 too (cf. 
below) */ +/* clang-format off */ +enum { +	/* 0: reserved */ +	MCAST_SCOPE_IFACE  = 0x1, +	MCAST_SCOPE_LINK   = 0x2, +	MCAST_SCOPE_REALM  = 0x3, +	MCAST_SCOPE_ADMIN  = 0x4, +	MCAST_SCOPE_SITE   = 0x5, +	/* 6-7: unassigned */ +	MCAST_SCOPE_ORG    = 0x8, +	/* 9-d: unassigned */ +	MCAST_SCOPE_GLOBAL = 0xe, +	/* f: reserved */ +}; +/* clang-format on */ + +static inline uint8_t ipv6_mcast_scope(const struct in6_addr *addr) +{ +	return addr->s6_addr[1] & 0xf; +} + +static inline bool ipv6_mcast_nofwd(const struct in6_addr *addr) +{ +	return (addr->s6_addr[1] & 0xf) <= MCAST_SCOPE_LINK; +} + +static inline bool ipv6_mcast_ssm(const struct in6_addr *addr) +{ +	uint32_t bits = ntohl(addr->s6_addr32[0]); + +	/* ff3x:0000::/32 */ +	return (bits & 0xfff0ffff) == 0xff300000; +} + +static inline uint8_t ipv4_mcast_scope(const struct in_addr *addr) +{ +	uint32_t bits = ntohl(addr->s_addr); + +	/* 224.0.0.0/24 - link scope */ +	if ((bits & 0xffffff00) == 0xe0000000) +		return MCAST_SCOPE_LINK; +	/* 239.0.0.0/8 - org scope */ +	if ((bits & 0xff000000) == 0xef000000) +		return MCAST_SCOPE_ORG; + +	return MCAST_SCOPE_GLOBAL; +} + +static inline bool ipv4_mcast_nofwd(const struct in_addr *addr) +{ +	uint32_t bits = ntohl(addr->s_addr); + +	/* 224.0.0.0/24 */ +	return (bits & 0xffffff00) == 0xe0000000; +} + +static inline bool ipv4_mcast_ssm(const struct in_addr *addr) +{ +	uint32_t bits = ntohl(addr->s_addr); + +	/* 232.0.0.0/8 */ +	return (bits & 0xff000000) == 0xe8000000; +} +  #ifdef _FRR_ATTRIBUTE_PRINTFRR  #pragma FRR printfrr_ext "%pEA"  (struct ethaddr *) diff --git a/lib/privs.c b/lib/privs.c index b3f51267d1..24a15a0c0b 100644 --- a/lib/privs.c +++ b/lib/privs.c @@ -684,7 +684,7 @@ void zprivs_init(struct zebra_privs_t *zprivs)  #else  /* !HAVE_CAPABILITIES */  	/* we dont have caps. we'll need to maintain rid and saved uid -	 * and change euid back to saved uid (who we presume has all neccessary +	 * and change euid back to saved uid (who we presume has all necessary  	 * privileges) whenever we are asked to raise our privileges.  	 *  	 * This is not worth that much security wise, but all we can do. diff --git a/lib/route_types.txt b/lib/route_types.txt index 77639070c9..a82273a6dc 100644 --- a/lib/route_types.txt +++ b/lib/route_types.txt @@ -3,7 +3,7 @@  # Used to construct route_types.c and route_types.h  #  # comma-seperated fields of either 2 fields (help strings) or 7 fields. -# White space before and after the comma seperators is stripped. +# White space before and after the comma separators is stripped.  # Lines /beginning/ with # are comments.  
#  #### diff --git a/lib/routemap.h b/lib/routemap.h index 6c4916898a..13dafe6849 100644 --- a/lib/routemap.h +++ b/lib/routemap.h @@ -276,6 +276,7 @@ DECLARE_QOBJ_TYPE(route_map);  #define IS_MATCH_LOCAL_PREF(C)                                                 \  	(strmatch(C, "frr-bgp-route-map:match-local-preference"))  #define IS_MATCH_ALIAS(C) (strmatch(C, "frr-bgp-route-map:match-alias")) +#define IS_MATCH_SCRIPT(C) (strmatch(C, "frr-bgp-route-map:match-script"))  #define IS_MATCH_ORIGIN(C)                                                     \  	(strmatch(C, "frr-bgp-route-map:match-origin"))  #define IS_MATCH_RPKI(C) (strmatch(C, "frr-bgp-route-map:rpki")) @@ -369,6 +370,7 @@ DECLARE_QOBJ_TYPE(route_map);  	(strmatch(A, "frr-bgp-route-map:as-path-prepend"))  #define IS_SET_AS_EXCLUDE(A)                                                   \  	(strmatch(A, "frr-bgp-route-map:as-path-exclude")) +#define IS_SET_AS_REPLACE(A) (strmatch(A, "frr-bgp-route-map:as-path-replace"))  #define IS_SET_IPV6_NH_GLOBAL(A)                                               \  	(strmatch(A, "frr-bgp-route-map:ipv6-nexthop-global"))  #define IS_SET_IPV6_VPN_NH(A)                                                  \ diff --git a/lib/routemap_cli.c b/lib/routemap_cli.c index 2685bd2d79..ff98a14c41 100644 --- a/lib/routemap_cli.c +++ b/lib/routemap_cli.c @@ -635,6 +635,11 @@ void route_map_condition_show(struct vty *vty, const struct lyd_node *dnode,  			yang_dnode_get_string(  				dnode,  				"./rmap-match-condition/frr-bgp-route-map:alias")); +	} else if (IS_MATCH_SCRIPT(condition)) { +		vty_out(vty, " match script %s\n", +			yang_dnode_get_string( +				dnode, +				"./rmap-match-condition/frr-bgp-route-map:script"));  	} else if (IS_MATCH_ORIGIN(condition)) {  		vty_out(vty, " match origin %s\n",  			yang_dnode_get_string( @@ -1192,6 +1197,11 @@ void route_map_action_show(struct vty *vty, const struct lyd_node *dnode,  			yang_dnode_get_string(  				dnode,  				"./rmap-set-action/frr-bgp-route-map:exclude-as-path")); +	} else if (IS_SET_AS_REPLACE(action)) { +		vty_out(vty, " set as-path replace %s\n", +			yang_dnode_get_string( +				dnode, +				"./rmap-set-action/frr-bgp-route-map:replace-as-path"));  	} else if (IS_SET_AS_PREPEND(action)) {  		if (yang_dnode_exists(  			    dnode, @@ -1429,41 +1439,6 @@ void route_map_optimization_disabled_show(struct vty *vty,  		name);  } -#if CONFDATE > 20220409 -CPP_NOTICE("Time to remove old route-map optimization command") -#endif - -DEFPY_HIDDEN( -	routemap_optimization, routemap_optimization_cmd, -	"[no] route-map optimization", -	NO_STR -	"route-map\n" -	"optimization\n") -{ -	const struct lyd_node *rmi_dnode; -	const char *rm_name; -	char xpath[XPATH_MAXLEN]; - -	vty_out(vty, -		"%% This command is deprecated. Please, use `route-map NAME optimization` from the config node.\n"); - -	rmi_dnode = -		yang_dnode_get(vty->candidate_config->dnode, VTY_CURR_XPATH); -	if (!rmi_dnode) { -		vty_out(vty, "%% Failed to get RMI dnode in candidate DB\n"); -		return CMD_WARNING_CONFIG_FAILED; -	} - -	rm_name = yang_dnode_get_string(rmi_dnode, "../name"); - -	snprintf( -		xpath, sizeof(xpath), -		"/frr-route-map:lib/route-map[name='%s']/optimization-disabled", -		rm_name); -	nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, no ? 
"true" : "false"); -	return nb_cli_apply_changes(vty, NULL); -} -  static int route_map_config_write(struct vty *vty)  {  	const struct lyd_node *dnode; @@ -1582,6 +1557,4 @@ void route_map_cli_init(void)  	install_element(RMAP_NODE, &set_srte_color_cmd);  	install_element(RMAP_NODE, &no_set_srte_color_cmd); - -	install_element(RMAP_NODE, &routemap_optimization_cmd);  } diff --git a/lib/strformat.c b/lib/strformat.c index a420ba553a..d941a7f04e 100644 --- a/lib/strformat.c +++ b/lib/strformat.c @@ -361,6 +361,8 @@ static ssize_t printfrr_abstime(struct fbuf *buf, struct printfrr_eargs *ea,  	if (flags & TIMEFMT_SKIP)  		return 0; +	if (!ts) +		return bputch(buf, '-');  	if (flags & TIMEFMT_REALTIME)  		*real_ts = *ts; @@ -452,6 +454,8 @@ static ssize_t printfrr_reltime(struct fbuf *buf, struct printfrr_eargs *ea,  	if (flags & TIMEFMT_SKIP)  		return 0; +	if (!ts) +		return bputch(buf, '-');  	if (flags & TIMEFMT_ABSOLUTE) {  		struct timespec anchor[1]; @@ -561,8 +565,6 @@ static ssize_t printfrr_ts(struct fbuf *buf, struct printfrr_eargs *ea,  {  	const struct timespec *ts = vptr; -	if (!ts) -		return bputs(buf, "(null)");  	return printfrr_time(buf, ea, ts, 0);  } @@ -574,7 +576,7 @@ static ssize_t printfrr_tv(struct fbuf *buf, struct printfrr_eargs *ea,  	struct timespec ts;  	if (!tv) -		return bputs(buf, "(null)"); +		return printfrr_time(buf, ea, NULL, 0);  	ts.tv_sec = tv->tv_sec;  	ts.tv_nsec = tv->tv_usec * 1000; @@ -589,7 +591,7 @@ static ssize_t printfrr_tt(struct fbuf *buf, struct printfrr_eargs *ea,  	struct timespec ts;  	if (!tt) -		return bputs(buf, "(null)"); +		return printfrr_time(buf, ea, NULL, TIMEFMT_SECONDS);  	ts.tv_sec = *tt;  	ts.tv_nsec = 0; diff --git a/lib/subdir.am b/lib/subdir.am index d1df9cb3d9..c3899c4e0f 100644 --- a/lib/subdir.am +++ b/lib/subdir.am @@ -588,7 +588,7 @@ DISTCLEANFILES += lib/route_types.h  if GIT_VERSION  # bit of a trick here to always have up-to-date git stamps without triggering -# unneccessary rebuilds.  .PHONY causes the .tmp file to be rebuilt always, +# unnecessary rebuilds.  .PHONY causes the .tmp file to be rebuilt always,  # but if we use that on gitversion.h it'll ripple through the .c file deps.  # (even if gitversion.h's file timestamp doesn't change, make will think it  # did, because of .PHONY...) diff --git a/lib/thread.c b/lib/thread.c index 90074b3d89..44183257bb 100644 --- a/lib/thread.c +++ b/lib/thread.c @@ -751,7 +751,7 @@ void thread_master_free(struct thread_master *m)  	XFREE(MTYPE_THREAD_MASTER, m);  } -/* Return remain time in miliseconds. */ +/* Return remain time in milliseconds. */  unsigned long thread_timer_remain_msec(struct thread *thread)  {  	int64_t remain; @@ -266,6 +266,15 @@ static inline void vty_push_context(struct vty *vty, int node, uint64_t id)  	struct structname *ptr = VTY_GET_CONTEXT(structname);                  \  	VTY_CHECK_CONTEXT(ptr); +#define VTY_DECLVAR_CONTEXT_VRF(vrfptr)                                        \ +	struct vrf *vrfptr;                                                    \ +	if (vty->node == CONFIG_NODE)                                          \ +		vrfptr = vrf_lookup_by_id(VRF_DEFAULT);                        \ +	else                                                                   \ +		vrfptr = VTY_GET_CONTEXT(vrf);                                 \ +	VTY_CHECK_CONTEXT(vrfptr);                                             \ +	MACRO_REQUIRE_SEMICOLON() /* end */ +  /* XPath macros. 
*/  #define VTY_PUSH_XPATH(nodeval, value)                                         \  	do {                                                                   \ diff --git a/lib/zclient.h b/lib/zclient.h index 7e1283d830..78eb73c530 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -143,7 +143,7 @@ typedef enum {  	ZEBRA_BFD_CLIENT_DEREGISTER,  	ZEBRA_INTERFACE_ENABLE_RADV,  	ZEBRA_INTERFACE_DISABLE_RADV, -	ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB, +	ZEBRA_NEXTHOP_LOOKUP_MRIB,  	ZEBRA_INTERFACE_LINK_PARAMS,  	ZEBRA_MPLS_LABELS_ADD,  	ZEBRA_MPLS_LABELS_DELETE, diff --git a/lib/zlog.h b/lib/zlog.h index a530c589a8..dcc0bf14e9 100644 --- a/lib/zlog.h +++ b/lib/zlog.h @@ -68,7 +68,7 @@ struct xrefdata_logmsg {   * initialization and/or before config load.  There is no need to call e.g.   * fprintf(stderr, ...) just because it's "too early" at startup.  Depending   * on context, it may still be the right thing to use fprintf though -- try to - * determine wether something is a log message or something else. + * determine whether something is a log message or something else.   */  extern void vzlogx(const struct xref_logmsg *xref, int prio, diff --git a/m4/.gitignore b/m4/.gitignore index 01a2a593d0..63f9fa78ed 100644 --- a/m4/.gitignore +++ b/m4/.gitignore @@ -3,6 +3,7 @@  !ax_compare_version.m4  !ax_cxx_compile_stdcxx.m4 +!ax_lua.m4  !ax_prog_perl_modules.m4  !ax_pthread.m4  !ax_python.m4 diff --git a/nhrpd/linux.c b/nhrpd/linux.c index 4986bfb99c..75e9f37a68 100644 --- a/nhrpd/linux.c +++ b/nhrpd/linux.c @@ -7,8 +7,9 @@   * (at your option) any later version.   */ -#include <errno.h>  #include "zebra.h" + +#include <errno.h>  #include <linux/if_packet.h>  #include "nhrp_protocol.h" diff --git a/ospf6d/ospf6_asbr.c b/ospf6d/ospf6_asbr.c index 40afd716c7..63444f8e57 100644 --- a/ospf6d/ospf6_asbr.c +++ b/ospf6d/ospf6_asbr.c @@ -1440,6 +1440,7 @@ void ospf6_asbr_redistribute_add(int type, ifindex_t ifindex,  	/* apply route-map */  	if (ROUTEMAP(red)) {  		troute.route_option = &tinfo; +		troute.ospf6 = ospf6;  		tinfo.ifindex = ifindex;  		tinfo.tag = tag; diff --git a/ospf6d/ospf6_flood.c b/ospf6d/ospf6_flood.c index cc82084e5e..bc9e2c3405 100644 --- a/ospf6d/ospf6_flood.c +++ b/ospf6d/ospf6_flood.c @@ -878,6 +878,28 @@ static int ospf6_is_maxage_lsa_drop(struct ospf6_lsa *lsa,  	return 0;  } +static bool ospf6_lsa_check_min_arrival(struct ospf6_lsa *lsa, +					struct ospf6_neighbor *from) +{ +	struct timeval now, res; +	unsigned int time_delta_ms; + +	monotime(&now); +	timersub(&now, &lsa->installed, &res); +	time_delta_ms = (res.tv_sec * 1000) + (int)(res.tv_usec / 1000); + +	if (time_delta_ms < from->ospf6_if->area->ospf6->lsa_minarrival) { +		if (IS_OSPF6_DEBUG_FLOODING || +		    IS_OSPF6_DEBUG_FLOOD_TYPE(lsa->header->type)) +			zlog_debug( +				"LSA can't be updated within MinLSArrival, %dms < %dms, discard", +				time_delta_ms, +				from->ospf6_if->area->ospf6->lsa_minarrival); +		return true; +	} +	return false; +} +  /* RFC2328 section 13 The Flooding Procedure */  void ospf6_receive_lsa(struct ospf6_neighbor *from,  		       struct ospf6_lsa_header *lsa_header) @@ -885,7 +907,6 @@ void ospf6_receive_lsa(struct ospf6_neighbor *from,  	struct ospf6_lsa *new = NULL, *old = NULL, *rem = NULL;  	int ismore_recent;  	int is_debug = 0; -	unsigned int time_delta_ms;  	ismore_recent = 1;  	assert(from); @@ -993,19 +1014,7 @@ void ospf6_receive_lsa(struct ospf6_neighbor *from,  		/* (a) MinLSArrival check */  		if (old) { -			struct timeval now, res; -			monotime(&now); -			timersub(&now, &old->installed, 
&res); -			time_delta_ms = -				(res.tv_sec * 1000) + (int)(res.tv_usec / 1000); -			if (time_delta_ms -			    < from->ospf6_if->area->ospf6->lsa_minarrival) { -				if (is_debug) -					zlog_debug( -						"LSA can't be updated within MinLSArrival, %dms < %dms, discard", -						time_delta_ms, -						from->ospf6_if->area->ospf6 -							->lsa_minarrival); +			if (ospf6_lsa_check_min_arrival(old, from)) {  				ospf6_lsa_delete(new);  				return; /* examin next lsa */  			} @@ -1222,7 +1231,11 @@ void ospf6_receive_lsa(struct ospf6_neighbor *from,  						__PRETTY_FUNCTION__, old->name);  			} -			/* XXX, MinLSArrival check !? RFC 2328 13 (8) */ +			/* MinLSArrival check as per RFC 2328 13 (8) */ +			if (ospf6_lsa_check_min_arrival(old, from)) { +				ospf6_lsa_delete(new); +				return; /* examin next lsa */ +			}  			ospf6_lsdb_add(ospf6_lsa_copy(old),  				       from->lsupdate_list); diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c index b9ee3c3403..55f1a1c7b5 100644 --- a/ospf6d/ospf6_interface.c +++ b/ospf6d/ospf6_interface.c @@ -158,7 +158,7 @@ static uint32_t ospf6_interface_get_cost(struct ospf6_interface *oi)  	ospf6 = oi->interface->vrf->info;  	refbw = ospf6 ? ospf6->ref_bandwidth : OSPF6_REFERENCE_BANDWIDTH; -	/* A specifed ip ospf cost overrides a calculated one. */ +	/* A specified ip ospf cost overrides a calculated one. */  	if (CHECK_FLAG(oi->flag, OSPF6_INTERFACE_NOAUTOCOST))  		cost = oi->cost;  	else { diff --git a/ospf6d/ospf6_lsdb.h b/ospf6d/ospf6_lsdb.h index 07c331af64..a3a4d5bb9f 100644 --- a/ospf6d/ospf6_lsdb.h +++ b/ospf6d/ospf6_lsdb.h @@ -71,7 +71,7 @@ extern struct ospf6_lsa *ospf6_lsdb_next(const struct route_node *iterend,   * Since we are locking the lsa in ospf6_lsdb_head   * and then unlocking it in ospf6_lsa_unlock, when   * we cache the next pointer we need to increment - * the lock for the lsa so we don't accidently free + * the lock for the lsa so we don't accidentally free   * it really early.   
*/  #define ALL_LSDB(lsdb, lsa, lsanext)                                           \ diff --git a/ospf6d/ospf6_nssa.c b/ospf6d/ospf6_nssa.c index 1220c32783..53b45d6ca3 100644 --- a/ospf6d/ospf6_nssa.c +++ b/ospf6d/ospf6_nssa.c @@ -613,7 +613,8 @@ struct ospf6_lsa *ospf6_translated_nssa_refresh(struct ospf6_area *area,  	return new;  } -static void ospf6_abr_translate_nssa(struct ospf6_area *area, struct ospf6_lsa *lsa) +static void ospf6_abr_translate_nssa(struct ospf6_area *area, +				     struct ospf6_lsa *lsa)  {  	/* Incoming Type-7 or aggregated Type-7  	 * @@ -625,7 +626,7 @@ static void ospf6_abr_translate_nssa(struct ospf6_area *area, struct ospf6_lsa *  	 *  Later, any Unapproved Translated Type-5's are flushed/discarded  	 */ -	struct ospf6_lsa *old = NULL, *new = NULL; +	struct ospf6_lsa *old = NULL;  	struct ospf6_as_external_lsa *nssa_lsa;  	struct prefix prefix;  	struct ospf6_route *match; @@ -661,11 +662,36 @@ static void ospf6_abr_translate_nssa(struct ospf6_area *area, struct ospf6_lsa *  		return;  	} +	/* Find the type-5 LSA in the area-range table */ +	match = ospf6_route_lookup_bestmatch(&prefix, area->nssa_range_table); +	if (match && CHECK_FLAG(match->flag, OSPF6_ROUTE_NSSA_RANGE)) { +		if (prefix_same(&prefix, &match->prefix)) { +			/* The prefix range is being removed, +			 * no need to refresh +			 */ +			if +				CHECK_FLAG(match->flag, OSPF6_ROUTE_REMOVE) +			return; +		} else { +			if (!CHECK_FLAG(match->flag, OSPF6_ROUTE_REMOVE)) { +				if (IS_OSPF6_DEBUG_NSSA) +					zlog_debug( +						"%s: LSA Id %pI4 suppressed by range %pFX of area %s", +						__func__, &lsa->header->id, +						&match->prefix, area->name); +				/* LSA will be suppressed by area-range command, +				 * no need to refresh +				 */ +				return; +			} +		} +	} +  	/* Find the existing AS-External LSA for this prefix */ -	match = ospf6_route_lookup(&prefix, ospf6->external_table); +	match = ospf6_route_lookup(&prefix, ospf6->route_table);  	if (match) { -		old = ospf6_lsdb_lookup(OSPF6_LSTYPE_AS_EXTERNAL, -					match->path.origin.id, ospf6->router_id, +		old = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_AS_EXTERNAL), +					lsa->external_lsa_id, ospf6->router_id,  					ospf6->lsdb);  	} @@ -675,20 +701,15 @@ static void ospf6_abr_translate_nssa(struct ospf6_area *area, struct ospf6_lsa *  		return;  	} -	if (old) { +	if (old && !OSPF6_LSA_IS_MAXAGE(old)) {  		if (IS_OSPF6_DEBUG_NSSA)  			zlog_debug( -				"%s : found old translated LSA Id %pI4, refreshing", +				"%s : found old translated LSA Id %pI4, skip",  				__func__, &old->header->id); -		/* refresh */ -		new = ospf6_translated_nssa_refresh(area, lsa, old); -		if (!new) { -			if (IS_OSPF6_DEBUG_NSSA) -				zlog_debug( -					"%s : could not refresh translated LSA Id %pI4", -					__func__, &old->header->id); -		} +		UNSET_FLAG(old->flag, OSPF6_LSA_UNAPPROVED); +		return; +  	} else {  		/* no existing external route for this LSA Id  		 * originate translated LSA diff --git a/ospf6d/ospf6_top.h b/ospf6d/ospf6_top.h index f06a3254ac..9dddd465fe 100644 --- a/ospf6d/ospf6_top.h +++ b/ospf6d/ospf6_top.h @@ -70,7 +70,7 @@ struct ospf6_gr_info {  };  struct ospf6_gr_helper { -	/* Gracefull restart Helper supported configs*/ +	/* Graceful restart Helper supported configs*/  	/* Supported grace interval*/  	uint32_t supported_grace_time; diff --git a/ospfclient/ospf_apiclient.c b/ospfclient/ospf_apiclient.c index 29f1c0807d..1908604bd9 100644 --- a/ospfclient/ospf_apiclient.c +++ b/ospfclient/ospf_apiclient.c @@ -91,7 +91,7 @@ static unsigned short 
ospf_apiclient_getport(void)  }  /* ----------------------------------------------------------- - * Followings are functions for connection management + * Following are functions for connection management   * -----------------------------------------------------------   */ @@ -300,7 +300,7 @@ int ospf_apiclient_close(struct ospf_apiclient *oclient)  }  /* ----------------------------------------------------------- - * Followings are functions to send a request to OSPFd + * Following are functions to send a request to OSPFd   * -----------------------------------------------------------   */ @@ -498,7 +498,7 @@ int ospf_apiclient_lsa_delete(struct ospf_apiclient *oclient,  }  /* ----------------------------------------------------------- - * Followings are handlers for messages from OSPF daemon + * Following are handlers for messages from OSPF daemon   * -----------------------------------------------------------   */ diff --git a/ospfd/ospf_apiserver.c b/ospfd/ospf_apiserver.c index a624f4ce1e..97bd125aee 100644 --- a/ospfd/ospf_apiserver.c +++ b/ospfd/ospf_apiserver.c @@ -221,7 +221,7 @@ static struct ospf_apiserver *lookup_apiserver_by_lsa(struct ospf_lsa *lsa)  }  /* ----------------------------------------------------------- - * Followings are functions to manage client connections. + * Following are functions to manage client connections.   * -----------------------------------------------------------   */  static int ospf_apiserver_new_lsa_hook(struct ospf_lsa *lsa) @@ -1174,7 +1174,7 @@ int ospf_apiserver_handle_register_event(struct ospf_apiserver *apiserv,  /* ----------------------------------------------------------- - * Followings are functions for LSDB synchronization. + * Following are functions for LSDB synchronization.   * -----------------------------------------------------------   */ @@ -1345,7 +1345,7 @@ int ospf_apiserver_handle_sync_lsdb(struct ospf_apiserver *apiserv,  /* ----------------------------------------------------------- - * Followings are functions to originate or update LSA + * Following are functions to originate or update LSA   * from an application.   
* -----------------------------------------------------------   */ @@ -1787,7 +1787,7 @@ out:  /* ----------------------------------------------------------- - * Followings are functions to delete LSAs + * Following are functions to delete LSAs   * -----------------------------------------------------------   */ @@ -1952,7 +1952,7 @@ void ospf_apiserver_flush_opaque_lsa(struct ospf_apiserver *apiserv,  /* ----------------------------------------------------------- - * Followings are callback functions to handle opaque types + * Following are callback functions to handle opaque types   * -----------------------------------------------------------   */ @@ -2102,7 +2102,7 @@ void ospf_apiserver_show_info(struct vty *vty, struct json_object *json,  }  /* ----------------------------------------------------------- - * Followings are functions to notify clients about events + * Following are functions to notify clients about events   * -----------------------------------------------------------   */ @@ -2417,7 +2417,7 @@ static void apiserver_clients_lsa_change_notify(uint8_t msgtype,  /* ------------------------------------------------------------- - * Followings are hooks invoked when LSAs are updated or deleted + * Following are hooks invoked when LSAs are updated or deleted   * -------------------------------------------------------------   */ diff --git a/ospfd/ospf_apiserver.h b/ospfd/ospf_apiserver.h index 3d57737080..b4d8bb2f52 100644 --- a/ospfd/ospf_apiserver.h +++ b/ospfd/ospf_apiserver.h @@ -79,7 +79,7 @@ enum ospf_apiserver_event {  };  /* ----------------------------------------------------------- - * Followings are functions to manage client connections. + * Following are functions to manage client connections.   * -----------------------------------------------------------   */ @@ -99,7 +99,7 @@ extern int ospf_apiserver_send_reply(struct ospf_apiserver *apiserv,  				     uint32_t seqnr, uint8_t rc);  /* ----------------------------------------------------------- - * Followings are message handler functions + * Following are message handler functions   * -----------------------------------------------------------   */ @@ -147,7 +147,7 @@ extern int ospf_apiserver_handle_sync_lsdb(struct ospf_apiserver *apiserv,  /* ----------------------------------------------------------- - * Followings are functions for LSA origination/deletion + * Following are functions for LSA origination/deletion   * -----------------------------------------------------------   */ @@ -169,7 +169,7 @@ extern void ospf_apiserver_flood_opaque_lsa(struct ospf_lsa *lsa);  /* ----------------------------------------------------------- - * Followings are callback functions to handle opaque types + * Following are callback functions to handle opaque types   * -----------------------------------------------------------   */ @@ -191,7 +191,7 @@ extern void ospf_apiserver_flush_opaque_lsa(struct ospf_apiserver *apiserv,  					    uint8_t opaque_type);  /* ----------------------------------------------------------- - * Followings are hooks when LSAs are updated or deleted + * Following are hooks when LSAs are updated or deleted   * -----------------------------------------------------------   */ diff --git a/ospfd/ospf_ext.c b/ospfd/ospf_ext.c index 0e5a7e29c0..69847088e4 100644 --- a/ospfd/ospf_ext.c +++ b/ospfd/ospf_ext.c @@ -74,7 +74,7 @@ static struct ospf_ext_lp OspfEXT;  /*   * ----------------------------------------------------------------------- - * Followings are initialize/terminate functions for Extended 
Prefix/Link + * Following are initialize/terminate functions for Extended Prefix/Link   * Opaque LSA handling.   * -----------------------------------------------------------------------   */ @@ -216,7 +216,7 @@ void ospf_ext_finish(void)  /*   * --------------------------------------------------------------------- - * Followings are control functions for Extended Prefix/Link Opaque LSA + * Following are control functions for Extended Prefix/Link Opaque LSA   * parameters management.   * ---------------------------------------------------------------------   */ @@ -681,7 +681,7 @@ void ospf_ext_update_sr(bool enable)  /*   * ----------------------------------------------------------------------- - * Followings are callback functions against generic Opaque-LSAs handling + * Following are callback functions against generic Opaque-LSAs handling   * -----------------------------------------------------------------------   */ @@ -985,7 +985,7 @@ static int ospf_ext_pref_lsa_update(struct ospf_lsa *lsa)  /*   * ------------------------------------------------------- - * Followings are OSPF protocol processing functions for + * Following are OSPF protocol processing functions for   * Extended Prefix/Link Opaque LSA   * -------------------------------------------------------   */ @@ -1713,7 +1713,7 @@ static void ospf_ext_lsa_schedule(struct ext_itf *exti, enum lsa_opcode op)  /*   * ------------------------------------ - * Followings are vty show functions. + * Following are vty show functions.   * ------------------------------------   */ diff --git a/ospfd/ospf_interface.c b/ospfd/ospf_interface.c index 2626cccc37..5df2ecf070 100644 --- a/ospfd/ospf_interface.c +++ b/ospfd/ospf_interface.c @@ -102,7 +102,7 @@ int ospf_if_get_output_cost(struct ospf_interface *oi)  					: OSPF_DEFAULT_BANDWIDTH;  	refbw = oi->ospf->ref_bandwidth; -	/* A specifed ip ospf cost overrides a calculated one. */ +	/* A specified ip ospf cost overrides a calculated one. */  	if (OSPF_IF_PARAM_CONFIGURED(IF_DEF_PARAMS(oi->ifp), output_cost_cmd)  	    || OSPF_IF_PARAM_CONFIGURED(oi->params, output_cost_cmd))  		cost = OSPF_IF_PARAM(oi, output_cost_cmd); diff --git a/ospfd/ospf_ldp_sync.c b/ospfd/ospf_ldp_sync.c index f6c1b43610..b4d770d48a 100644 --- a/ospfd/ospf_ldp_sync.c +++ b/ospfd/ospf_ldp_sync.c @@ -141,7 +141,7 @@ void ospf_ldp_sync_if_init(struct ospf_interface *oi)  	ldp_sync_info = params->ldp_sync_info; -	/* specifed on interface overrides global config. */ +	/* specified on interface overrides global config. */  	if (!CHECK_FLAG(ldp_sync_info->flags, LDP_SYNC_FLAG_HOLDDOWN))  		ldp_sync_info->holddown = oi->ospf->ldp_sync_cmd.holddown; @@ -441,7 +441,7 @@ void ospf_if_set_ldp_sync_enable(struct ospf *ospf, struct interface *ifp)  	struct ldp_sync_info *ldp_sync_info;  	/* called when setting LDP-SYNC at the global level: -	 *  specifed on interface overrides global config +	 *  specified on interface overrides global config  	 *  if ptop link send msg to LDP indicating ldp-sync enabled  	 */  	if (if_is_loopback(ifp)) @@ -479,7 +479,7 @@ void ospf_if_set_ldp_sync_holddown(struct ospf *ospf, struct interface *ifp)  	struct ldp_sync_info *ldp_sync_info;  	/* called when setting LDP-SYNC at the global level: -	 *  specifed on interface overrides global config. +	 *  specified on interface overrides global config.  	 
*/  	if (if_is_loopback(ifp))  		return; diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c index 48751dfba8..59e1b73d24 100644 --- a/ospfd/ospf_lsa.c +++ b/ospfd/ospf_lsa.c @@ -450,7 +450,7 @@ char link_info_set(struct stream **s, struct in_addr id, struct in_addr data,  	/* LSA stream is initially allocated to OSPF_MAX_LSA_SIZE, suits  	 * vast majority of cases. Some rare routers with lots of links need  	 * more. -	 * we try accomodate those here. +	 * we try accommodate those here.  	 */  	if (STREAM_WRITEABLE(*s) < OSPF_ROUTER_LSA_LINK_SIZE) {  		size_t ret = OSPF_MAX_LSA_SIZE; @@ -2422,10 +2422,10 @@ void ospf_external_lsa_flush(struct ospf *ospf, uint8_t type,  	    && !(CHECK_FLAG(lsa->flags, OSPF_LSA_LOCAL_XLT)))  		ospf_nssa_lsa_flush(ospf, p); -	/* Sweep LSA from Link State Retransmit List. */ -	ospf_ls_retransmit_delete_nbr_as(ospf, lsa); -  	if (!IS_LSA_MAXAGE(lsa)) { +		/* Sweep LSA from Link State Retransmit List. */ +		ospf_ls_retransmit_delete_nbr_as(ospf, lsa); +  		/* Unregister LSA from Refresh queue. */  		ospf_refresher_unregister_lsa(ospf, lsa); @@ -2858,7 +2858,7 @@ struct ospf_lsa *ospf_lsa_install(struct ospf *ospf, struct ospf_interface *oi,  	   update is needed */  	old = ospf_lsdb_lookup(lsdb, lsa); -	/* Do comparision and record if recalc needed. */ +	/* Do comparison and record if recalc needed. */  	rt_recalc = 0;  	if (old == NULL || ospf_lsa_different(old, lsa, false)) {  		/* Ref rfc3623 section 3.2.3 diff --git a/ospfd/ospf_opaque.c b/ospfd/ospf_opaque.c index b781c9edc1..947454c0df 100644 --- a/ospfd/ospf_opaque.c +++ b/ospfd/ospf_opaque.c @@ -61,7 +61,7 @@ DEFINE_MTYPE_STATIC(OSPFD, OPAQUE_INFO_PER_TYPE, "OSPF opaque per-type info");  DEFINE_MTYPE_STATIC(OSPFD, OPAQUE_INFO_PER_ID, "OSPF opaque per-ID info");  /*------------------------------------------------------------------------* - * Followings are initialize/terminate functions for Opaque-LSAs handling. + * Following are initialize/terminate functions for Opaque-LSAs handling.   *------------------------------------------------------------------------*/  #ifdef SUPPORT_OSPF_API @@ -258,7 +258,7 @@ static const char *ospf_opaque_type_name(uint8_t opaque_type)  }  /*------------------------------------------------------------------------* - * Followings are management functions to store user specified callbacks. + * Following are management functions to store user specified callbacks.   *------------------------------------------------------------------------*/  struct opaque_info_per_type; /* Forward declaration. */ @@ -467,7 +467,7 @@ ospf_opaque_functab_lookup(struct ospf_lsa *lsa)  }  /*------------------------------------------------------------------------* - * Followings are management functions for self-originated LSA entries. + * Following are management functions for self-originated LSA entries.   *------------------------------------------------------------------------*/  /* @@ -759,7 +759,7 @@ out:  }  /*------------------------------------------------------------------------* - * Followings are (vty) configuration functions for Opaque-LSAs handling. + * Following are (vty) configuration functions for Opaque-LSAs handling.   *------------------------------------------------------------------------*/  DEFUN (capability_opaque, @@ -830,7 +830,7 @@ static void ospf_opaque_register_vty(void)  }  /*------------------------------------------------------------------------* - * Followings are collection of user-registered function callers. 
+ * Following are collection of user-registered function callers.   *------------------------------------------------------------------------*/  static int opaque_lsa_new_if_callback(struct list *funclist, @@ -978,7 +978,7 @@ out:  }  /*------------------------------------------------------------------------* - * Followings are glue functions to call Opaque-LSA specific processing. + * Following are glue functions to call Opaque-LSA specific processing.   *------------------------------------------------------------------------*/  int ospf_opaque_new_if(struct interface *ifp) @@ -1283,7 +1283,7 @@ out:  }  /*------------------------------------------------------------------------* - * Followings are Opaque-LSA origination/refresh management functions. + * Following are Opaque-LSA origination/refresh management functions.   *------------------------------------------------------------------------*/  static void ospf_opaque_type9_lsa_originate(struct thread *t); @@ -1647,7 +1647,7 @@ struct ospf_lsa *ospf_opaque_lsa_refresh(struct ospf_lsa *lsa)  }  /*------------------------------------------------------------------------* - * Followings are re-origination/refresh/flush operations of Opaque-LSAs, + * Following are re-origination/refresh/flush operations of Opaque-LSAs,   * triggered by external interventions (vty session, signaling, etc).   *------------------------------------------------------------------------*/ @@ -2162,7 +2162,7 @@ void ospf_opaque_self_originated_lsa_received(struct ospf_neighbor *nbr,  }  /*------------------------------------------------------------------------* - * Followings are util functions; probably be used by Opaque-LSAs only... + * Following are util functions; probably be used by Opaque-LSAs only...   *------------------------------------------------------------------------*/  struct ospf *oi_to_top(struct ospf_interface *oi) diff --git a/ospfd/ospf_opaque.h b/ospfd/ospf_opaque.h index b26bc1e10c..59d4288bf2 100644 --- a/ospfd/ospf_opaque.h +++ b/ospfd/ospf_opaque.h @@ -64,7 +64,7 @@  #define OPAQUE_TYPE_EXTENDED_LINK_LSA                  8  #define OPAQUE_TYPE_MAX                                8 -/* Followings types are proposed in internet-draft documents. */ +/* Following types are proposed in internet-draft documents. */  #define OPAQUE_TYPE_8021_QOSPF				129  #define OPAQUE_TYPE_SECONDARY_NEIGHBOR_DISCOVERY	224  #define OPAQUE_TYPE_FLOODGATE                           225 diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c index c2bd7a0796..55a010a293 100644 --- a/ospfd/ospf_packet.c +++ b/ospfd/ospf_packet.c @@ -3515,7 +3515,7 @@ static int ospf_make_ls_req_func(struct stream *s, uint16_t *length,  	/* LS Request packet overflows interface MTU  	 * delta is just number of bytes required for 1 LS Req  	 * ospf_packet_max will return the number of bytes can -	 * be accomodated without ospf header. So length+delta +	 * be accommodated without ospf header. So length+delta  	 * can be compared to ospf_packet_max  	 * to check if it can fit another lsreq in the same packet.  	 */ @@ -3601,7 +3601,7 @@ static int ospf_make_ls_upd(struct ospf_interface *oi, struct list *update,  			zlog_debug("%s: List Iteration %d LSA[%s]", __func__,  				   count, dump_lsa_key(lsa)); -		/* Will it fit? Minimum it has to fit atleast one */ +		/* Will it fit? 
Minimum it has to fit at least one */  		if ((length + delta + ntohs(lsa->data->length) > size_noauth) &&  				(count > 0))  			break; @@ -3649,7 +3649,7 @@ static int ospf_make_ls_ack(struct ospf_interface *oi, struct list *ack,  		/* LS Ack packet overflows interface MTU  		 * delta is just number of bytes required for  		 * 1 LS Ack(1 LS Hdr) ospf_packet_max will return -		 * the number of bytes can be accomodated without +		 * the number of bytes can be accommodated without  		 * ospf header. So length+delta can be compared  		 * against ospf_packet_max to check if it can fit  		 * another ls header in the same packet. @@ -3966,7 +3966,7 @@ void ospf_ls_upd_send_lsa(struct ospf_neighbor *nbr, struct ospf_lsa *lsa,  	list_delete(&update);  } -/* Determine size for packet. Must be at least big enough to accomodate next +/* Determine size for packet. Must be at least big enough to accommodate next   * LSA on list, which may be bigger than MTU size.   *   * Return pointer to new ospf_packet diff --git a/ospfd/ospf_ri.c b/ospfd/ospf_ri.c index 0efa6ca4d5..3efdb53102 100644 --- a/ospfd/ospf_ri.c +++ b/ospfd/ospf_ri.c @@ -66,7 +66,7 @@  static struct ospf_router_info OspfRI;  /*------------------------------------------------------------------------------* - * Followings are initialize/terminate functions for Router Information + * Following are initialize/terminate functions for Router Information   *handling.   *------------------------------------------------------------------------------*/ @@ -232,7 +232,7 @@ static struct ospf_ri_area_info *lookup_by_area(struct ospf_area *area)  }  /*------------------------------------------------------------------------* - * Followings are control functions for ROUTER INFORMATION parameters + * Following are control functions for ROUTER INFORMATION parameters   *management.   *------------------------------------------------------------------------*/ @@ -668,7 +668,7 @@ void ospf_router_info_update_sr(bool enable, struct sr_node *srn)  }  /*------------------------------------------------------------------------* - * Followings are callback functions against generic Opaque-LSAs handling. + * Following are callback functions against generic Opaque-LSAs handling.   *------------------------------------------------------------------------*/  static void ospf_router_info_ism_change(struct ospf_interface *oi,  					int old_state) @@ -693,7 +693,7 @@ static void ospf_router_info_ism_change(struct ospf_interface *oi,  }  /*------------------------------------------------------------------------* - * Followings are OSPF protocol processing functions for ROUTER INFORMATION + * Following are OSPF protocol processing functions for ROUTER INFORMATION   *------------------------------------------------------------------------*/  static void build_tlv_header(struct stream *s, struct tlv_header *tlvh) @@ -1223,7 +1223,7 @@ static int ospf_router_info_lsa_update(struct ospf_lsa *lsa)  }  /*------------------------------------------------------------------------* - * Followings are vty session control functions. + * Following are vty session control functions.   *------------------------------------------------------------------------*/  #define check_tlv_size(size, msg)                                              \ @@ -1660,7 +1660,7 @@ static void ospf_router_info_config_write_router(struct vty *vty)  }  /*------------------------------------------------------------------------* - * Followings are vty command functions. + * Following are vty command functions.   
+ * Following are vty command functions.
*------------------------------------------------------------------------*/  /* Simple wrapper schedule RI LSA action in function of the scope */  static void ospf_router_info_schedule(enum lsa_opcode opcode) diff --git a/ospfd/ospf_spf.c b/ospfd/ospf_spf.c index f763400212..baf02365a2 100644 --- a/ospfd/ospf_spf.c +++ b/ospfd/ospf_spf.c @@ -678,7 +678,7 @@ static void ospf_spf_flush_parents(struct vertex *w)  /*   * Consider supplied next-hop for inclusion to the supplied list of - * equal-cost next-hops, adjust list as neccessary. + * equal-cost next-hops, adjust list as necessary.   */  static void ospf_spf_add_parent(struct vertex *v, struct vertex *w,  				struct vertex_nexthop *newhop, diff --git a/ospfd/ospf_sr.c b/ospfd/ospf_sr.c index e4059d05c2..c861685f4e 100644 --- a/ospfd/ospf_sr.c +++ b/ospfd/ospf_sr.c @@ -2023,7 +2023,7 @@ void ospf_sr_update_task(struct ospf *ospf)  /*   * -------------------------------------- - * Followings are vty command functions. + * Following are vty command functions.   * --------------------------------------   */ @@ -2053,11 +2053,15 @@ void ospf_sr_config_write_router(struct vty *vty)  		vty_out(vty, " segment-routing global-block %u %u",  			OspfSR.srgb.start, upper); -	if ((OspfSR.srlb.start != DEFAULT_SRLB_LABEL) -	    || (OspfSR.srlb.end != DEFAULT_SRLB_END)) +	if ((OspfSR.srlb.start != DEFAULT_SRLB_LABEL) || +	    (OspfSR.srlb.end != DEFAULT_SRLB_END)) { +		if ((OspfSR.srgb.start == DEFAULT_SRGB_LABEL) && +		    (OspfSR.srgb.size == DEFAULT_SRGB_SIZE)) +			vty_out(vty, " segment-routing global-block %u %u", +				OspfSR.srgb.start, upper);  		vty_out(vty, " local-block %u %u\n", OspfSR.srlb.start,  			OspfSR.srlb.end); -	else +	} else  		vty_out(vty, "\n");  	if (OspfSR.msd != 0) @@ -2663,12 +2667,18 @@ DEFUN (no_sr_prefix_sid,  		return CMD_WARNING_CONFIG_FAILED;  	} +	osr_debug("SR (%s): Remove Prefix %pFX with index %u", __func__, +		  (struct prefix *)&srp->prefv4, srp->sid); +  	/* Get Interface */  	ifp = if_lookup_by_index(srp->nhlfe.ifindex, VRF_DEFAULT);  	if (ifp == NULL) {  		vty_out(vty, "interface for prefix %s not found.\n",  			argv[idx]->arg); -		return CMD_WARNING_CONFIG_FAILED; +		/* silently remove from list */ +		listnode_delete(OspfSR.self->ext_prefix, srp); +		XFREE(MTYPE_OSPF_SR_PARAMS, srp); +		return CMD_SUCCESS;  	}  	/* Update Extended Prefix LSA */ @@ -2677,9 +2687,6 @@ DEFUN (no_sr_prefix_sid,  		return CMD_WARNING;  	} -	osr_debug("SR (%s): Remove Prefix %pFX with index %u", __func__, -		  (struct prefix *)&srp->prefv4, srp->sid); -  	/* Delete NHLFE if NO-PHP is set and EXPLICIT NULL not set */  	if (CHECK_FLAG(srp->flags, EXT_SUBTLV_PREFIX_SID_NPFLG)  	    && !CHECK_FLAG(srp->flags, EXT_SUBTLV_PREFIX_SID_EFLG)) diff --git a/ospfd/ospf_te.c b/ospfd/ospf_te.c index 999bc49d91..ddc62982bd 100644 --- a/ospfd/ospf_te.c +++ b/ospfd/ospf_te.c @@ -79,7 +79,7 @@ static const char *const mode2text[] = {"Off", "AS", "Area"};  /*------------------------------------------------------------------------* - * Followings are initialize/terminate functions for MPLS-TE handling. + * Following are initialize/terminate functions for MPLS-TE handling.   *------------------------------------------------------------------------*/  static int ospf_mpls_te_new_if(struct interface *ifp); @@ -197,7 +197,7 @@ void ospf_mpls_te_finish(void)  }  /*------------------------------------------------------------------------* - * Followings are control functions for MPLS-TE parameters management. 
+ * Following are control functions for MPLS-TE parameters management.   *------------------------------------------------------------------------*/  static void del_mpls_te_link(void *val)  { @@ -814,7 +814,7 @@ static int is_mandated_params_set(struct mpls_te_link *lp)  }  /*------------------------------------------------------------------------* - * Followings are callback functions against generic Opaque-LSAs handling. + * Following are callback functions against generic Opaque-LSAs handling.   *------------------------------------------------------------------------*/  static int ospf_mpls_te_new_if(struct interface *ifp) @@ -1079,7 +1079,7 @@ static void ospf_mpls_te_nsm_change(struct ospf_neighbor *nbr, int old_state)  }  /*------------------------------------------------------------------------* - * Followings are OSPF protocol processing functions for MPLS-TE LSA. + * Following are OSPF protocol processing functions for MPLS-TE LSA.   *------------------------------------------------------------------------*/  static void build_tlv_header(struct stream *s, struct tlv_header *tlvh) @@ -1624,7 +1624,7 @@ void ospf_mpls_te_lsa_schedule(struct mpls_te_link *lp, enum lsa_opcode opcode)  /**   * ------------------------------------------------------ - * Followings are Link State Data Base control functions. + * Following are Link State Data Base control functions.   * ------------------------------------------------------   */ @@ -3153,7 +3153,7 @@ static void ospf_te_init_ted(struct ls_ted *ted, struct ospf *ospf)  }  /*------------------------------------------------------------------------* - * Followings are vty session control functions. + * Following are vty session control functions.   *------------------------------------------------------------------------*/  #define check_tlv_size(size, msg)                                              \  	do {                                                                   \ @@ -3846,7 +3846,7 @@ static void ospf_mpls_te_config_write_router(struct vty *vty)  }  /*------------------------------------------------------------------------* - * Followings are vty command functions. + * Following are vty command functions.   
+ * Following are vty command functions.
*------------------------------------------------------------------------*/  DEFUN (ospf_mpls_te_on, @@ -3908,7 +3908,7 @@ DEFUN (no_ospf_mpls_te,  	ote_debug("MPLS-TE: ON -> OFF");  	/* Remove TED */ -	ls_ted_del_all(OspfMplsTE.ted); +	ls_ted_del_all(&OspfMplsTE.ted);  	OspfMplsTE.enabled = false;  	/* Flush all TE Opaque LSAs */ diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c index d4245bde7f..3bd4a9bb68 100644 --- a/ospfd/ospf_vty.c +++ b/ospfd/ospf_vty.c @@ -4394,12 +4394,20 @@ static void show_ip_ospf_neighbor_sub(struct vty *vty,  				json_neighbor = json_object_new_object(); -				ospf_nbr_ism_state_message(nbr, msgbuf, 16); - +				ospf_nbr_ism_state_message(nbr, msgbuf, +							   sizeof(msgbuf)); +#if CONFDATE > 20230321 +CPP_NOTICE("Remove show_ip_ospf_neighbor_sub() JSON keys: priority, state, deadTimeMsecs, address, retransmitCounter, requestCounter, dbSummaryCounter") +#endif  				json_object_int_add(json_neighbor, "priority",  						    nbr->priority);  				json_object_string_add(json_neighbor, "state",  						       msgbuf); +				json_object_int_add(json_neighbor, +						    "nbrPriority", +						    nbr->priority); +				json_object_string_add(json_neighbor, +						       "nbrState", msgbuf);  				json_object_string_add(  					json_neighbor, "converged", @@ -4425,6 +4433,10 @@ static void show_ip_ospf_neighbor_sub(struct vty *vty,  					json_object_int_add(json_neighbor,  							    "deadTimeMsecs",  							    time_store); +					json_object_int_add( +						json_neighbor, +						"routerDeadIntervalTimerDueMsec", +						time_store);  					json_object_string_add(  						json_neighbor, "upTime",  						ospf_timeval_dump( @@ -4440,27 +4452,47 @@ static void show_ip_ospf_neighbor_sub(struct vty *vty,  					json_object_string_add(json_neighbor,  							       "deadTimeMsecs",  							       "inactive"); +					json_object_string_add( +						json_neighbor, +						"routerDeadIntervalTimerDueMsec", +						"inactive");  				}  				json_object_string_addf(json_neighbor,  							"address", "%pI4",  							&nbr->src); +				json_object_string_addf(json_neighbor, +							"ifaceAddress", "%pI4", +							&nbr->src);  				json_object_string_add(json_neighbor,  						       "ifaceName",  						       IF_NAME(oi));  				json_object_int_add(  					json_neighbor, "retransmitCounter",  					ospf_ls_retransmit_count(nbr)); +				json_object_int_add( +					json_neighbor, +					"linkStateRetransmissionListCounter", +					ospf_ls_retransmit_count(nbr));  				json_object_int_add(json_neighbor,  						    "requestCounter",  						    ospf_ls_request_count(nbr)); +				json_object_int_add( +					json_neighbor, +					"linkStateRequestListCounter", +					ospf_ls_request_count(nbr));  				json_object_int_add(json_neighbor,  						    "dbSummaryCounter",  						    ospf_db_summary_count(nbr)); +				json_object_int_add( +					json_neighbor, +					"databaseSummaryListCounter", +					ospf_db_summary_count(nbr));  				json_object_array_add(json_neigh_array,  						      json_neighbor);  			} else { -				ospf_nbr_ism_state_message(nbr, msgbuf, 16); +				ospf_nbr_ism_state_message(nbr, msgbuf, +							   sizeof(msgbuf));  				if (nbr->state == NSM_Attempt  				    && nbr->router_id.s_addr == INADDR_ANY) @@ -5086,6 +5118,7 @@ static void show_ip_ospf_neighbor_detail_sub(struct vty *vty,  	char timebuf[OSPF_TIME_DUMP_SIZE];  	json_object *json_neigh = NULL, *json_neigh_array = NULL;  	char neigh_str[INET_ADDRSTRLEN] = {0}; +	char neigh_state[16] = {0};  	if (use_json) {  		if (prev_nbr && @@ -5138,15 +5171,13 @@ static 
void show_ip_ospf_neighbor_detail_sub(struct vty *vty,  			ospf_area_desc_string(oi->area), oi->ifp->name);  	/* Show neighbor priority and state. */ +	ospf_nbr_ism_state_message(nbr, neigh_state, sizeof(neigh_state));  	if (use_json) {  		json_object_int_add(json_neigh, "nbrPriority", nbr->priority); -		json_object_string_add( -			json_neigh, "nbrState", -			lookup_msg(ospf_nsm_state_msg, nbr->state, NULL)); +		json_object_string_add(json_neigh, "nbrState", neigh_state);  	} else  		vty_out(vty, "    Neighbor priority is %d, State is %s,", -			nbr->priority, -			lookup_msg(ospf_nsm_state_msg, nbr->state, NULL)); +			nbr->priority, neigh_state);  	/* Show state changes. */  	if (use_json) diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c index 389d3647d0..496d85fd7b 100644 --- a/ospfd/ospf_zebra.c +++ b/ospfd/ospf_zebra.c @@ -1280,6 +1280,7 @@ static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS)  {  	struct zapi_route api;  	struct prefix_ipv4 p; +	struct prefix pgen;  	unsigned long ifindex;  	struct in_addr nexthop;  	struct external_info *ei; @@ -1302,13 +1303,17 @@ static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS)  	if (IPV4_NET127(ntohl(p.prefix.s_addr)))  		return 0; +	pgen.family = p.family; +	pgen.prefixlen = p.prefixlen; +	pgen.u.prefix4 = p.prefix; +  	/* Re-destributed route is default route.  	 * Here, route type is used as 'ZEBRA_ROUTE_KERNEL' for  	 * updating ex-info. But in resetting (no default-info  	 * originate)ZEBRA_ROUTE_MAX is used to delete the ex-info.  	 * Resolved this inconsistency by maintaining same route type.  	 */ -	if (is_default_prefix4(&p)) +	if ((is_default_prefix(&pgen)) && (api.type != ZEBRA_ROUTE_OSPF))  		rt_type = DEFAULT_ROUTE;  	if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) diff --git a/ospfd/ospfd.h b/ospfd/ospfd.h index 4f1b1493a6..268e4d6f8d 100644 --- a/ospfd/ospfd.h +++ b/ospfd/ospfd.h @@ -337,7 +337,7 @@ struct ospf {  	struct list *external[ZEBRA_ROUTE_MAX + 1];  #define EXTERNAL_INFO(E) (E->external_info) -	/* Gracefull restart Helper supported configs*/ +	/* Graceful restart Helper supported configs*/  	/* Supported grace interval*/  	uint32_t supported_grace_time; diff --git a/pathd/path_pcep_controller.h b/pathd/path_pcep_controller.h index de113feee9..bc7ed4910a 100644 --- a/pathd/path_pcep_controller.h +++ b/pathd/path_pcep_controller.h @@ -132,7 +132,7 @@ struct pcep_pcc_info *pcep_ctrl_get_pcc_info(struct frr_pthread *fpt,  /* Asynchronously send a report. The caller is giving away the path structure,   * it shouldn't be allocated on the stack. If `pcc_id` is `0` the report is - * sent by all PCCs.  The parameter is_stable is used to hint wether the status + * sent by all PCCs.  The parameter is_stable is used to hint whether the status   * will soon change, this is used to ensure all report updates are sent even   * when missing status update events */  int pcep_ctrl_send_report(struct frr_pthread *fpt, int pcc_id, diff --git a/pathd/path_ted.c b/pathd/path_ted.c index 3440b93399..270c664daf 100644 --- a/pathd/path_ted.c +++ b/pathd/path_ted.c @@ -15,10 +15,10 @@   * along with this program.  If not, see <https://www.gnu.org/licenses/>.   
*/ -#include "stdlib.h" -  #include <zebra.h> +#include <stdlib.h> +  #include "memory.h"  #include "log.h"  #include "command.h" @@ -66,7 +66,7 @@ uint32_t path_ted_teardown(void)  	PATH_TED_DEBUG("%s : TED [%p]", __func__, ted_state_g.ted);  	path_ted_unregister_vty();  	path_ted_stop_importing_igp(); -	ls_ted_del_all(ted_state_g.ted); +	ls_ted_del_all(&ted_state_g.ted);  	path_ted_timer_sync_cancel();  	path_ted_timer_refresh_cancel();  	return 0; @@ -353,7 +353,7 @@ DEFPY (debug_path_ted,  }  /* - * Followings are vty command functions. + * Following are vty command functions.   */  /* clang-format off */  DEFUN (path_ted_on, @@ -391,7 +391,7 @@ DEFUN (no_path_ted,  	}  	/* Remove TED */ -	ls_ted_del_all(ted_state_g.ted); +	ls_ted_del_all(&ted_state_g.ted);  	ted_state_g.enabled = false;  	PATH_TED_DEBUG("%s: PATHD-TED: ON -> OFF", __func__);  	ted_state_g.import = IMPORT_UNKNOWN; diff --git a/pbrd/pbr_zebra.c b/pbrd/pbr_zebra.c index da4e3e1bc0..4506dc1af1 100644 --- a/pbrd/pbr_zebra.c +++ b/pbrd/pbr_zebra.c @@ -64,7 +64,7 @@ void pbr_if_del(struct interface *ifp)  	XFREE(MTYPE_PBR_INTERFACE, ifp->info);  } -/* Inteface addition message from zebra. */ +/* Interface addition message from zebra. */  int pbr_ifp_create(struct interface *ifp)  {  	DEBUGD(&pbr_dbg_zebra, "%s: %s", __func__, ifp->name); diff --git a/pceplib/pcep_msg_objects.h b/pceplib/pcep_msg_objects.h index 270db4aa8d..6d7d3be7e6 100644 --- a/pceplib/pcep_msg_objects.h +++ b/pceplib/pcep_msg_objects.h @@ -542,7 +542,7 @@ struct pcep_ro_subobj_asn {  	uint16_t asn; /* Autonomous system number */  }; -/* The SR ERO and SR RRO subojbects are the same, except +/* The SR ERO and SR RRO subobjects are the same, except   * the SR-RRO does not have the L flag in the Type field.   * Defined in draft-ietf-pce-segment-routing-16 */  enum pcep_sr_subobj_nai { diff --git a/pimd/pim6_cmd.c b/pimd/pim6_cmd.c index 7b3e04fdc0..bc1d478af0 100644 --- a/pimd/pim6_cmd.c +++ b/pimd/pim6_cmd.c @@ -33,11 +33,14 @@  #include "pimd.h"  #include "pim6_cmd.h" +#include "pim_cmd_common.h"  #include "pim_vty.h"  #include "lib/northbound_cli.h"  #include "pim_errors.h"  #include "pim_nb.h" -#include "pim_cmd_common.h" +#include "pim_addr.h" +#include "pim_nht.h" +  #ifndef VTYSH_EXTRACT_PL  #include "pimd/pim6_cmd_clippy.c" @@ -167,7 +170,7 @@ DEFPY (ipv6_pim_rp_keep_alive,         "ipv6 pim rp keep-alive-timer (1-65535)$kat",         IPV6_STR         PIM_STR -       "Rendevous Point\n" +       "Rendezvous Point\n"         "Keep alive Timer\n"         "Seconds\n")  { @@ -180,7 +183,7 @@ DEFPY (no_ipv6_pim_rp_keep_alive,         NO_STR         IPV6_STR         PIM_STR -       "Rendevous Point\n" +       "Rendezvous Point\n"         "Keep alive Timer\n"         IGNORED_IN_NO_STR)  { @@ -448,6 +451,33 @@ DEFPY (no_ipv6_pim_rp_prefix_list,  	return pim_process_no_rp_plist_cmd(vty, rp_str, plist);  } + +DEFPY (ipv6_ssmpingd, +      ipv6_ssmpingd_cmd, +      "ipv6 ssmpingd [X:X::X:X]$source", +      IPV6_STR +      CONF_SSMPINGD_STR +      "Source address\n") +{ +	const char *src_str = (source_str) ? source_str : "::"; + +	return pim_process_ssmpingd_cmd(vty, NB_OP_CREATE, src_str); +} + + +DEFPY (no_ipv6_ssmpingd, +      no_ipv6_ssmpingd_cmd, +      "no ipv6 ssmpingd [X:X::X:X]$source", +      NO_STR +      IPV6_STR +      CONF_SSMPINGD_STR +      "Source address\n") +{ +	const char *src_str = (source_str) ? 
source_str : "::"; + +	return pim_process_ssmpingd_cmd(vty, NB_OP_DESTROY, src_str); +} +  DEFPY (interface_ipv6_mld_join,         interface_ipv6_mld_join_cmd,         "ipv6 mld join X:X::X:X$group [X:X::X:X$source]", @@ -505,121 +535,1296 @@ DEFPY (interface_no_ipv6_mld_join,  DEFPY (interface_ipv6_mld,         interface_ipv6_mld_cmd, -       "ipv6 mld", +       "[no] ipv6 mld", +       NO_STR         IPV6_STR         IFACE_MLD_STR)  { -	nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true"); +	const char *value = no ? "false" : "true"; +	nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, value);  	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,  				    "frr-routing:ipv6");  } -DEFPY (interface_no_ipv6_mld, -       interface_no_ipv6_mld_cmd, -       "no ipv6 mld", +DEFPY (interface_ipv6_mld_version, +       interface_ipv6_mld_version_cmd, +       "[no] ipv6 mld version ![(1-2)$version]",         NO_STR         IPV6_STR -       IFACE_MLD_STR) +       IFACE_MLD_STR +       "MLD version\n" +       "MLD version number\n")  { -	const struct lyd_node *pim_enable_dnode; -	char pim_if_xpath[XPATH_MAXLEN + 64]; - -	snprintf(pim_if_xpath, sizeof(pim_if_xpath), -		 "%s/frr-pim:pim/address-family[address-family='%s']", -		 VTY_CURR_XPATH, "frr-routing:ipv6"); - -	pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode, -					   FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH, -					   "frr-routing:ipv6"); -	if (!pim_enable_dnode) { -		nb_cli_enqueue_change(vty, pim_if_xpath, NB_OP_DESTROY, NULL); -		nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL); -	} else { -		if (!yang_dnode_get_bool(pim_enable_dnode, ".")) { -			nb_cli_enqueue_change(vty, pim_if_xpath, NB_OP_DESTROY, -					      NULL); -			nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL); -		} else -			nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, -					      "false"); -	} +	if (no) +		nb_cli_enqueue_change(vty, "./mld-version", NB_OP_DESTROY, +				      NULL); +	else +		nb_cli_enqueue_change(vty, "./mld-version", NB_OP_MODIFY, +				      version_str);  	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,  				    "frr-routing:ipv6");  } -DEFPY (interface_ipv6_mld_version, -       interface_ipv6_mld_version_cmd, -       "ipv6 mld version (1-2)$version", +DEFPY (interface_ipv6_mld_query_interval, +       interface_ipv6_mld_query_interval_cmd, +       "[no] ipv6 mld query-interval ![(1-65535)$q_interval]", +       NO_STR         IPV6_STR         IFACE_MLD_STR -       "MLD version\n" -       "MLD version number\n") +       IFACE_MLD_QUERY_INTERVAL_STR +       "Query interval in seconds\n")  { -	nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true"); -	nb_cli_enqueue_change(vty, "./mld-version", NB_OP_MODIFY, version_str); +	if (no) +		nb_cli_enqueue_change(vty, "./query-interval", NB_OP_DESTROY, +				      NULL); +	else +		nb_cli_enqueue_change(vty, "./query-interval", NB_OP_MODIFY, +				      q_interval_str);  	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,  				    "frr-routing:ipv6");  } -DEFPY (interface_no_ipv6_mld_version, -       interface_no_ipv6_mld_version_cmd, -       "no ipv6 mld version [(1-2)]", +DEFPY (ipv6_mld_group_watermark, +       ipv6_mld_group_watermark_cmd, +       "ipv6 mld watermark-warn (1-65535)$limit", +       IPV6_STR +       MLD_STR +       "Configure group limit for watermark warning\n" +       "Group count to generate watermark warning\n") +{ +	PIM_DECLVAR_CONTEXT_VRF(vrf, pim); + +	/* TBD Depends on MLD data structure changes */ +	(void)pim; + +	return 
CMD_SUCCESS; +} + +DEFPY (no_ipv6_mld_group_watermark, +       no_ipv6_mld_group_watermark_cmd, +       "no ipv6 mld watermark-warn [(1-65535)$limit]", +       NO_STR +       IPV6_STR +       MLD_STR +       "Unconfigure group limit for watermark warning\n" +       IGNORED_IN_NO_STR) +{ +	PIM_DECLVAR_CONTEXT_VRF(vrf, pim); + +	/* TBD Depends on MLD data structure changes */ +	(void)pim; + +	return CMD_SUCCESS; +} + +DEFPY (interface_ipv6_mld_query_max_response_time, +       interface_ipv6_mld_query_max_response_time_cmd, +       "[no] ipv6 mld query-max-response-time ![(1-65535)$qmrt]",         NO_STR         IPV6_STR         IFACE_MLD_STR -       "MLD version\n" -       "MLD version number\n") +       IFACE_MLD_QUERY_MAX_RESPONSE_TIME_STR +       "Query response value in milliseconds\n")  { -	nb_cli_enqueue_change(vty, "./mld-version", NB_OP_DESTROY, NULL); +	if (no) +		return gm_process_no_query_max_response_time_cmd(vty); +	return gm_process_query_max_response_time_cmd(vty, qmrt_str); +} -	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, -				    "frr-routing:ipv6"); +DEFPY (interface_ipv6_mld_robustness, +       interface_ipv6_mld_robustness_cmd, +       "[no] ipv6 mld robustness ![(1-7)]", +       NO_STR +       IPV6_STR +       IFACE_MLD_STR +       "MLD Robustness variable\n" +       "MLD Robustness variable\n") +{ +	if (no) +		return gm_process_no_last_member_query_count_cmd(vty); +	return gm_process_last_member_query_count_cmd(vty, robustness_str);  } -DEFPY (interface_ipv6_mld_query_interval, -       interface_ipv6_mld_query_interval_cmd, -       "ipv6 mld query-interval (1-65535)$q_interval", +DEFPY (interface_ipv6_mld_last_member_query_interval, +       interface_ipv6_mld_last_member_query_interval_cmd, +       "[no] ipv6 mld last-member-query-interval ![(1-65535)$lmqi]", +       NO_STR         IPV6_STR         IFACE_MLD_STR -       IFACE_MLD_QUERY_INTERVAL_STR -       "Query interval in seconds\n") +       IFACE_MLD_LAST_MEMBER_QUERY_INTERVAL_STR +       "Last member query interval in milliseconds\n")  { -	const struct lyd_node *pim_enable_dnode; +	if (no) +		return gm_process_no_last_member_query_interval_cmd(vty); +	return gm_process_last_member_query_interval_cmd(vty, lmqi_str); +} -	pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode, -					   FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH, -					   "frr-routing:ipv6"); -	if (!pim_enable_dnode) { -		nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true"); -	} else { -		if (!yang_dnode_get_bool(pim_enable_dnode, ".")) -			nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, -					      "true"); +DEFPY (show_ipv6_pim_rp, +       show_ipv6_pim_rp_cmd, +       "show ipv6 pim [vrf NAME] rp-info [X:X::X:X/M$group] [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM RP information\n" +       "Multicast Group range\n" +       JSON_STR) +{ +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; +	struct prefix *range = NULL; + +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING;  	} -	nb_cli_enqueue_change(vty, "./query-interval", NB_OP_MODIFY, -			      q_interval_str); +	if (group_str) { +		range = prefix_new(); +		prefix_copy(range, group); +		apply_mask(range); +	} -	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, -				    "frr-routing:ipv6"); +	if (json) +		json_parent = json_object_new_object(); + +	pim_rp_show_information(pim, range, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent); + +	prefix_free(&range); + +	return CMD_SUCCESS;  } -DEFPY (interface_no_ipv6_mld_query_interval, -      interface_no_ipv6_mld_query_interval_cmd, -      "no ipv6 mld query-interval [(1-65535)]", -      NO_STR -      IPV6_STR -      IFACE_MLD_STR -      IFACE_MLD_QUERY_INTERVAL_STR -      IGNORED_IN_NO_STR) +DEFPY (show_ipv6_pim_rp_vrf_all, +       show_ipv6_pim_rp_vrf_all_cmd, +       "show ipv6 pim vrf all rp-info [X:X::X:X/M$group] [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM RP information\n" +       "Multicast Group range\n" +       JSON_STR)  { -	nb_cli_enqueue_change(vty, "./query-interval", NB_OP_DESTROY, NULL); +	struct vrf *vrf; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; +	struct prefix *range = NULL; + +	if (group_str) { +		range = prefix_new(); +		prefix_copy(range, group); +		apply_mask(range); +	} -	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, -				    "frr-routing:ipv6"); +	if (json) +		json_parent = json_object_new_object(); + +	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", vrf->name); +		else +			json_vrf = json_object_new_object(); +		pim_rp_show_information(vrf->info, range, vty, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf); +	} +	if (json) +		vty_json(vty, json_parent); + +	prefix_free(&range); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_rpf, +       show_ipv6_pim_rpf_cmd, +       "show ipv6 pim [vrf NAME] rpf [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM cached source rpf information\n" +       JSON_STR) +{ +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; + +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	if (json) +		json_parent = json_object_new_object(); + +	pim_show_rpf(pim, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_rpf_vrf_all, +       show_ipv6_pim_rpf_vrf_all_cmd, +       "show ipv6 pim vrf all rpf [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM cached source rpf information\n" +       JSON_STR) +{ +	struct vrf *vrf; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); + +	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", vrf->name); +		else +			json_vrf = json_object_new_object(); +		pim_show_rpf(vrf->info, vty, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf); +	} +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_secondary, +       show_ipv6_pim_secondary_cmd, +       "show ipv6 pim [vrf NAME] secondary", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM neighbor addresses\n") +{ +	struct pim_instance *pim; +	struct vrf *v; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	pim_show_neighbors_secondary(pim, vty); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_statistics, +       show_ipv6_pim_statistics_cmd, +       "show ipv6 pim [vrf NAME] statistics [interface WORD$word] [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM statistics\n" +       INTERFACE_STR +       "PIM interface\n" +       JSON_STR) +{ +	struct pim_instance *pim; +	struct vrf *v; +	bool uj = !!json; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	if (word) +		pim_show_statistics(pim, vty, word, uj); +	else +		pim_show_statistics(pim, vty, NULL, uj); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_upstream, +       show_ipv6_pim_upstream_cmd, +       "show ipv6 pim [vrf NAME] upstream [X:X::X:X$s_or_g [X:X::X:X$g]] [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM upstream information\n" +       "The Source or Group\n" +       "The Group\n" +       JSON_STR) +{ +	pim_sgaddr sg = {0}; +	struct vrf *v; +	bool uj = !!json; +	struct pim_instance *pim; +	json_object *json_parent = NULL; + +	v = vrf_lookup_by_name(vrf ? 
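
Each single-VRF "show ipv6 pim ..." command in this file follows one skeleton: resolve the VRF name (falling back to the default VRF), fetch the pim instance, optionally allocate a JSON root, call a display helper, and hand the JSON tree to vty_json(). A condensed sketch of that skeleton; the "example" keyword and the pim_show_example() helper are placeholders, not part of this patch:

DEFPY (show_ipv6_pim_example,
       show_ipv6_pim_example_cmd,
       "show ipv6 pim [vrf NAME] example [json$json]",
       SHOW_STR
       IPV6_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "Example output\n"
       JSON_STR)
{
	struct pim_instance *pim;
	struct vrf *v;
	json_object *json_parent = NULL;

	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
	if (!v)
		return CMD_WARNING;

	pim = pim_get_pim_instance(v->vrf_id);
	if (!pim) {
		vty_out(vty, "%% Unable to find pim instance\n");
		return CMD_WARNING;
	}

	if (json)
		json_parent = json_object_new_object();

	/* the real commands call pim_show_rpf(), pim_show_upstream(),
	 * pim_rp_show_information(), show_mroute(), ... at this point */
	pim_show_example(pim, vty, json_parent);

	if (json)
		vty_json(vty, json_parent);

	return CMD_SUCCESS;
}
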
vrf : VRF_DEFAULT_NAME); + +	if (!v) { +		vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf); +		return CMD_WARNING; +	} +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	if (uj) +		json_parent = json_object_new_object(); + +	if (!pim_addr_is_any(s_or_g)) { +		if (!pim_addr_is_any(g)) { +			sg.src = s_or_g; +			sg.grp = g; +		} else +			sg.grp = s_or_g; +	} + +	pim_show_upstream(pim, vty, &sg, json_parent); + +	if (uj) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_upstream_vrf_all, +       show_ipv6_pim_upstream_vrf_all_cmd, +       "show ipv6 pim vrf all upstream [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM upstream information\n" +       JSON_STR) +{ +	pim_sgaddr sg = {0}; +	struct vrf *vrf; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); + +	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", vrf->name); +		else +			json_vrf = json_object_new_object(); +		pim_show_upstream(vrf->info, vty, &sg, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf); +	} + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_upstream_join_desired, +       show_ipv6_pim_upstream_join_desired_cmd, +       "show ipv6 pim [vrf NAME] upstream-join-desired [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM upstream join-desired\n" +       JSON_STR) +{ +	struct pim_instance *pim; +	struct vrf *v; +	bool uj = !!json; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	pim_show_join_desired(pim, vty, uj); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_upstream_rpf, +       show_ipv6_pim_upstream_rpf_cmd, +       "show ipv6 pim [vrf NAME] upstream-rpf [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM upstream source rpf\n" +       JSON_STR) +{ +	struct pim_instance *pim; +	struct vrf *v; +	bool uj = !!json; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	pim_show_upstream_rpf(pim, vty, uj); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_state, +       show_ipv6_pim_state_cmd, +       "show ipv6 pim [vrf NAME] state [X:X::X:X$s_or_g [X:X::X:X$g]] [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM state information\n" +       "Unicast or Multicast address\n" +       "Multicast address\n" +       JSON_STR) +{ +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; + +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	if (json) +		json_parent = json_object_new_object(); + +	pim_show_state(pim, vty, s_or_g_str, g_str, json_parent); + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_state_vrf_all, +       show_ipv6_pim_state_vrf_all_cmd, +       "show ipv6 pim vrf all state [X:X::X:X$s_or_g [X:X::X:X$g]] [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM state information\n" +       "Unicast or Multicast address\n" +       "Multicast address\n" +       JSON_STR) +{ +	struct vrf *vrf; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); + +	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", vrf->name); +		else +			json_vrf = json_object_new_object(); +		pim_show_state(vrf->info, vty, s_or_g_str, g_str, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf); +	} +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_channel, +       show_ipv6_pim_channel_cmd, +       "show ipv6 pim [vrf NAME] channel [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM downstream channel info\n" +       JSON_STR) +{ +	struct vrf *v; +	bool uj = !!json; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim_show_channel(v->info, vty, uj); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_interface, +       show_ipv6_pim_interface_cmd, +       "show ipv6 pim [vrf NAME] interface [detail|WORD]$interface [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM interface information\n" +       "Detailed output\n" +       "interface name\n" +       JSON_STR) +{ +	struct vrf *v; +	bool uj = !!json; +	json_object *json_parent = NULL; + +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	if (uj) +		json_parent = json_object_new_object(); + +	if (interface) +		pim_show_interfaces_single(v->info, vty, interface, false, +					   json_parent); +	else +		pim_show_interfaces(v->info, vty, false, json_parent); + +	if (uj) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_interface_vrf_all, +       show_ipv6_pim_interface_vrf_all_cmd, +       "show ipv6 pim vrf all interface [detail|WORD]$interface [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM interface information\n" +       "Detailed output\n" +       "interface name\n" +       JSON_STR) +{ +	bool uj = !!json; +	struct vrf *v; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (uj) +		json_parent = json_object_new_object(); + +	RB_FOREACH (v, vrf_name_head, &vrfs_by_name) { +		if (!uj) +			vty_out(vty, "VRF: %s\n", v->name); +		else +			json_vrf = json_object_new_object(); + +		if (interface) +			pim_show_interfaces_single(v->info, vty, interface, +						   false, json_vrf); +		else +			pim_show_interfaces(v->info, vty, false, json_vrf); + +		if (uj) +			json_object_object_add(json_parent, v->name, json_vrf); +	} +	if (uj) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_join, +       show_ipv6_pim_join_cmd, +       "show ipv6 pim [vrf NAME] join [X:X::X:X$s_or_g [X:X::X:X$g]] [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM interface join information\n" +       "The Source or Group\n" +       "The Group\n" +       JSON_STR) +{ +	pim_sgaddr sg = {}; +	struct vrf *v; +	struct pim_instance *pim; +	json_object *json_parent = NULL; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) { +		vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf); +		return CMD_WARNING; +	} +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	if (!pim_addr_is_any(s_or_g)) { +		if (!pim_addr_is_any(g)) { +			sg.src = s_or_g; +			sg.grp = g; +		} else +			sg.grp = s_or_g; +	} + +	if (json) +		json_parent = json_object_new_object(); + +	pim_show_join(pim, vty, &sg, json_parent); + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_join_vrf_all, +       show_ipv6_pim_join_vrf_all_cmd, +       "show ipv6 pim vrf all join [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM interface join information\n" +       JSON_STR) +{ +	pim_sgaddr sg = {0}; +	struct vrf *vrf_struct; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); + +	RB_FOREACH (vrf_struct, vrf_name_head, &vrfs_by_name) { +		if (!json_parent) +			vty_out(vty, "VRF: %s\n", vrf_struct->name); +		else +			json_vrf = json_object_new_object(); +		pim_show_join(vrf_struct->info, vty, &sg, json_vrf); + +		if (json) +			json_object_object_add(json_parent, vrf_struct->name, +					       json_vrf); +	} +	if (json) +		vty_json(vty, json_parent); + +	return CMD_WARNING; +} + +DEFPY (show_ipv6_pim_jp_agg, +       show_ipv6_pim_jp_agg_cmd, +       "show ipv6 pim [vrf NAME] jp-agg", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "join prune aggregation list\n") +{ +	struct vrf *v; +	struct pim_instance *pim; + +	v = vrf_lookup_by_name(vrf ? 
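
The "vrf all" variants differ only in iterating every VRF: plain output prints a per-VRF header line, while JSON output builds one child object per VRF and attaches it to a shared parent keyed by VRF name. The same hypothetical pim_show_example() helper from the sketch above, in its all-VRF form:

DEFPY (show_ipv6_pim_example_vrf_all,
       show_ipv6_pim_example_vrf_all_cmd,
       "show ipv6 pim vrf all example [json$json]",
       SHOW_STR
       IPV6_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "Example output\n"
       JSON_STR)
{
	struct vrf *vrf;
	json_object *json_parent = NULL;
	json_object *json_vrf = NULL;

	if (json)
		json_parent = json_object_new_object();

	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
		if (!json)
			vty_out(vty, "VRF: %s\n", vrf->name);
		else
			json_vrf = json_object_new_object();

		pim_show_example(vrf->info, vty, json_vrf);

		if (json)
			json_object_object_add(json_parent, vrf->name,
					       json_vrf);
	}
	if (json)
		vty_json(vty, json_parent);

	return CMD_SUCCESS;
}
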
vrf : VRF_DEFAULT_NAME); + +	if (!v) { +		vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf); +		return CMD_WARNING; +	} +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	pim_show_jp_agg_list(pim, vty); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_local_membership, +       show_ipv6_pim_local_membership_cmd, +       "show ipv6 pim [vrf NAME] local-membership [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM interface local-membership\n" +       JSON_STR) +{ +	struct vrf *v; +	bool uj = !!json; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim_show_membership(v->info, vty, uj); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_neighbor, +       show_ipv6_pim_neighbor_cmd, +       "show ipv6 pim [vrf NAME] neighbor [detail|WORD]$interface [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM neighbor information\n" +       "Detailed output\n" +       "Name of interface or neighbor\n" +       JSON_STR) +{ +	struct vrf *v; +	json_object *json_parent = NULL; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	if (json) +		json_parent = json_object_new_object(); + +	if (interface) +		pim_show_neighbors_single(v->info, vty, interface, json_parent); +	else +		pim_show_neighbors(v->info, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_neighbor_vrf_all, +       show_ipv6_pim_neighbor_vrf_all_cmd, +       "show ipv6 pim vrf all neighbor [detail|WORD]$interface [json$json]", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM neighbor information\n" +       "Detailed output\n" +       "Name of interface or neighbor\n" +       JSON_STR) +{ +	struct vrf *v; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); +	RB_FOREACH (v, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", v->name); +		else +			json_vrf = json_object_new_object(); + +		if (interface) +			pim_show_neighbors_single(v->info, vty, interface, +						  json_vrf); +		else +			pim_show_neighbors(v->info, vty, json_vrf); + +		if (json) +			json_object_object_add(json_parent, v->name, json_vrf); +	} +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_nexthop, +       show_ipv6_pim_nexthop_cmd, +       "show ipv6 pim [vrf NAME] nexthop", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM cached nexthop rpf information\n") +{ +	struct vrf *v; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim_show_nexthop(v->info, vty); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_pim_nexthop_lookup, +       show_ipv6_pim_nexthop_lookup_cmd, +       "show ipv6 pim [vrf NAME] nexthop-lookup X:X::X:X$source X:X::X:X$group", +       SHOW_STR +       IPV6_STR +       PIM_STR +       VRF_CMD_HELP_STR +       "PIM cached nexthop rpf lookup\n" +       "Source/RP address\n" +       "Multicast Group address\n") +{ +	struct prefix nht_p; +	int result = 0; +	pim_addr vif_source; +	struct prefix grp; +	struct pim_nexthop nexthop; +	struct vrf *v; + +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group)) +		return CMD_SUCCESS; + +	pim_addr_to_prefix(&nht_p, vif_source); +	pim_addr_to_prefix(&grp, group); +	memset(&nexthop, 0, sizeof(nexthop)); + +	result = pim_ecmp_nexthop_lookup(v->info, &nexthop, &nht_p, &grp, 0); + +	if (!result) { +		vty_out(vty, +			"Nexthop Lookup failed, no usable routes returned.\n"); +		return CMD_SUCCESS; +	} + +	vty_out(vty, "Group %s --- Nexthop %pPAs Interface %s\n", group_str, +		&nexthop.mrib_nexthop_addr, nexthop.interface->name); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_multicast, +       show_ipv6_multicast_cmd, +       "show ipv6 multicast [vrf NAME]", +       SHOW_STR +       IPV6_STR +       "Multicast global information\n" +       VRF_CMD_HELP_STR) +{ +	struct vrf *v; +	struct pim_instance *pim; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	pim_cmd_show_ip_multicast_helper(pim, vty); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_multicast_vrf_all, +       show_ipv6_multicast_vrf_all_cmd, +       "show ipv6 multicast vrf all", +       SHOW_STR +       IPV6_STR +       "Multicast global information\n" +       VRF_CMD_HELP_STR) +{ +	struct vrf *vrf; + +	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { +		vty_out(vty, "VRF: %s\n", vrf->name); +		pim_cmd_show_ip_multicast_helper(vrf->info, vty); +	} + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_multicast_count, +       show_ipv6_multicast_count_cmd, +       "show ipv6 multicast count [vrf NAME] [json$json]", +       SHOW_STR +       IPV6_STR +       "Multicast global information\n" +       "Data packet count\n" +       VRF_CMD_HELP_STR +       JSON_STR) +{ +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; + +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	if (json) +		json_parent = json_object_new_object(); + +	show_multicast_interfaces(pim, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_multicast_count_vrf_all, +       show_ipv6_multicast_count_vrf_all_cmd, +       "show ipv6 multicast count vrf all [json$json]", +       SHOW_STR +       IPV6_STR +       "Multicast global information\n" +       "Data packet count\n" +       VRF_CMD_HELP_STR +       JSON_STR) +{ +	struct vrf *vrf; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); + +	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", vrf->name); +		else +			json_vrf = json_object_new_object(); + +		show_multicast_interfaces(vrf->info, vty, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf); +	} +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_mroute, +       show_ipv6_mroute_cmd, +       "show ipv6 mroute [vrf NAME] [X:X::X:X$s_or_g [X:X::X:X$g]] [fill$fill] [json$json]", +       SHOW_STR +       IPV6_STR +       MROUTE_STR +       VRF_CMD_HELP_STR +       "The Source or Group\n" +       "The Group\n" +       "Fill in Assumed data\n" +       JSON_STR) +{ +	pim_sgaddr sg = {0}; +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	if (json) +		json_parent = json_object_new_object(); + +	if (!pim_addr_is_any(s_or_g)) { +		if (!pim_addr_is_any(g)) { +			sg.src = s_or_g; +			sg.grp = g; +		} else +			sg.grp = s_or_g; +	} + +	show_mroute(pim, vty, &sg, !!fill, json_parent); + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_mroute_vrf_all, +       show_ipv6_mroute_vrf_all_cmd, +       "show ipv6 mroute vrf all [fill$fill] [json$json]", +       SHOW_STR +       IPV6_STR +       MROUTE_STR +       VRF_CMD_HELP_STR +       "Fill in Assumed data\n" +       JSON_STR) +{ +	pim_sgaddr sg = {0}; +	struct vrf *vrf; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); + +	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", vrf->name); +		else +			json_vrf = json_object_new_object(); +		show_mroute(vrf->info, vty, &sg, !!fill, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf); +	} +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_mroute_count, +       show_ipv6_mroute_count_cmd, +       "show ipv6 mroute [vrf NAME] count [json$json]", +       SHOW_STR +       IPV6_STR +       MROUTE_STR +       VRF_CMD_HELP_STR +       "Route and packet count data\n" +       JSON_STR) +{ +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; + +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	if (json) +		json_parent = json_object_new_object(); + +	show_mroute_count(pim, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_mroute_count_vrf_all, +       show_ipv6_mroute_count_vrf_all_cmd, +       "show ipv6 mroute vrf all count [json$json]", +       SHOW_STR +       IPV6_STR +       MROUTE_STR +       VRF_CMD_HELP_STR +       "Route and packet count data\n" +       JSON_STR) +{ +	struct vrf *vrf; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); + +	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", vrf->name); +		else +			json_vrf = json_object_new_object(); +		show_mroute_count(vrf->info, vty, json_vrf); + +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf); +	} + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_mroute_summary, +       show_ipv6_mroute_summary_cmd, +       "show ipv6 mroute [vrf NAME] summary [json$json]", +       SHOW_STR +       IPV6_STR +       MROUTE_STR +       VRF_CMD_HELP_STR +       "Summary of all mroutes\n" +       JSON_STR) +{ +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; + +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	if (json) +		json_parent = json_object_new_object(); + +	show_mroute_summary(pim, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS; +} + +DEFPY (show_ipv6_mroute_summary_vrf_all, +       show_ipv6_mroute_summary_vrf_all_cmd, +       "show ipv6 mroute vrf all summary [json$json]", +       SHOW_STR +       IPV6_STR +       MROUTE_STR +       VRF_CMD_HELP_STR +       "Summary of all mroutes\n" +       JSON_STR) +{ +	struct vrf *vrf; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); + +	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", vrf->name); +		else +			json_vrf = json_object_new_object(); + +		show_mroute_summary(vrf->info, vty, json_vrf); + +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf); +	} + +	if (json) +		vty_json(vty, json_parent); + +	return CMD_SUCCESS;  }  void pim_cmd_init(void) @@ -665,13 +1870,59 @@ void pim_cmd_init(void)  	install_element(VRF_NODE, &ipv6_pim_rp_prefix_list_cmd);  	install_element(CONFIG_NODE, &no_ipv6_pim_rp_prefix_list_cmd);  	install_element(VRF_NODE, &no_ipv6_pim_rp_prefix_list_cmd); +	install_element(CONFIG_NODE, &ipv6_ssmpingd_cmd); +	install_element(VRF_NODE, &ipv6_ssmpingd_cmd); +	install_element(CONFIG_NODE, &no_ipv6_ssmpingd_cmd); +	install_element(VRF_NODE, &no_ipv6_ssmpingd_cmd); +  	install_element(INTERFACE_NODE, &interface_ipv6_mld_cmd); -	install_element(INTERFACE_NODE, &interface_no_ipv6_mld_cmd);  	install_element(INTERFACE_NODE, &interface_ipv6_mld_join_cmd);  	install_element(INTERFACE_NODE, &interface_no_ipv6_mld_join_cmd);  	install_element(INTERFACE_NODE, &interface_ipv6_mld_version_cmd); -	install_element(INTERFACE_NODE, 
&interface_no_ipv6_mld_version_cmd);  	install_element(INTERFACE_NODE, &interface_ipv6_mld_query_interval_cmd); +	install_element(CONFIG_NODE, &ipv6_mld_group_watermark_cmd); +	install_element(VRF_NODE, &ipv6_mld_group_watermark_cmd); +	install_element(CONFIG_NODE, &no_ipv6_mld_group_watermark_cmd); +	install_element(VRF_NODE, &no_ipv6_mld_group_watermark_cmd); + +	install_element(INTERFACE_NODE, +			&interface_ipv6_mld_query_max_response_time_cmd); +	install_element(INTERFACE_NODE, +			&interface_ipv6_mld_robustness_cmd);  	install_element(INTERFACE_NODE, -			&interface_no_ipv6_mld_query_interval_cmd); +			&interface_ipv6_mld_last_member_query_interval_cmd); + +	install_element(VIEW_NODE, &show_ipv6_pim_rp_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_rp_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_rpf_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_rpf_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_secondary_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_statistics_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_upstream_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_upstream_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_upstream_join_desired_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_upstream_rpf_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_state_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_state_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_channel_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_interface_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_interface_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_join_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_join_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_jp_agg_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_local_membership_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_neighbor_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_neighbor_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_nexthop_cmd); +	install_element(VIEW_NODE, &show_ipv6_pim_nexthop_lookup_cmd); +	install_element(VIEW_NODE, &show_ipv6_multicast_cmd); +	install_element(VIEW_NODE, &show_ipv6_multicast_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_multicast_count_cmd); +	install_element(VIEW_NODE, &show_ipv6_multicast_count_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_mroute_cmd); +	install_element(VIEW_NODE, &show_ipv6_mroute_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_mroute_count_cmd); +	install_element(VIEW_NODE, &show_ipv6_mroute_count_vrf_all_cmd); +	install_element(VIEW_NODE, &show_ipv6_mroute_summary_cmd); +	install_element(VIEW_NODE, &show_ipv6_mroute_summary_vrf_all_cmd);  } diff --git a/pimd/pim6_cmd.h b/pimd/pim6_cmd.h index ac5eb3f9bf..d6853a7410 100644 --- a/pimd/pim6_cmd.h +++ b/pimd/pim6_cmd.h @@ -43,6 +43,7 @@  #define DEBUG_MLD_EVENTS_STR "MLD protocol events\n"  #define DEBUG_MLD_PACKETS_STR "MLD protocol packets\n"  #define DEBUG_MLD_TRACE_STR "MLD internal daemon activity\n" +#define CONF_SSMPINGD_STR "Enable ssmpingd operation\n"  void pim_cmd_init(void); diff --git a/pimd/pim6_main.c b/pimd/pim6_main.c index ed53924616..09aa72e535 100644 --- a/pimd/pim6_main.c +++ b/pimd/pim6_main.c @@ -119,7 +119,7 @@ static const struct frr_yang_module_info *const pim6d_yang_modules[] = {  /* clang-format off */  FRR_DAEMON_INFO(pim6d, PIM6,  	.vty_port = 0, -	.flags = FRR_NO_SPLIT_CONFIG, +	// .flags = FRR_NO_SPLIT_CONFIG,  	.proghelp = "Protocol Independent Multicast (RFC7761) for IPv6", @@ -133,6 +133,7 @@ 
FRR_DAEMON_INFO(pim6d, PIM6,  );  /* clang-format on */ +extern void gm_cli_init(void);  int main(int argc, char **argv, char **envp)  { @@ -184,6 +185,8 @@ int main(int argc, char **argv, char **envp)  	 */  	pim_iface_init(); +	gm_cli_init(); +  	pim_zebra_init();  #if 0  	pim_bfd_init(); diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c new file mode 100644 index 0000000000..baf211791e --- /dev/null +++ b/pimd/pim6_mld.c @@ -0,0 +1,3011 @@ +/* + * PIMv6 MLD querier + * Copyright (C) 2021-2022  David Lamparter for NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* + * keep pim6_mld.h open when working on this code.  Most data structures are + * commented in the header. + * + * IPv4 support is pre-planned but hasn't been tackled yet.  It is intended + * that this code will replace the old IGMP querier at some point. + */ + +#include <zebra.h> +#include <netinet/ip6.h> + +#include "lib/memory.h" +#include "lib/jhash.h" +#include "lib/prefix.h" +#include "lib/checksum.h" +#include "lib/thread.h" + +#include "pimd/pim6_mld.h" +#include "pimd/pim6_mld_protocol.h" +#include "pimd/pim_memory.h" +#include "pimd/pim_instance.h" +#include "pimd/pim_iface.h" +#include "pimd/pim_util.h" +#include "pimd/pim_tib.h" +#include "pimd/pimd.h" + +#ifndef IPV6_MULTICAST_ALL +#define IPV6_MULTICAST_ALL 29 +#endif + +DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface"); +DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet"); +DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber"); +DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state"); +DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)"); +DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state"); +DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate"); + +static void gm_t_query(struct thread *t); +static void gm_trigger_specific(struct gm_sg *sg); +static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg, +			      struct timeval expire_wait); + +/* shorthand for log messages */ +#define log_ifp(msg)                                                           \ +	"[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name +#define log_pkt_src(msg)                                                       \ +	"[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name,    \ +		&pkt_src->sin6_addr +#define log_sg(sg, msg)                                                        \ +	"[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name,                    \ +		sg->iface->ifp->name, &sg->sgaddr + +/* clang-format off */ +#if PIM_IPV == 6 +static const pim_addr gm_all_hosts = { +	.s6_addr = { +		0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, +	}, +}; +static const pim_addr gm_all_routers = { +	.s6_addr = { +		0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +		
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, +	}, +}; +/* MLDv1 does not allow subscriber tracking due to report suppression + * hence, the source address is replaced with ffff:...:ffff + */ +static const pim_addr gm_dummy_untracked = { +	.s6_addr = { +		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +	}, +}; +#else +/* 224.0.0.1 */ +static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), }; +/* 224.0.0.22 */ +static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), }; +static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, }; +#endif +/* clang-format on */ + +#define IPV6_MULTICAST_SCOPE_LINK 2 + +static inline uint8_t in6_multicast_scope(const pim_addr *addr) +{ +	return addr->s6_addr[1] & 0xf; +} + +static inline bool in6_multicast_nofwd(const pim_addr *addr) +{ +	return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK; +} + +/* + * (S,G) -> subscriber,(S,G) + */ + +static int gm_packet_sg_cmp(const struct gm_packet_sg *a, +			    const struct gm_packet_sg *b) +{ +	const struct gm_packet_state *s_a, *s_b; + +	s_a = gm_packet_sg2state(a); +	s_b = gm_packet_sg2state(b); +	return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr); +} + +DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm, +		    gm_packet_sg_cmp); + +static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg, +					      enum gm_sub_sense sense, +					      struct gm_subscriber *sub) +{ +	struct { +		struct gm_packet_state hdr; +		struct gm_packet_sg item; +	} ref = { +		/* clang-format off */ +		.hdr = { +			.subscriber = sub, +		}, +		.item = { +			.offset = 0, +		}, +		/* clang-format on */ +	}; + +	return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item); +} + +/* + * interface -> (*,G),pending + */ + +static int gm_grp_pending_cmp(const struct gm_grp_pending *a, +			      const struct gm_grp_pending *b) +{ +	return IPV6_ADDR_CMP(&a->grp, &b->grp); +} + +DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm, +		    gm_grp_pending_cmp); + +/* + * interface -> ([S1,S2,...],G),pending + */ + +static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a, +			      const struct gm_gsq_pending *b) +{ +	if (a->s_bit != b->s_bit) +		return numcmp(a->s_bit, b->s_bit); + +	return IPV6_ADDR_CMP(&a->grp, &b->grp); +} + +static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a) +{ +	uint32_t seed = a->s_bit ? 
0x68f0eb5e : 0x156b7f19; + +	return jhash(&a->grp, sizeof(a->grp), seed); +} + +DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp, +	     gm_gsq_pending_hash); + +/* + * interface -> (S,G) + */ + +static int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b) +{ +	return pim_sgaddr_cmp(a->sgaddr, b->sgaddr); +} + +DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp); + +static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp, +				pim_addr src) +{ +	struct gm_sg ref; + +	ref.sgaddr.grp = grp; +	ref.sgaddr.src = src; +	return gm_sgs_find(gm_ifp->sgs, &ref); +} + +static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp, +				pim_addr src) +{ +	struct gm_sg *ret, *prev; + +	ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret)); +	ret->sgaddr.grp = grp; +	ret->sgaddr.src = src; +	ret->iface = gm_ifp; +	prev = gm_sgs_add(gm_ifp->sgs, ret); + +	if (prev) { +		XFREE(MTYPE_GM_SG, ret); +		ret = prev; +	} else { +		monotime(&ret->created); +		gm_packet_sg_subs_init(ret->subs_positive); +		gm_packet_sg_subs_init(ret->subs_negative); +	} +	return ret; +} + +/* + * interface -> packets, sorted by expiry (because add_tail insert order) + */ + +DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm); + +/* + * subscriber -> packets + */ + +DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm); + +/* + * interface -> subscriber + */ + +static int gm_subscriber_cmp(const struct gm_subscriber *a, +			     const struct gm_subscriber *b) +{ +	return IPV6_ADDR_CMP(&a->addr, &b->addr); +} + +static uint32_t gm_subscriber_hash(const struct gm_subscriber *a) +{ +	return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4); +} + +DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp, +	     gm_subscriber_hash); + +static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp, +						   pim_addr addr) +{ +	struct gm_subscriber ref, *ret; + +	ref.addr = addr; +	ret = gm_subscribers_find(gm_ifp->subscribers, &ref); +	if (ret) +		ret->refcount++; +	return ret; +} + +static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp, +					       pim_addr addr) +{ +	struct gm_subscriber ref, *ret; + +	ref.addr = addr; +	ret = gm_subscribers_find(gm_ifp->subscribers, &ref); + +	if (!ret) { +		ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret)); +		ret->iface = gm_ifp; +		ret->addr = addr; +		ret->refcount = 1; +		monotime(&ret->created); +		gm_packets_init(ret->packets); + +		gm_subscribers_add(gm_ifp->subscribers, ret); +	} +	return ret; +} + +static void gm_subscriber_drop(struct gm_subscriber **subp) +{ +	struct gm_subscriber *sub = *subp; +	struct gm_if *gm_ifp; + +	if (!sub) +		return; +	gm_ifp = sub->iface; + +	*subp = NULL; +	sub->refcount--; + +	if (sub->refcount) +		return; + +	gm_subscribers_del(gm_ifp->subscribers, sub); +	XFREE(MTYPE_GM_SUBSCRIBER, sub); +} + +/****************************************************************************/ + +/* bundle query timer values for combined v1/v2 handling */ +struct gm_query_timers { +	unsigned qrv; +	unsigned max_resp_ms; +	unsigned qqic_ms; + +	struct timeval fuzz; +	struct timeval expire_wait; +}; + +static void gm_expiry_calc(struct gm_query_timers *timers) +{ +	unsigned expire = +		(timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms; +	ldiv_t exp_div = ldiv(expire, 1000); + +	timers->expire_wait.tv_sec = exp_div.quot; +	timers->expire_wait.tv_usec = exp_div.rem * 1000; +	timeradd(&timers->expire_wait, &timers->fuzz, &timers->expire_wait); +} + +static void gm_sg_free(struct gm_sg 
*sg) +{ +	/* t_sg_expiry is handled before this is reached */ +	THREAD_OFF(sg->t_sg_query); +	gm_packet_sg_subs_fini(sg->subs_negative); +	gm_packet_sg_subs_fini(sg->subs_positive); +	XFREE(MTYPE_GM_SG, sg); +} + +/* clang-format off */ +static const char *const gm_states[] = { +	[GM_SG_NOINFO]			= "NOINFO", +	[GM_SG_JOIN]			= "JOIN", +	[GM_SG_JOIN_EXPIRING]		= "JOIN_EXPIRING", +	[GM_SG_PRUNE]			= "PRUNE", +	[GM_SG_NOPRUNE]			= "NOPRUNE", +	[GM_SG_NOPRUNE_EXPIRING]	= "NOPRUNE_EXPIRING", +}; +/* clang-format on */ + +CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported"); +/* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is + * joined.  Whether we actually want/need to support this is a separate + * question - it is almost never used.  In fact this is exactly what RFC5790 + * ("lightweight" MLDv2) does:  it removes S,G EXCLUDE support. + */ + +static void gm_sg_update(struct gm_sg *sg, bool has_expired) +{ +	struct gm_if *gm_ifp = sg->iface; +	enum gm_sg_state prev, desired; +	bool new_join; +	struct gm_sg *grp = NULL; + +	if (!pim_addr_is_any(sg->sgaddr.src)) +		grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY); +	else +		assert(sg->state != GM_SG_PRUNE); + +	if (gm_packet_sg_subs_count(sg->subs_positive)) { +		desired = GM_SG_JOIN; +		assert(!sg->t_sg_expire); +	} else if ((sg->state == GM_SG_JOIN || +		    sg->state == GM_SG_JOIN_EXPIRING) && +		   !has_expired) +		desired = GM_SG_JOIN_EXPIRING; +	else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive)) +		desired = GM_SG_NOINFO; +	else if (gm_packet_sg_subs_count(grp->subs_positive) == +		 gm_packet_sg_subs_count(sg->subs_negative)) { +		if ((sg->state == GM_SG_NOPRUNE || +		     sg->state == GM_SG_NOPRUNE_EXPIRING) && +		    !has_expired) +			desired = GM_SG_NOPRUNE_EXPIRING; +		else +			desired = GM_SG_PRUNE; +	} else if (gm_packet_sg_subs_count(sg->subs_negative)) +		desired = GM_SG_NOPRUNE; +	else +		desired = GM_SG_NOINFO; + +	if (desired != sg->state && !gm_ifp->stopping) { +		if (PIM_DEBUG_IGMP_EVENTS) +			zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state], +				   gm_states[desired]); + +		if (desired == GM_SG_JOIN_EXPIRING || +		    desired == GM_SG_NOPRUNE_EXPIRING) { +			struct gm_query_timers timers; + +			timers.qrv = gm_ifp->cur_qrv; +			timers.max_resp_ms = gm_ifp->cur_max_resp; +			timers.qqic_ms = gm_ifp->cur_query_intv_trig; +			timers.fuzz = gm_ifp->cfg_timing_fuzz; + +			gm_expiry_calc(&timers); +			gm_sg_timer_start(gm_ifp, sg, timers.expire_wait); + +			THREAD_OFF(sg->t_sg_query); +			sg->n_query = gm_ifp->cur_qrv; +			sg->query_sbit = false; +			gm_trigger_specific(sg); +		} +	} +	prev = sg->state; +	sg->state = desired; + +	if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping) +		new_join = false; +	else +		new_join = gm_sg_state_want_join(desired); + +	if (new_join && !sg->tib_joined) { +		/* this will retry if join previously failed */ +		sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr, +						gm_ifp->ifp, &sg->oil); +		if (!sg->tib_joined) +			zlog_warn( +				"MLD join for %pSG%%%s not propagated into TIB", +				&sg->sgaddr, gm_ifp->ifp->name); +		else +			zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr, +				  gm_ifp->ifp->name); + +	} else if (sg->tib_joined && !new_join) { +		tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil); + +		sg->oil = NULL; +		sg->tib_joined = false; +	} + +	if (desired == GM_SG_NOINFO) { +		assertf((!sg->t_sg_expire && +			 !gm_packet_sg_subs_count(sg->subs_positive) && +			 
!gm_packet_sg_subs_count(sg->subs_negative)), +			"%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p", +			&sg->sgaddr, gm_ifp->ifp->name, has_expired, +			sg->t_sg_expire, gm_states[prev], gm_states[desired], +			gm_packet_sg_subs_count(sg->subs_positive), +			gm_packet_sg_subs_count(sg->subs_negative), grp); + +		if (PIM_DEBUG_IGMP_TRACE) +			zlog_debug(log_sg(sg, "dropping")); + +		gm_sgs_del(gm_ifp->sgs, sg); +		gm_sg_free(sg); +	} +} + +/****************************************************************************/ + +/* the following bunch of functions deals with transferring state from + * received packets into gm_packet_state.  As a reminder, the querier is + * structured to keep all items received in one packet together, since they + * will share expiry timers and thus allows efficient handling. + */ + +static void gm_packet_free(struct gm_packet_state *pkt) +{ +	gm_packet_expires_del(pkt->iface->expires, pkt); +	gm_packets_del(pkt->subscriber->packets, pkt); +	gm_subscriber_drop(&pkt->subscriber); +	XFREE(MTYPE_GM_STATE, pkt); +} + +static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt, +					       struct gm_sg *sg, bool is_excl, +					       bool is_src) +{ +	struct gm_packet_sg *item; + +	assert(pkt->n_active < pkt->n_sg); + +	item = &pkt->items[pkt->n_active]; +	item->sg = sg; +	item->is_excl = is_excl; +	item->is_src = is_src; +	item->offset = pkt->n_active; + +	pkt->n_active++; +	return item; +} + +static bool gm_packet_sg_drop(struct gm_packet_sg *item) +{ +	struct gm_packet_state *pkt; +	size_t i; + +	assert(item->sg); + +	pkt = gm_packet_sg2state(item); +	if (item->sg->most_recent == item) +		item->sg->most_recent = NULL; + +	for (i = 0; i < item->n_exclude; i++) { +		struct gm_packet_sg *excl_item; + +		excl_item = item + 1 + i; +		if (!excl_item->sg) +			continue; + +		gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item); +		excl_item->sg = NULL; +		pkt->n_active--; + +		assert(pkt->n_active > 0); +	} + +	if (item->is_excl && item->is_src) +		gm_packet_sg_subs_del(item->sg->subs_negative, item); +	else +		gm_packet_sg_subs_del(item->sg->subs_positive, item); +	item->sg = NULL; +	pkt->n_active--; + +	if (!pkt->n_active) { +		gm_packet_free(pkt); +		return true; +	} +	return false; +} + +static void gm_packet_drop(struct gm_packet_state *pkt, bool trace) +{ +	for (size_t i = 0; i < pkt->n_sg; i++) { +		struct gm_sg *sg = pkt->items[i].sg; +		bool deleted; + +		if (!sg) +			continue; + +		if (trace && PIM_DEBUG_IGMP_TRACE) +			zlog_debug(log_sg(sg, "general-dropping from %pPA"), +				   &pkt->subscriber->addr); +		deleted = gm_packet_sg_drop(&pkt->items[i]); + +		gm_sg_update(sg, true); +		if (deleted) +			break; +	} +} + +static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp, +					struct gm_subscriber *subscriber, +					pim_addr grp, pim_addr *srcs, +					size_t n_src, enum gm_sub_sense sense) +{ +	struct gm_sg *sg; +	struct gm_packet_sg *old_src; +	size_t i; + +	for (i = 0; i < n_src; i++) { +		sg = gm_sg_find(gm_ifp, grp, srcs[i]); +		if (!sg) +			continue; + +		old_src = gm_packet_sg_find(sg, sense, subscriber); +		if (!old_src) +			continue; + +		gm_packet_sg_drop(old_src); +		gm_sg_update(sg, false); +	} +} + +static void gm_sg_expiry_cancel(struct gm_sg *sg) +{ +	if (sg->t_sg_expire && PIM_DEBUG_IGMP_TRACE) +		zlog_debug(log_sg(sg, "alive, cancelling expiry timer")); +	THREAD_OFF(sg->t_sg_expire); +	sg->query_sbit = true; +} + +/* first pass: process all changes resulting in removal of state: + *  - {TO,IS}_INCLUDE 
removes *,G EXCLUDE state (and S,G) + *  - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state + *  - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state + *  - {TO,IS}_EXCLUDE,   if *,G in INCLUDE removes S,G state + * note *replacing* state is NOT considered *removing* state here + * + * everything else is thrown into pkt for creation of state in pass 2 + */ +static void gm_handle_v2_pass1(struct gm_packet_state *pkt, +			       struct mld_v2_rec_hdr *rechdr) +{ +	/* NB: pkt->subscriber can be NULL here if the subscriber was not +	 * previously seen! +	 */ +	struct gm_subscriber *subscriber = pkt->subscriber; +	struct gm_sg *grp; +	struct gm_packet_sg *old_grp = NULL; +	struct gm_packet_sg *item; +	size_t n_src = ntohs(rechdr->n_src); +	size_t j; +	bool is_excl = false; + +	grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY); +	if (grp && subscriber) +		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber); + +	assert(old_grp == NULL || old_grp->is_excl); + +	switch (rechdr->type) { +	case MLD_RECTYPE_IS_EXCLUDE: +	case MLD_RECTYPE_CHANGE_TO_EXCLUDE: +		/* this always replaces or creates state */ +		is_excl = true; +		if (!grp) +			grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY); + +		item = gm_packet_sg_setup(pkt, grp, is_excl, false); +		item->n_exclude = n_src; + +		/* [EXCL_INCL_SG_NOTE] referenced below +		 * +		 * in theory, we should drop any S,G that the host may have +		 * previously added in INCLUDE mode.  In practice, this is both +		 * incredibly rare and entirely irrelevant.  It only makes any +		 * difference if an S,G that the host previously had on the +		 * INCLUDE list is now on the blocked list for EXCLUDE, which +		 * we can cover in processing the S,G list in pass2_excl(). +		 * +		 * Other S,G from the host are simply left to expire +		 * "naturally" through general expiry. +		 */ +		break; + +	case MLD_RECTYPE_IS_INCLUDE: +	case MLD_RECTYPE_CHANGE_TO_INCLUDE: +		if (old_grp) { +			/* INCLUDE has no *,G state, so old_grp here refers to +			 * previous EXCLUDE => delete it +			 */ +			gm_packet_sg_drop(old_grp); +			gm_sg_update(grp, false); +			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here"); +		} +		break; + +	case MLD_RECTYPE_ALLOW_NEW_SOURCES: +		if (old_grp) { +			/* remove S,Gs from EXCLUDE, and then we're done */ +			gm_packet_sg_remove_sources(pkt->iface, subscriber, +						    rechdr->grp, rechdr->srcs, +						    n_src, GM_SUB_NEG); +			return; +		} +		/* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally +		 * idential to IS_INCLUDE (because the list of sources in +		 * IS_INCLUDE is not exhaustive) +		 */ +		break; + +	case MLD_RECTYPE_BLOCK_OLD_SOURCES: +		if (old_grp) { +			/* this is intentionally not implemented because it +			 * would be complicated as hell.  we only take the list +			 * of blocked sources from full group state records +			 */ +			return; +		} + +		if (subscriber) +			gm_packet_sg_remove_sources(pkt->iface, subscriber, +						    rechdr->grp, rechdr->srcs, +						    n_src, GM_SUB_POS); +		return; +	} + +	for (j = 0; j < n_src; j++) { +		struct gm_sg *sg; + +		sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]); +		if (!sg) +			sg = gm_sg_make(pkt->iface, rechdr->grp, +					rechdr->srcs[j]); + +		gm_packet_sg_setup(pkt, sg, is_excl, true); +	} +} + +/* second pass: creating/updating/refreshing state.  All the items from the + * received packet have already been thrown into gm_packet_state. 
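
Received MLDv2 records are handled in two passes over the staged gm_packet_state: the first pass above only removes state or short-circuits (ALLOW_NEW_SOURCES and BLOCK_OLD_SOURCES may return early), queueing everything else as gm_packet_sg items, and the second pass below installs or refreshes that state and calls gm_sg_update() so the TIB join/prune follows. A standalone, simplified restatement of the pass-1 decisions, intended purely as a reading aid (enum names shortened; not part of this patch):

#include <stdbool.h>
#include <stdio.h>

enum rectype { IS_INCL, IS_EXCL, TO_INCL, TO_EXCL, ALLOW_NEW, BLOCK_OLD };

static const char *pass1_action(enum rectype t, bool have_excl_grp)
{
	switch (t) {
	case IS_EXCL:
	case TO_EXCL:
		return "create/replace *,G EXCLUDE; sources staged for pass2_excl";
	case IS_INCL:
	case TO_INCL:
		return have_excl_grp
			       ? "drop old *,G EXCLUDE; sources staged for pass2_incl"
			       : "sources staged for pass2_incl";
	case ALLOW_NEW:
		return have_excl_grp
			       ? "remove listed S,G from the EXCLUDE set; done"
			       : "same as IS_INCLUDE; sources staged for pass2_incl";
	case BLOCK_OLD:
		return have_excl_grp
			       ? "ignored (blocked sources only come from full records)"
			       : "drop listed S,G INCLUDE subscriptions; done";
	}
	return "?";
}

int main(void)
{
	printf("TO_INCLUDE with prior *,G EXCLUDE: %s\n",
	       pass1_action(TO_INCL, true));
	return 0;
}
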
+ */ + +static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i) +{ +	struct gm_packet_sg *item = &pkt->items[i]; +	struct gm_packet_sg *old = NULL; +	struct gm_sg *sg = item->sg; + +	/* EXCLUDE state was already dropped in pass1 */ +	assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber)); + +	old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber); +	if (old) +		gm_packet_sg_drop(old); + +	pkt->n_active++; +	gm_packet_sg_subs_add(sg->subs_positive, item); + +	sg->most_recent = item; +	gm_sg_expiry_cancel(sg); +	gm_sg_update(sg, false); +} + +static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs) +{ +	struct gm_packet_sg *item = &pkt->items[offs]; +	struct gm_packet_sg *old_grp, *item_dup; +	struct gm_sg *sg_grp = item->sg; +	size_t i; + +	old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber); +	if (old_grp) { +		for (i = 0; i < item->n_exclude; i++) { +			struct gm_packet_sg *item_src, *old_src; + +			item_src = &pkt->items[offs + 1 + i]; +			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG, +						    pkt->subscriber); +			if (old_src) +				gm_packet_sg_drop(old_src); + +			/* See [EXCL_INCL_SG_NOTE] above - we can have old S,G +			 * items left over if the host previously had INCLUDE +			 * mode going.  Remove them here if we find any. +			 */ +			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS, +						    pkt->subscriber); +			if (old_src) +				gm_packet_sg_drop(old_src); +		} + +		/* the previous loop has removed the S,G entries which are +		 * still excluded after this update.  So anything left on the +		 * old item was previously excluded but is now included +		 * => need to trigger update on S,G +		 */ +		for (i = 0; i < old_grp->n_exclude; i++) { +			struct gm_packet_sg *old_src; +			struct gm_sg *old_sg_src; + +			old_src = old_grp + 1 + i; +			old_sg_src = old_src->sg; +			if (!old_sg_src) +				continue; + +			gm_packet_sg_drop(old_src); +			gm_sg_update(old_sg_src, false); +		} + +		gm_packet_sg_drop(old_grp); +	} + +	item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item); +	assert(!item_dup); +	pkt->n_active++; + +	sg_grp->most_recent = item; +	gm_sg_expiry_cancel(sg_grp); + +	for (i = 0; i < item->n_exclude; i++) { +		struct gm_packet_sg *item_src; + +		item_src = &pkt->items[offs + 1 + i]; +		item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative, +						 item_src); + +		if (item_dup) +			item_src->sg = NULL; +		else { +			pkt->n_active++; +			gm_sg_update(item_src->sg, false); +		} +	} + +	/* TODO: determine best ordering between gm_sg_update(S,G) and (*,G) +	 * to get lower PIM churn/flapping +	 */ +	gm_sg_update(sg_grp, false); +} + +CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state"); +/* on receiving a query, we need to update our robustness/query interval to + * match, so we correctly process group/source specific queries after last + * member leaves + */ + +static void gm_handle_v2_report(struct gm_if *gm_ifp, +				const struct sockaddr_in6 *pkt_src, char *data, +				size_t len) +{ +	struct mld_v2_report_hdr *hdr; +	size_t i, n_records, max_entries; +	struct gm_packet_state *pkt; + +	if (len < sizeof(*hdr)) { +		if (PIM_DEBUG_IGMP_PACKETS) +			zlog_debug(log_pkt_src( +				"malformed MLDv2 report (truncated header)")); +		gm_ifp->stats.rx_drop_malformed++; +		return; +	} + +	/* errors after this may at least partially process the packet */ +	gm_ifp->stats.rx_new_report++; + +	hdr = (struct mld_v2_report_hdr *)data; +	data += sizeof(*hdr); +	len -= sizeof(*hdr); + +	/* 
can't have more *,G and S,G items than there is space for ipv6 +	 * addresses, so just use this to allocate temporary buffer +	 */ +	max_entries = len / sizeof(pim_addr); +	pkt = XCALLOC(MTYPE_GM_STATE, +		      offsetof(struct gm_packet_state, items[max_entries])); +	pkt->n_sg = max_entries; +	pkt->iface = gm_ifp; +	pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr); + +	n_records = ntohs(hdr->n_records); + +	/* validate & remove state in v2_pass1() */ +	for (i = 0; i < n_records; i++) { +		struct mld_v2_rec_hdr *rechdr; +		size_t n_src, record_size; + +		if (len < sizeof(*rechdr)) { +			zlog_warn(log_pkt_src( +				"malformed MLDv2 report (truncated record header)")); +			gm_ifp->stats.rx_trunc_report++; +			break; +		} + +		rechdr = (struct mld_v2_rec_hdr *)data; +		data += sizeof(*rechdr); +		len -= sizeof(*rechdr); + +		n_src = ntohs(rechdr->n_src); +		record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4; + +		if (len < record_size) { +			zlog_warn(log_pkt_src( +				"malformed MLDv2 report (truncated source list)")); +			gm_ifp->stats.rx_trunc_report++; +			break; +		} +		if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) { +			zlog_warn( +				log_pkt_src( +					"malformed MLDv2 report (invalid group %pI6)"), +				&rechdr->grp); +			gm_ifp->stats.rx_trunc_report++; +			break; +		} + +		data += record_size; +		len -= record_size; + +		gm_handle_v2_pass1(pkt, rechdr); +	} + +	if (!pkt->n_active) { +		gm_subscriber_drop(&pkt->subscriber); +		XFREE(MTYPE_GM_STATE, pkt); +		return; +	} + +	pkt = XREALLOC(MTYPE_GM_STATE, pkt, +		       offsetof(struct gm_packet_state, items[pkt->n_active])); +	pkt->n_sg = pkt->n_active; +	pkt->n_active = 0; + +	monotime(&pkt->received); +	if (!pkt->subscriber) +		pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr); +	gm_packets_add_tail(pkt->subscriber->packets, pkt); +	gm_packet_expires_add_tail(gm_ifp->expires, pkt); + +	for (i = 0; i < pkt->n_sg; i++) +		if (!pkt->items[i].is_excl) +			gm_handle_v2_pass2_incl(pkt, i); +		else { +			gm_handle_v2_pass2_excl(pkt, i); +			i += pkt->items[i].n_exclude; +		} + +	if (pkt->n_active == 0) +		gm_packet_free(pkt); +} + +static void gm_handle_v1_report(struct gm_if *gm_ifp, +				const struct sockaddr_in6 *pkt_src, char *data, +				size_t len) +{ +	struct mld_v1_pkt *hdr; +	struct gm_packet_state *pkt; +	struct gm_sg *grp; +	struct gm_packet_sg *item; +	size_t max_entries; + +	if (len < sizeof(*hdr)) { +		if (PIM_DEBUG_IGMP_PACKETS) +			zlog_debug(log_pkt_src("malformed MLDv1 report (truncated)")); +		gm_ifp->stats.rx_drop_malformed++; +		return; +	} + +	gm_ifp->stats.rx_old_report++; + +	hdr = (struct mld_v1_pkt *)data; + +	max_entries = 1; +	pkt = XCALLOC(MTYPE_GM_STATE, +		      offsetof(struct gm_packet_state, items[max_entries])); +	pkt->n_sg = max_entries; +	pkt->iface = gm_ifp; +	pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked); + +	/* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */ + +	grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY); +	if (!grp) +		grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY); + +	item = gm_packet_sg_setup(pkt, grp, true, false); +	item->n_exclude = 0; +	CPP_NOTICE("set v1-seen timer on grp here"); + +	/* } */ + +	/* pass2 will count n_active back up to 1.  Also since a v1 report +	 * has exactly 1 group, we can skip the realloc() that v2 needs here. 
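
MLDv1 compatibility reuses the v2 machinery: the report handler here books the group as an EXCLUDE-nothing (in effect a plain *,G join) and the leave handler just below drops that entry again, both on behalf of the dummy "untracked" subscriber, since MLDv1 report suppression makes it impossible to know which host actually answered. A tiny standalone illustration of that idea; the address literal mirrors gm_dummy_untracked above, everything else is illustrative only:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static const struct in6_addr v1_untracked = {
	.s6_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};

int main(void)
{
	char buf[INET6_ADDRSTRLEN];

	/* an MLDv1 Report(G) is treated like IS_EXCLUDE(G, {}) from this
	 * address; an MLDv1 Done(G) like IS_INCLUDE(G, {}) from it. */
	printf("MLDv1 state is owned by %s\n",
	       inet_ntop(AF_INET6, &v1_untracked, buf, sizeof(buf)));
	return 0;
}
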
+	 */ +	assert(pkt->n_active == 1); +	pkt->n_sg = pkt->n_active; +	pkt->n_active = 0; + +	monotime(&pkt->received); +	if (!pkt->subscriber) +		pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked); +	gm_packets_add_tail(pkt->subscriber->packets, pkt); +	gm_packet_expires_add_tail(gm_ifp->expires, pkt); + +	/* pass2 covers installing state & removing old state;  all the v1 +	 * compat is handled at this point. +	 * +	 * Note that "old state" may be v2;  subscribers will switch from v2 +	 * reports to v1 reports when the querier changes from v2 to v1.  So, +	 * limiting this to v1 would be wrong. +	 */ +	gm_handle_v2_pass2_excl(pkt, 0); + +	if (pkt->n_active == 0) +		gm_packet_free(pkt); +} + +static void gm_handle_v1_leave(struct gm_if *gm_ifp, +			       const struct sockaddr_in6 *pkt_src, char *data, +			       size_t len) +{ +	struct mld_v1_pkt *hdr; +	struct gm_subscriber *subscriber; +	struct gm_sg *grp; +	struct gm_packet_sg *old_grp; + +	if (len < sizeof(*hdr)) { +		if (PIM_DEBUG_IGMP_PACKETS) +			zlog_debug(log_pkt_src("malformed MLDv1 leave (truncated)")); +		gm_ifp->stats.rx_drop_malformed++; +		return; +	} + +	gm_ifp->stats.rx_old_leave++; + +	hdr = (struct mld_v1_pkt *)data; + +	subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked); +	if (!subscriber) +		return; + +	/* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */ + +	grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY); +	if (grp) { +		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber); +		if (old_grp) { +			gm_packet_sg_drop(old_grp); +			gm_sg_update(grp, false); +			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here"); +		} +	} + +	/* } */ + +	/* nothing more to do here, pass2 is no-op for leaves */ +	gm_subscriber_drop(&subscriber); +} + +/* for each general query received (or sent), a timer is started to expire + * _everything_ at the appropriate time (including robustness multiplier). + * + * So when this timer hits, all packets - with all of their items - that were + * received *before* the query are aged out, and state updated accordingly. + * Note that when we receive a refresh/update, the previous/old packet is + * already dropped and replaced with a new one, so in normal steady-state + * operation, this timer won't be doing anything. + * + * Additionally, if a subscriber actively leaves a group, that goes through + * its own path too and won't hit this.  This is really only triggered when a + * host straight up disappears. 
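+ *
+ * (The actual expiry delay comes out of gm_expiry_calc(), derived from the
+ * query's QRV, query interval and max response time, i.e. roughly RFC 3810's
+ * Multicast Address Listening Interval.)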
+ */ +static void gm_t_expire(struct thread *t) +{ +	struct gm_if *gm_ifp = THREAD_ARG(t); +	struct gm_packet_state *pkt; + +	zlog_info(log_ifp("general expiry timer")); + +	while (gm_ifp->n_pending) { +		struct gm_general_pending *pend = gm_ifp->pending; +		struct timeval remain; +		int64_t remain_ms; + +		remain_ms = monotime_until(&pend->expiry, &remain); +		if (remain_ms > 0) { +			if (PIM_DEBUG_IGMP_EVENTS) +				zlog_debug( +					log_ifp("next general expiry in %" PRId64 "ms"), +					remain_ms / 1000); + +			thread_add_timer_tv(router->master, gm_t_expire, gm_ifp, +					    &remain, &gm_ifp->t_expire); +			return; +		} + +		while ((pkt = gm_packet_expires_first(gm_ifp->expires))) { +			if (timercmp(&pkt->received, &pend->query, >=)) +				break; + +			if (PIM_DEBUG_IGMP_PACKETS) +				zlog_debug(log_ifp("expire packet %p"), pkt); +			gm_packet_drop(pkt, true); +		} + +		gm_ifp->n_pending--; +		memmove(gm_ifp->pending, gm_ifp->pending + 1, +			gm_ifp->n_pending * sizeof(gm_ifp->pending[0])); +	} + +	if (PIM_DEBUG_IGMP_EVENTS) +		zlog_debug(log_ifp("next general expiry waiting for query")); +} + +/* NB: the receive handlers will also run when sending packets, since we + * receive our own packets back in. + */ +static void gm_handle_q_general(struct gm_if *gm_ifp, +				struct gm_query_timers *timers) +{ +	struct timeval now, expiry; +	struct gm_general_pending *pend; + +	monotime(&now); +	timeradd(&now, &timers->expire_wait, &expiry); + +	while (gm_ifp->n_pending) { +		pend = &gm_ifp->pending[gm_ifp->n_pending - 1]; + +		if (timercmp(&pend->expiry, &expiry, <)) +			break; + +		/* if we end up here, the last item in pending[] has an expiry +		 * later than the expiry for this query.  But our query time +		 * (now) is later than that of the item (because, well, that's +		 * how time works.)  
This makes this query meaningless since +		 * it's "supersetted" within the preexisting query +		 */ + +		if (PIM_DEBUG_IGMP_TRACE_DETAIL) +			zlog_debug(log_ifp("zapping supersetted general timer %pTVMu"), +				   &pend->expiry); + +		gm_ifp->n_pending--; +		if (!gm_ifp->n_pending) +			THREAD_OFF(gm_ifp->t_expire); +	} + +	/* people might be messing with their configs or something */ +	if (gm_ifp->n_pending == array_size(gm_ifp->pending)) +		return; + +	pend = &gm_ifp->pending[gm_ifp->n_pending]; +	pend->query = now; +	pend->expiry = expiry; + +	if (!gm_ifp->n_pending++) { +		if (PIM_DEBUG_IGMP_TRACE) +			zlog_debug(log_ifp("starting general timer @ 0: %pTVMu"), +				   &pend->expiry); +		thread_add_timer_tv(router->master, gm_t_expire, gm_ifp, +				    &timers->expire_wait, &gm_ifp->t_expire); +	} else +		if (PIM_DEBUG_IGMP_TRACE) +			zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"), +				   gm_ifp->n_pending, &pend->expiry); +} + +static void gm_t_sg_expire(struct thread *t) +{ +	struct gm_sg *sg = THREAD_ARG(t); +	struct gm_if *gm_ifp = sg->iface; +	struct gm_packet_sg *item; + +	assertf(sg->state == GM_SG_JOIN_EXPIRING || +			sg->state == GM_SG_NOPRUNE_EXPIRING, +		"%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t); + +	frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item) +		/* this will also drop EXCLUDE mode S,G lists together with +		 * the *,G entry +		 */ +		gm_packet_sg_drop(item); + +	/* subs_negative items are only timed out together with the *,G entry +	 * since we won't get any reports for a group-and-source query +	 */ +	gm_sg_update(sg, true); +} + +static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg, +			       struct timeval ref) +{ +	struct gm_packet_state *pkt; + +	if (!sg->most_recent) { +		struct gm_packet_state *best_pkt = NULL; +		struct gm_packet_sg *item; + +		frr_each (gm_packet_sg_subs, sg->subs_positive, item) { +			pkt = gm_packet_sg2state(item); + +			if (!best_pkt || +			    timercmp(&pkt->received, &best_pkt->received, >)) { +				best_pkt = pkt; +				sg->most_recent = item; +			} +		} +	} +	if (sg->most_recent) { +		struct timeval fuzz; + +		pkt = gm_packet_sg2state(sg->most_recent); + +		/* this shouldn't happen on plain old real ethernet segment, +		 * but on something like a VXLAN or VPLS it is very possible +		 * that we get a report before the query that triggered it. +		 * (imagine a triangle scenario with 3 datacenters, it's very +		 * possible A->B + B->C is faster than A->C due to odd routing) +		 * +		 * This makes a little tolerance allowance to handle that case. 
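+		 *
+		 * (cfg_timing_fuzz is currently a fixed 10ms, set up in gm_start().)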
+		 */ +		timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz); + +		if (timercmp(&fuzz, &ref, >)) +			return true; +	} +	return false; +} + +static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg, +			      struct timeval expire_wait) +{ +	struct timeval now; + +	if (!sg) +		return; +	if (sg->state == GM_SG_PRUNE) +		return; + +	monotime(&now); +	if (gm_sg_check_recent(gm_ifp, sg, now)) +		return; + +	if (PIM_DEBUG_IGMP_TRACE) +		zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait); + +	if (sg->t_sg_expire) { +		struct timeval remain; + +		remain = thread_timer_remain(sg->t_sg_expire); +		if (timercmp(&remain, &expire_wait, <=)) +			return; + +		THREAD_OFF(sg->t_sg_expire); +	} + +	thread_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait, +			    &sg->t_sg_expire); +} + +static void gm_handle_q_groupsrc(struct gm_if *gm_ifp, +				 struct gm_query_timers *timers, pim_addr grp, +				 const pim_addr *srcs, size_t n_src) +{ +	struct gm_sg *sg; +	size_t i; + +	for (i = 0; i < n_src; i++) { +		sg = gm_sg_find(gm_ifp, grp, srcs[i]); +		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait); +	} +} + +static void gm_t_grp_expire(struct thread *t) +{ +	/* if we're here, that means when we received the group-specific query +	 * there was one or more active S,G for this group.  For *,G the timer +	 * in sg->t_sg_expire is running separately and gets cancelled when we +	 * receive a report, so that work is left to gm_t_sg_expire and we +	 * shouldn't worry about it here. +	 */ +	struct gm_grp_pending *pend = THREAD_ARG(t); +	struct gm_if *gm_ifp = pend->iface; +	struct gm_sg *sg, *sg_start, sg_ref; + +	if (PIM_DEBUG_IGMP_EVENTS) +		zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp); + +	/* gteq lookup - try to find *,G or S,G  (S,G is > *,G) +	 * could technically be gt to skip a possible *,G +	 */ +	sg_ref.sgaddr.grp = pend->grp; +	sg_ref.sgaddr.src = PIMADDR_ANY; +	sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref); + +	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) { +		struct gm_packet_sg *item; + +		if (pim_addr_cmp(sg->sgaddr.grp, pend->grp)) +			break; +		if (pim_addr_is_any(sg->sgaddr.src)) +			/* handled by gm_t_sg_expire / sg->t_sg_expire */ +			continue; +		if (gm_sg_check_recent(gm_ifp, sg, pend->query)) +			continue; + +		/* we may also have a group-source-specific query going on in +		 * parallel.  But if we received nothing for the *,G query, +		 * the S,G query is kinda irrelevant. 
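+		 * Just cancel its pending expiry below and age the S,G state out
+		 * together with the group.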
+		 */ +		THREAD_OFF(sg->t_sg_expire); + +		frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item) +			/* this will also drop the EXCLUDE S,G lists */ +			gm_packet_sg_drop(item); + +		gm_sg_update(sg, true); +	} + +	gm_grp_pends_del(gm_ifp->grp_pends, pend); +	XFREE(MTYPE_GM_GRP_PENDING, pend); +} + +static void gm_handle_q_group(struct gm_if *gm_ifp, +			      struct gm_query_timers *timers, pim_addr grp) +{ +	struct gm_sg *sg, sg_ref; +	struct gm_grp_pending *pend, pend_ref; + +	sg_ref.sgaddr.grp = grp; +	sg_ref.sgaddr.src = PIMADDR_ANY; +	/* gteq lookup - try to find *,G or S,G  (S,G is > *,G) */ +	sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref); + +	if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp)) +		/* we have nothing at all for this group - don't waste RAM */ +		return; + +	if (pim_addr_is_any(sg->sgaddr.src)) { +		/* actually found *,G entry here */ +		if (PIM_DEBUG_IGMP_TRACE) +			zlog_debug(log_ifp("*,%pPAs expiry timer starting"), +				   &grp); +		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait); + +		sg = gm_sgs_next(gm_ifp->sgs, sg); +		if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp)) +			/* no S,G for this group */ +			return; +	} + +	pend_ref.grp = grp; +	pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref); + +	if (pend) { +		struct timeval remain; + +		remain = thread_timer_remain(pend->t_expire); +		if (timercmp(&remain, &timers->expire_wait, <=)) +			return; + +		THREAD_OFF(pend->t_expire); +	} else { +		pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend)); +		pend->grp = grp; +		pend->iface = gm_ifp; +		gm_grp_pends_add(gm_ifp->grp_pends, pend); +	} + +	monotime(&pend->query); +	thread_add_timer_tv(router->master, gm_t_grp_expire, pend, +			    &timers->expire_wait, &pend->t_expire); + +	if (PIM_DEBUG_IGMP_TRACE) +		zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp, +			   pend->t_expire); +} + +static void gm_bump_querier(struct gm_if *gm_ifp) +{ +	struct pim_interface *pim_ifp = gm_ifp->ifp->info; + +	THREAD_OFF(gm_ifp->t_query); + +	if (pim_addr_is_any(pim_ifp->ll_lowest)) +		return; +	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest)) +		return; + +	gm_ifp->n_startup = gm_ifp->cur_qrv; + +	thread_execute(router->master, gm_t_query, gm_ifp, 0); +} + +static void gm_t_other_querier(struct thread *t) +{ +	struct gm_if *gm_ifp = THREAD_ARG(t); +	struct pim_interface *pim_ifp = gm_ifp->ifp->info; + +	zlog_info(log_ifp("other querier timer expired")); + +	gm_ifp->querier = pim_ifp->ll_lowest; +	gm_ifp->n_startup = gm_ifp->cur_qrv; + +	thread_execute(router->master, gm_t_query, gm_ifp, 0); +} + +static void gm_handle_query(struct gm_if *gm_ifp, +			    const struct sockaddr_in6 *pkt_src, +			    pim_addr *pkt_dst, char *data, size_t len) +{ +	struct mld_v2_query_hdr *hdr; +	struct pim_interface *pim_ifp = gm_ifp->ifp->info; +	struct gm_query_timers timers; +	bool general_query; + +	if (len < sizeof(struct mld_v2_query_hdr) && +	    len != sizeof(struct mld_v1_pkt)) { +		zlog_warn(log_pkt_src("invalid query size")); +		gm_ifp->stats.rx_drop_malformed++; +		return; +	} + +	hdr = (struct mld_v2_query_hdr *)data; +	general_query = pim_addr_is_any(hdr->grp); + +	if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) { +		zlog_warn(log_pkt_src( +				  "malformed MLDv2 query (invalid group %pI6)"), +			  &hdr->grp); +		gm_ifp->stats.rx_drop_malformed++; +		return; +	} + +	if (len >= sizeof(struct mld_v2_query_hdr)) { +		size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr); + +		if (len < sizeof(struct mld_v2_query_hdr) + src_space) { +			
zlog_warn(log_pkt_src( +				"malformed MLDv2 query (truncated source list)")); +			gm_ifp->stats.rx_drop_malformed++; +			return; +		} + +		if (general_query && src_space) { +			zlog_warn(log_pkt_src( +				"malformed MLDv2 query (general query with non-empty source list)")); +			gm_ifp->stats.rx_drop_malformed++; +			return; +		} +	} + +	/* accepting queries unicast to us (or addressed to a wrong group) +	 * can mess up querier election as well as cause us to terminate +	 * traffic (since after a unicast query no reports will be coming in) +	 */ +	if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) { +		if (pim_addr_is_any(hdr->grp)) { +			zlog_warn( +				log_pkt_src( +					"wrong destination %pPA for general query"), +				pkt_dst); +			gm_ifp->stats.rx_drop_dstaddr++; +			return; +		} + +		if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) { +			gm_ifp->stats.rx_drop_dstaddr++; +			zlog_warn( +				log_pkt_src( +					"wrong destination %pPA for group specific query"), +				pkt_dst); +			return; +		} +	} + +	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) { +		if (PIM_DEBUG_IGMP_EVENTS) +			zlog_debug(log_pkt_src("replacing elected querier %pPA"), +				   &gm_ifp->querier); + +		gm_ifp->querier = pkt_src->sin6_addr; +	} + +	if (len == sizeof(struct mld_v1_pkt)) { +		timers.qrv = gm_ifp->cur_qrv; +		timers.max_resp_ms = hdr->max_resp_code; +		timers.qqic_ms = gm_ifp->cur_query_intv; +	} else { +		timers.qrv = (hdr->flags & 0x7) ?: 8; +		timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code); +		timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000; +	} +	timers.fuzz = gm_ifp->cfg_timing_fuzz; + +	gm_expiry_calc(&timers); + +	if (PIM_DEBUG_IGMP_TRACE_DETAIL) +		zlog_debug( +			log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"), +			timers.qrv, timers.max_resp_ms, timers.qqic_ms, +			&timers.expire_wait); + +	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) { +		unsigned other_ms; + +		THREAD_OFF(gm_ifp->t_query); +		THREAD_OFF(gm_ifp->t_other_querier); + +		other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2; +		thread_add_timer_msec(router->master, gm_t_other_querier, +				      gm_ifp, other_ms, +				      &gm_ifp->t_other_querier); +	} + +	if (len == sizeof(struct mld_v1_pkt)) { +		if (general_query) { +			gm_handle_q_general(gm_ifp, &timers); +			gm_ifp->stats.rx_query_old_general++; +		} else { +			gm_handle_q_group(gm_ifp, &timers, hdr->grp); +			gm_ifp->stats.rx_query_old_group++; +		} +		return; +	} + +	/* v2 query - [S]uppress bit */ +	if (hdr->flags & 0x8) { +		gm_ifp->stats.rx_query_new_sbit++; +		return; +	} + +	if (general_query) { +		gm_handle_q_general(gm_ifp, &timers); +		gm_ifp->stats.rx_query_new_general++; +	} else if (!ntohs(hdr->n_src)) { +		gm_handle_q_group(gm_ifp, &timers, hdr->grp); +		gm_ifp->stats.rx_query_new_group++; +	} else { +		gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs, +				     ntohs(hdr->n_src)); +		gm_ifp->stats.rx_query_new_groupsrc++; +	} +} + +static void gm_rx_process(struct gm_if *gm_ifp, +			  const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst, +			  void *data, size_t pktlen) +{ +	struct icmp6_plain_hdr *icmp6 = data; +	uint16_t pkt_csum, ref_csum; +	struct ipv6_ph ph6 = { +		.src = pkt_src->sin6_addr, +		.dst = *pkt_dst, +		.ulpl = htons(pktlen), +		.next_hdr = IPPROTO_ICMPV6, +	}; + +	pkt_csum = icmp6->icmp6_cksum; +	icmp6->icmp6_cksum = 0; +	ref_csum = in_cksum_with_ph6(&ph6, data, pktlen); + +	if (pkt_csum != ref_csum) { +		zlog_warn( +			log_pkt_src( +				"(dst %pPA) 
packet RX checksum failure, expected %04hx, got %04hx"), +			pkt_dst, pkt_csum, ref_csum); +		gm_ifp->stats.rx_drop_csum++; +		return; +	} + +	data = (icmp6 + 1); +	pktlen -= sizeof(*icmp6); + +	switch (icmp6->icmp6_type) { +	case ICMP6_MLD_QUERY: +		gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen); +		break; +	case ICMP6_MLD_V1_REPORT: +		gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen); +		break; +	case ICMP6_MLD_V1_DONE: +		gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen); +		break; +	case ICMP6_MLD_V2_REPORT: +		gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen); +		break; +	} +} + +static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len, +				 uint16_t alert_type) +{ +	uint8_t *hopopt_end; + +	if (hopopt_len < 8) +		return false; +	if (hopopt_len < (hopopts[1] + 1U) * 8U) +		return false; + +	hopopt_end = hopopts + (hopopts[1] + 1) * 8; +	hopopts += 2; + +	while (hopopts < hopopt_end) { +		if (hopopts[0] == IP6OPT_PAD1) { +			hopopts++; +			continue; +		} + +		if (hopopts > hopopt_end - 2) +			break; +		if (hopopts > hopopt_end - 2 - hopopts[1]) +			break; + +		if (hopopts[0] == IP6OPT_ROUTER_ALERT && hopopts[1] == 2) { +			uint16_t have_type = (hopopts[2] << 8) | hopopts[3]; + +			if (have_type == alert_type) +				return true; +		} + +		hopopts += 2 + hopopts[1]; +	} +	return false; +} + +static void gm_t_recv(struct thread *t) +{ +	struct pim_instance *pim = THREAD_ARG(t); +	union { +		char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) + +			 CMSG_SPACE(256) /* hop options */ + +			 CMSG_SPACE(sizeof(int)) /* hopcount */]; +		struct cmsghdr align; +	} cmsgbuf; +	struct cmsghdr *cmsg; +	struct in6_pktinfo *pktinfo = NULL; +	uint8_t *hopopts = NULL; +	size_t hopopt_len = 0; +	int *hoplimit = NULL; +	char rxbuf[2048]; +	struct msghdr mh[1] = {}; +	struct iovec iov[1]; +	struct sockaddr_in6 pkt_src[1]; +	ssize_t nread; +	size_t pktlen; + +	thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket, +			&pim->t_gm_recv); + +	iov->iov_base = rxbuf; +	iov->iov_len = sizeof(rxbuf); + +	mh->msg_name = pkt_src; +	mh->msg_namelen = sizeof(pkt_src); +	mh->msg_control = cmsgbuf.buf; +	mh->msg_controllen = sizeof(cmsgbuf.buf); +	mh->msg_iov = iov; +	mh->msg_iovlen = array_size(iov); +	mh->msg_flags = 0; + +	nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC); +	if (nread <= 0) { +		zlog_err("(VRF %s) RX error: %m", pim->vrf->name); +		pim->gm_rx_drop_sys++; +		return; +	} + +	if ((size_t)nread > sizeof(rxbuf)) { +		iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread); +		iov->iov_len = nread; +	} +	nread = recvmsg(pim->gm_socket, mh, 0); +	if (nread <= 0) { +		zlog_err("(VRF %s) RX error: %m", pim->vrf->name); +		pim->gm_rx_drop_sys++; +		goto out_free; +	} + +	struct interface *ifp; + +	ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id); +	if (!ifp || !ifp->info) +		goto out_free; + +	struct pim_interface *pim_ifp = ifp->info; +	struct gm_if *gm_ifp = pim_ifp->mld; + +	if (!gm_ifp) +		goto out_free; + +	for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) { +		if (cmsg->cmsg_level != SOL_IPV6) +			continue; + +		switch (cmsg->cmsg_type) { +		case IPV6_PKTINFO: +			pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg); +			break; +		case IPV6_HOPOPTS: +			hopopts = CMSG_DATA(cmsg); +			hopopt_len = cmsg->cmsg_len - sizeof(*cmsg); +			break; +		case IPV6_HOPLIMIT: +			hoplimit = (int *)CMSG_DATA(cmsg); +			break; +		} +	} + +	if (!pktinfo || !hoplimit) { +		zlog_err(log_ifp( +			"BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT")); +		
pim->gm_rx_drop_sys++; +		goto out_free; +	} + +	if (*hoplimit != 1) { +		zlog_err(log_pkt_src("packet with hop limit != 1")); +		/* spoofing attempt => count on srcaddr counter */ +		gm_ifp->stats.rx_drop_srcaddr++; +		goto out_free; +	} + +	if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) { +		zlog_err(log_pkt_src( +			"packet without IPv6 Router Alert MLD option")); +		gm_ifp->stats.rx_drop_ra++; +		goto out_free; +	} + +	if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr)) +		/* reports from :: happen in normal operation for DAD, so +		 * don't spam log messages about this +		 */ +		goto out_free; + +	if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) { +		zlog_warn(log_pkt_src("packet from invalid source address")); +		gm_ifp->stats.rx_drop_srcaddr++; +		goto out_free; +	} + +	pktlen = nread; +	if (pktlen < sizeof(struct icmp6_plain_hdr)) { +		zlog_warn(log_pkt_src("truncated packet")); +		gm_ifp->stats.rx_drop_malformed++; +		goto out_free; +	} + +	gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base, +		      pktlen); + +out_free: +	if (iov->iov_base != rxbuf) +		XFREE(MTYPE_GM_PACKET, iov->iov_base); +} + +static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp, +			  const pim_addr *srcs, size_t n_srcs, bool s_bit) +{ +	struct pim_interface *pim_ifp = gm_ifp->ifp->info; +	struct sockaddr_in6 dstaddr = { +		.sin6_family = AF_INET6, +		.sin6_scope_id = gm_ifp->ifp->ifindex, +	}; +	struct { +		struct icmp6_plain_hdr hdr; +		struct mld_v2_query_hdr v2_query; +	} query = { +		/* clang-format off */ +		.hdr = { +			.icmp6_type = ICMP6_MLD_QUERY, +			.icmp6_code = 0, +		}, +		.v2_query = { +			.grp = grp, +		}, +		/* clang-format on */ +	}; +	struct ipv6_ph ph6 = { +		.src = pim_ifp->ll_lowest, +		.ulpl = htons(sizeof(query)), +		.next_hdr = IPPROTO_ICMPV6, +	}; +	union { +		char buf[CMSG_SPACE(8) /* hop options */ + +			 CMSG_SPACE(sizeof(struct in6_pktinfo))]; +		struct cmsghdr align; +	} cmsg = {}; +	struct cmsghdr *cmh; +	struct msghdr mh[1] = {}; +	struct iovec iov[3]; +	size_t iov_len; +	ssize_t ret, expect_ret; +	uint8_t *dp; +	struct in6_pktinfo *pktinfo; + +	if (if_is_loopback(gm_ifp->ifp)) { +		/* Linux is a bit odd with multicast on loopback */ +		ph6.src = in6addr_loopback; +		dstaddr.sin6_addr = in6addr_loopback; +	} else if (pim_addr_is_any(grp)) +		dstaddr.sin6_addr = gm_all_hosts; +	else +		dstaddr.sin6_addr = grp; + +	query.v2_query.max_resp_code = +		mld_max_resp_encode(gm_ifp->cur_max_resp); +	query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? 
gm_ifp->cur_qrv : 0; +	if (s_bit) +		query.v2_query.flags |= 0x08; +	query.v2_query.qqic = +		igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000); +	query.v2_query.n_src = htons(n_srcs); + +	ph6.dst = dstaddr.sin6_addr; + +	/* ph6 not included in sendmsg */ +	iov[0].iov_base = &ph6; +	iov[0].iov_len = sizeof(ph6); +	iov[1].iov_base = &query; +	if (gm_ifp->cur_version == GM_MLDV1) { +		iov_len = 2; +		iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt); +	} else if (!n_srcs) { +		iov_len = 2; +		iov[1].iov_len = sizeof(query); +	} else { +		iov[1].iov_len = sizeof(query); +		iov[2].iov_base = (void *)srcs; +		iov[2].iov_len = n_srcs * sizeof(srcs[0]); +		iov_len = 3; +	} + +	query.hdr.icmp6_cksum = in_cksumv(iov, iov_len); + +	if (PIM_DEBUG_IGMP_PACKETS) +		zlog_debug(log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"), +			   &pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs); + +	mh->msg_name = &dstaddr; +	mh->msg_namelen = sizeof(dstaddr); +	mh->msg_iov = iov + 1; +	mh->msg_iovlen = iov_len - 1; +	mh->msg_control = &cmsg; +	mh->msg_controllen = sizeof(cmsg.buf); + +	cmh = CMSG_FIRSTHDR(mh); +	cmh->cmsg_level = IPPROTO_IPV6; +	cmh->cmsg_type = IPV6_HOPOPTS; +	cmh->cmsg_len = CMSG_LEN(8); +	dp = CMSG_DATA(cmh); +	*dp++ = 0;		     // next header +	*dp++ = 0;		     // length (8-byte blocks, minus 1) +	*dp++ = IP6OPT_ROUTER_ALERT; // router alert +	*dp++ = 2;		     // length +	*dp++ = 0;		     // value (2 bytes) +	*dp++ = 0;		     // value (2 bytes) (0 = MLD) +	*dp++ = 0;		     // pad0 +	*dp++ = 0;		     // pad0 +				     // +	cmh = CMSG_NXTHDR(mh, cmh); +	cmh->cmsg_level = IPPROTO_IPV6; +	cmh->cmsg_type = IPV6_PKTINFO; +	cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo)); +	pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh); +	pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex; +	pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest; + +	expect_ret = iov[1].iov_len; +	if (iov_len == 3) +		expect_ret += iov[2].iov_len; + +	frr_with_privs (&pimd_privs) { +		ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0); +	} + +	if (ret != expect_ret) { +		zlog_warn(log_ifp("failed to send query: %m")); +		gm_ifp->stats.tx_query_fail++; +	} else { +		if (gm_ifp->cur_version == GM_MLDV1) { +			if (pim_addr_is_any(grp)) +				gm_ifp->stats.tx_query_old_general++; +			else +				gm_ifp->stats.tx_query_old_group++; +		} else { +			if (pim_addr_is_any(grp)) +				gm_ifp->stats.tx_query_new_general++; +			else if (!n_srcs) +				gm_ifp->stats.tx_query_new_group++; +			else +				gm_ifp->stats.tx_query_new_groupsrc++; +		} +	} +} + +static void gm_t_query(struct thread *t) +{ +	struct gm_if *gm_ifp = THREAD_ARG(t); +	unsigned timer_ms = gm_ifp->cur_query_intv; + +	if (gm_ifp->n_startup) { +		timer_ms /= 4; +		gm_ifp->n_startup--; +	} + +	thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms, +			      &gm_ifp->t_query); + +	gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false); +} + +static void gm_t_sg_query(struct thread *t) +{ +	struct gm_sg *sg = THREAD_ARG(t); + +	gm_trigger_specific(sg); +} + +/* S,G specific queries (triggered by a member leaving) get a little slack + * time so we can bundle queries for [S1,S2,S3,...],G into the same query + */ +static void gm_send_specific(struct gm_gsq_pending *pend_gsq) +{ +	struct gm_if *gm_ifp = pend_gsq->iface; + +	gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src, +		      pend_gsq->s_bit); + +	gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq); +	XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq); +} + +static void gm_t_gsq_pend(struct thread *t) +{ +	struct 
gm_gsq_pending *pend_gsq = THREAD_ARG(t); + +	gm_send_specific(pend_gsq); +} + +static void gm_trigger_specific(struct gm_sg *sg) +{ +	struct gm_if *gm_ifp = sg->iface; +	struct pim_interface *pim_ifp = gm_ifp->ifp->info; +	struct gm_gsq_pending *pend_gsq, ref; + +	sg->n_query--; +	if (sg->n_query) +		thread_add_timer_msec(router->master, gm_t_sg_query, sg, +				      gm_ifp->cur_query_intv_trig, +				      &sg->t_sg_query); + +	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest)) +		return; +	if (gm_ifp->pim->gm_socket == -1) +		return; + +	if (PIM_DEBUG_IGMP_TRACE) +		zlog_debug(log_sg(sg, "triggered query")); + +	if (pim_addr_is_any(sg->sgaddr.src)) { +		gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit); +		return; +	} + +	ref.grp = sg->sgaddr.grp; +	ref.s_bit = sg->query_sbit; + +	pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref); +	if (!pend_gsq) { +		pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq)); +		pend_gsq->grp = sg->sgaddr.grp; +		pend_gsq->s_bit = sg->query_sbit; +		pend_gsq->iface = gm_ifp; +		gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq); + +		thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq, +				    &gm_ifp->cfg_timing_fuzz, +				    &pend_gsq->t_send); +	} + +	assert(pend_gsq->n_src < array_size(pend_gsq->srcs)); + +	pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src; +	pend_gsq->n_src++; + +	if (pend_gsq->n_src == array_size(pend_gsq->srcs)) { +		THREAD_OFF(pend_gsq->t_send); +		gm_send_specific(pend_gsq); +		pend_gsq = NULL; +	} +} + +static void gm_vrf_socket_incref(struct pim_instance *pim) +{ +	struct vrf *vrf = pim->vrf; +	int ret, intval; +	struct icmp6_filter filter[1]; + +	if (pim->gm_socket_if_count++ && pim->gm_socket != -1) +		return; + +	ICMP6_FILTER_SETBLOCKALL(filter); +	ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter); +	ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter); +	ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter); +	ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter); + +	frr_with_privs (&pimd_privs) { +		pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6, +					  vrf->vrf_id, vrf->name); +		if (pim->gm_socket < 0) { +			zlog_err("(VRF %s) could not create MLD socket: %m", +				 vrf->name); +			return; +		} + +		ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER, +				 filter, sizeof(filter)); +		if (ret) +			zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m", +				 vrf->name); + +		intval = 1; +		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO, +				 &intval, sizeof(intval)); +		if (ret) +			zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m", +				 vrf->name); + +		intval = 1; +		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS, +				 &intval, sizeof(intval)); +		if (ret) +			zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m", +				 vrf->name); + +		intval = 1; +		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT, +				 &intval, sizeof(intval)); +		if (ret) +			zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m", +				 vrf->name); + +		intval = 1; +		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP, +				 &intval, sizeof(intval)); +		if (ret) +			zlog_err( +				"(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m", +				vrf->name); + +		intval = 1; +		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS, +				 &intval, sizeof(intval)); +		if (ret) +			zlog_err("(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m", +				 vrf->name); + +		/* NB: IPV6_MULTICAST_ALL does not completely bypass multicast +		 * RX filtering in Linux.  
It only means "receive all groups +		 * that something on the system has joined".  To actually +		 * receive *all* MLD packets - which is what we need - +		 * multicast routing must be enabled on the interface.  And +		 * this only works for MLD packets specifically. +		 * +		 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c +		 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there. +		 * +		 * Also note that the code there explicitly checks for the IPv6 +		 * router alert MLD option (which is required by the RFC to be +		 * on MLD packets.)  That implies trying to support hosts which +		 * erroneously don't add that option is just not possible. +		 */ +		intval = 1; +		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL, +				 &intval, sizeof(intval)); +		if (ret) +			zlog_info( +				"(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)", +				vrf->name); +	} + +	thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket, +			&pim->t_gm_recv); +} + +static void gm_vrf_socket_decref(struct pim_instance *pim) +{ +	if (--pim->gm_socket_if_count) +		return; + +	THREAD_OFF(pim->t_gm_recv); +	close(pim->gm_socket); +	pim->gm_socket = -1; +} + +static void gm_start(struct interface *ifp) +{ +	struct pim_interface *pim_ifp = ifp->info; +	struct gm_if *gm_ifp; + +	assert(pim_ifp); +	assert(pim_ifp->pim); +	assert(pim_ifp->mroute_vif_index >= 0); +	assert(!pim_ifp->mld); + +	gm_vrf_socket_incref(pim_ifp->pim); + +	gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp)); +	gm_ifp->ifp = ifp; +	pim_ifp->mld = gm_ifp; +	gm_ifp->pim = pim_ifp->pim; +	monotime(&gm_ifp->started); + +	zlog_info(log_ifp("starting MLD")); + +	if (pim_ifp->mld_version == 1) +		gm_ifp->cur_version = GM_MLDV1; +	else +		gm_ifp->cur_version = GM_MLDV2; + +	/* hardcoded for dev without CLI */ +	gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable; +	gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000; +	gm_ifp->cur_query_intv_trig = pim_ifp->mld_last_query_intv; +	gm_ifp->cur_max_resp = pim_ifp->mld_max_resp_ms; + +	gm_ifp->cfg_timing_fuzz.tv_sec = 0; +	gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000; + +	gm_sgs_init(gm_ifp->sgs); +	gm_subscribers_init(gm_ifp->subscribers); +	gm_packet_expires_init(gm_ifp->expires); +	gm_grp_pends_init(gm_ifp->grp_pends); +	gm_gsq_pends_init(gm_ifp->gsq_pends); + +	frr_with_privs (&pimd_privs) { +		struct ipv6_mreq mreq; +		int ret; + +		/* all-MLDv2 group */ +		mreq.ipv6mr_multiaddr = gm_all_routers; +		mreq.ipv6mr_interface = ifp->ifindex; +		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6, +				 IPV6_JOIN_GROUP, &mreq, sizeof(mreq)); +		if (ret) +			zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m", +				 ifp->name); +	} +} + +void gm_ifp_teardown(struct interface *ifp) +{ +	struct pim_interface *pim_ifp = ifp->info; +	struct gm_if *gm_ifp; +	struct gm_packet_state *pkt; +	struct gm_grp_pending *pend_grp; +	struct gm_gsq_pending *pend_gsq; +	struct gm_subscriber *subscriber; +	struct gm_sg *sg; + +	if (!pim_ifp || !pim_ifp->mld) +		return; + +	gm_ifp = pim_ifp->mld; +	gm_ifp->stopping = true; +	if (PIM_DEBUG_IGMP_EVENTS) +		zlog_debug(log_ifp("MLD stop")); + +	THREAD_OFF(gm_ifp->t_query); +	THREAD_OFF(gm_ifp->t_other_querier); +	THREAD_OFF(gm_ifp->t_expire); + +	frr_with_privs (&pimd_privs) { +		struct ipv6_mreq mreq; +		int ret; + +		/* all-MLDv2 group */ +		mreq.ipv6mr_multiaddr = gm_all_routers; +		mreq.ipv6mr_interface = ifp->ifindex; +		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6, +				 IPV6_LEAVE_GROUP, &mreq, 
sizeof(mreq)); +		if (ret) +			zlog_err("(%s) failed to leave ff02::16 (all-MLDv2): %m", +				 ifp->name); +	} + +	gm_vrf_socket_decref(gm_ifp->pim); + +	while ((pkt = gm_packet_expires_first(gm_ifp->expires))) +		gm_packet_drop(pkt, false); + +	while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) { +		THREAD_OFF(pend_grp->t_expire); +		XFREE(MTYPE_GM_GRP_PENDING, pend_grp); +	} + +	while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) { +		THREAD_OFF(pend_gsq->t_send); +		XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq); +	} + +	while ((sg = gm_sgs_pop(gm_ifp->sgs))) { +		THREAD_OFF(sg->t_sg_expire); +		assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG", +			&sg->sgaddr); +		assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG", +			&sg->sgaddr); + +		gm_sg_free(sg); +	} + +	while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) { +		assertf(!gm_packets_count(subscriber->packets), "%pPA", +			&subscriber->addr); +		XFREE(MTYPE_GM_SUBSCRIBER, subscriber); +	} + +	gm_grp_pends_fini(gm_ifp->grp_pends); +	gm_packet_expires_fini(gm_ifp->expires); +	gm_subscribers_fini(gm_ifp->subscribers); +	gm_sgs_fini(gm_ifp->sgs); + +	XFREE(MTYPE_GM_IFACE, gm_ifp); +	pim_ifp->mld = NULL; +} + +static void gm_update_ll(struct interface *ifp) +{ +	struct pim_interface *pim_ifp = ifp->info; +	struct gm_if *gm_ifp = pim_ifp ? pim_ifp->mld : NULL; +	bool was_querier; + +	was_querier = +		!IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) && +		!pim_addr_is_any(gm_ifp->querier); + +	gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest; +	if (was_querier) +		gm_ifp->querier = pim_ifp->ll_lowest; +	THREAD_OFF(gm_ifp->t_query); + +	if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) { +		if (was_querier) +			zlog_info(log_ifp( +				"lost link-local address, stopping querier")); +		return; +	} + +	if (was_querier) +		zlog_info(log_ifp("new link-local %pPA while querier"), +			  &gm_ifp->cur_ll_lowest); +	else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 || +		 pim_addr_is_any(gm_ifp->querier)) { +		zlog_info(log_ifp("new link-local %pPA, becoming querier"), +			  &gm_ifp->cur_ll_lowest); +		gm_ifp->querier = gm_ifp->cur_ll_lowest; +	} else +		return; + +	gm_ifp->n_startup = gm_ifp->cur_qrv; +	thread_execute(router->master, gm_t_query, gm_ifp, 0); +} + +void gm_ifp_update(struct interface *ifp) +{ +	struct pim_interface *pim_ifp = ifp->info; +	struct gm_if *gm_ifp; +	bool changed = false; + +	if (!pim_ifp) +		return; +	if (!if_is_operative(ifp) || !pim_ifp->pim || +	    pim_ifp->mroute_vif_index < 0) { +		gm_ifp_teardown(ifp); +		return; +	} + +	if (!pim_ifp->mld) +		gm_start(ifp); + +	gm_ifp = pim_ifp->mld; +	if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest)) +		gm_update_ll(ifp); + +	/* these don't trigger a new query */ +	gm_ifp->cur_query_intv_trig = pim_ifp->mld_last_query_intv; +	gm_ifp->cur_max_resp = pim_ifp->mld_max_resp_ms; + +	unsigned cfg_query_intv = pim_ifp->gm_default_query_interval * 1000; + +	if (gm_ifp->cur_query_intv != cfg_query_intv) { +		gm_ifp->cur_query_intv = cfg_query_intv; +		changed = true; +	} + +	if (gm_ifp->cur_qrv != pim_ifp->gm_default_robustness_variable) { +		gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable; +		changed = true; +	} + +	enum gm_version cfg_version; + +	if (pim_ifp->mld_version == 1) +		cfg_version = GM_MLDV1; +	else +		cfg_version = GM_MLDV2; +	if (gm_ifp->cur_version != cfg_version) { +		gm_ifp->cur_version = cfg_version; +		changed = true; +	} + +	if (changed) { +		if (PIM_DEBUG_IGMP_TRACE) +			zlog_debug(log_ifp("MLD 
querier config changed, querying")); +		gm_bump_querier(gm_ifp); +	} +} + +/* + * CLI (show commands only) + */ + +#include "lib/command.h" + +#ifndef VTYSH_EXTRACT_PL +#include "pimd/pim6_mld_clippy.c" +#endif + +#define MLD_STR "Multicast Listener Discovery\n" + +static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str, +				     int *err) +{ +	struct vrf *ret; + +	if (!vrf_str) +		return vrf_lookup_by_id(VRF_DEFAULT); +	if (!strcmp(vrf_str, "all")) +		return NULL; +	ret = vrf_lookup_by_name(vrf_str); +	if (ret) +		return ret; + +	vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str); +	*err = CMD_WARNING; +	return NULL; +} + +static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp) +{ +	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info; +	struct gm_if *gm_ifp; +	bool querier; +	size_t i; + +	if (!pim_ifp) { +		vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name); +		return; +	} + +	gm_ifp = pim_ifp->mld; +	if (!gm_ifp) { +		vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name); +		return; +	} + +	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest); + +	vty_out(vty, "Interface %s: MLD running\n", ifp->name); +	vty_out(vty, "  Uptime:                  %pTVMs\n", &gm_ifp->started); +	vty_out(vty, "  MLD version:             %d\n", gm_ifp->cur_version); +	vty_out(vty, "  Querier:                 %pPA%s\n", &gm_ifp->querier, +		querier ? " (this system)" : ""); +	vty_out(vty, "  Query timer:             %pTH\n", gm_ifp->t_query); +	vty_out(vty, "  Other querier timer:     %pTH\n", +		gm_ifp->t_other_querier); +	vty_out(vty, "  Robustness value:        %u\n", gm_ifp->cur_qrv); +	vty_out(vty, "  Query interval:          %ums\n", +		gm_ifp->cur_query_intv); +	vty_out(vty, "  Query response timer:    %ums\n", gm_ifp->cur_max_resp); +	vty_out(vty, "  Last member query intv.: %ums\n", +		gm_ifp->cur_query_intv_trig); +	vty_out(vty, "  %u expiry timers from general queries:\n", +		gm_ifp->n_pending); +	for (i = 0; i < gm_ifp->n_pending; i++) { +		struct gm_general_pending *p = &gm_ifp->pending[i]; + +		vty_out(vty, "    %9pTVMs ago (query) -> %9pTVMu (expiry)\n", +			&p->query, &p->expiry); +	} +	vty_out(vty, "  %zu expiry timers from *,G queries\n", +		gm_grp_pends_count(gm_ifp->grp_pends)); +	vty_out(vty, "  %zu expiry timers from S,G queries\n", +		gm_gsq_pends_count(gm_ifp->gsq_pends)); +	vty_out(vty, "  %zu total *,G/S,G from %zu hosts in %zu bundles\n", +		gm_sgs_count(gm_ifp->sgs), +		gm_subscribers_count(gm_ifp->subscribers), +		gm_packet_expires_count(gm_ifp->expires)); +	vty_out(vty, "\n"); +} + +static void gm_show_if_one(struct vty *vty, struct interface *ifp, +			   json_object *js_if) +{ +	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info; +	struct gm_if *gm_ifp = pim_ifp->mld; +	bool querier; + +	if (!gm_ifp) { +		if (js_if) +			json_object_string_add(js_if, "state", "down"); +		else +			vty_out(vty, "%-16s  %5s\n", ifp->name, "down"); +		return; +	} + +	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest); + +	if (js_if) { +		json_object_string_add(js_if, "state", "up"); +		json_object_string_addf(js_if, "version", "%d", +					gm_ifp->cur_version); +		json_object_string_addf(js_if, "upTime", "%pTVMs", +					&gm_ifp->started); +		json_object_boolean_add(js_if, "querier", querier); +		json_object_string_addf(js_if, "querierIp", "%pPA", +					&gm_ifp->querier); +		if (querier) +			json_object_string_addf(js_if, "queryTimer", "%pTH", +						gm_ifp->t_query); +		else +			
json_object_string_addf(js_if, "otherQuerierTimer", +						"%pTH", +						gm_ifp->t_other_querier); +	} else { +		vty_out(vty, "%-16s  %-5s  %d  %-25pPA  %-5s %11pTH  %pTVMs\n", +			ifp->name, "up", gm_ifp->cur_version, &gm_ifp->querier, +			querier ? "query" : "other", +			querier ? gm_ifp->t_query : gm_ifp->t_other_querier, +			&gm_ifp->started); +	} +} + +static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname, +			   bool detail, json_object *js) +{ +	struct interface *ifp; +	json_object *js_vrf; + +	if (js) { +		js_vrf = json_object_new_object(); +		json_object_object_add(js, vrf->name, js_vrf); +	} + +	FOR_ALL_INTERFACES (vrf, ifp) { +		json_object *js_if = NULL; + +		if (ifname && strcmp(ifp->name, ifname)) +			continue; +		if (detail && !js) { +			gm_show_if_one_detail(vty, ifp); +			continue; +		} + +		if (!ifp->info) +			continue; +		if (js) { +			js_if = json_object_new_object(); +			json_object_object_add(js_vrf, ifp->name, js_if); +		} + +		gm_show_if_one(vty, ifp, js_if); +	} +} + +static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname, +		       bool detail, json_object *js) +{ +	if (!js && !detail) +		vty_out(vty, "%-16s  %-5s  V  %-25s  %-18s  %s\n", "Interface", +			"State", "Querier", "Timer", "Uptime"); + +	if (vrf) +		gm_show_if_vrf(vty, vrf, ifname, detail, js); +	else +		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) +			gm_show_if_vrf(vty, vrf, ifname, detail, js); +} + +DEFPY(gm_show_interface, +      gm_show_interface_cmd, +      "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME] [detail$detail|json$json]", +      DEBUG_STR +      SHOW_STR +      IPV6_STR +      MLD_STR +      VRF_FULL_CMD_HELP_STR +      "MLD interface information\n" +      "Detailed output\n" +      JSON_STR) +{ +	int ret = CMD_SUCCESS; +	struct vrf *vrf; +	json_object *js = NULL; + +	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret); +	if (ret != CMD_SUCCESS) +		return ret; + +	if (json) +		js = json_object_new_object(); +	gm_show_if(vty, vrf, ifname, !!detail, js); +	return vty_json(vty, js); +} + +static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp, +			      json_object *js_if) +{ +	struct gm_if_stats *stats = &gm_ifp->stats; +	struct { +		const char *text; +		const char *js_key; +		uint64_t *val; +	} * item, +		items[] = { +			/* clang-format off */ +		{ "v2 reports received", "rxV2Reports", &stats->rx_new_report }, +		{ "v1 reports received", "rxV1Reports", &stats->rx_old_report }, +		{ "v1 done received",    "rxV1Done",    &stats->rx_old_leave }, + +		{ "v2 *,* queries received",   "rxV2QueryGeneral",     &stats->rx_query_new_general }, +		{ "v2 *,G queries received",   "rxV2QueryGroup",       &stats->rx_query_new_group }, +		{ "v2 S,G queries received",   "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc }, +		{ "v2 S-bit queries received", "rxV2QuerySBit",        &stats->rx_query_new_sbit }, +		{ "v1 *,* queries received",   "rxV1QueryGeneral",     &stats->rx_query_old_general }, +		{ "v1 *,G queries received",   "rxV1QueryGroup",       &stats->rx_query_old_group }, + +		{ "v2 *,* queries sent", "txV2QueryGeneral",     &stats->tx_query_new_general }, +		{ "v2 *,G queries sent", "txV2QueryGroup",       &stats->tx_query_new_group }, +		{ "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc }, +		{ "v1 *,* queries sent", "txV1QueryGeneral",     &stats->tx_query_old_general }, +		{ "v1 *,G queries sent", "txV1QueryGroup",       &stats->tx_query_old_group }, +		{ "TX errors",           "txErrors",       
      &stats->tx_query_fail }, + +		{ "RX dropped (checksum error)", "rxDropChecksum",  &stats->rx_drop_csum }, +		{ "RX dropped (invalid source)", "rxDropSrcAddr",   &stats->rx_drop_srcaddr }, +		{ "RX dropped (invalid dest.)",  "rxDropDstAddr",   &stats->rx_drop_dstaddr }, +		{ "RX dropped (missing alert)",  "rxDropRtrAlert",  &stats->rx_drop_ra }, +		{ "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed }, +		{ "RX truncated reports",        "rxTruncatedRep",  &stats->rx_trunc_report }, +			/* clang-format on */ +		}; + +	for (item = items; item < items + array_size(items); item++) { +		if (js_if) +			json_object_int_add(js_if, item->js_key, *item->val); +		else +			vty_out(vty, "  %-30s  %" PRIu64 "\n", item->text, +				*item->val); +	} +} + +static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf, +			      const char *ifname, json_object *js) +{ +	struct interface *ifp; +	json_object *js_vrf; + +	if (js) { +		js_vrf = json_object_new_object(); +		json_object_object_add(js, vrf->name, js_vrf); +	} + +	FOR_ALL_INTERFACES (vrf, ifp) { +		struct pim_interface *pim_ifp; +		struct gm_if *gm_ifp; +		json_object *js_if = NULL; + +		if (ifname && strcmp(ifp->name, ifname)) +			continue; + +		if (!ifp->info) +			continue; +		pim_ifp = ifp->info; +		if (!pim_ifp->mld) +			continue; +		gm_ifp = pim_ifp->mld; + +		if (js) { +			js_if = json_object_new_object(); +			json_object_object_add(js_vrf, ifp->name, js_if); +		} else { +			vty_out(vty, "Interface: %s\n", ifp->name); +		} +		gm_show_stats_one(vty, gm_ifp, js_if); +		if (!js) +			vty_out(vty, "\n"); +	} +} + +DEFPY(gm_show_interface_stats, +      gm_show_interface_stats_cmd, +      "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]", +      SHOW_STR +      IPV6_STR +      MLD_STR +      VRF_FULL_CMD_HELP_STR +      "MLD statistics\n" +      INTERFACE_STR +      "Interface name\n" +      JSON_STR) +{ +	int ret = CMD_SUCCESS; +	struct vrf *vrf; +	json_object *js = NULL; + +	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret); +	if (ret != CMD_SUCCESS) +		return ret; + +	if (json) +		js = json_object_new_object(); + +	if (vrf) +		gm_show_stats_vrf(vty, vrf, ifname, js); +	else +		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) +			gm_show_stats_vrf(vty, vrf, ifname, js); +	return vty_json(vty, js); +} + +static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp, +			      const struct prefix_ipv6 *groups, +			      const struct prefix_ipv6 *sources, +			      bool detail, json_object *js_if) +{ +	struct gm_sg *sg, *sg_start; +	json_object *js_group = NULL; +	pim_addr js_grpaddr = PIMADDR_ANY; +	struct gm_subscriber sub_ref, *sub_untracked; + +	if (groups) { +		struct gm_sg sg_ref = {}; + +		sg_ref.sgaddr.grp = pim_addr_from_prefix(groups); +		sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref); +	} else +		sg_start = gm_sgs_first(gm_ifp->sgs); + +	sub_ref.addr = gm_dummy_untracked; +	sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref); +	/* NB: sub_untracked may be NULL if no untracked joins exist */ + +	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) { +		struct timeval *recent = NULL, *untracked = NULL; +		json_object *js_src; + +		if (groups) { +			struct prefix_ipv6 grp_p; + +			pim_addr_to_prefix(&grp_p, sg->sgaddr.grp); +			if (!prefix_match(groups, &grp_p)) +				break; +		} + +		if (sources) { +			struct prefix_ipv6 src_p; + +			pim_addr_to_prefix(&src_p, sg->sgaddr.src); +			if (!prefix_match(sources, &src_p)) +				continue; +		} + +		if (sg->most_recent) { +			
struct gm_packet_state *packet; + +			packet = gm_packet_sg2state(sg->most_recent); +			recent = &packet->received; +		} + +		if (sub_untracked) { +			struct gm_packet_state *packet; +			struct gm_packet_sg *item; + +			item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked); +			if (item) { +				packet = gm_packet_sg2state(item); +				untracked = &packet->received; +			} +		} + +		if (!js_if) { +			FMT_NSTD_BEGIN; /* %.0p */ +			vty_out(vty, +				"%-30pPA  %-30pPAs  %-16s  %10.0pTVMs  %10.0pTVMs  %10.0pTVMs\n", +				&sg->sgaddr.grp, &sg->sgaddr.src, +				gm_states[sg->state], recent, untracked, +				&sg->created); + +			if (!detail) +				continue; + +			struct gm_packet_sg *item; +			struct gm_packet_state *packet; + +			frr_each (gm_packet_sg_subs, sg->subs_positive, item) { +				packet = gm_packet_sg2state(item); + +				if (packet->subscriber == sub_untracked) +					continue; +				vty_out(vty, "    %-58pPA  %-16s  %10.0pTVMs\n", +					&packet->subscriber->addr, "(JOIN)", +					&packet->received); +			} +			frr_each (gm_packet_sg_subs, sg->subs_negative, item) { +				packet = gm_packet_sg2state(item); + +				if (packet->subscriber == sub_untracked) +					continue; +				vty_out(vty, "    %-58pPA  %-16s  %10.0pTVMs\n", +					&packet->subscriber->addr, "(PRUNE)", +					&packet->received); +			} +			FMT_NSTD_END; /* %.0p */ +			continue; +		} +		/* if (js_if) */ + +		if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) { +			js_group = json_object_new_object(); +			json_object_object_addf(js_if, js_group, "%pPA", +						&sg->sgaddr.grp); +			js_grpaddr = sg->sgaddr.grp; +		} + +		js_src = json_object_new_object(); +		json_object_object_addf(js_group, js_src, "%pPA", +					&sg->sgaddr.src); + +		json_object_string_addf(js_src, "state", gm_states[sg->state]); +		json_object_string_addf(js_src, "created", "%pTVMs", +					&sg->created); +		json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent); + +		if (untracked) +			json_object_string_addf(js_src, "untrackedLastSeen", "%pTVMs", untracked); +		if (!detail) +			continue; + +		json_object *js_subs; +		struct gm_packet_sg *item; +		struct gm_packet_state *packet; + +		js_subs = json_object_new_object(); +		json_object_object_add(js_src, "joinedBy", js_subs); +		frr_each (gm_packet_sg_subs, sg->subs_positive, item) { +			packet = gm_packet_sg2state(item); +			if (packet->subscriber == sub_untracked) +				continue; + +			json_object *js_sub; + +			js_sub = json_object_new_object(); +			json_object_object_addf(js_subs, js_sub, "%pPA", +						&packet->subscriber->addr); +			json_object_string_addf(js_sub, "lastSeen", "%pTVMs", +						&packet->received); +		} + +		js_subs = json_object_new_object(); +		json_object_object_add(js_src, "prunedBy", js_subs); +		frr_each (gm_packet_sg_subs, sg->subs_negative, item) { +			packet = gm_packet_sg2state(item); +			if (packet->subscriber == sub_untracked) +				continue; + +			json_object *js_sub; + +			js_sub = json_object_new_object(); +			json_object_object_addf(js_subs, js_sub, "%pPA", +						&packet->subscriber->addr); +			json_object_string_addf(js_sub, "lastSeen", "%pTVMs", +						&packet->received); +		} +	} +} + +static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf, +			      const char *ifname, +			      const struct prefix_ipv6 *groups, +			      const struct prefix_ipv6 *sources, +			      bool detail, json_object *js) +{ +	struct interface *ifp; +	json_object *js_vrf; + +	if (js) { +		js_vrf = json_object_new_object(); +		json_object_object_add(js, vrf->name, js_vrf); +	} + +	
FOR_ALL_INTERFACES (vrf, ifp) { +		struct pim_interface *pim_ifp; +		struct gm_if *gm_ifp; +		json_object *js_if = NULL; + +		if (ifname && strcmp(ifp->name, ifname)) +			continue; + +		if (!ifp->info) +			continue; +		pim_ifp = ifp->info; +		if (!pim_ifp->mld) +			continue; +		gm_ifp = pim_ifp->mld; + +		if (js) { +			js_if = json_object_new_object(); +			json_object_object_add(js_vrf, ifp->name, js_if); +		} + +		if (!js && !ifname) +			vty_out(vty, "\nOn interface %s:\n", ifp->name); + +		gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if); +	} +} + +DEFPY(gm_show_interface_joins, +      gm_show_interface_joins_cmd, +      "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]", +      SHOW_STR +      IPV6_STR +      MLD_STR +      VRF_FULL_CMD_HELP_STR +      "MLD joined groups & sources\n" +      INTERFACE_STR +      "Interface name\n" +      "Limit output to group range\n" +      "Show groups covered by this prefix\n" +      "Limit output to source range\n" +      "Show sources covered by this prefix\n" +      "Show details, including tracked receivers\n" +      JSON_STR) +{ +	int ret = CMD_SUCCESS; +	struct vrf *vrf; +	json_object *js = NULL; + +	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret); +	if (ret != CMD_SUCCESS) +		return ret; + +	if (json) +		js = json_object_new_object(); +	else +		vty_out(vty, "%-30s  %-30s  %-16s  %10s  %10s  %10s\n", +			"Group", "Source", "State", "LastSeen", "NonTrkSeen", "Created"); + +	if (vrf) +		gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail, +				  js); +	else +		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) +			gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail, +					  js); +	return vty_json(vty, js); +} + +DEFPY(gm_debug_show, +      gm_debug_show_cmd, +      "debug show mld interface IFNAME", +      DEBUG_STR +      SHOW_STR +      "MLD" +      INTERFACE_STR +      "interface name") +{ +	struct interface *ifp; +	struct pim_interface *pim_ifp; +	struct gm_if *gm_ifp; + +	ifp = if_lookup_by_name(ifname, VRF_DEFAULT); +	if (!ifp) { +		vty_out(vty, "%% no such interface: %pSQq\n", ifname); +		return CMD_WARNING; +	} + +	pim_ifp = ifp->info; +	if (!pim_ifp) { +		vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname); +		return CMD_WARNING; +	} + +	gm_ifp = pim_ifp->mld; +	if (!gm_ifp) { +		vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname); +		return CMD_WARNING; +	} + +	vty_out(vty, "querier:         %pPA\n", &gm_ifp->querier); +	vty_out(vty, "ll_lowest:       %pPA\n\n", &pim_ifp->ll_lowest); +	vty_out(vty, "t_query:         %pTHD\n", gm_ifp->t_query); +	vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier); +	vty_out(vty, "t_expire:        %pTHD\n", gm_ifp->t_expire); + +	vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending); +	for (size_t i = 0; i < gm_ifp->n_pending; i++) { +		int64_t query, expiry; + +		query = monotime_since(&gm_ifp->pending[i].query, NULL); +		expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL); + +		vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n", +			i, query / 1000, expiry / 1000); +	} + +	struct gm_sg *sg; +	struct gm_packet_state *pkt; +	struct gm_packet_sg *item; +	struct gm_subscriber *subscriber; + +	vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs)); +	frr_each (gm_sgs, gm_ifp->sgs, sg) { +		vty_out(vty, "\t%pSG    t_expire=%pTHD\n", &sg->sgaddr, +			sg->t_sg_expire); + +		vty_out(vty, "\t     @pos:%zu\n", +			
gm_packet_sg_subs_count(sg->subs_positive)); +		frr_each (gm_packet_sg_subs, sg->subs_positive, item) { +			pkt = gm_packet_sg2state(item); + +			vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n", +				item->is_src ? "S" : "", +				item->is_excl ? "E" : "", +				&pkt->subscriber->addr, pkt->subscriber, pkt, +				item->offset); + +			assert(item->sg == sg); +		} +		vty_out(vty, "\t     @neg:%zu\n", +			gm_packet_sg_subs_count(sg->subs_negative)); +		frr_each (gm_packet_sg_subs, sg->subs_negative, item) { +			pkt = gm_packet_sg2state(item); + +			vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n", +				item->is_src ? "S" : "", +				item->is_excl ? "E" : "", +				&pkt->subscriber->addr, pkt->subscriber, pkt, +				item->offset); + +			assert(item->sg == sg); +		} +	} + +	vty_out(vty, "\n%zu subscribers:\n", +		gm_subscribers_count(gm_ifp->subscribers)); +	frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) { +		vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr, +			subscriber, gm_packets_count(subscriber->packets)); + +		frr_each (gm_packets, subscriber->packets, pkt) { +			vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n", +				pkt, +				monotime_since(&pkt->received, NULL) * +					0.000001f, +				pkt->n_active, pkt->n_sg); + +			for (size_t i = 0; i < pkt->n_sg; i++) { +				item = pkt->items + i; + +				vty_out(vty, "\t\t[%zu]", i); + +				if (!item->sg) { +					vty_out(vty, " inactive\n"); +					continue; +				} + +				vty_out(vty, " %s%s %pSG nE=%u\n", +					item->is_src ? "S" : "", +					item->is_excl ? "E" : "", +					&item->sg->sgaddr, item->n_exclude); +			} +		} +	} + +	return CMD_SUCCESS; +} + +DEFPY(gm_debug_iface_cfg, +      gm_debug_iface_cfg_cmd, +      "debug ipv6 mld {" +        "robustness (0-7)|" +	"query-max-response-time (1-8387584)" +      "}", +      DEBUG_STR +      IPV6_STR +      "Multicast Listener Discovery\n" +      "QRV\nQRV\n" +      "maxresp\nmaxresp\n") +{ +	VTY_DECLVAR_CONTEXT(interface, ifp); +	struct pim_interface *pim_ifp; +	struct gm_if *gm_ifp; +	bool changed = false; + +	pim_ifp = ifp->info; +	if (!pim_ifp) { +		vty_out(vty, "%% no PIM state for interface %pSQq\n", +			ifp->name); +		return CMD_WARNING; +	} +	gm_ifp = pim_ifp->mld; +	if (!gm_ifp) { +		vty_out(vty, "%% no MLD state for interface %pSQq\n", +			ifp->name); +		return CMD_WARNING; +	} + +	if (robustness_str && gm_ifp->cur_qrv != robustness) { +		gm_ifp->cur_qrv = robustness; +		changed = true; +	} +	if (query_max_response_time_str && +	    gm_ifp->cur_max_resp != query_max_response_time) { +		gm_ifp->cur_max_resp = query_max_response_time; +		changed = true; +	} + +	if (changed) { +		vty_out(vty, "%% MLD querier config changed, bumping\n"); +		gm_bump_querier(gm_ifp); +	} +	return CMD_SUCCESS; +} + +void gm_cli_init(void); + +void gm_cli_init(void) +{ +	install_element(VIEW_NODE, &gm_show_interface_cmd); +	install_element(VIEW_NODE, &gm_show_interface_stats_cmd); +	install_element(VIEW_NODE, &gm_show_interface_joins_cmd); + +	install_element(VIEW_NODE, &gm_debug_show_cmd); +	install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd); +} diff --git a/pimd/pim6_mld.h b/pimd/pim6_mld.h new file mode 100644 index 0000000000..e60814980a --- /dev/null +++ b/pimd/pim6_mld.h @@ -0,0 +1,365 @@ +/* + * PIMv6 MLD querier + * Copyright (C) 2021-2022  David Lamparter for NetDEF, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef PIM6_MLD_H +#define PIM6_MLD_H + +#include "typesafe.h" +#include "pim_addr.h" + +struct thread; +struct pim_instance; +struct gm_packet_sg; +struct gm_if; +struct channel_oil; + +#define MLD_DEFAULT_VERSION 2 + +/* see comment below on subs_negative/subs_positive */ +enum gm_sub_sense { +	/* negative/pruning: S,G in EXCLUDE */ +	GM_SUB_NEG = 0, +	/* positive/joining: *,G in EXCLUDE and S,G in INCLUDE */ +	GM_SUB_POS = 1, +}; + +enum gm_sg_state { +	GM_SG_NOINFO = 0, +	GM_SG_JOIN, +	GM_SG_JOIN_EXPIRING, +	/* remaining 3 only valid for S,G when *,G in EXCLUDE */ +	GM_SG_PRUNE, +	GM_SG_NOPRUNE, +	GM_SG_NOPRUNE_EXPIRING, +}; + +static inline bool gm_sg_state_want_join(enum gm_sg_state state) +{ +	return state != GM_SG_NOINFO && state != GM_SG_PRUNE; +} + +/* MLD (S,G) state (on an interface) + * + * group is always != ::, src is :: for (*,G) joins.  sort order in RB tree is + * such that sources for a particular group can be iterated by starting at the + * group.  For INCLUDE, no (*,G) entry exists, only (S,G). + */ + +PREDECL_RBTREE_UNIQ(gm_packet_sg_subs); +PREDECL_RBTREE_UNIQ(gm_sgs); +struct gm_sg { +	pim_sgaddr sgaddr; +	struct gm_if *iface; +	struct gm_sgs_item itm; + +	enum gm_sg_state state; +	struct channel_oil *oil; +	bool tib_joined; + +	struct timeval created; + +	/* if a group- or group-and-source specific query is running +	 * (implies we haven't received any report yet, since it's cancelled +	 * by that) +	 */ +	struct thread *t_sg_expire; + +	/* last-member-left triggered queries (group/group-source specific) +	 * +	 * this timer will be running even if we aren't the elected querier, +	 * in case the election result changes midway through. +	 */ +	struct thread *t_sg_query; + +	/* we must keep sending (QRV) queries even if we get a positive +	 * response, to make sure other routers are updated.  query_sbit +	 * will be set in that case, since other routers need the *response*, +	 * not the *query* +	 */ +	uint8_t n_query; +	bool query_sbit; + +	/* subs_positive tracks gm_packet_sg resulting in a JOIN, i.e. for +	 * (*,G) it has *EXCLUDE* items, for (S,G) it has *INCLUDE* items. +	 * +	 * subs_negative is always empty for (*,G) and tracks EXCLUDE items +	 * for (S,G).  This means that an (S,G) entry is active as a PRUNE if +	 *   len(src->subs_negative) == len(grp->subs_positive) +	 *   && len(src->subs_positive) == 0 +	 * (i.e. all receivers for the group opted to exclude this S,G and +	 * noone did an SSM join for the S,G) +	 */ +	union { +		struct { +			struct gm_packet_sg_subs_head subs_negative[1]; +			struct gm_packet_sg_subs_head subs_positive[1]; +		}; +		struct gm_packet_sg_subs_head subs[2]; +	}; + +	/* If the elected querier is not ourselves, queries and reports might +	 * get reordered in rare circumstances, i.e. 
the report could arrive +	 * just a microsecond before the query kicks off the timer.  This can +	 * then result in us thinking there are no more receivers since no +	 * report might be received during the query period. +	 * +	 * To avoid this, keep track of the most recent report for this (S,G) +	 * so we can do a quick check to add just a little bit of slack. +	 * +	 * EXCLUDE S,Gs are never in most_recent. +	 */ +	struct gm_packet_sg *most_recent; +}; + +/* host tracking entry.  addr will be one of: + * + * ::		- used by hosts during address acquisition + * ::1		- may show up on some OS for joins by the router itself + * link-local	- regular operation by MLDv2 hosts + * ffff:..:ffff	- MLDv1 entry (cannot be tracked due to report suppression) + * + * global scope IPv6 addresses can never show up here + */ +PREDECL_HASH(gm_subscribers); +PREDECL_DLIST(gm_packets); +struct gm_subscriber { +	pim_addr addr; +	struct gm_subscribers_item itm; + +	struct gm_if *iface; +	size_t refcount; + +	struct gm_packets_head packets[1]; + +	struct timeval created; +}; + +/* + * MLD join state is kept batched by packet.  Since the timers for all items + * in a packet are the same, this reduces the number of timers we're keeping + * track of.  It also eases tracking for EXCLUDE state groups because the + * excluded sources are in the same packet.  (MLD does not support splitting + * that if it exceeds MTU, it's always a full replace for exclude.) + * + * Since packets may be partially superseded by newer packets, the "active" + * field is used to track this. + */ + +/* gm_packet_sg is allocated as part of gm_packet_state, note the items[0] + * array at the end of that.  gm_packet_sg is NEVER directly allocated with + * XMALLOC/XFREE. + */ +struct gm_packet_sg { +	/* non-NULL as long as this gm_packet_sg is the most recent entry +	 * for (subscriber,S,G).  Cleared to NULL when a newer packet by the +	 * subscriber replaces this item. +	 * +	 * (Old items are kept around so we don't need to realloc/resize +	 * gm_packet_state, which would mess up a whole lot of pointers) +	 */ +	struct gm_sg *sg; + +	/* gm_sg -> (subscriber, gm_packet_sg) +	 * only on RB-tree while sg != NULL, i.e. not superseded by newer. +	 */ +	struct gm_packet_sg_subs_item subs_itm; + +	bool is_src : 1; /* := (src != ::) */ +	bool is_excl : 1; + +	/* for getting back to struct gm_packet_state, cf. +	 * gm_packet_sg2state() below +	 */ +	uint16_t offset; + +	/* if this is a group entry in EXCLUDE state, n_exclude counts how +	 * many sources are on the exclude list here.  They follow immediately +	 * after. +	 */ +	uint16_t n_exclude; +}; + +#define gm_packet_sg2state(sg)                                                 \ +	container_of(sg, struct gm_packet_state, items[sg->offset]) + +PREDECL_DLIST(gm_packet_expires); +struct gm_packet_state { +	struct gm_if *iface; +	struct gm_subscriber *subscriber; +	struct gm_packets_item pkt_itm; + +	struct timeval received; +	struct gm_packet_expires_item exp_itm; + +	/* n_active starts equal to n_sg;  whenever active is set to false on +	 * an item it is decremented.  When n_active == 0, the packet can be +	 * freed. +	 */ +	uint16_t n_sg, n_active; +	struct gm_packet_sg items[0]; +}; + +/* general queries are rather different from group/S,G specific queries;  it's + * not particularly efficient or useful to try to shoehorn them into the S,G + * timers.  Instead, we keep a history of recent queries and their implied + * expiries. 
+ */ +struct gm_general_pending { +	struct timeval query, expiry; +}; + +/* similarly, group queries also age out S,G entries for the group, but in + * this case we only keep one query for each group + * + * why is this not in the *,G gm_sg?  There may not be one (for INCLUDE mode + * groups, or groups we don't know about.)  Also, malicious clients could spam + * random group-specific queries to trigger resource exhaustion, so it makes + * sense to limit these. + */ +PREDECL_RBTREE_UNIQ(gm_grp_pends); +struct gm_grp_pending { +	struct gm_grp_pends_item itm; +	struct gm_if *iface; +	pim_addr grp; + +	struct timeval query; +	struct thread *t_expire; +}; + +/* guaranteed MTU for IPv6 is 1280 bytes.  IPv6 header is 40 bytes, MLDv2 + * query header is 24 bytes, RA option is 8 bytes - leaves 1208 bytes for the + * source list, which is 151 IPv6 addresses.  But we may have some more IPv6 + * extension headers (e.g. IPsec AH), so just cap to 128 + */ +#define MLD_V2Q_MTU_MAX_SOURCES 128 + +/* group-and-source-specific queries are bundled together, if some host joins + * multiple sources it's likely to drop all at the same time. + * + * Unlike gm_grp_pending, this is only used for aggregation since the S,G + * state is kept directly in the gm_sg structure. + */ +PREDECL_HASH(gm_gsq_pends); +struct gm_gsq_pending { +	struct gm_gsq_pends_item itm; + +	struct gm_if *iface; +	struct thread *t_send; + +	pim_addr grp; +	bool s_bit; + +	size_t n_src; +	pim_addr srcs[MLD_V2Q_MTU_MAX_SOURCES]; +}; + + +/* The size of this history is limited by QRV, i.e. there can't be more than + * 8 items here. + */ +#define GM_MAX_PENDING 8 + +enum gm_version { +	GM_NONE, +	GM_MLDV1, +	GM_MLDV2, +}; + +struct gm_if_stats { +	uint64_t rx_drop_csum; +	uint64_t rx_drop_srcaddr; +	uint64_t rx_drop_dstaddr; +	uint64_t rx_drop_ra; +	uint64_t rx_drop_malformed; +	uint64_t rx_trunc_report; + +	/* since the types are different, this is rx_old_* not of rx_*_old */ +	uint64_t rx_old_report; +	uint64_t rx_old_leave; +	uint64_t rx_new_report; + +	uint64_t rx_query_new_general; +	uint64_t rx_query_new_group; +	uint64_t rx_query_new_groupsrc; +	uint64_t rx_query_new_sbit; +	uint64_t rx_query_old_general; +	uint64_t rx_query_old_group; + +	uint64_t tx_query_new_general; +	uint64_t tx_query_new_group; +	uint64_t tx_query_new_groupsrc; +	uint64_t tx_query_old_general; +	uint64_t tx_query_old_group; + +	uint64_t tx_query_fail; +}; + +struct gm_if { +	struct interface *ifp; +	struct pim_instance *pim; +	struct thread *t_query, *t_other_querier, *t_expire; + +	bool stopping; + +	uint8_t n_startup; + +	uint8_t cur_qrv; +	unsigned cur_query_intv;      /* ms */ +	unsigned cur_query_intv_trig; /* ms */ +	unsigned cur_max_resp;	      /* ms */ +	enum gm_version cur_version; + +	/* this value (positive, default 10ms) defines our "timing tolerance": +	 * - added to deadlines for expiring joins +	 * - used to look backwards in time for queries, in case a report was +	 *   reordered before the query +	 */ +	struct timeval cfg_timing_fuzz; + +	/* items in pending[] are sorted by expiry, pending[0] is earliest */ +	struct gm_general_pending pending[GM_MAX_PENDING]; +	uint8_t n_pending; +	struct gm_grp_pends_head grp_pends[1]; +	struct gm_gsq_pends_head gsq_pends[1]; + +	pim_addr querier; +	pim_addr cur_ll_lowest; + +	struct gm_sgs_head sgs[1]; +	struct gm_subscribers_head subscribers[1]; +	struct gm_packet_expires_head expires[1]; + +	struct timeval started; +	struct gm_if_stats stats; +}; + +#if PIM_IPV == 6 +extern void gm_ifp_update(struct interface 
*ifp); +extern void gm_ifp_teardown(struct interface *ifp); +#else +static inline void gm_ifp_update(struct interface *ifp) +{ +} + +static inline void gm_ifp_teardown(struct interface *ifp) +{ +} +#endif + +#endif /* PIM6_MLD_H */ diff --git a/pimd/pim6_mld_protocol.h b/pimd/pim6_mld_protocol.h new file mode 100644 index 0000000000..699178bc27 --- /dev/null +++ b/pimd/pim6_mld_protocol.h @@ -0,0 +1,125 @@ +/* + * MLD protocol definitions + * Copyright (C) 2022  David Lamparter for NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _PIM6_MLD_PROTOCOL_H +#define _PIM6_MLD_PROTOCOL_H + +#include <stdalign.h> +#include <stdint.h> + +/* There is a struct icmp6_hdr provided by OS, but it includes 4 bytes of data. + * Not helpful for us if we want to put the MLD struct after it. + */ + +struct icmp6_plain_hdr { +	uint8_t icmp6_type; +	uint8_t icmp6_code; +	uint16_t icmp6_cksum; +}; +static_assert(sizeof(struct icmp6_plain_hdr) == 4, "struct mismatch"); +static_assert(alignof(struct icmp6_plain_hdr) <= 4, "struct mismatch"); + +/* for MLDv1 query, report and leave all use the same packet format */ +struct mld_v1_pkt { +	uint16_t max_resp_code; +	uint16_t rsvd0; +	struct in6_addr grp; +}; +static_assert(sizeof(struct mld_v1_pkt) == 20, "struct mismatch"); +static_assert(alignof(struct mld_v1_pkt) <= 4, "struct mismatch"); + + +struct mld_v2_query_hdr { +	uint16_t max_resp_code; +	uint16_t rsvd0; +	struct in6_addr grp; +	uint8_t flags; +	uint8_t qqic; +	uint16_t n_src; +	struct in6_addr srcs[0]; +}; +static_assert(sizeof(struct mld_v2_query_hdr) == 24, "struct mismatch"); +static_assert(alignof(struct mld_v2_query_hdr) <= 4, "struct mismatch"); + + +struct mld_v2_report_hdr { +	uint16_t rsvd; +	uint16_t n_records; +}; +static_assert(sizeof(struct mld_v2_report_hdr) == 4, "struct mismatch"); +static_assert(alignof(struct mld_v2_report_hdr) <= 4, "struct mismatch"); + + +struct mld_v2_rec_hdr { +	uint8_t type; +	uint8_t aux_len; +	uint16_t n_src; +	struct in6_addr grp; +	struct in6_addr srcs[0]; +}; +static_assert(sizeof(struct mld_v2_rec_hdr) == 20, "struct mismatch"); +static_assert(alignof(struct mld_v2_rec_hdr) <= 4, "struct mismatch"); + +/* clang-format off */ +enum icmp6_mld_type { +	ICMP6_MLD_QUERY			= 130, +	ICMP6_MLD_V1_REPORT		= 131, +	ICMP6_MLD_V1_DONE		= 132, +	ICMP6_MLD_V2_REPORT		= 143, +}; + +enum mld_v2_rec_type { +	MLD_RECTYPE_IS_INCLUDE		= 1, +	MLD_RECTYPE_IS_EXCLUDE		= 2, +	MLD_RECTYPE_CHANGE_TO_INCLUDE	= 3, +	MLD_RECTYPE_CHANGE_TO_EXCLUDE	= 4, +	MLD_RECTYPE_ALLOW_NEW_SOURCES	= 5, +	MLD_RECTYPE_BLOCK_OLD_SOURCES	= 6, +}; +/* clang-format on */ + +/* helper functions */ + +static inline unsigned mld_max_resp_decode(uint16_t wire) +{ +	uint16_t code = ntohs(wire); +	uint8_t exp; + +	if (code < 0x8000) +		return code; +	exp = (code >> 12) & 0x7; +	return ((code & 
0xfff) | 0x1000) << (exp + 3); +} + +static inline uint16_t mld_max_resp_encode(uint32_t value) +{ +	uint16_t code; +	uint8_t exp; + +	if (value < 0x8000) +		code = value; +	else { +		exp = 16 - __builtin_clz(value); +		code = (value >> (exp + 3)) & 0xfff; +		code |= 0x8000 | (exp << 12); +	} +	return htons(code); +} + +#endif /* _PIM6_MLD_PROTOCOL_H */ diff --git a/pimd/pim6_mroute_msg.c b/pimd/pim6_mroute_msg.c index f34fa5965a..37d67ad048 100644 --- a/pimd/pim6_mroute_msg.c +++ b/pimd/pim6_mroute_msg.c @@ -45,17 +45,6 @@ int pim_mroute_set(struct pim_instance *pim, int enable)  	int err;  	int opt, data;  	socklen_t data_len = sizeof(data); -	static const struct sock_filter filter[] = { -		BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 0), -		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0, 0, 1), -		BPF_STMT(BPF_RET | BPF_K, 0xffff), -		BPF_STMT(BPF_RET | BPF_K, 0), -	}; - -	static const struct sock_fprog bpf = { -		.len = array_size(filter), -		.filter = (struct sock_filter *)filter, -	};  	/*  	 * We need to create the VRF table for the pim mroute_socket @@ -133,10 +122,6 @@ int pim_mroute_set(struct pim_instance *pim, int enable)  		zlog_warn(  			"PIM-SM will not work properly on this platform, until the ability to receive the WHOLEPKT upcall");  #endif -		if (setsockopt(pim->mroute_socket, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf))) { -			zlog_warn("Failure to attach SO_ATTACH_FILTER on fd %d: %d %s", -					pim->mroute_socket, errno, safe_strerror(errno)); -		}  	}  	return 0; diff --git a/pimd/pim6_stubs.c b/pimd/pim6_stubs.c index f2781a3ce9..1b31afc4c1 100644 --- a/pimd/pim6_stubs.c +++ b/pimd/pim6_stubs.c @@ -25,108 +25,3 @@  #include "pim_pim.h"  #include "pim_register.h"  #include "pim_cmd.h" - -/* - * NH lookup / NHT - */ -void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr addr) -{ -} - -void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr) -{ -} - -int zclient_lookup_nexthop(struct pim_instance *pim, -			   struct pim_zlookup_nexthop nexthop_tab[], -			   const int tab_size, pim_addr addr, -			   int max_lookup) -{ -	return -1; -} - -void zclient_lookup_new(void) -{ -} - -void zclient_lookup_free(void) -{ -} - -/* - * packet handling - */ -int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg, -		 int pim_msg_size, const char *ifname) -{ -	return 0; -} - -int pim_hello_send(struct interface *ifp, uint16_t holdtime) -{ -	return -1; -} - -void pim_hello_restart_now(struct interface *ifp) -{ -} - -void pim_hello_restart_triggered(struct interface *ifp) -{ -} - -int pim_sock_add(struct interface *ifp) -{ -	return -1; -} - -void pim_sock_delete(struct interface *ifp, const char *delete_message) -{ -} - -/* - * PIM register - */ -void pim_register_join(struct pim_upstream *up) -{ -} - -void pim_null_register_send(struct pim_upstream *up) -{ -} - -void pim_reg_del_on_couldreg_fail(struct interface *ifp) -{ -} - -bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp) -{ -	return false; -} - -void pim_bsm_proc_free(struct pim_instance *pim) -{ -} - -void pim_bsm_proc_init(struct pim_instance *pim) -{ -} - -struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope, -					  struct prefix *grp) -{ -	return NULL; -} - -void pim_bsm_write_config(struct vty *vty, struct interface *ifp) -{ -} -void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src, -		       struct pim_rpf *rpg, int null_register, -		       struct pim_upstream *up) -{ -} -void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg, pim_addr src, -			    
pim_addr originator) -{ -} diff --git a/pimd/pim_addr.h b/pimd/pim_addr.h index e422a2e2da..90862520af 100644 --- a/pimd/pim_addr.h +++ b/pimd/pim_addr.h @@ -31,9 +31,13 @@ typedef struct in_addr pim_addr;  #define PIM_ADDRSTRLEN	INET_ADDRSTRLEN  #define PIM_AF		AF_INET  #define PIM_AFI		AFI_IP +#define PIM_IPADDR	IPADDR_V4 +#define ipaddr_pim	ipaddr_v4  #define PIM_MAX_BITLEN	IPV4_MAX_BITLEN  #define PIM_AF_NAME     "ip" +#define PIM_ADDR_FUNCNAME(name) ipv4_##name +  union pimprefixptr {  	prefixtype(pimprefixptr, struct prefix,      p)  	prefixtype(pimprefixptr, struct prefix_ipv4, p4) @@ -50,9 +54,13 @@ typedef struct in6_addr pim_addr;  #define PIM_ADDRSTRLEN	INET6_ADDRSTRLEN  #define PIM_AF		AF_INET6  #define PIM_AFI		AFI_IP6 +#define PIM_IPADDR	IPADDR_V6 +#define ipaddr_pim	ipaddr_v6  #define PIM_MAX_BITLEN	IPV6_MAX_BITLEN  #define PIM_AF_NAME     "ipv6" +#define PIM_ADDR_FUNCNAME(name) ipv6_##name +  union pimprefixptr {  	prefixtype(pimprefixptr, struct prefix,      p)  	prefixtype(pimprefixptr, struct prefix_ipv6, p6) @@ -101,6 +109,21 @@ static inline pim_addr pim_addr_from_prefix(union pimprefixconstptr in)  	return ret;  } +static inline uint8_t pim_addr_scope(const pim_addr addr) +{ +	return PIM_ADDR_FUNCNAME(mcast_scope)(&addr); +} + +static inline bool pim_addr_nofwd(const pim_addr addr) +{ +	return PIM_ADDR_FUNCNAME(mcast_nofwd)(&addr); +} + +static inline bool pim_addr_ssm(const pim_addr addr) +{ +	return PIM_ADDR_FUNCNAME(mcast_ssm)(&addr); +} +  /* don't use this struct directly, use the pim_sgaddr typedef */  struct _pim_sgaddr {  	pim_addr grp; diff --git a/pimd/pim_assert.c b/pimd/pim_assert.c index e7fff4db6f..7d924d6505 100644 --- a/pimd/pim_assert.c +++ b/pimd/pim_assert.c @@ -34,6 +34,7 @@  #include "pim_hello.h"  #include "pim_macro.h"  #include "pim_assert.h" +#include "pim_zebra.h"  #include "pim_ifchannel.h"  static int assert_action_a3(struct pim_ifchannel *ch); @@ -50,6 +51,8 @@ void pim_ifassert_winner_set(struct pim_ifchannel *ch,  	int winner_changed = !!pim_addr_cmp(ch->ifassert_winner, winner);  	int metric_changed = !pim_assert_metric_match(  		&ch->ifassert_winner_metric, &winner_metric); +	enum pim_rpf_result rpf_result; +	struct pim_rpf old_rpf;  	if (PIM_DEBUG_PIM_EVENTS) {  		if (ch->ifassert_state != new_state) { @@ -74,6 +77,22 @@ void pim_ifassert_winner_set(struct pim_ifchannel *ch,  	ch->ifassert_creation = pim_time_monotonic_sec();  	if (winner_changed || metric_changed) { +		if (winner_changed) { +			old_rpf.source_nexthop.interface = +				ch->upstream->rpf.source_nexthop.interface; +			rpf_result = pim_rpf_update(pim_ifp->pim, ch->upstream, +						    &old_rpf, __func__); +			if (rpf_result == PIM_RPF_CHANGED || +			    (rpf_result == PIM_RPF_FAILURE && +			     old_rpf.source_nexthop.interface)) +				pim_zebra_upstream_rpf_changed( +					pim_ifp->pim, ch->upstream, &old_rpf); +			/* update kernel multicast forwarding cache (MFC) */ +			if (ch->upstream->rpf.source_nexthop.interface && +			    ch->upstream->channel_oil) +				pim_upstream_mroute_iif_update( +					ch->upstream->channel_oil, __func__); +		}  		pim_upstream_update_join_desired(pim_ifp->pim, ch->upstream);  		pim_ifchannel_update_could_assert(ch);  		pim_ifchannel_update_assert_tracking_desired(ch); @@ -338,6 +357,7 @@ int pim_assert_build_msg(uint8_t *pim_msg, int buf_size, struct interface *ifp,  			 uint32_t metric_preference, uint32_t route_metric,  			 uint32_t rpt_bit_flag)  { +	struct pim_interface *pim_ifp = ifp->info;  	uint8_t *buf_pastend = pim_msg + buf_size;  	
uint8_t *pim_msg_curr;  	int pim_msg_size; @@ -380,7 +400,9 @@ int pim_assert_build_msg(uint8_t *pim_msg, int buf_size, struct interface *ifp,  	  Add PIM header  	*/  	pim_msg_size = pim_msg_curr - pim_msg; -	pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_ASSERT, false); +	pim_msg_build_header(pim_ifp->primary_address, +			     qpim_all_pim_routers_addr, pim_msg, pim_msg_size, +			     PIM_MSG_TYPE_ASSERT, false);  	return pim_msg_size;  } diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c index 66c37e7aed..a33894dddc 100644 --- a/pimd/pim_bsm.c +++ b/pimd/pim_bsm.c @@ -130,11 +130,7 @@ int pim_bsm_rpinfo_cmp(const struct bsm_rpinfo *node1,  		return 1;  	if (node1->hash > node2->hash)  		return -1; -	if (node1->rp_address.s_addr < node2->rp_address.s_addr) -		return 1; -	if (node1->rp_address.s_addr > node2->rp_address.s_addr) -		return -1; -	return 0; +	return pim_addr_cmp(node2->rp_address, node1->rp_address);  }  static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt, @@ -173,11 +169,10 @@ static void pim_on_bs_timer(struct thread *t)  			   __func__, scope->sz_id);  	pim_nht_bsr_del(scope->pim, scope->current_bsr); -  	/* Reset scope zone data */  	scope->accept_nofwd_bsm = false;  	scope->state = ACCEPT_ANY; -	scope->current_bsr.s_addr = INADDR_ANY; +	scope->current_bsr = PIMADDR_ANY;  	scope->current_bsr_prio = 0;  	scope->current_bsr_first_ts = 0;  	scope->current_bsr_last_ts = 0; @@ -353,10 +348,9 @@ static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time)  	THREAD_OFF(bsrp->g2rp_timer);  	if (PIM_DEBUG_BSM)  		zlog_debug( -			"%s : starting g2rp timer for grp: %pFX - rp: %pI4 with timeout  %d secs(Actual Hold time : %d secs)", -			__func__, &bsrp->bsgrp_node->group, -			&bsrp->rp_address, hold_time, -			bsrp->rp_holdtime); +			"%s : starting g2rp timer for grp: %pFX - rp: %pPAs with timeout  %d secs(Actual Hold time : %d secs)", +			__func__, &bsrp->bsgrp_node->group, &bsrp->rp_address, +			hold_time, bsrp->rp_holdtime);  	thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,  			 &bsrp->g2rp_timer); @@ -374,7 +368,7 @@ static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)  		return;  	if (PIM_DEBUG_BSM) -		zlog_debug("%s : stopping g2rp timer for grp: %pFX - rp: %pI4", +		zlog_debug("%s : stopping g2rp timer for grp: %pFX - rp: %pPAs",  			   __func__, &bsrp->bsgrp_node->group,  			   &bsrp->rp_address); @@ -462,8 +456,7 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)  		route_unlock_node(rn);  		if (active && pend) { -			if ((active->rp_address.s_addr -			     != pend->rp_address.s_addr)) +			if (pim_addr_cmp(active->rp_address, pend->rp_address))  				pim_rp_change(pim, pend->rp_address,  					      bsgrp_node->group, RP_SRC_BSR);  		} @@ -531,18 +524,17 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)  	pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);  } -static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr, +static bool is_preferred_bsr(struct pim_instance *pim, pim_addr bsr,  			     uint32_t bsr_prio)  { -	if (bsr.s_addr == pim->global_scope.current_bsr.s_addr) +	if (!pim_addr_cmp(bsr, pim->global_scope.current_bsr))  		return true;  	if (bsr_prio > pim->global_scope.current_bsr_prio)  		return true;  	else if (bsr_prio == pim->global_scope.current_bsr_prio) { -		if (ntohl(bsr.s_addr) -		    >= ntohl(pim->global_scope.current_bsr.s_addr)) +		if (pim_addr_cmp(bsr, pim->global_scope.current_bsr) >= 0)  			return true;  		else  			return false; @@ -550,10 
+542,10 @@ static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr,  		return false;  } -static void pim_bsm_update(struct pim_instance *pim, struct in_addr bsr, +static void pim_bsm_update(struct pim_instance *pim, pim_addr bsr,  			   uint32_t bsr_prio)  { -	if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) { +	if (pim_addr_cmp(bsr, pim->global_scope.current_bsr)) {  		pim_nht_bsr_del(pim, pim->global_scope.current_bsr);  		pim_nht_bsr_add(pim, bsr); @@ -583,7 +575,7 @@ void pim_bsm_clear(struct pim_instance *pim)  	/* Reset scope zone data */  	pim->global_scope.accept_nofwd_bsm = false;  	pim->global_scope.state = ACCEPT_ANY; -	pim->global_scope.current_bsr.s_addr = INADDR_ANY; +	pim->global_scope.current_bsr = PIMADDR_ANY;  	pim->global_scope.current_bsr_prio = 0;  	pim->global_scope.current_bsr_first_ts = 0;  	pim->global_scope.current_bsr_last_ts = 0; @@ -617,9 +609,7 @@ void pim_bsm_clear(struct pim_instance *pim)  		}  		/* Deregister addr with Zebra NHT */ -		nht_p.family = AF_INET; -		nht_p.prefixlen = IPV4_MAX_BITLEN; -		nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4; +		nht_p = rp_info->rp.rpf_addr;  		if (PIM_DEBUG_PIM_NHT_RP) {  			zlog_debug("%s: Deregister RP addr %pFX with Zebra ", @@ -718,6 +708,7 @@ static bool pim_bsm_send_intf(uint8_t *buf, int len, struct interface *ifp,  static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,  			      uint32_t pim_mtu, pim_addr dst_addr, bool no_fwd)  { +	struct pim_interface *pim_ifp = ifp->info;  	struct bsmmsg_grpinfo *grpinfo, *curgrp;  	uint8_t *firstgrp_ptr;  	uint8_t *pkt; @@ -836,9 +827,10 @@ static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,  				< (PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN))) {  				/* No space to fit in more rp, send this pkt */  				this_pkt_len = pim_mtu - this_pkt_rem; -				pim_msg_build_header(pak_start, this_pkt_len, -						     PIM_MSG_TYPE_BOOTSTRAP, -						     no_fwd); +				pim_msg_build_header( +					pim_ifp->primary_address, dst_addr, +					pak_start, this_pkt_len, +					PIM_MSG_TYPE_BOOTSTRAP, no_fwd);  				pim_bsm_send_intf(pak_start, this_pkt_len, ifp,  						  dst_addr); @@ -847,9 +839,9 @@ static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,  				this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN  							  + PIM_MSG_HEADER_LEN); -				/* If pkt can't accomodate next group + atleast -				 * one rp, we must break out of this inner loop -				 * and process next RP +				/* If pkt can't accommodate next group + at +				 * least one rp, we must break out of this inner +				 * loop and process next RP  				 */  				if (total_rp_cnt == this_rp_cnt)  					break; @@ -873,7 +865,8 @@ static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,  	/* Send if we have any unsent packet */  	if (pak_pending) {  		this_pkt_len = pim_mtu - this_pkt_rem; -		pim_msg_build_header(pak_start, this_pkt_len, +		pim_msg_build_header(pim_ifp->primary_address, dst_addr, +				     pak_start, this_pkt_len,  				     PIM_MSG_TYPE_BOOTSTRAP, no_fwd);  		pim_bsm_send_intf(pak_start, (pim_mtu - this_pkt_rem), ifp,  				  dst_addr); @@ -920,7 +913,8 @@ static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,  				zlog_debug("%s: pim_bsm_frag_send returned %s",  					   __func__, ret ? 
"TRUE" : "FALSE");  		} else { -			pim_msg_build_header(buf, len, PIM_MSG_TYPE_BOOTSTRAP, +			pim_msg_build_header(pim_ifp->primary_address, dst_addr, +					     buf, len, PIM_MSG_TYPE_BOOTSTRAP,  					     no_fwd);  			if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {  				if (PIM_DEBUG_BSM) @@ -938,7 +932,6 @@ bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)  	struct pim_interface *pim_ifp;  	struct bsm_scope *scope;  	struct bsm_frag *bsfrag; -	char neigh_src_str[INET_ADDRSTRLEN];  	uint32_t pim_mtu;  	bool no_fwd = true;  	bool ret = false; @@ -976,13 +969,13 @@ bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)  	if (!pim_ifp->ucast_bsm_accept) {  		dst_addr = qpim_all_pim_routers_addr;  		if (PIM_DEBUG_BSM) -			zlog_debug("%s: Sending BSM mcast to %s", __func__, -				   neigh_src_str); +			zlog_debug("%s: Sending BSM mcast to %pPA", __func__, +				   &neigh->source_addr);  	} else {  		dst_addr = neigh->source_addr;  		if (PIM_DEBUG_BSM) -			zlog_debug("%s: Sending BSM ucast to %s", __func__, -				   neigh_src_str); +			zlog_debug("%s: Sending BSM ucast to %pPA", __func__, +				   &neigh->source_addr);  	}  	pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;  	pim_hello_require(ifp); @@ -999,7 +992,8 @@ bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)  			}  		} else {  			/* Pim header needs to be constructed */ -			pim_msg_build_header(bsfrag->data, bsfrag->size, +			pim_msg_build_header(pim_ifp->primary_address, dst_addr, +					     bsfrag->data, bsfrag->size,  					     PIM_MSG_TYPE_BOOTSTRAP, no_fwd);  			ret = pim_bsm_send_intf(bsfrag->data, bsfrag->size, ifp,  						dst_addr); @@ -1033,7 +1027,7 @@ struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,  	return bsgrp;  } -static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp, +static uint32_t hash_calc_on_grp_rp(struct prefix group, pim_addr rp,  				    uint8_t hashmasklen)  {  	uint64_t temp; @@ -1051,13 +1045,24 @@ static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,  	/* in_addr stores ip in big endian, hence network byte order  	 * convert to uint32 before processing hash  	 */ +#if PIM_IPV == 4  	grpaddr = ntohl(group.u.prefix4.s_addr); +#else +	grpaddr = group.u.prefix6.s6_addr32[0] ^ group.u.prefix6.s6_addr32[1] ^ +		  group.u.prefix6.s6_addr32[2] ^ group.u.prefix6.s6_addr32[3]; +#endif  	/* Avoid shifting by 32 bit on a 32 bit register */  	if (hashmasklen)  		grpaddr = grpaddr & ((mask << (32 - hashmasklen)));  	else  		grpaddr = grpaddr & mask; + +#if PIM_IPV == 4  	rp_add = ntohl(rp.s_addr); +#else +	rp_add = rp.s6_addr32[0] ^ rp.s6_addr32[1] ^ rp.s6_addr32[2] ^ +		 rp.s6_addr32[3]; +#endif  	temp = 1103515245 * ((1103515245 * (uint64_t)grpaddr + 12345) ^ rp_add)  	       + 12345;  	hash = temp & (0x7fffffff); @@ -1076,8 +1081,7 @@ static bool pim_install_bsm_grp_rp(struct pim_instance *pim,  	bsm_rpinfo->rp_prio = rp->rp_pri;  	bsm_rpinfo->rp_holdtime = rp->rp_holdtime; -	memcpy(&bsm_rpinfo->rp_address, &rp->rpaddr.addr, -	       sizeof(struct in_addr)); +	bsm_rpinfo->rp_address = rp->rpaddr.addr;  	bsm_rpinfo->elapse_time = 0;  	/* Back pointer to the group node. 
*/ @@ -1135,6 +1139,7 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,  	int frag_rp_cnt = 0;  	int offset = 0;  	int ins_count = 0; +	pim_addr grp_addr;  	while (buflen > offset) {  		if (offset + (int)sizeof(struct bsmmsg_grpinfo) > buflen) { @@ -1146,31 +1151,28 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,  		}  		/* Extract Group tlv from BSM */  		memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo)); +		grp_addr = grpinfo.group.addr; -		if (PIM_DEBUG_BSM) { -			char grp_str[INET_ADDRSTRLEN]; - -			pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str, -				       sizeof(grp_str)); +		if (PIM_DEBUG_BSM)  			zlog_debug( -				"%s, Group %s  Rpcount:%d Fragment-Rp-count:%d", -				__func__, grp_str, grpinfo.rp_count, +				"%s, Group %pPAs  Rpcount:%d Fragment-Rp-count:%d", +				__func__, &grp_addr, grpinfo.rp_count,  				grpinfo.frag_rp_count); -		}  		buf += sizeof(struct bsmmsg_grpinfo);  		offset += sizeof(struct bsmmsg_grpinfo); -		group.family = AF_INET; -		if (grpinfo.group.mask > IPV4_MAX_BITLEN) { +		group.family = PIM_AF; +		if (grpinfo.group.mask > PIM_MAX_BITLEN) {  			if (PIM_DEBUG_BSM)  				zlog_debug( -					"%s, v4 prefix length specified: %d is too long", +					"%s, prefix length specified: %d is too long",  					__func__, grpinfo.group.mask);  			return false;  		} + +		pim_addr_to_prefix(&group, grp_addr);  		group.prefixlen = grpinfo.group.mask; -		group.u.prefix4.s_addr = grpinfo.group.addr.s_addr;  		/* Get the Group node for the BSM rp table */  		bsgrp = pim_bsm_get_bsgrp_node(scope, &group); @@ -1182,14 +1184,10 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,  			if (!bsgrp)  				continue; -			if (PIM_DEBUG_BSM) { -				char grp_str[INET_ADDRSTRLEN]; - -				pim_inet4_dump("<Group?>", grpinfo.group.addr, -					       grp_str, sizeof(grp_str)); -				zlog_debug("%s, Rp count is zero for group: %s", -					   __func__, grp_str); -			} +			if (PIM_DEBUG_BSM) +				zlog_debug( +					"%s, Rp count is zero for group: %pPAs", +					__func__, &grp_addr);  			old_rpinfo = bsm_rpinfos_first(bsgrp->bsrp_list);  			if (old_rpinfo) @@ -1242,13 +1240,12 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,  			offset += sizeof(struct bsmmsg_rpinfo);  			if (PIM_DEBUG_BSM) { -				char rp_str[INET_ADDRSTRLEN]; +				pim_addr rp_addr; -				pim_inet4_dump("<Rpaddr?>", rpinfo.rpaddr.addr, -					       rp_str, sizeof(rp_str)); +				rp_addr = rpinfo.rpaddr.addr;  				zlog_debug( -					"%s, Rp address - %s; pri:%d hold:%d", -					__func__, rp_str, rpinfo.rp_pri, +					"%s, Rp address - %pPAs; pri:%d hold:%d", +					__func__, &rp_addr, rpinfo.rp_pri,  					rpinfo.rp_holdtime);  			} @@ -1280,7 +1277,6 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,  	struct pim_interface *pim_ifp = NULL;  	struct bsm_frag *bsfrag;  	struct pim_instance *pim; -	char bsr_str[INET_ADDRSTRLEN];  	uint16_t frag_tag;  	bool empty_bsm = false; @@ -1315,11 +1311,10 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,  	}  	bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN); -	pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str, -		       sizeof(bsr_str)); -	if (bshdr->hm_len > IPV4_MAX_BITLEN) { -		zlog_warn("Bad hashmask length for IPv4; got %hhu, expected value in range 0-32", -			  bshdr->hm_len); +	if (bshdr->hm_len > PIM_MAX_BITLEN) { +		zlog_warn( +			"Bad hashmask length for %s; got %hhu, expected value in range 0-32", +			
PIM_AF_NAME, bshdr->hm_len);  		pim->bsm_dropped++;  		return -1;  	} @@ -1362,20 +1357,16 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,  		} else {  			if (PIM_DEBUG_BSM)  				zlog_debug( -					"%s : nofwd_bsm received on %s when accpt_nofwd_bsm false", -					__func__, bsr_str); +					"%s : nofwd_bsm received on %pPAs when accpt_nofwd_bsm false", +					__func__, +					(pim_addr *)&bshdr->bsr_addr.addr);  			pim->bsm_dropped++;  			pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;  			return -1;  		}  	} -#if PIM_IPV == 4 -	if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr)) -#else -	if (0) -#endif -	{ +	if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr)) {  		/* Multicast BSMs are only accepted if source interface & IP  		 * match RPF towards the BSR's IP address, or they have  		 * no-forward set @@ -1384,8 +1375,9 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,  						      ifp, sg->src)) {  			if (PIM_DEBUG_BSM)  				zlog_debug( -					"BSM check: RPF to BSR %s is not %pPA%%%s", -					bsr_str, &sg->src, ifp->name); +					"BSM check: RPF to BSR %pPAs is not %pPA%%%s", +					(pim_addr *)&bshdr->bsr_addr.addr, +					&sg->src, ifp->name);  			pim->bsm_dropped++;  			return -1;  		} diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h index 910067109e..90bd2f0877 100644 --- a/pimd/pim_bsm.h +++ b/pimd/pim_bsm.h @@ -61,7 +61,7 @@ struct bsm_scope {  	int sz_id;			/* scope zone id */  	enum ncbsr_state state;		/* non candidate BSR state */  	bool accept_nofwd_bsm;		/* no fwd bsm accepted for scope */ -	struct in_addr current_bsr;     /* current elected BSR for the sz */ +	pim_addr current_bsr;		/* current elected BSR for the sz */  	uint32_t current_bsr_prio;      /* current BSR priority */  	int64_t current_bsr_first_ts;   /* current BSR elected time */  	int64_t current_bsr_last_ts;    /* Last BSM received from E-BSR */ @@ -185,18 +185,30 @@ struct bsm_hdr {  	uint16_t frag_tag;  	uint8_t hm_len;  	uint8_t bsr_prio; +#if PIM_IPV == 4  	struct pim_encoded_ipv4_unicast bsr_addr; +#else +	struct pim_encoded_ipv6_unicast bsr_addr; +#endif  } __attribute__((packed));  struct bsmmsg_grpinfo { +#if PIM_IPV == 4  	struct pim_encoded_group_ipv4 group; +#else +	struct pim_encoded_group_ipv6 group; +#endif  	uint8_t rp_count;  	uint8_t frag_rp_count;  	uint16_t reserved;  } __attribute__((packed));  struct bsmmsg_rpinfo { +#if PIM_IPV == 4  	struct pim_encoded_ipv4_unicast rpaddr; +#else +	struct pim_encoded_ipv6_unicast rpaddr; +#endif  	uint16_t rp_holdtime;  	uint8_t rp_pri;  	uint8_t reserved; diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index 86d179fe39..974f664e82 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -67,6 +67,7 @@  #include "lib/northbound_cli.h"  #include "pim_errors.h"  #include "pim_nb.h" +#include "pim_addr.h"  #include "pim_cmd_common.h"  #ifndef VTYSH_EXTRACT_PL @@ -80,14 +81,6 @@ static struct cmd_node debug_node = {  	.config_write = pim_debug_config_write,  }; -static inline bool pim_sgaddr_match(pim_sgaddr item, pim_sgaddr match) -{ -	return (pim_addr_is_any(match.grp) || -		!pim_addr_cmp(match.grp, item.grp)) && -	       (pim_addr_is_any(match.src) || -		!pim_addr_cmp(match.src, item.src)); -} -  static struct vrf *pim_cmd_lookup_vrf(struct vty *vty, struct cmd_token *argv[],  				      const int argc, int *idx)  { @@ -300,180 +293,6 @@ static void pim_show_assert_winner_metric(struct pim_instance *pim,  	}  } -static void json_object_pim_ifp_add(struct json_object *json, -				    struct interface *ifp) -{ -	struct 
pim_interface *pim_ifp; - -	pim_ifp = ifp->info; -	json_object_string_add(json, "name", ifp->name); -	json_object_string_add(json, "state", if_is_up(ifp) ? "up" : "down"); -	json_object_string_addf(json, "address", "%pI4", -				&pim_ifp->primary_address); -	json_object_int_add(json, "index", ifp->ifindex); - -	if (if_is_multicast(ifp)) -		json_object_boolean_true_add(json, "flagMulticast"); - -	if (if_is_broadcast(ifp)) -		json_object_boolean_true_add(json, "flagBroadcast"); - -	if (ifp->flags & IFF_ALLMULTI) -		json_object_boolean_true_add(json, "flagAllMulticast"); - -	if (ifp->flags & IFF_PROMISC) -		json_object_boolean_true_add(json, "flagPromiscuous"); - -	if (PIM_IF_IS_DELETED(ifp)) -		json_object_boolean_true_add(json, "flagDeleted"); - -	if (pim_if_lan_delay_enabled(ifp)) -		json_object_boolean_true_add(json, "lanDelayEnabled"); -} - -static void pim_show_membership_helper(struct vty *vty, -				       struct pim_interface *pim_ifp, -				       struct pim_ifchannel *ch, -				       struct json_object *json) -{ -	char ch_grp_str[PIM_ADDRSTRLEN]; -	json_object *json_iface = NULL; -	json_object *json_row = NULL; - -	json_object_object_get_ex(json, ch->interface->name, &json_iface); -	if (!json_iface) { -		json_iface = json_object_new_object(); -		json_object_pim_ifp_add(json_iface, ch->interface); -		json_object_object_add(json, ch->interface->name, json_iface); -	} - -	snprintfrr(ch_grp_str, sizeof(ch_grp_str), "%pPAs", &ch->sg.grp); - -	json_row = json_object_new_object(); -	json_object_string_addf(json_row, "source", "%pPAs", &ch->sg.src); -	json_object_string_add(json_row, "group", ch_grp_str); -	json_object_string_add(json_row, "localMembership", -			       ch->local_ifmembership == PIM_IFMEMBERSHIP_NOINFO -			       ? "NOINFO" -			       : "INCLUDE"); -	json_object_object_add(json_iface, ch_grp_str, json_row); -} - -static void pim_show_membership(struct pim_instance *pim, struct vty *vty, -				bool uj) -{ -	struct pim_interface *pim_ifp; -	struct pim_ifchannel *ch; -	struct interface *ifp; -	enum json_type type; -	json_object *json = NULL; -	json_object *json_tmp = NULL; - -	json = json_object_new_object(); - -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		pim_ifp = ifp->info; -		if (!pim_ifp) -			continue; - -		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) { -			pim_show_membership_helper(vty, pim_ifp, ch, json); -		} /* scan interface channels */ -	} - -	if (uj) { -		vty_out(vty, "%s\n", json_object_to_json_string_ext( -				json, JSON_C_TO_STRING_PRETTY)); -	} else { -		vty_out(vty, -			"Interface         Address          Source           Group            Membership\n"); - -		/* -		 * Example of the json data we are traversing -		 * -		 * { -		 *   "swp3":{ -		 *     "name":"swp3", -		 *     "state":"up", -		 *     "address":"10.1.20.1", -		 *     "index":5, -		 *     "flagMulticast":true, -		 *     "flagBroadcast":true, -		 *     "lanDelayEnabled":true, -		 *     "226.10.10.10":{ -		 *       "source":"*", -		 *       "group":"226.10.10.10", -		 *       "localMembership":"INCLUDE" -		 *     } -		 *   } -		 * } -		 */ - -		/* foreach interface */ -		json_object_object_foreach(json, key, val) -		{ - -			/* Find all of the keys where the val is an object. 
In -			 * the example -			 * above the only one is 226.10.10.10 -			 */ -			json_object_object_foreach(val, if_field_key, -						   if_field_val) -			{ -				type = json_object_get_type(if_field_val); - -				if (type == json_type_object) { -					vty_out(vty, "%-16s  ", key); - -					json_object_object_get_ex( -						val, "address", &json_tmp); -					vty_out(vty, "%-15s  ", -						json_object_get_string( -							json_tmp)); - -					json_object_object_get_ex(if_field_val, -								  "source", -								  &json_tmp); -					vty_out(vty, "%-15s  ", -						json_object_get_string( -							json_tmp)); - -					/* Group */ -					vty_out(vty, "%-15s  ", if_field_key); - -					json_object_object_get_ex( -						if_field_val, "localMembership", -						&json_tmp); -					vty_out(vty, "%-10s\n", -						json_object_get_string( -							json_tmp)); -				} -			} -		} -	} - -	json_object_free(json); -} - -static void pim_print_ifp_flags(struct vty *vty, struct interface *ifp) -{ -	vty_out(vty, "Flags\n"); -	vty_out(vty, "-----\n"); -	vty_out(vty, "All Multicast   : %s\n", -		(ifp->flags & IFF_ALLMULTI) ? "yes" : "no"); -	vty_out(vty, "Broadcast       : %s\n", -		if_is_broadcast(ifp) ? "yes" : "no"); -	vty_out(vty, "Deleted         : %s\n", -		PIM_IF_IS_DELETED(ifp) ? "yes" : "no"); -	vty_out(vty, "Interface Index : %d\n", ifp->ifindex); -	vty_out(vty, "Multicast       : %s\n", -		if_is_multicast(ifp) ? "yes" : "no"); -	vty_out(vty, "Promiscuous     : %s\n", -		(ifp->flags & IFF_PROMISC) ? "yes" : "no"); -	vty_out(vty, "\n"); -	vty_out(vty, "\n"); -} -  static void igmp_show_interfaces(struct pim_instance *pim, struct vty *vty,  				 bool uj)  { @@ -874,412 +693,6 @@ static void igmp_show_interface_join(struct pim_instance *pim, struct vty *vty,  		vty_json(vty, json);  } -static void pim_show_interfaces_single(struct pim_instance *pim, -				       struct vty *vty, const char *ifname, -				       bool mlag, bool uj) -{ -	struct in_addr ifaddr; -	struct interface *ifp; -	struct listnode *neighnode; -	struct pim_interface *pim_ifp; -	struct pim_neighbor *neigh; -	struct pim_upstream *up; -	time_t now; -	char dr_str[INET_ADDRSTRLEN]; -	char dr_uptime[10]; -	char expire[10]; -	char grp_str[INET_ADDRSTRLEN]; -	char hello_period[10]; -	char hello_timer[10]; -	char neigh_src_str[INET_ADDRSTRLEN]; -	char src_str[INET_ADDRSTRLEN]; -	char stat_uptime[10]; -	char uptime[10]; -	int found_ifname = 0; -	int print_header; -	json_object *json = NULL; -	json_object *json_row = NULL; -	json_object *json_pim_neighbor = NULL; -	json_object *json_pim_neighbors = NULL; -	json_object *json_group = NULL; -	json_object *json_group_source = NULL; -	json_object *json_fhr_sources = NULL; -	struct pim_secondary_addr *sec_addr; -	struct listnode *sec_node; - -	now = pim_time_monotonic_sec(); - -	if (uj) -		json = json_object_new_object(); - -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		pim_ifp = ifp->info; - -		if (!pim_ifp) -			continue; - -		if (mlag == true && pim_ifp->activeactive == false) -			continue; - -		if (strcmp(ifname, "detail") && strcmp(ifname, ifp->name)) -			continue; - -		found_ifname = 1; -		ifaddr = pim_ifp->primary_address; -		pim_inet4_dump("<dr?>", pim_ifp->pim_dr_addr, dr_str, -			       sizeof(dr_str)); -		pim_time_uptime_begin(dr_uptime, sizeof(dr_uptime), now, -				      pim_ifp->pim_dr_election_last); -		pim_time_timer_to_hhmmss(hello_timer, sizeof(hello_timer), -					 pim_ifp->t_pim_hello_timer); -		pim_time_mmss(hello_period, sizeof(hello_period), -			      pim_ifp->pim_hello_period); -		pim_time_uptime(stat_uptime, 
sizeof(stat_uptime), -				now - pim_ifp->pim_ifstat_start); - -		if (uj) { -			char pbuf[PREFIX2STR_BUFFER]; -			json_row = json_object_new_object(); -			json_object_pim_ifp_add(json_row, ifp); - -			if (pim_ifp->update_source.s_addr != INADDR_ANY) { -				json_object_string_addf( -					json_row, "useSource", "%pI4", -					&pim_ifp->update_source); -			} -			if (pim_ifp->sec_addr_list) { -				json_object *sec_list = NULL; - -				sec_list = json_object_new_array(); -				for (ALL_LIST_ELEMENTS_RO( -					     pim_ifp->sec_addr_list, sec_node, -					     sec_addr)) { -					json_object_array_add( -						sec_list, -						json_object_new_string( -							prefix2str( -								&sec_addr->addr, -								pbuf, -								sizeof(pbuf)))); -				} -				json_object_object_add(json_row, -						       "secondaryAddressList", -						       sec_list); -			} - -			// PIM neighbors -			if (pim_ifp->pim_neighbor_list->count) { -				json_pim_neighbors = json_object_new_object(); - -				for (ALL_LIST_ELEMENTS_RO( -					     pim_ifp->pim_neighbor_list, -					     neighnode, neigh)) { -					json_pim_neighbor = -						json_object_new_object(); -					pim_inet4_dump("<src?>", -						       neigh->source_addr, -						       neigh_src_str, -						       sizeof(neigh_src_str)); -					pim_time_uptime(uptime, sizeof(uptime), -							now - neigh->creation); -					pim_time_timer_to_hhmmss( -						expire, sizeof(expire), -						neigh->t_expire_timer); - -					json_object_string_add( -						json_pim_neighbor, "address", -						neigh_src_str); -					json_object_string_add( -						json_pim_neighbor, "upTime", -						uptime); -					json_object_string_add( -						json_pim_neighbor, "holdtime", -						expire); - -					json_object_object_add( -						json_pim_neighbors, -						neigh_src_str, -						json_pim_neighbor); -				} - -				json_object_object_add(json_row, "neighbors", -						       json_pim_neighbors); -			} - -			json_object_string_add(json_row, "drAddress", dr_str); -			json_object_int_add(json_row, "drPriority", -					    pim_ifp->pim_dr_priority); -			json_object_string_add(json_row, "drUptime", dr_uptime); -			json_object_int_add(json_row, "drElections", -					    pim_ifp->pim_dr_election_count); -			json_object_int_add(json_row, "drChanges", -					    pim_ifp->pim_dr_election_changes); - -			// FHR -			frr_each (rb_pim_upstream, &pim->upstream_head, up) { -				if (ifp != up->rpf.source_nexthop.interface) -					continue; - -				if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_FHR)) -					continue; - -				if (!json_fhr_sources) -					json_fhr_sources = -						json_object_new_object(); - -				snprintfrr(grp_str, sizeof(grp_str), "%pPAs", -					   &up->sg.grp); -				snprintfrr(src_str, sizeof(src_str), "%pPAs", -					   &up->sg.src); -				pim_time_uptime(uptime, sizeof(uptime), -						now - up->state_transition); - -				/* -				 * Does this group live in json_fhr_sources? -				 * If not create it. 
-				 */ -				json_object_object_get_ex(json_fhr_sources, -							  grp_str, &json_group); - -				if (!json_group) { -					json_group = json_object_new_object(); -					json_object_object_add(json_fhr_sources, -							       grp_str, -							       json_group); -				} - -				json_group_source = json_object_new_object(); -				json_object_string_add(json_group_source, -						       "source", src_str); -				json_object_string_add(json_group_source, -						       "group", grp_str); -				json_object_string_add(json_group_source, -						       "upTime", uptime); -				json_object_object_add(json_group, src_str, -						       json_group_source); -			} - -			if (json_fhr_sources) { -				json_object_object_add(json_row, -						       "firstHopRouter", -						       json_fhr_sources); -			} - -			json_object_int_add(json_row, "helloPeriod", -					    pim_ifp->pim_hello_period); -			json_object_int_add(json_row, "holdTime", -					    PIM_IF_DEFAULT_HOLDTIME(pim_ifp)); -			json_object_string_add(json_row, "helloTimer", -					       hello_timer); -			json_object_string_add(json_row, "helloStatStart", -					       stat_uptime); -			json_object_int_add(json_row, "helloReceived", -					    pim_ifp->pim_ifstat_hello_recv); -			json_object_int_add(json_row, "helloReceivedFailed", -					    pim_ifp->pim_ifstat_hello_recvfail); -			json_object_int_add(json_row, "helloSend", -					    pim_ifp->pim_ifstat_hello_sent); -			json_object_int_add(json_row, "hellosendFailed", -					    pim_ifp->pim_ifstat_hello_sendfail); -			json_object_int_add(json_row, "helloGenerationId", -					    pim_ifp->pim_generation_id); - -			json_object_int_add( -				json_row, "effectivePropagationDelay", -				pim_if_effective_propagation_delay_msec(ifp)); -			json_object_int_add( -				json_row, "effectiveOverrideInterval", -				pim_if_effective_override_interval_msec(ifp)); -			json_object_int_add( -				json_row, "joinPruneOverrideInterval", -				pim_if_jp_override_interval_msec(ifp)); - -			json_object_int_add( -				json_row, "propagationDelay", -				pim_ifp->pim_propagation_delay_msec); -			json_object_int_add( -				json_row, "propagationDelayHighest", -				pim_ifp->pim_neighbors_highest_propagation_delay_msec); -			json_object_int_add( -				json_row, "overrideInterval", -				pim_ifp->pim_override_interval_msec); -			json_object_int_add( -				json_row, "overrideIntervalHighest", -				pim_ifp->pim_neighbors_highest_override_interval_msec); -			if (pim_ifp->bsm_enable) -				json_object_boolean_true_add(json_row, -							     "bsmEnabled"); -			if (pim_ifp->ucast_bsm_accept) -				json_object_boolean_true_add(json_row, -							     "ucastBsmEnabled"); -			json_object_object_add(json, ifp->name, json_row); - -		} else { -			vty_out(vty, "Interface  : %s\n", ifp->name); -			vty_out(vty, "State      : %s\n", -				if_is_up(ifp) ? 
"up" : "down"); -			if (pim_ifp->update_source.s_addr != INADDR_ANY) { -				vty_out(vty, "Use Source : %pI4\n", -					&pim_ifp->update_source); -			} -			if (pim_ifp->sec_addr_list) { -				vty_out(vty, "Address    : %pI4 (primary)\n", -					&ifaddr); -				for (ALL_LIST_ELEMENTS_RO( -					     pim_ifp->sec_addr_list, sec_node, -					     sec_addr)) -					vty_out(vty, "             %pFX\n", -						&sec_addr->addr); -			} else { -				vty_out(vty, "Address    : %pI4\n", -					&ifaddr); -			} -			vty_out(vty, "\n"); - -			// PIM neighbors -			print_header = 1; - -			for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, -						  neighnode, neigh)) { - -				if (print_header) { -					vty_out(vty, "PIM Neighbors\n"); -					vty_out(vty, "-------------\n"); -					print_header = 0; -				} - -				pim_inet4_dump("<src?>", neigh->source_addr, -					       neigh_src_str, -					       sizeof(neigh_src_str)); -				pim_time_uptime(uptime, sizeof(uptime), -						now - neigh->creation); -				pim_time_timer_to_hhmmss(expire, sizeof(expire), -							 neigh->t_expire_timer); -				vty_out(vty, -					"%-15s : up for %s, holdtime expires in %s\n", -					neigh_src_str, uptime, expire); -			} - -			if (!print_header) { -				vty_out(vty, "\n"); -				vty_out(vty, "\n"); -			} - -			vty_out(vty, "Designated Router\n"); -			vty_out(vty, "-----------------\n"); -			vty_out(vty, "Address   : %s\n", dr_str); -			vty_out(vty, "Priority  : %u(%d)\n", -				pim_ifp->pim_dr_priority, -				pim_ifp->pim_dr_num_nondrpri_neighbors); -			vty_out(vty, "Uptime    : %s\n", dr_uptime); -			vty_out(vty, "Elections : %d\n", -				pim_ifp->pim_dr_election_count); -			vty_out(vty, "Changes   : %d\n", -				pim_ifp->pim_dr_election_changes); -			vty_out(vty, "\n"); -			vty_out(vty, "\n"); - -			// FHR -			print_header = 1; -			frr_each (rb_pim_upstream, &pim->upstream_head, up) { -				if (!up->rpf.source_nexthop.interface) -					continue; - -				if (strcmp(ifp->name, -					   up->rpf.source_nexthop -					   .interface->name) -				    != 0) -					continue; - -				if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_FHR)) -					continue; - -				if (print_header) { -					vty_out(vty, -						"FHR - First Hop Router\n"); -					vty_out(vty, -						"----------------------\n"); -					print_header = 0; -				} - -				pim_time_uptime(uptime, sizeof(uptime), -						now - up->state_transition); -				vty_out(vty, -					"%pPAs : %pPAs is a source, uptime is %s\n", -					&up->sg.grp, &up->sg.src, uptime); -			} - -			if (!print_header) { -				vty_out(vty, "\n"); -				vty_out(vty, "\n"); -			} - -			vty_out(vty, "Hellos\n"); -			vty_out(vty, "------\n"); -			vty_out(vty, "Period         : %d\n", -				pim_ifp->pim_hello_period); -			vty_out(vty, "HoldTime       : %d\n", -				PIM_IF_DEFAULT_HOLDTIME(pim_ifp)); -			vty_out(vty, "Timer          : %s\n", hello_timer); -			vty_out(vty, "StatStart      : %s\n", stat_uptime); -			vty_out(vty, "Receive        : %d\n", -				pim_ifp->pim_ifstat_hello_recv); -			vty_out(vty, "Receive Failed : %d\n", -				pim_ifp->pim_ifstat_hello_recvfail); -			vty_out(vty, "Send           : %d\n", -				pim_ifp->pim_ifstat_hello_sent); -			vty_out(vty, "Send Failed    : %d\n", -				pim_ifp->pim_ifstat_hello_sendfail); -			vty_out(vty, "Generation ID  : %08x\n", -				pim_ifp->pim_generation_id); -			vty_out(vty, "\n"); -			vty_out(vty, "\n"); - -			pim_print_ifp_flags(vty, ifp); - -			vty_out(vty, "Join Prune Interval\n"); -			vty_out(vty, "-------------------\n"); -			vty_out(vty, "LAN Delay                    : %s\n", -				
pim_if_lan_delay_enabled(ifp) ? "yes" : "no"); -			vty_out(vty, "Effective Propagation Delay  : %d msec\n", -				pim_if_effective_propagation_delay_msec(ifp)); -			vty_out(vty, "Effective Override Interval  : %d msec\n", -				pim_if_effective_override_interval_msec(ifp)); -			vty_out(vty, "Join Prune Override Interval : %d msec\n", -				pim_if_jp_override_interval_msec(ifp)); -			vty_out(vty, "\n"); -			vty_out(vty, "\n"); - -			vty_out(vty, "LAN Prune Delay\n"); -			vty_out(vty, "---------------\n"); -			vty_out(vty, "Propagation Delay           : %d msec\n", -				pim_ifp->pim_propagation_delay_msec); -			vty_out(vty, "Propagation Delay (Highest) : %d msec\n", -				pim_ifp->pim_neighbors_highest_propagation_delay_msec); -			vty_out(vty, "Override Interval           : %d msec\n", -				pim_ifp->pim_override_interval_msec); -			vty_out(vty, "Override Interval (Highest) : %d msec\n", -				pim_ifp->pim_neighbors_highest_override_interval_msec); -			vty_out(vty, "\n"); -			vty_out(vty, "\n"); - -			vty_out(vty, "BSM Status\n"); -			vty_out(vty, "----------\n"); -			vty_out(vty, "Bsm Enabled          : %s\n", -				pim_ifp->bsm_enable ? "yes" : "no"); -			vty_out(vty, "Unicast Bsm Enabled  : %s\n", -				pim_ifp->ucast_bsm_accept ? "yes" : "no"); -			vty_out(vty, "\n"); -			vty_out(vty, "\n"); -		} -	} - -	if (uj) -		vty_json(vty, json); -	else if (!found_ifname) -		vty_out(vty, "%% No such interface\n"); -} -  static void igmp_show_statistics(struct pim_instance *pim, struct vty *vty,  				 const char *ifname, bool uj)  { @@ -1424,101 +837,6 @@ static void igmp_show_statistics(struct pim_instance *pim, struct vty *vty,  	}  } -static void pim_show_interfaces(struct pim_instance *pim, struct vty *vty, -				bool mlag, bool uj) -{ -	struct interface *ifp; -	struct pim_interface *pim_ifp; -	struct pim_upstream *up; -	int fhr = 0; -	int pim_nbrs = 0; -	int pim_ifchannels = 0; -	json_object *json = NULL; -	json_object *json_row = NULL; -	json_object *json_tmp; - -	json = json_object_new_object(); - -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		pim_ifp = ifp->info; - -		if (!pim_ifp) -			continue; - -		if (mlag == true && pim_ifp->activeactive == false) -			continue; - -		pim_nbrs = pim_ifp->pim_neighbor_list->count; -		pim_ifchannels = pim_if_ifchannel_count(pim_ifp); -		fhr = 0; - -		frr_each (rb_pim_upstream, &pim->upstream_head, up) -			if (ifp == up->rpf.source_nexthop.interface) -				if (up->flags & PIM_UPSTREAM_FLAG_MASK_FHR) -					fhr++; - -		json_row = json_object_new_object(); -		json_object_pim_ifp_add(json_row, ifp); -		json_object_int_add(json_row, "pimNeighbors", pim_nbrs); -		json_object_int_add(json_row, "pimIfChannels", pim_ifchannels); -		json_object_int_add(json_row, "firstHopRouterCount", fhr); -		json_object_string_addf(json_row, "pimDesignatedRouter", "%pI4", -					&pim_ifp->pim_dr_addr); - -		if (pim_ifp->pim_dr_addr.s_addr -		    == pim_ifp->primary_address.s_addr) -			json_object_boolean_true_add( -				json_row, "pimDesignatedRouterLocal"); - -		json_object_object_add(json, ifp->name, json_row); -	} - -	if (uj) { -		vty_out(vty, "%s\n", json_object_to_json_string_ext( -				json, JSON_C_TO_STRING_PRETTY)); -	} else { -		vty_out(vty, -			"Interface         State          Address  PIM Nbrs           PIM DR  FHR IfChannels\n"); - -		json_object_object_foreach(json, key, val) -		{ -			vty_out(vty, "%-16s  ", key); - -			json_object_object_get_ex(val, "state", &json_tmp); -			vty_out(vty, "%5s  ", json_object_get_string(json_tmp)); - -			json_object_object_get_ex(val, "address", 
&json_tmp); -			vty_out(vty, "%15s  ", -				json_object_get_string(json_tmp)); - -			json_object_object_get_ex(val, "pimNeighbors", -						  &json_tmp); -			vty_out(vty, "%8d  ", json_object_get_int(json_tmp)); - -			if (json_object_object_get_ex( -				    val, "pimDesignatedRouterLocal", -				    &json_tmp)) { -				vty_out(vty, "%15s  ", "local"); -			} else { -				json_object_object_get_ex( -					val, "pimDesignatedRouter", &json_tmp); -				vty_out(vty, "%15s  ", -					json_object_get_string(json_tmp)); -			} - -			json_object_object_get_ex(val, "firstHopRouter", -						  &json_tmp); -			vty_out(vty, "%3d  ", json_object_get_int(json_tmp)); - -			json_object_object_get_ex(val, "pimIfChannels", -						  &json_tmp); -			vty_out(vty, "%9d\n", json_object_get_int(json_tmp)); -		} -	} - -	json_object_free(json); -} -  static void pim_show_interface_traffic(struct pim_instance *pim,  				       struct vty *vty, bool uj)  { @@ -1702,1346 +1020,6 @@ static void pim_show_interface_traffic_single(struct pim_instance *pim,  		vty_out(vty, "%% No such interface\n");  } -static void pim_show_join_helper(struct vty *vty, struct pim_interface *pim_ifp, -				 struct pim_ifchannel *ch, json_object *json, -				 time_t now, bool uj) -{ -	json_object *json_iface = NULL; -	json_object *json_row = NULL; -	json_object *json_grp = NULL; -	struct in_addr ifaddr; -	char uptime[10]; -	char expire[10]; -	char prune[10]; -	char buf[PREFIX_STRLEN]; - -	ifaddr = pim_ifp->primary_address; - -	pim_time_uptime_begin(uptime, sizeof(uptime), now, ch->ifjoin_creation); -	pim_time_timer_to_mmss(expire, sizeof(expire), -			       ch->t_ifjoin_expiry_timer); -	pim_time_timer_to_mmss(prune, sizeof(prune), -			       ch->t_ifjoin_prune_pending_timer); - -	if (uj) { -		char ch_grp_str[PIM_ADDRSTRLEN]; -		char ch_src_str[PIM_ADDRSTRLEN]; - -		snprintfrr(ch_grp_str, sizeof(ch_grp_str), "%pPAs", -			   &ch->sg.grp); -		snprintfrr(ch_src_str, sizeof(ch_src_str), "%pPAs", -			   &ch->sg.src); - -		json_object_object_get_ex(json, ch->interface->name, -					  &json_iface); - -		if (!json_iface) { -			json_iface = json_object_new_object(); -			json_object_pim_ifp_add(json_iface, ch->interface); -			json_object_object_add(json, ch->interface->name, -					       json_iface); -		} - -		json_row = json_object_new_object(); -		json_object_string_add(json_row, "source", ch_src_str); -		json_object_string_add(json_row, "group", ch_grp_str); -		json_object_string_add(json_row, "upTime", uptime); -		json_object_string_add(json_row, "expire", expire); -		json_object_string_add(json_row, "prune", prune); -		json_object_string_add( -			json_row, "channelJoinName", -			pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags)); -		if (PIM_IF_FLAG_TEST_S_G_RPT(ch->flags)) { -#if CONFDATE > 20230131 -CPP_NOTICE("Remove JSON object commands with keys starting with capital") -#endif -			json_object_int_add(json_row, "SGRpt", 1); -			json_object_int_add(json_row, "sgRpt", 1); -		} -		if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags)) -			json_object_int_add(json_row, "protocolPim", 1); -		if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags)) -			json_object_int_add(json_row, "protocolIgmp", 1); -		json_object_object_get_ex(json_iface, ch_grp_str, &json_grp); -		if (!json_grp) { -			json_grp = json_object_new_object(); -			json_object_object_add(json_grp, ch_src_str, json_row); -			json_object_object_add(json_iface, ch_grp_str, -					       json_grp); -		} else -			json_object_object_add(json_grp, ch_src_str, json_row); -	} else { -		vty_out(vty, "%-16s %-15s %-15pPAs %-15pPAs 
%-10s %8s %-6s %5s\n", -			ch->interface->name, -			inet_ntop(AF_INET, &ifaddr, buf, sizeof(buf)), -			&ch->sg.src, &ch->sg.grp, -			pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags), -			uptime, expire, prune); -	} -} - -static void pim_show_join(struct pim_instance *pim, struct vty *vty, -			  pim_sgaddr *sg, bool uj) -{ -	struct pim_interface *pim_ifp; -	struct pim_ifchannel *ch; -	struct interface *ifp; -	time_t now; -	json_object *json = NULL; - -	now = pim_time_monotonic_sec(); - -	if (uj) -		json = json_object_new_object(); -	else -		vty_out(vty, -			"Interface        Address         Source          Group           State      Uptime   Expire Prune\n"); - -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		pim_ifp = ifp->info; -		if (!pim_ifp) -			continue; - -		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) { -			if (!pim_sgaddr_match(ch->sg, *sg)) -				continue; -			pim_show_join_helper(vty, pim_ifp, ch, json, now, uj); -		} /* scan interface channels */ -	} - -	if (uj) -		vty_json(vty, json); -} - -static void pim_show_neighbors_single(struct pim_instance *pim, struct vty *vty, -				      const char *neighbor, bool uj) -{ -	struct listnode *neighnode; -	struct interface *ifp; -	struct pim_interface *pim_ifp; -	struct pim_neighbor *neigh; -	time_t now; -	int found_neighbor = 0; -	int option_address_list; -	int option_dr_priority; -	int option_generation_id; -	int option_holdtime; -	int option_lan_prune_delay; -	int option_t_bit; -	char uptime[10]; -	char expire[10]; -	char neigh_src_str[INET_ADDRSTRLEN]; - -	json_object *json = NULL; -	json_object *json_ifp = NULL; -	json_object *json_row = NULL; - -	now = pim_time_monotonic_sec(); - -	if (uj) -		json = json_object_new_object(); - -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		pim_ifp = ifp->info; - -		if (!pim_ifp) -			continue; - -		if (pim_ifp->pim_sock_fd < 0) -			continue; - -		for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode, -					  neigh)) { -			pim_inet4_dump("<src?>", neigh->source_addr, -				       neigh_src_str, sizeof(neigh_src_str)); - -			/* -			 * The user can specify either the interface name or the -			 * PIM neighbor IP. -			 * If this pim_ifp matches neither then skip. -			 */ -			if (strcmp(neighbor, "detail") -			    && strcmp(neighbor, ifp->name) -			    && strcmp(neighbor, neigh_src_str)) -				continue; - -			found_neighbor = 1; -			pim_time_uptime(uptime, sizeof(uptime), -					now - neigh->creation); -			pim_time_timer_to_hhmmss(expire, sizeof(expire), -						 neigh->t_expire_timer); - -			option_address_list = 0; -			option_dr_priority = 0; -			option_generation_id = 0; -			option_holdtime = 0; -			option_lan_prune_delay = 0; -			option_t_bit = 0; - -			if (PIM_OPTION_IS_SET(neigh->hello_options, -					      PIM_OPTION_MASK_ADDRESS_LIST)) -				option_address_list = 1; - -			if (PIM_OPTION_IS_SET(neigh->hello_options, -					      PIM_OPTION_MASK_DR_PRIORITY)) -				option_dr_priority = 1; - -			if (PIM_OPTION_IS_SET(neigh->hello_options, -					      PIM_OPTION_MASK_GENERATION_ID)) -				option_generation_id = 1; - -			if (PIM_OPTION_IS_SET(neigh->hello_options, -					      PIM_OPTION_MASK_HOLDTIME)) -				option_holdtime = 1; - -			if (PIM_OPTION_IS_SET(neigh->hello_options, -					      PIM_OPTION_MASK_LAN_PRUNE_DELAY)) -				option_lan_prune_delay = 1; - -			if (PIM_OPTION_IS_SET( -				    neigh->hello_options, -				    PIM_OPTION_MASK_CAN_DISABLE_JOIN_SUPPRESSION)) -				option_t_bit = 1; - -			if (uj) { - -				/* Does this ifp live in json?  If not create -				 * it. 
*/ -				json_object_object_get_ex(json, ifp->name, -							  &json_ifp); - -				if (!json_ifp) { -					json_ifp = json_object_new_object(); -					json_object_pim_ifp_add(json_ifp, ifp); -					json_object_object_add(json, ifp->name, -							       json_ifp); -				} - -				json_row = json_object_new_object(); -				json_object_string_add(json_row, "interface", -						       ifp->name); -				json_object_string_add(json_row, "address", -						       neigh_src_str); -				json_object_string_add(json_row, "upTime", -						       uptime); -				json_object_string_add(json_row, "holdtime", -						       expire); -				json_object_int_add(json_row, "drPriority", -						    neigh->dr_priority); -				json_object_int_add(json_row, "generationId", -						    neigh->generation_id); - -				if (option_address_list) -					json_object_boolean_true_add( -						json_row, -						"helloOptionAddressList"); - -				if (option_dr_priority) -					json_object_boolean_true_add( -						json_row, -						"helloOptionDrPriority"); - -				if (option_generation_id) -					json_object_boolean_true_add( -						json_row, -						"helloOptionGenerationId"); - -				if (option_holdtime) -					json_object_boolean_true_add( -						json_row, -						"helloOptionHoldtime"); - -				if (option_lan_prune_delay) -					json_object_boolean_true_add( -						json_row, -						"helloOptionLanPruneDelay"); - -				if (option_t_bit) -					json_object_boolean_true_add( -						json_row, "helloOptionTBit"); - -				json_object_object_add(json_ifp, neigh_src_str, -						       json_row); - -			} else { -				vty_out(vty, "Interface : %s\n", ifp->name); -				vty_out(vty, "Neighbor  : %s\n", neigh_src_str); -				vty_out(vty, -					"    Uptime                         : %s\n", -					uptime); -				vty_out(vty, -					"    Holdtime                       : %s\n", -					expire); -				vty_out(vty, -					"    DR Priority                    : %d\n", -					neigh->dr_priority); -				vty_out(vty, -					"    Generation ID                  : %08x\n", -					neigh->generation_id); -				vty_out(vty, -					"    Override Interval (msec)       : %d\n", -					neigh->override_interval_msec); -				vty_out(vty, -					"    Propagation Delay (msec)       : %d\n", -					neigh->propagation_delay_msec); -				vty_out(vty, -					"    Hello Option - Address List    : %s\n", -					option_address_list ? "yes" : "no"); -				vty_out(vty, -					"    Hello Option - DR Priority     : %s\n", -					option_dr_priority ? "yes" : "no"); -				vty_out(vty, -					"    Hello Option - Generation ID   : %s\n", -					option_generation_id ? "yes" : "no"); -				vty_out(vty, -					"    Hello Option - Holdtime        : %s\n", -					option_holdtime ? "yes" : "no"); -				vty_out(vty, -					"    Hello Option - LAN Prune Delay : %s\n", -					option_lan_prune_delay ? "yes" : "no"); -				vty_out(vty, -					"    Hello Option - T-bit           : %s\n", -					option_t_bit ? 
"yes" : "no"); -				bfd_sess_show(vty, json_ifp, -					      neigh->bfd_session); -				vty_out(vty, "\n"); -			} -		} -	} - -	if (uj) -		vty_json(vty, json); -	else if (!found_neighbor) -		vty_out(vty, "%% No such interface or neighbor\n"); -} - -static void pim_show_state(struct pim_instance *pim, struct vty *vty, -			   const char *src_or_group, const char *group, bool uj) -{ -	struct channel_oil *c_oil; -	json_object *json = NULL; -	json_object *json_group = NULL; -	json_object *json_ifp_in = NULL; -	json_object *json_ifp_out = NULL; -	json_object *json_source = NULL; -	time_t now; -	int first_oif; -	now = pim_time_monotonic_sec(); - -	if (uj) { -		json = json_object_new_object(); -	} else { -		vty_out(vty, -			"Codes: J -> Pim Join, I -> IGMP Report, S -> Source, * -> Inherited from (*,G), V -> VxLAN, M -> Muted"); -		vty_out(vty, -			"\nActive Source           Group            RPT  IIF               OIL\n"); -	} - -	frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) { -		char grp_str[INET_ADDRSTRLEN]; -		char src_str[INET_ADDRSTRLEN]; -		char in_ifname[INTERFACE_NAMSIZ + 1]; -		char out_ifname[INTERFACE_NAMSIZ + 1]; -		int oif_vif_index; -		struct interface *ifp_in; -		bool isRpt; -		first_oif = 1; - -		if ((c_oil->up && -		     PIM_UPSTREAM_FLAG_TEST_USE_RPT(c_oil->up->flags)) || -		    c_oil->oil.mfcc_origin.s_addr == INADDR_ANY) -			isRpt = true; -		else -			isRpt = false; - -		pim_inet4_dump("<group?>", c_oil->oil.mfcc_mcastgrp, grp_str, -			       sizeof(grp_str)); -		pim_inet4_dump("<source?>", c_oil->oil.mfcc_origin, src_str, -			       sizeof(src_str)); -		ifp_in = pim_if_find_by_vif_index(pim, c_oil->oil.mfcc_parent); - -		if (ifp_in) -			strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname)); -		else -			strlcpy(in_ifname, "<iif?>", sizeof(in_ifname)); - -		if (src_or_group) { -			if (strcmp(src_or_group, src_str) -			    && strcmp(src_or_group, grp_str)) -				continue; - -			if (group && strcmp(group, grp_str)) -				continue; -		} - -		if (uj) { - -			/* Find the group, create it if it doesn't exist */ -			json_object_object_get_ex(json, grp_str, &json_group); - -			if (!json_group) { -				json_group = json_object_new_object(); -				json_object_object_add(json, grp_str, -						       json_group); -			} - -			/* Find the source nested under the group, create it if -			 * it doesn't exist */ -			json_object_object_get_ex(json_group, src_str, -						  &json_source); - -			if (!json_source) { -				json_source = json_object_new_object(); -				json_object_object_add(json_group, src_str, -						       json_source); -			} - -			/* Find the inbound interface nested under the source, -			 * create it if it doesn't exist */ -			json_object_object_get_ex(json_source, in_ifname, -						  &json_ifp_in); - -			if (!json_ifp_in) { -				json_ifp_in = json_object_new_object(); -				json_object_object_add(json_source, in_ifname, -						       json_ifp_in); -				json_object_int_add(json_source, "Installed", -						    c_oil->installed); -				json_object_int_add(json_source, "installed", -						    c_oil->installed); -				if (isRpt) -					json_object_boolean_true_add( -						json_source, "isRpt"); -				else -					json_object_boolean_false_add( -						json_source, "isRpt"); -				json_object_int_add(json_source, "RefCount", -						    c_oil->oil_ref_count); -				json_object_int_add(json_source, "refCount", -						    c_oil->oil_ref_count); -				json_object_int_add(json_source, "OilListSize", -						    c_oil->oil_size); -				json_object_int_add(json_source, "oilListSize", -						    
c_oil->oil_size); -				json_object_int_add( -					json_source, "OilRescan", -					c_oil->oil_inherited_rescan); -				json_object_int_add( -					json_source, "oilRescan", -					c_oil->oil_inherited_rescan); -				json_object_int_add(json_source, "LastUsed", -						    c_oil->cc.lastused); -				json_object_int_add(json_source, "lastUsed", -						    c_oil->cc.lastused); -				json_object_int_add(json_source, "PacketCount", -						    c_oil->cc.pktcnt); -				json_object_int_add(json_source, "packetCount", -						    c_oil->cc.pktcnt); -				json_object_int_add(json_source, "ByteCount", -						    c_oil->cc.bytecnt); -				json_object_int_add(json_source, "byteCount", -						    c_oil->cc.bytecnt); -				json_object_int_add(json_source, -						    "WrongInterface", -						    c_oil->cc.wrong_if); -				json_object_int_add(json_source, -						    "wrongInterface", -						    c_oil->cc.wrong_if); -			} -		} else { -			vty_out(vty, "%-6d %-15s  %-15s  %-3s  %-16s  ", -				c_oil->installed, src_str, grp_str, -				isRpt ? "y" : "n", in_ifname); -		} - -		for (oif_vif_index = 0; oif_vif_index < MAXVIFS; -		     ++oif_vif_index) { -			struct interface *ifp_out; -			char oif_uptime[10]; -			int ttl; - -			ttl = c_oil->oil.mfcc_ttls[oif_vif_index]; -			if (ttl < 1) -				continue; - -			ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index); -			pim_time_uptime( -				oif_uptime, sizeof(oif_uptime), -				now - c_oil->oif_creation[oif_vif_index]); - -			if (ifp_out) -				strlcpy(out_ifname, ifp_out->name, sizeof(out_ifname)); -			else -				strlcpy(out_ifname, "<oif?>", sizeof(out_ifname)); - -			if (uj) { -				json_ifp_out = json_object_new_object(); -				json_object_string_add(json_ifp_out, "source", -						       src_str); -				json_object_string_add(json_ifp_out, "group", -						       grp_str); -				json_object_string_add(json_ifp_out, -						       "inboundInterface", -						       in_ifname); -				json_object_string_add(json_ifp_out, -						       "outboundInterface", -						       out_ifname); -				json_object_int_add(json_ifp_out, "installed", -						    c_oil->installed); - -				json_object_object_add(json_ifp_in, out_ifname, -						       json_ifp_out); -			} else { -				if (first_oif) { -					first_oif = 0; -					vty_out(vty, "%s(%c%c%c%c%c)", -						out_ifname, -						(c_oil->oif_flags[oif_vif_index] -						 & PIM_OIF_FLAG_PROTO_IGMP) -						? 'I' -						: ' ', -						(c_oil->oif_flags[oif_vif_index] -						 & PIM_OIF_FLAG_PROTO_PIM) -						? 'J' -						: ' ', -						(c_oil->oif_flags[oif_vif_index] -						 & PIM_OIF_FLAG_PROTO_VXLAN) -						? 'V' -						: ' ', -						(c_oil->oif_flags[oif_vif_index] -						 & PIM_OIF_FLAG_PROTO_STAR) -						? '*' -						: ' ', -						(c_oil->oif_flags[oif_vif_index] -						 & PIM_OIF_FLAG_MUTE) -						? 'M' -						: ' '); -				} else -					vty_out(vty, ", %s(%c%c%c%c%c)", -						out_ifname, -						(c_oil->oif_flags[oif_vif_index] -						 & PIM_OIF_FLAG_PROTO_IGMP) -						? 'I' -						: ' ', -						(c_oil->oif_flags[oif_vif_index] -						 & PIM_OIF_FLAG_PROTO_PIM) -						? 'J' -						: ' ', -						(c_oil->oif_flags[oif_vif_index] -						 & PIM_OIF_FLAG_PROTO_VXLAN) -						? 'V' -						: ' ', -						(c_oil->oif_flags[oif_vif_index] -						 & PIM_OIF_FLAG_PROTO_STAR) -						? '*' -						: ' ', -						(c_oil->oif_flags[oif_vif_index] -						 & PIM_OIF_FLAG_MUTE) -						? 
'M' -						: ' '); -			} -		} - -		if (!uj) -			vty_out(vty, "\n"); -	} - - -	if (uj) -		vty_json(vty, json); -	else -		vty_out(vty, "\n"); -} - -static void pim_show_neighbors(struct pim_instance *pim, struct vty *vty, -			       bool uj) -{ -	struct listnode *neighnode; -	struct interface *ifp; -	struct pim_interface *pim_ifp; -	struct pim_neighbor *neigh; -	time_t now; -	char uptime[10]; -	char expire[10]; -	char neigh_src_str[INET_ADDRSTRLEN]; -	json_object *json = NULL; -	json_object *json_ifp_rows = NULL; -	json_object *json_row = NULL; - -	now = pim_time_monotonic_sec(); - -	if (uj) { -		json = json_object_new_object(); -	} else { -		vty_out(vty, -			"Interface                Neighbor    Uptime  Holdtime  DR Pri\n"); -	} - -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		pim_ifp = ifp->info; - -		if (!pim_ifp) -			continue; - -		if (pim_ifp->pim_sock_fd < 0) -			continue; - -		if (uj) -			json_ifp_rows = json_object_new_object(); - -		for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode, -					  neigh)) { -			pim_inet4_dump("<src?>", neigh->source_addr, -				       neigh_src_str, sizeof(neigh_src_str)); -			pim_time_uptime(uptime, sizeof(uptime), -					now - neigh->creation); -			pim_time_timer_to_hhmmss(expire, sizeof(expire), -						 neigh->t_expire_timer); - -			if (uj) { -				json_row = json_object_new_object(); -				json_object_string_add(json_row, "interface", -						       ifp->name); -				json_object_string_add(json_row, "neighbor", -						       neigh_src_str); -				json_object_string_add(json_row, "upTime", -						       uptime); -				json_object_string_add(json_row, "holdTime", -						       expire); -				json_object_int_add(json_row, "holdTimeMax", -						    neigh->holdtime); -				json_object_int_add(json_row, "drPriority", -						    neigh->dr_priority); -				json_object_object_add(json_ifp_rows, -						       neigh_src_str, json_row); - -			} else { -				vty_out(vty, "%-16s  %15s  %8s  %8s  %6d\n", -					ifp->name, neigh_src_str, uptime, -					expire, neigh->dr_priority); -			} -		} - -		if (uj) { -			json_object_object_add(json, ifp->name, json_ifp_rows); -			json_ifp_rows = NULL; -		} -	} - -	if (uj) -		vty_json(vty, json); -} - -static void pim_show_neighbors_secondary(struct pim_instance *pim, -					 struct vty *vty) -{ -	struct interface *ifp; - -	vty_out(vty, -		"Interface        Address         Neighbor        Secondary      \n"); - -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		struct pim_interface *pim_ifp; -		struct in_addr ifaddr; -		struct listnode *neighnode; -		struct pim_neighbor *neigh; -		char buf[PREFIX_STRLEN]; - -		pim_ifp = ifp->info; - -		if (!pim_ifp) -			continue; - -		if (pim_ifp->pim_sock_fd < 0) -			continue; - -		ifaddr = pim_ifp->primary_address; - -		for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode, -					  neigh)) { -			char neigh_src_str[INET_ADDRSTRLEN]; -			struct listnode *prefix_node; -			struct prefix *p; - -			if (!neigh->prefix_list) -				continue; - -			pim_inet4_dump("<src?>", neigh->source_addr, -				       neigh_src_str, sizeof(neigh_src_str)); - -			for (ALL_LIST_ELEMENTS_RO(neigh->prefix_list, -						  prefix_node, p)) -				vty_out(vty, "%-16s %-15s %-15s %-15pFX\n", -					ifp->name, -					inet_ntop(AF_INET, &ifaddr, -						  buf, sizeof(buf)), -					neigh_src_str, p); -		} -	} -} - -static void json_object_pim_upstream_add(json_object *json, -					 struct pim_upstream *up) -{ -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED) -		json_object_boolean_true_add(json, "drJoinDesired"); - -	if (up->flags & 
PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED_UPDATED) -		json_object_boolean_true_add(json, "drJoinDesiredUpdated"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_FHR) -		json_object_boolean_true_add(json, "firstHopRouter"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_IGMP) -		json_object_boolean_true_add(json, "sourceIgmp"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_PIM) -		json_object_boolean_true_add(json, "sourcePim"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_STREAM) -		json_object_boolean_true_add(json, "sourceStream"); - -	/* XXX: need to print ths flag in the plain text display as well */ -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_MSDP) -		json_object_boolean_true_add(json, "sourceMsdp"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE) -		json_object_boolean_true_add(json, "sendSGRptPrune"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_LHR) -		json_object_boolean_true_add(json, "lastHopRouter"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY) -		json_object_boolean_true_add(json, "disableKATExpiry"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_STATIC_IIF) -		json_object_boolean_true_add(json, "staticIncomingInterface"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL) -		json_object_boolean_true_add(json, -					     "allowIncomingInterfaceinOil"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA) -		json_object_boolean_true_add(json, "noPimRegistrationData"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG) -		json_object_boolean_true_add(json, "forcePimRegistration"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG) -		json_object_boolean_true_add(json, "sourceVxlanOrigination"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM) -		json_object_boolean_true_add(json, "sourceVxlanTermination"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN) -		json_object_boolean_true_add(json, "mlagVxlan"); - -	if (up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF) -		json_object_boolean_true_add(json, -					     "mlagNonDesignatedForwarder"); -} - -static const char * -pim_upstream_state2brief_str(enum pim_upstream_state join_state, -			     char *state_str, size_t state_str_len) -{ -	switch (join_state) { -	case PIM_UPSTREAM_NOTJOINED: -		strlcpy(state_str, "NotJ", state_str_len); -		break; -	case PIM_UPSTREAM_JOINED: -		strlcpy(state_str, "J", state_str_len); -		break; -	default: -		strlcpy(state_str, "Unk", state_str_len); -	} -	return state_str; -} - -static const char *pim_reg_state2brief_str(enum pim_reg_state reg_state, -					   char *state_str, size_t state_str_len) -{ -	switch (reg_state) { -	case PIM_REG_NOINFO: -		strlcpy(state_str, "RegNI", state_str_len); -		break; -	case PIM_REG_JOIN: -		strlcpy(state_str, "RegJ", state_str_len); -		break; -	case PIM_REG_JOIN_PENDING: -	case PIM_REG_PRUNE: -		strlcpy(state_str, "RegP", state_str_len); -		break; -	} -	return state_str; -} - -static void pim_show_upstream(struct pim_instance *pim, struct vty *vty, -			      pim_sgaddr *sg, bool uj) -{ -	struct pim_upstream *up; -	time_t now; -	json_object *json = NULL; -	json_object *json_group = NULL; -	json_object *json_row = NULL; - -	now = pim_time_monotonic_sec(); - -	if (uj) -		json = json_object_new_object(); -	else -		vty_out(vty, -			"Iif             Source          Group           State       Uptime   JoinTimer RSTimer   KATimer   RefCnt\n"); - -	frr_each (rb_pim_upstream, &pim->upstream_head, up) { -		char uptime[10]; -		char join_timer[10]; -		char rs_timer[10]; -		char ka_timer[10]; -		char 
msdp_reg_timer[10]; -		char state_str[PIM_REG_STATE_STR_LEN]; - -		if (!pim_sgaddr_match(up->sg, *sg)) -			continue; - -		pim_time_uptime(uptime, sizeof(uptime), -				now - up->state_transition); -		pim_time_timer_to_hhmmss(join_timer, sizeof(join_timer), -					 up->t_join_timer); - -		/* -		 * If the upstream is not dummy and it has a J/P timer for the -		 * neighbor display that -		 */ -		if (!up->t_join_timer && up->rpf.source_nexthop.interface) { -			struct pim_neighbor *nbr; - -			nbr = pim_neighbor_find_prefix( -				up->rpf.source_nexthop.interface, -				&up->rpf.rpf_addr); -			if (nbr) -				pim_time_timer_to_hhmmss(join_timer, -							 sizeof(join_timer), -							 nbr->jp_timer); -		} - -		pim_time_timer_to_hhmmss(rs_timer, sizeof(rs_timer), -					 up->t_rs_timer); -		pim_time_timer_to_hhmmss(ka_timer, sizeof(ka_timer), -					 up->t_ka_timer); -		pim_time_timer_to_hhmmss(msdp_reg_timer, sizeof(msdp_reg_timer), -					 up->t_msdp_reg_timer); - -		pim_upstream_state2brief_str(up->join_state, state_str, sizeof(state_str)); -		if (up->reg_state != PIM_REG_NOINFO) { -			char tmp_str[PIM_REG_STATE_STR_LEN]; -			char tmp[sizeof(state_str) + 1]; - -			snprintf(tmp, sizeof(tmp), ",%s", -				 pim_reg_state2brief_str(up->reg_state, tmp_str, -							 sizeof(tmp_str))); -			strlcat(state_str, tmp, sizeof(state_str)); -		} - -		if (uj) { -			char grp_str[PIM_ADDRSTRLEN]; -			char src_str[PIM_ADDRSTRLEN]; - -			snprintfrr(grp_str, sizeof(grp_str), "%pPAs", -				   &up->sg.grp); -			snprintfrr(src_str, sizeof(src_str), "%pPAs", -				   &up->sg.src); - -			json_object_object_get_ex(json, grp_str, &json_group); - -			if (!json_group) { -				json_group = json_object_new_object(); -				json_object_object_add(json, grp_str, -						       json_group); -			} - -			json_row = json_object_new_object(); -			json_object_pim_upstream_add(json_row, up); -			json_object_string_add( -				json_row, "inboundInterface", -				up->rpf.source_nexthop.interface -				? up->rpf.source_nexthop.interface->name -				: "Unknown"); - -			/* -			 * The RPF address we use is slightly different -			 * based upon what we are looking up. 
-			 * If we have a S, list that unless -			 * we are the FHR, else we just put -			 * the RP as the rpfAddress -			 */ -			if (up->flags & PIM_UPSTREAM_FLAG_MASK_FHR || -			    pim_addr_is_any(up->sg.src)) { -				char rpf[PREFIX_STRLEN]; -				struct pim_rpf *rpg; - -				rpg = RP(pim, up->sg.grp); -				pim_inet4_dump("<rpf?>", -					       rpg->rpf_addr.u.prefix4, rpf, -					       sizeof(rpf)); -				json_object_string_add(json_row, "rpfAddress", -						       rpf); -			} else { -				json_object_string_add(json_row, "rpfAddress", -						       src_str); -			} - -			json_object_string_add(json_row, "source", src_str); -			json_object_string_add(json_row, "group", grp_str); -			json_object_string_add(json_row, "state", state_str); -			json_object_string_add( -				json_row, "joinState", -				pim_upstream_state2str(up->join_state)); -			json_object_string_add( -				json_row, "regState", -				pim_reg_state2str(up->reg_state, state_str, sizeof(state_str))); -			json_object_string_add(json_row, "upTime", uptime); -			json_object_string_add(json_row, "joinTimer", -					       join_timer); -			json_object_string_add(json_row, "resetTimer", -					       rs_timer); -			json_object_string_add(json_row, "keepaliveTimer", -					       ka_timer); -			json_object_string_add(json_row, "msdpRegTimer", -					       msdp_reg_timer); -			json_object_int_add(json_row, "refCount", -					    up->ref_count); -			json_object_int_add(json_row, "sptBit", up->sptbit); -			json_object_object_add(json_group, src_str, json_row); -		} else { -			vty_out(vty, -				"%-16s%-15pPAs %-15pPAs %-11s %-8s %-9s %-9s %-9s %6d\n", -				up->rpf.source_nexthop.interface -				? up->rpf.source_nexthop.interface->name -				: "Unknown", -				&up->sg.src, &up->sg.grp, state_str, uptime, -				join_timer, rs_timer, ka_timer, up->ref_count); -		} -	} - -	if (uj) -		vty_json(vty, json); -} - -static void pim_show_channel_helper(struct pim_instance *pim, -				    struct vty *vty, -				    struct pim_interface *pim_ifp, -				    struct pim_ifchannel *ch, -				    json_object *json, bool uj) -{ -	struct pim_upstream *up = ch->upstream; -	json_object *json_group = NULL; -	json_object *json_row = NULL; - -	if (uj) { -		char grp_str[PIM_ADDRSTRLEN]; -		char src_str[PIM_ADDRSTRLEN]; - -		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &up->sg.grp); -		snprintfrr(src_str, sizeof(src_str), "%pPAs", &up->sg.src); - -		json_object_object_get_ex(json, grp_str, &json_group); - -		if (!json_group) { -			json_group = json_object_new_object(); -			json_object_object_add(json, grp_str, json_group); -		} - -		json_row = json_object_new_object(); -		json_object_pim_upstream_add(json_row, up); -		json_object_string_add(json_row, "interface", -				       ch->interface->name); -		json_object_string_add(json_row, "source", src_str); -		json_object_string_add(json_row, "group", grp_str); - -		if (pim_macro_ch_lost_assert(ch)) -			json_object_boolean_true_add(json_row, "lostAssert"); - -		if (pim_macro_chisin_joins(ch)) -			json_object_boolean_true_add(json_row, "joins"); - -		if (pim_macro_chisin_pim_include(ch)) -			json_object_boolean_true_add(json_row, "pimInclude"); - -		if (pim_upstream_evaluate_join_desired(pim, up)) -			json_object_boolean_true_add(json_row, -						     "evaluateJoinDesired"); - -		json_object_object_add(json_group, src_str, json_row); - -	} else { -		vty_out(vty, "%-16s %-15pPAs %-15pPAs %-10s %-5s %-10s %-11s %-6s\n", -			ch->interface->name, &up->sg.src, &up->sg.grp, -			pim_macro_ch_lost_assert(ch) ? 
"yes" : "no", -			pim_macro_chisin_joins(ch) ? "yes" : "no", -			pim_macro_chisin_pim_include(ch) ? "yes" : "no", -			PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(up->flags) -			? "yes" -			: "no", -			pim_upstream_evaluate_join_desired(pim, up) ? "yes" -			: "no"); -	} -} - -static void pim_show_channel(struct pim_instance *pim, struct vty *vty, -			     bool uj) -{ -	struct pim_interface *pim_ifp; -	struct pim_ifchannel *ch; -	struct interface *ifp; - -	json_object *json = NULL; - -	if (uj) -		json = json_object_new_object(); -	else -		vty_out(vty, -			"Interface        Source          Group           LostAssert Joins PimInclude JoinDesired EvalJD\n"); - -	/* scan per-interface (S,G) state */ -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		pim_ifp = ifp->info; -		if (!pim_ifp) -			continue; - - -		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) { -			/* scan all interfaces */ -			pim_show_channel_helper(pim, vty, pim_ifp, ch, -						json, uj); -		} -	} - -	if (uj) -		vty_json(vty, json); -} - -static void pim_show_join_desired_helper(struct pim_instance *pim, -					 struct vty *vty, -					 struct pim_upstream *up, -					 json_object *json, bool uj) -{ -	json_object *json_group = NULL; -	json_object *json_row = NULL; - -	if (uj) { -		char grp_str[PIM_ADDRSTRLEN]; -		char src_str[PIM_ADDRSTRLEN]; - -		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &up->sg.grp); -		snprintfrr(src_str, sizeof(src_str), "%pPAs", &up->sg.src); - -		json_object_object_get_ex(json, grp_str, &json_group); - -		if (!json_group) { -			json_group = json_object_new_object(); -			json_object_object_add(json, grp_str, json_group); -		} - -		json_row = json_object_new_object(); -		json_object_pim_upstream_add(json_row, up); -		json_object_string_add(json_row, "source", src_str); -		json_object_string_add(json_row, "group", grp_str); - -		if (pim_upstream_evaluate_join_desired(pim, up)) -			json_object_boolean_true_add(json_row, -						     "evaluateJoinDesired"); - -		json_object_object_add(json_group, src_str, json_row); - -	} else { -		vty_out(vty, "%-15pPAs %-15pPAs %-6s\n", -			&up->sg.src, &up->sg.grp, -			pim_upstream_evaluate_join_desired(pim, up) ? "yes" -			: "no"); -	} -} - -static void pim_show_join_desired(struct pim_instance *pim, struct vty *vty, -				  bool uj) -{ -	struct pim_upstream *up; - -	json_object *json = NULL; - -	if (uj) -		json = json_object_new_object(); -	else -		vty_out(vty, -			"Source          Group           EvalJD\n"); - -	frr_each (rb_pim_upstream, &pim->upstream_head, up) { -		/* scan all interfaces */ -		pim_show_join_desired_helper(pim, vty, up, -					     json, uj); -	} - -	if (uj) -		vty_json(vty, json); -} - -static void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, -				  bool uj) -{ -	struct pim_upstream *up; -	json_object *json = NULL; -	json_object *json_group = NULL; -	json_object *json_row = NULL; - -	if (uj) -		json = json_object_new_object(); -	else -		vty_out(vty, -			"Source          Group           RpfIface         RibNextHop      RpfAddress     \n"); - -	frr_each (rb_pim_upstream, &pim->upstream_head, up) { -		char rpf_nexthop_str[PREFIX_STRLEN]; -		char rpf_addr_str[PREFIX_STRLEN]; -		struct pim_rpf *rpf; -		const char *rpf_ifname; - -		rpf = &up->rpf; - -		pim_addr_dump("<nexthop?>", -			      &rpf->source_nexthop.mrib_nexthop_addr, -			      rpf_nexthop_str, sizeof(rpf_nexthop_str)); -		pim_addr_dump("<rpf?>", &rpf->rpf_addr, rpf_addr_str, -			      sizeof(rpf_addr_str)); - -		rpf_ifname = rpf->source_nexthop.interface ? 
rpf->source_nexthop.interface->name : "<ifname?>"; - -		if (uj) { -			char grp_str[PIM_ADDRSTRLEN]; -			char src_str[PIM_ADDRSTRLEN]; - -			snprintfrr(grp_str, sizeof(grp_str), "%pPAs", -				   &up->sg.grp); -			snprintfrr(src_str, sizeof(src_str), "%pPAs", -				   &up->sg.src); - -			json_object_object_get_ex(json, grp_str, &json_group); - -			if (!json_group) { -				json_group = json_object_new_object(); -				json_object_object_add(json, grp_str, -						       json_group); -			} - -			json_row = json_object_new_object(); -			json_object_pim_upstream_add(json_row, up); -			json_object_string_add(json_row, "source", src_str); -			json_object_string_add(json_row, "group", grp_str); -			json_object_string_add(json_row, "rpfInterface", -					       rpf_ifname); -			json_object_string_add(json_row, "ribNexthop", -					       rpf_nexthop_str); -			json_object_string_add(json_row, "rpfAddress", -					       rpf_addr_str); -			json_object_object_add(json_group, src_str, json_row); -		} else { -			vty_out(vty, "%-15pPAs %-15pPAs %-16s %-15s %-15s\n", -				&up->sg.src, &up->sg.grp, rpf_ifname, -				rpf_nexthop_str, rpf_addr_str); -		} -	} - -	if (uj) -		vty_json(vty, json); -} - -static void show_rpf_refresh_stats(struct vty *vty, struct pim_instance *pim, -				   time_t now, json_object *json) -{ -	char refresh_uptime[10]; - -	pim_time_uptime_begin(refresh_uptime, sizeof(refresh_uptime), now, -			      pim->rpf_cache_refresh_last); - -	if (json) { -		json_object_int_add(json, "rpfCacheRefreshDelayMsecs", -				    router->rpf_cache_refresh_delay_msec); -		json_object_int_add( -			json, "rpfCacheRefreshTimer", -			pim_time_timer_remain_msec(pim->rpf_cache_refresher)); -		json_object_int_add(json, "rpfCacheRefreshRequests", -				    pim->rpf_cache_refresh_requests); -		json_object_int_add(json, "rpfCacheRefreshEvents", -				    pim->rpf_cache_refresh_events); -		json_object_string_add(json, "rpfCacheRefreshLast", -				       refresh_uptime); -		json_object_int_add(json, "nexthopLookups", -				    pim->nexthop_lookups); -		json_object_int_add(json, "nexthopLookupsAvoided", -				    pim->nexthop_lookups_avoided); -	} else { -		vty_out(vty, -			"RPF Cache Refresh Delay:    %ld msecs\n" -			"RPF Cache Refresh Timer:    %ld msecs\n" -			"RPF Cache Refresh Requests: %lld\n" -			"RPF Cache Refresh Events:   %lld\n" -			"RPF Cache Refresh Last:     %s\n" -			"Nexthop Lookups:            %lld\n" -			"Nexthop Lookups Avoided:    %lld\n", -			router->rpf_cache_refresh_delay_msec, -			pim_time_timer_remain_msec(pim->rpf_cache_refresher), -			(long long)pim->rpf_cache_refresh_requests, -			(long long)pim->rpf_cache_refresh_events, -			refresh_uptime, (long long)pim->nexthop_lookups, -			(long long)pim->nexthop_lookups_avoided); -	} -} - -static void show_scan_oil_stats(struct pim_instance *pim, struct vty *vty, -				time_t now) -{ -	char uptime_scan_oil[10]; -	char uptime_mroute_add[10]; -	char uptime_mroute_del[10]; - -	pim_time_uptime_begin(uptime_scan_oil, sizeof(uptime_scan_oil), now, -			      pim->scan_oil_last); -	pim_time_uptime_begin(uptime_mroute_add, sizeof(uptime_mroute_add), now, -			      pim->mroute_add_last); -	pim_time_uptime_begin(uptime_mroute_del, sizeof(uptime_mroute_del), now, -			      pim->mroute_del_last); - -	vty_out(vty, -		"Scan OIL - Last: %s  Events: %lld\n" -		"MFC Add  - Last: %s  Events: %lld\n" -		"MFC Del  - Last: %s  Events: %lld\n", -		uptime_scan_oil, (long long)pim->scan_oil_events, -		uptime_mroute_add, (long long)pim->mroute_add_events, -		uptime_mroute_del, (long 
long)pim->mroute_del_events); -} - -static void pim_show_rpf(struct pim_instance *pim, struct vty *vty, bool uj) -{ -	struct pim_upstream *up; -	time_t now = pim_time_monotonic_sec(); -	json_object *json = NULL; -	json_object *json_group = NULL; -	json_object *json_row = NULL; - -	if (uj) { -		json = json_object_new_object(); -		show_rpf_refresh_stats(vty, pim, now, json); -	} else { -		show_rpf_refresh_stats(vty, pim, now, json); -		vty_out(vty, "\n"); -		vty_out(vty, -			"Source          Group           RpfIface         RpfAddress      RibNextHop      Metric Pref\n"); -	} - -	frr_each (rb_pim_upstream, &pim->upstream_head, up) { -		char rpf_addr_str[PREFIX_STRLEN]; -		char rib_nexthop_str[PREFIX_STRLEN]; -		const char *rpf_ifname; -		struct pim_rpf *rpf = &up->rpf; - -		pim_addr_dump("<rpf?>", &rpf->rpf_addr, rpf_addr_str, -			      sizeof(rpf_addr_str)); -		pim_addr_dump("<nexthop?>", -			      &rpf->source_nexthop.mrib_nexthop_addr, -			      rib_nexthop_str, sizeof(rib_nexthop_str)); - -		rpf_ifname = rpf->source_nexthop.interface ? rpf->source_nexthop.interface->name : "<ifname?>"; - -		if (uj) { -			char grp_str[PIM_ADDRSTRLEN]; -			char src_str[PIM_ADDRSTRLEN]; - -			snprintfrr(grp_str, sizeof(grp_str), "%pPAs", -				   &up->sg.grp); -			snprintfrr(src_str, sizeof(src_str), "%pPAs", -				   &up->sg.src); - -			json_object_object_get_ex(json, grp_str, &json_group); - -			if (!json_group) { -				json_group = json_object_new_object(); -				json_object_object_add(json, grp_str, -						       json_group); -			} - -			json_row = json_object_new_object(); -			json_object_string_add(json_row, "source", src_str); -			json_object_string_add(json_row, "group", grp_str); -			json_object_string_add(json_row, "rpfInterface", -					       rpf_ifname); -			json_object_string_add(json_row, "rpfAddress", -					       rpf_addr_str); -			json_object_string_add(json_row, "ribNexthop", -					       rib_nexthop_str); -			json_object_int_add( -				json_row, "routeMetric", -				rpf->source_nexthop.mrib_route_metric); -			json_object_int_add( -				json_row, "routePreference", -				rpf->source_nexthop.mrib_metric_preference); -			json_object_object_add(json_group, src_str, json_row); - -		} else { -			vty_out(vty, "%-15pPAs %-15pPAs %-16s %-15s %-15s %6d %4d\n", -				&up->sg.src, &up->sg.grp, rpf_ifname, -				rpf_addr_str, rib_nexthop_str, -				rpf->source_nexthop.mrib_route_metric, -				rpf->source_nexthop.mrib_metric_preference); -		} -	} - -	if (uj) -		vty_json(vty, json); -} - -struct pnc_cache_walk_data { -	struct vty *vty; -	struct pim_instance *pim; -}; - -static int pim_print_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg) -{ -	struct pim_nexthop_cache *pnc = bucket->data; -	struct pnc_cache_walk_data *cwd = arg; -	struct vty *vty = cwd->vty; -	struct pim_instance *pim = cwd->pim; -	struct nexthop *nh_node = NULL; -	ifindex_t first_ifindex; -	struct interface *ifp = NULL; -	char buf[PREFIX_STRLEN]; - -	for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) { -		first_ifindex = nh_node->ifindex; -		ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id); - -		vty_out(vty, "%-15s ", inet_ntop(AF_INET, -						 &pnc->rpf.rpf_addr.u.prefix4, -						 buf, sizeof(buf))); -		vty_out(vty, "%-16s ", ifp ? 
ifp->name : "NULL"); -		vty_out(vty, "%pI4 ", &nh_node->gate.ipv4); -		vty_out(vty, "\n"); -	} -	return CMD_SUCCESS; -} - -static void pim_show_nexthop(struct pim_instance *pim, struct vty *vty) -{ -	struct pnc_cache_walk_data cwd; - -	cwd.vty = vty; -	cwd.pim = pim; -	vty_out(vty, "Number of registered addresses: %lu\n", -		pim->rpf_hash->count); -	vty_out(vty, "Address         Interface        Nexthop\n"); -	vty_out(vty, "---------------------------------------------\n"); - -	hash_walk(pim->rpf_hash, pim_print_pnc_cache_walkcb, &cwd); -} -  /* Display the bsm database details */  static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)  { @@ -3066,15 +1044,15 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)  	frr_each (bsm_frags, pim->global_scope.bsm_frags, bsfrag) {  		char grp_str[PREFIX_STRLEN];  		char rp_str[INET_ADDRSTRLEN]; -		char bsr_str[INET_ADDRSTRLEN];  		struct bsmmsg_grpinfo *group; -		struct bsmmsg_rpinfo *rpaddr; +		struct bsmmsg_rpinfo *bsm_rpinfo;  		struct prefix grp;  		struct bsm_hdr *hdr;  		uint32_t offset = 0;  		uint8_t *buf;  		uint32_t len = 0;  		uint32_t frag_rp_cnt = 0; +		pim_addr bsr_addr;  		buf = bsfrag->data;  		len = bsfrag->size; @@ -3089,12 +1067,11 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)  		buf += sizeof(struct bsm_hdr);  		len -= sizeof(struct bsm_hdr); -		pim_inet4_dump("<BSR Address?>", hdr->bsr_addr.addr, bsr_str, -			       sizeof(bsr_str)); - +		memcpy(&bsr_addr, &hdr->bsr_addr.addr, sizeof(bsr_addr));  		if (uj) { -			json_object_string_add(json, "BSR address", bsr_str); +			json_object_string_addf(json, "BSR address", "%pPAs", +						&bsr_addr);  			json_object_int_add(json, "BSR priority",  					    hdr->bsr_prio);  			json_object_int_add(json, "Hashmask Length", @@ -3106,7 +1083,7 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)  			vty_out(vty, "------------------\n");  			vty_out(vty, "%-15s %-15s %-15s %-15s\n", "BSR-Address",  				"BSR-Priority", "Hashmask-len", "Fragment-Tag"); -			vty_out(vty, "%-15s %-15d %-15d %-15d\n", bsr_str, +			vty_out(vty, "%-15pPAs %-15d %-15d %-15d\n", &bsr_addr,  				hdr->bsr_prio, hdr->hm_len,  				ntohs(hdr->frag_tag));  		} @@ -3159,14 +1136,16 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)  					"RpAddress     HoldTime     Priority\n");  			while (frag_rp_cnt--) { -				rpaddr = (struct bsmmsg_rpinfo *)buf; +				pim_addr rp_addr; + +				bsm_rpinfo = (struct bsmmsg_rpinfo *)buf;  				buf += sizeof(struct bsmmsg_rpinfo);  				offset += sizeof(struct bsmmsg_rpinfo); -				pim_inet4_dump("<Rp addr?>", -					       rpaddr->rpaddr.addr, rp_str, -					       sizeof(rp_str)); +				rp_addr = bsm_rpinfo->rpaddr.addr; +				snprintfrr(rp_str, sizeof(rp_str), "%pPAs", +					   &rp_addr);  				if (uj) {  					json_row = json_object_new_object(); @@ -3174,16 +1153,16 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)  						json_row, "Rp Address", rp_str);  					json_object_int_add(  						json_row, "Rp HoldTime", -						ntohs(rpaddr->rp_holdtime)); +						ntohs(bsm_rpinfo->rp_holdtime));  					json_object_int_add(json_row,  							    "Rp Priority", -							    rpaddr->rp_pri); +							    bsm_rpinfo->rp_pri);  					json_object_object_add(  						json_group, rp_str, json_row);  				} else {  					vty_out(vty, "%-15s %-12d %d\n", rp_str, -						ntohs(rpaddr->rp_holdtime), -						rpaddr->rp_pri); +						
ntohs(bsm_rpinfo->rp_holdtime), +						bsm_rpinfo->rp_pri);  				}  			}  			vty_out(vty, "\n"); @@ -3208,19 +1187,14 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,  	json_object *json_group = NULL;  	json_object *json_row = NULL; -	if (pim->global_scope.current_bsr.s_addr == INADDR_ANY) -		strlcpy(bsr_str, "0.0.0.0", sizeof(bsr_str)); - -	else -		pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr, bsr_str, -			       sizeof(bsr_str)); +	snprintfrr(bsr_str, sizeof(bsr_str), "%pPAs", +		   &pim->global_scope.current_bsr);  	if (uj) {  		json = json_object_new_object();  		json_object_string_add(json, "BSR Address", bsr_str); -	} else { +	} else  		vty_out(vty, "BSR Address  %s\n", bsr_str); -	}  	for (rn = route_top(pim->global_scope.bsrp_table); rn;  	     rn = route_next(rn)) { @@ -3252,13 +1226,14 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,  		frr_each (bsm_rpinfos, bsgrp->bsrp_list, bsm_rp) {  			char rp_str[INET_ADDRSTRLEN]; -			pim_inet4_dump("<Rp Address?>", bsm_rp->rp_address, -				       rp_str, sizeof(rp_str)); +			snprintfrr(rp_str, sizeof(rp_str), "%pPAs", +				   &bsm_rp->rp_address);  			if (uj) {  				json_row = json_object_new_object(); -				json_object_string_add(json_row, "Rp Address", -						       rp_str); +				json_object_string_addf(json_row, "Rp Address", +							"%pPAs", +							&bsm_rp->rp_address);  				json_object_int_add(json_row, "Rp HoldTime",  						    bsm_rp->rp_holdtime);  				json_object_int_add(json_row, "Rp Priority", @@ -3269,8 +1244,8 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,  						       json_row);  			} else { -				vty_out(vty, "%-15s %-15u %-15u %-15u\n", -					rp_str, bsm_rp->rp_prio, +				vty_out(vty, "%-15pPAs %-15u %-15u %-15u\n", +					&bsm_rp->rp_address, bsm_rp->rp_prio,  					bsm_rp->rp_holdtime, bsm_rp->hash);  			}  		} @@ -3293,13 +1268,14 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,  		frr_each (bsm_rpinfos, bsgrp->partial_bsrp_list, bsm_rp) {  			char rp_str[INET_ADDRSTRLEN]; -			pim_inet4_dump("<Rp Addr?>", bsm_rp->rp_address, rp_str, -				       sizeof(rp_str)); +			snprintfrr(rp_str, sizeof(rp_str), "%pPAs", +				   &bsm_rp->rp_address);  			if (uj) {  				json_row = json_object_new_object(); -				json_object_string_add(json_row, "Rp Address", -						       rp_str); +				json_object_string_addf(json_row, "Rp Address", +							"%pPAs", +							&bsm_rp->rp_address);  				json_object_int_add(json_row, "Rp HoldTime",  						    bsm_rp->rp_holdtime);  				json_object_int_add(json_row, "Rp Priority", @@ -3309,8 +1285,8 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,  				json_object_object_add(json_group, rp_str,  						       json_row);  			} else { -				vty_out(vty, "%-15s %-15u %-15u %-15u\n", -					rp_str, bsm_rp->rp_prio, +				vty_out(vty, "%-15pPAs %-15u %-15u %-15u\n", +					&bsm_rp->rp_address, bsm_rp->rp_prio,  					bsm_rp->rp_holdtime, bsm_rp->hash);  			}  		} @@ -3325,78 +1301,6 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,  		vty_json(vty, json);  } -/* pim statistics - just adding only bsm related now. - * We can continue to add all pim related stats here. 
- */ -static void pim_show_statistics(struct pim_instance *pim, struct vty *vty, -				const char *ifname, bool uj) -{ -	json_object *json = NULL; -	struct interface *ifp; - -	if (uj) { -		json = json_object_new_object(); -		json_object_int_add(json, "bsmRx", pim->bsm_rcvd); -		json_object_int_add(json, "bsmTx", pim->bsm_sent); -		json_object_int_add(json, "bsmDropped", pim->bsm_dropped); -	} else { -		vty_out(vty, "BSM Statistics :\n"); -		vty_out(vty, "----------------\n"); -		vty_out(vty, "Number of Received BSMs : %" PRIu64 "\n", -			pim->bsm_rcvd); -		vty_out(vty, "Number of Forwared BSMs : %" PRIu64 "\n", -			pim->bsm_sent); -		vty_out(vty, "Number of Dropped BSMs  : %" PRIu64 "\n", -			pim->bsm_dropped); -	} - -	vty_out(vty, "\n"); - -	/* scan interfaces */ -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		struct pim_interface *pim_ifp = ifp->info; - -		if (ifname && strcmp(ifname, ifp->name)) -			continue; - -		if (!pim_ifp) -			continue; - -		if (!uj) { -			vty_out(vty, "Interface : %s\n", ifp->name); -			vty_out(vty, "-------------------\n"); -			vty_out(vty, -				"Number of BSMs dropped due to config miss : %u\n", -				pim_ifp->pim_ifstat_bsm_cfg_miss); -			vty_out(vty, "Number of unicast BSMs dropped : %u\n", -				pim_ifp->pim_ifstat_ucast_bsm_cfg_miss); -			vty_out(vty, -				"Number of BSMs dropped due to invalid scope zone : %u\n", -				pim_ifp->pim_ifstat_bsm_invalid_sz); -		} else { - -			json_object *json_row = NULL; - -			json_row = json_object_new_object(); - -			json_object_string_add(json_row, "If Name", ifp->name); -			json_object_int_add(json_row, "bsmDroppedConfig", -					    pim_ifp->pim_ifstat_bsm_cfg_miss); -			json_object_int_add( -				json_row, "bsmDroppedUnicast", -				pim_ifp->pim_ifstat_ucast_bsm_cfg_miss); -			json_object_int_add(json_row, -					    "bsmDroppedInvalidScopeZone", -					    pim_ifp->pim_ifstat_bsm_invalid_sz); -			json_object_object_add(json, ifp->name, json_row); -		} -		vty_out(vty, "\n"); -	} - -	if (uj) -		vty_json(vty, json); -} -  static void clear_pim_statistics(struct pim_instance *pim)  {  	struct interface *ifp; @@ -3594,7 +1498,7 @@ static void igmp_show_sources(struct pim_instance *pim, struct vty *vty,  		json = json_object_new_object();  	else  		vty_out(vty, -			"Interface        Address         Group           Source          Timer Fwd Uptime  \n"); +			"Interface        Group           Source          Timer Fwd Uptime  \n");  	/* scan interfaces */  	FOR_ALL_INTERFACES (pim->vrf, ifp) { @@ -3752,8 +1656,7 @@ static void pim_show_bsr(struct pim_instance *pim,  	char bsr_str[PREFIX_STRLEN];  	json_object *json = NULL; -	if (pim->global_scope.current_bsr.s_addr == INADDR_ANY) { -		strlcpy(bsr_str, "0.0.0.0", sizeof(bsr_str)); +	if (pim_addr_is_any(pim->global_scope.current_bsr)) {  		pim_time_uptime(uptime, sizeof(uptime),  				pim->global_scope.current_bsr_first_ts);  		pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen), @@ -3761,8 +1664,6 @@ static void pim_show_bsr(struct pim_instance *pim,  	}  	else { -		pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr, -			       bsr_str, sizeof(bsr_str));  		now = pim_time_monotonic_sec();  		pim_time_uptime(uptime, sizeof(uptime),  				(now - pim->global_scope.current_bsr_first_ts)); @@ -3770,6 +1671,9 @@ static void pim_show_bsr(struct pim_instance *pim,  				now - pim->global_scope.current_bsr_last_ts);  	} +	snprintfrr(bsr_str, sizeof(bsr_str), "%pPAs", +		   &pim->global_scope.current_bsr); +  	switch (pim->global_scope.state) {  	case NO_INFO:  		strlcpy(bsr_state, "NO_INFO", 
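
The bootstrap/RP hunks above move the show code off the IPv4-only helpers (pim_inet4_dump(), INADDR_ANY checks) onto the generic pim_addr type, pim_addr_is_any(), and the "%pPAs" printfrr extension, so one code path can render either address family. Below is a minimal sketch (not a hunk of this diff) of the two formatting idioms the new code relies on, assuming the headers already pulled in by pim_cmd.c; show_bsr_addr() and the "bsrAddress" key are hypothetical names used only for illustration.

/* Illustrative sketch only -- not part of the diff above or below.
 * Assumes FRR's printfrr "%pPAs" extension and json wrappers, exactly as
 * used in the changes above; show_bsr_addr() is a hypothetical helper. */
static void show_bsr_addr(struct vty *vty, json_object *json, pim_addr bsr_addr)
{
	char bsr_str[PREFIX_STRLEN];

	/* Render the address once into a fixed-size buffer ... */
	snprintfrr(bsr_str, sizeof(bsr_str), "%pPAs", &bsr_addr);

	if (json)
		/* ... or format it directly while adding the JSON key. */
		json_object_string_addf(json, "bsrAddress", "%pPAs", &bsr_addr);
	else
		vty_out(vty, "BSR Address  %s\n", bsr_str);
}
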
sizeof(bsr_state)); @@ -3784,6 +1688,7 @@ static void pim_show_bsr(struct pim_instance *pim,  		strlcpy(bsr_state, "", sizeof(bsr_state));  	} +  	if (uj) {  		json = json_object_new_object();  		json_object_string_add(json, "bsr", bsr_str); @@ -4570,9 +2475,9 @@ DEFUN (show_ip_pim_assert_winner_metric,  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_interface, +DEFPY (show_ip_pim_interface,         show_ip_pim_interface_cmd, -       "show ip pim [mlag] [vrf NAME] interface [detail|WORD] [json]", +       "show ip pim [mlag$mlag] [vrf NAME] interface [detail|WORD]$interface [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -4583,30 +2488,34 @@ DEFUN (show_ip_pim_interface,         "interface name\n"         JSON_STR)  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); -	bool mlag = false; +	struct vrf *v; +	bool uj = !!json; +	bool is_mlag = !!mlag; +	json_object *json_parent = NULL; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v)  		return CMD_WARNING; -	if (argv_find(argv, argc, "mlag", &idx)) -		mlag = true; +	if (uj) +		json_parent = json_object_new_object(); -	if (argv_find(argv, argc, "WORD", &idx) -	    || argv_find(argv, argc, "detail", &idx)) -		pim_show_interfaces_single(vrf->info, vty, argv[idx]->arg, mlag, -					   uj); +	if (interface) +		pim_show_interfaces_single(v->info, vty, interface, is_mlag, +					   json_parent);  	else -		pim_show_interfaces(vrf->info, vty, mlag, uj); +		pim_show_interfaces(v->info, vty, is_mlag, json_parent); + +	if (uj) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_interface_vrf_all, +DEFPY (show_ip_pim_interface_vrf_all,         show_ip_pim_interface_vrf_all_cmd, -       "show ip pim [mlag] vrf all interface [detail|WORD] [json]", +       "show ip pim [mlag$mlag] vrf all interface [detail|WORD]$interface [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -4617,35 +2526,32 @@ DEFUN (show_ip_pim_interface_vrf_all,         "interface name\n"         JSON_STR)  { -	int idx = 2; -	bool uj = use_json(argc, argv); -	struct vrf *vrf; -	bool first = true; -	bool mlag = false; - -	if (argv_find(argv, argc, "mlag", &idx)) -		mlag = true; +	bool uj = !!json; +	bool is_mlag = !!mlag; +	struct vrf *v; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; -	idx = 6;  	if (uj) -		vty_out(vty, "{ "); -	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else -			vty_out(vty, "VRF: %s\n", vrf->name); -		if (argv_find(argv, argc, "WORD", &idx) -		    || argv_find(argv, argc, "detail", &idx)) -			pim_show_interfaces_single(vrf->info, vty, -						   argv[idx]->arg, mlag, uj); +		json_parent = json_object_new_object(); + +	RB_FOREACH (v, vrf_name_head, &vrfs_by_name) { +		if (!uj) +			vty_out(vty, "VRF: %s\n", v->name); +		else +			json_vrf = json_object_new_object(); + +		if (interface) +			pim_show_interfaces_single(v->info, vty, interface, +						   is_mlag, json_vrf);  		else -			pim_show_interfaces(vrf->info, vty, mlag, uj); +			pim_show_interfaces(v->info, vty, is_mlag, json_vrf); + +		if (uj) +			json_object_object_add(json_parent, v->name, json_vrf);  	}  	if (uj) -		vty_out(vty, "}\n"); +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } @@ -4664,8 +2570,8 @@ DEFPY (show_ip_pim_join,  {  	pim_sgaddr sg = {0};  	struct vrf *v; -	bool uj = !!json;  	struct pim_instance *pim; +	
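
The show_ip_pim_interface conversion above is representative of the DEFUN-to-DEFPY rewrites in this file: the argv_find()/index bookkeeping is dropped, DEFPY binds the named tokens (mlag$mlag, [detail|WORD]$interface, json$json) straight to C variables, and JSON output is accumulated in a json_object and emitted once through vty_json(). A minimal sketch of that shape follows (not a hunk of this diff), assuming FRR's command and vty headers; "show_foo", show_foo_cmd and foo_show() are hypothetical placeholders, not symbols from this change.

/* Illustrative sketch only -- hypothetical command, mirrors the DEFPY
 * pattern used by the converted show commands above. */
DEFPY (show_foo,
       show_foo_cmd,
       "show ip pim [vrf NAME] foo [detail|WORD]$interface [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "Foo information\n"
       "Detailed output\n"
       "Interface name\n"
       JSON_STR)
{
	struct vrf *v;
	json_object *json_parent = NULL;

	/* DEFPY fills in the token variables (vrf, interface, json). */
	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
	if (!v)
		return CMD_WARNING;

	if (json)
		json_parent = json_object_new_object();

	/* foo_show() stands in for a pim_show_*() helper taking a json arg. */
	foo_show(v->info, vty, interface, json_parent);

	if (json)
		vty_json(vty, json_parent);

	return CMD_SUCCESS;
}
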
json_object *json_parent = NULL;  	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); @@ -4688,14 +2594,20 @@ DEFPY (show_ip_pim_join,  			sg.grp = s_or_g;  	} -	pim_show_join(pim, vty, &sg, uj); +	if (json) +		json_parent = json_object_new_object(); + +	pim_show_join(pim, vty, &sg, json_parent); + +	if (json) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_join_vrf_all, +DEFPY (show_ip_pim_join_vrf_all,         show_ip_pim_join_vrf_all_cmd, -       "show ip pim vrf all join [json]", +       "show ip pim vrf all join [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -4704,77 +2616,29 @@ DEFUN (show_ip_pim_join_vrf_all,         JSON_STR)  {  	pim_sgaddr sg = {0}; -	bool uj = use_json(argc, argv); -	struct vrf *vrf; -	bool first = true; +	struct vrf *vrf_struct; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; -	if (uj) -		vty_out(vty, "{ "); -	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else -			vty_out(vty, "VRF: %s\n", vrf->name); -		pim_show_join(vrf->info, vty, &sg, uj); +	if (json) +		json_parent = json_object_new_object(); +	RB_FOREACH (vrf_struct, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", vrf_struct->name); +		else +			json_vrf = json_object_new_object(); +		pim_show_join(vrf_struct->info, vty, &sg, json_vrf); + +		if (json) +			json_object_object_add(json_parent, vrf_struct->name, +					       json_vrf);  	} -	if (uj) -		vty_out(vty, "}\n"); +	if (json) +		vty_json(vty, json_parent);  	return CMD_WARNING;  } -static void pim_show_jp_agg_helper(struct vty *vty, -				   struct interface *ifp, -				   struct pim_neighbor *neigh, -				   struct pim_upstream *up, -				   int is_join) -{ -	char rpf_str[INET_ADDRSTRLEN]; - -	/* pius->address.s_addr */ -	pim_inet4_dump("<rpf?>", neigh->source_addr, rpf_str, sizeof(rpf_str)); - -	vty_out(vty, "%-16s %-15s %-15pPAs %-15pPAs %5s\n", ifp->name, rpf_str, -		&up->sg.src, &up->sg.grp, is_join ? 
"J" : "P"); -} - -static void pim_show_jp_agg_list(struct pim_instance *pim, struct vty *vty) -{ -	struct interface *ifp; -	struct pim_interface *pim_ifp; -	struct listnode *n_node; -	struct pim_neighbor *neigh; -	struct listnode *jag_node; -	struct pim_jp_agg_group *jag; -	struct listnode *js_node; -	struct pim_jp_sources *js; - -	vty_out(vty, -		"Interface        RPF Nbr         Source          Group           State\n"); - -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		pim_ifp = ifp->info; -		if (!pim_ifp) -			continue; - -		for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, -					  n_node, neigh)) { -			for (ALL_LIST_ELEMENTS_RO(neigh->upstream_jp_agg, -						  jag_node, jag)) { -				for (ALL_LIST_ELEMENTS_RO(jag->sources, -							  js_node, js)) { -					pim_show_jp_agg_helper(vty, -							       ifp, neigh, js->up, -							       js->is_join); -				} -			} -		} -	} -} -  DEFPY (show_ip_pim_jp_agg,         show_ip_pim_jp_agg_cmd,         "show ip pim [vrf NAME] jp-agg", @@ -4805,9 +2669,9 @@ DEFPY (show_ip_pim_jp_agg,  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_local_membership, +DEFPY (show_ip_pim_local_membership,         show_ip_pim_local_membership_cmd, -       "show ip pim [vrf NAME] local-membership [json]", +       "show ip pim [vrf NAME] local-membership [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -4815,14 +2679,15 @@ DEFUN (show_ip_pim_local_membership,         "PIM interface local-membership\n"         JSON_STR)  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); +	struct vrf *v; +	bool uj = !!json; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v)  		return CMD_WARNING; -	pim_show_membership(vrf->info, vty, uj); +	pim_show_membership(v->info, vty, uj);  	return CMD_SUCCESS;  } @@ -5094,9 +2959,9 @@ DEFUN(show_ip_pim_mlag_up_vrf_all, show_ip_pim_mlag_up_vrf_all_cmd,  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_neighbor, +DEFPY (show_ip_pim_neighbor,         show_ip_pim_neighbor_cmd, -       "show ip pim [vrf NAME] neighbor [detail|WORD] [json]", +       "show ip pim [vrf NAME] neighbor [detail|WORD]$interface [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5106,25 +2971,31 @@ DEFUN (show_ip_pim_neighbor,         "Name of interface or neighbor\n"         JSON_STR)  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); +	struct vrf *v; +	json_object *json_parent = NULL; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v)  		return CMD_WARNING; -	if (argv_find(argv, argc, "detail", &idx) -	    || argv_find(argv, argc, "WORD", &idx)) -		pim_show_neighbors_single(vrf->info, vty, argv[idx]->arg, uj); +	if (json) +		json_parent = json_object_new_object(); + +	if (interface) +		pim_show_neighbors_single(v->info, vty, interface, json_parent);  	else -		pim_show_neighbors(vrf->info, vty, uj); +		pim_show_neighbors(v->info, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_neighbor_vrf_all, +DEFPY (show_ip_pim_neighbor_vrf_all,         show_ip_pim_neighbor_vrf_all_cmd, -       "show ip pim vrf all neighbor [detail|WORD] [json]", +       "show ip pim vrf all neighbor [detail|WORD]$interface [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5134,35 +3005,34 @@ DEFUN (show_ip_pim_neighbor_vrf_all,         "Name of interface or neighbor\n"         JSON_STR)  { -	int idx = 2; -	bool uj = use_json(argc, argv); -	struct vrf *vrf; -	bool first = true; +	struct vrf *v; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; -	if (uj) -		vty_out(vty, "{ "); -	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else -			vty_out(vty, "VRF: %s\n", vrf->name); -		if (argv_find(argv, argc, "detail", &idx) -		    || argv_find(argv, argc, "WORD", &idx)) -			pim_show_neighbors_single(vrf->info, vty, -						  argv[idx]->arg, uj); +	if (json) +		json_parent = json_object_new_object(); +	RB_FOREACH (v, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", v->name); +		else +			json_vrf = json_object_new_object(); + +		if (interface) +			pim_show_neighbors_single(v->info, vty, interface, +						  json_vrf);  		else -			pim_show_neighbors(vrf->info, vty, uj); +			pim_show_neighbors(v->info, vty, json_vrf); + +		if (json) +			json_object_object_add(json_parent, v->name, json_vrf);  	} -	if (uj) -		vty_out(vty, "}\n"); +	if (json) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_secondary, +DEFPY (show_ip_pim_secondary,         show_ip_pim_secondary_cmd,         "show ip pim [vrf NAME] secondary",         SHOW_STR @@ -5171,20 +3041,29 @@ DEFUN (show_ip_pim_secondary,         VRF_CMD_HELP_STR         "PIM neighbor addresses\n")  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); +	struct pim_instance *pim; +	struct vrf *v; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n");  		return CMD_WARNING; +	} -	pim_show_neighbors_secondary(vrf->info, vty); +	pim_show_neighbors_secondary(pim, vty);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_state, +DEFPY (show_ip_pim_state,         show_ip_pim_state_cmd, -       "show ip pim [vrf NAME] state [A.B.C.D [A.B.C.D]] [json]", +       "show ip pim [vrf NAME] state [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5194,32 +3073,36 @@ DEFUN (show_ip_pim_state,         "Multicast address\n"         JSON_STR)  { -	const char *src_or_group = NULL; -	const char *group = NULL; -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v)  		return CMD_WARNING; -	if (uj) -		argc--; +	pim = pim_get_pim_instance(v->vrf_id); -	if (argv_find(argv, argc, "A.B.C.D", &idx)) { -		src_or_group = argv[idx]->arg; -		if (idx + 1 < argc) -			group = argv[idx + 1]->arg; +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING;  	} -	pim_show_state(vrf->info, vty, src_or_group, group, uj); +	if (json) +		json_parent = json_object_new_object(); + +	pim_show_state(pim, vty, s_or_g_str, g_str, json_parent); + +	if (json) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_state_vrf_all, +DEFPY (show_ip_pim_state_vrf_all,         show_ip_pim_state_vrf_all_cmd, -       "show ip pim vrf all state [A.B.C.D [A.B.C.D]] [json]", +       "show ip pim vrf all state [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5229,36 +3112,25 @@ DEFUN (show_ip_pim_state_vrf_all,         "Multicast address\n"         JSON_STR)  { -	const char *src_or_group = NULL; -	const char *group = NULL; -	int idx = 2; -	bool uj = use_json(argc, argv);  	struct vrf *vrf; -	bool first = true; - -	if (uj) { -		vty_out(vty, "{ "); -		argc--; -	} +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; -	if (argv_find(argv, argc, "A.B.C.D", &idx)) { -		src_or_group = argv[idx]->arg; -		if (idx + 1 < argc) -			group = argv[idx + 1]->arg; -	} +	if (json) +		json_parent = json_object_new_object();  	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else +		if (!json)  			vty_out(vty, "VRF: %s\n", vrf->name); -		pim_show_state(vrf->info, vty, src_or_group, group, uj); +		else +			json_vrf = json_object_new_object(); +		pim_show_state(vrf->info, vty, s_or_g_str, g_str, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf);  	} -	if (uj) -		vty_out(vty, "}\n"); +	if (json) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } @@ -5279,6 +3151,7 @@ DEFPY (show_ip_pim_upstream,  	struct vrf *v;  	bool uj = !!json;  	struct pim_instance *pim; +	json_object *json_parent = NULL;  	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); @@ -5293,6 +3166,9 @@ DEFPY (show_ip_pim_upstream,  		return CMD_WARNING;  	} +	if (uj) +		json_parent = json_object_new_object(); +  	if (s_or_g.s_addr != INADDR_ANY) {  		if (g.s_addr != INADDR_ANY) {  			sg.src = s_or_g; @@ -5300,14 +3176,17 @@ DEFPY (show_ip_pim_upstream,  		} else  			sg.grp = s_or_g;  	} -	pim_show_upstream(pim, vty, &sg, uj); +	pim_show_upstream(pim, vty, &sg, json_parent); + +	if (uj) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_upstream_vrf_all, +DEFPY (show_ip_pim_upstream_vrf_all,         show_ip_pim_upstream_vrf_all_cmd, -       "show ip pim vrf all upstream [json]", +       "show ip pim vrf all upstream [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5316,29 +3195,33 @@ DEFUN (show_ip_pim_upstream_vrf_all,         JSON_STR)  {  	pim_sgaddr sg = {0}; -	bool uj = use_json(argc, argv);  	struct vrf *vrf; -	bool first = true; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); -	if (uj) -		vty_out(vty, "{ ");  	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else +		if (!json)  			vty_out(vty, "VRF: %s\n", vrf->name); -		pim_show_upstream(vrf->info, vty, &sg, uj); +		else +			json_vrf = json_object_new_object(); +		pim_show_upstream(vrf->info, vty, &sg, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf);  	} +	if (json) +		vty_json(vty, json_parent); +  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_channel, +DEFPY (show_ip_pim_channel,         show_ip_pim_channel_cmd, -       "show ip pim [vrf NAME] channel [json]", +       "show ip pim [vrf NAME] channel [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5346,21 +3229,22 @@ DEFUN (show_ip_pim_channel,         "PIM downstream channel info\n"         JSON_STR)  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); +	struct vrf *v; +	bool uj = !!json; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v)  		return CMD_WARNING; -	pim_show_channel(vrf->info, vty, uj); +	pim_show_channel(v->info, vty, uj);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_upstream_join_desired, +DEFPY (show_ip_pim_upstream_join_desired,         show_ip_pim_upstream_join_desired_cmd, -       "show ip pim [vrf NAME] upstream-join-desired [json]", +       "show ip pim [vrf NAME] upstream-join-desired [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5368,21 +3252,30 @@ DEFUN (show_ip_pim_upstream_join_desired,         "PIM upstream join-desired\n"         JSON_STR)  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); +	struct pim_instance *pim; +	struct vrf *v; +	bool uj = !!json; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v)  		return CMD_WARNING; -	pim_show_join_desired(vrf->info, vty, uj); +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	pim_show_join_desired(pim, vty, uj);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_upstream_rpf, +DEFPY (show_ip_pim_upstream_rpf,         show_ip_pim_upstream_rpf_cmd, -       "show ip pim [vrf NAME] upstream-rpf [json]", +       "show ip pim [vrf NAME] upstream-rpf [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5390,21 +3283,30 @@ DEFUN (show_ip_pim_upstream_rpf,         "PIM upstream source rpf\n"         JSON_STR)  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); +	struct pim_instance *pim; +	struct vrf *v; +	bool uj = !!json; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n");  		return CMD_WARNING; +	} -	pim_show_upstream_rpf(vrf->info, vty, uj); +	pim_show_upstream_rpf(pim, vty, uj);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_rp, +DEFPY (show_ip_pim_rp,         show_ip_pim_rp_cmd, -       "show ip pim [vrf NAME] rp-info [A.B.C.D/M] [json]", +       "show ip pim [vrf NAME] rp-info [A.B.C.D/M$group] [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5413,28 +3315,45 @@ DEFUN (show_ip_pim_rp,         "Multicast Group range\n"         JSON_STR)  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL;  	struct prefix *range = NULL; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n");  		return CMD_WARNING; +	} -	if (argv_find(argv, argc, "A.B.C.D/M", &idx)) { +	if (group_str) {  		range = prefix_new(); -		(void)str2prefix(argv[idx]->arg, range); +		prefix_copy(range, group);  		apply_mask(range);  	} -	pim_rp_show_information(vrf->info, range, vty, uj); +	if (json) +		json_parent = json_object_new_object(); + +	pim_rp_show_information(pim, range, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent); + +	prefix_free(&range);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_rp_vrf_all, +DEFPY (show_ip_pim_rp_vrf_all,         show_ip_pim_rp_vrf_all_cmd, -       "show ip pim vrf all rp-info [A.B.C.D/M] [json]", +       "show ip pim vrf all rp-info [A.B.C.D/M$group] [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5443,39 +3362,41 @@ DEFUN (show_ip_pim_rp_vrf_all,         "Multicast Group range\n"         JSON_STR)  { -	int idx = 0; -	bool uj = use_json(argc, argv);  	struct vrf *vrf; -	bool first = true; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL;  	struct prefix *range = NULL; -	if (argv_find(argv, argc, "A.B.C.D/M", &idx)) { +	if (group_str) {  		range = prefix_new(); -		(void)str2prefix(argv[idx]->arg, range); +		prefix_copy(range, group);  		apply_mask(range);  	} -	if (uj) -		vty_out(vty, "{ "); +	if (json) +		json_parent = json_object_new_object(); +  	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else +		if (!json)  			vty_out(vty, "VRF: %s\n", vrf->name); -		pim_rp_show_information(vrf->info, range, vty, uj); +		else +			json_vrf = json_object_new_object(); +		pim_rp_show_information(vrf->info, range, vty, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf);  	} -	if (uj) -		vty_out(vty, "}\n"); +	if (json) +		vty_json(vty, json_parent); + +	prefix_free(&range);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_rpf, +DEFPY (show_ip_pim_rpf,         show_ip_pim_rpf_cmd, -       "show ip pim [vrf NAME] rpf [json]", +       "show ip pim [vrf NAME] rpf [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5483,21 +3404,36 @@ DEFUN (show_ip_pim_rpf,         "PIM cached source rpf information\n"         JSON_STR)  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n");  		return CMD_WARNING; +	} + +	if (json) +		json_parent = json_object_new_object(); -	pim_show_rpf(vrf->info, vty, uj); +	pim_show_rpf(pim, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_rpf_vrf_all, +DEFPY (show_ip_pim_rpf_vrf_all,         show_ip_pim_rpf_vrf_all_cmd, -       "show ip pim vrf all rpf [json]", +       "show ip pim vrf all rpf [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5505,29 +3441,30 @@ DEFUN (show_ip_pim_rpf_vrf_all,         "PIM cached source rpf information\n"         JSON_STR)  { -	bool uj = use_json(argc, argv);  	struct vrf *vrf; -	bool first = true; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); -	if (uj) -		vty_out(vty, "{ ");  	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else +		if (!json)  			vty_out(vty, "VRF: %s\n", vrf->name); -		pim_show_rpf(vrf->info, vty, uj); +		else +			json_vrf = json_object_new_object(); +		pim_show_rpf(vrf->info, vty, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf);  	} -	if (uj) -		vty_out(vty, "}\n"); +	if (json) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_nexthop, +DEFPY (show_ip_pim_nexthop,         show_ip_pim_nexthop_cmd,         "show ip pim [vrf NAME] nexthop",         SHOW_STR @@ -5536,20 +3473,21 @@ DEFUN (show_ip_pim_nexthop,         VRF_CMD_HELP_STR         "PIM cached nexthop rpf information\n")  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); +	struct vrf *v; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v)  		return CMD_WARNING; -	pim_show_nexthop(vrf->info, vty); +	pim_show_nexthop(v->info, vty);  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_nexthop_lookup, +DEFPY (show_ip_pim_nexthop_lookup,         show_ip_pim_nexthop_lookup_cmd, -       "show ip pim [vrf NAME] nexthop-lookup A.B.C.D A.B.C.D", +       "show ip pim [vrf NAME] nexthop-lookup A.B.C.D$source A.B.C.D$group",         SHOW_STR         IP_STR         PIM_STR @@ -5560,61 +3498,36 @@ DEFUN (show_ip_pim_nexthop_lookup,  {  	struct prefix nht_p;  	int result = 0; -	struct in_addr src_addr, grp_addr; -	struct in_addr vif_source; -	const char *addr_str, *addr_str1; +	pim_addr vif_source;  	struct prefix grp;  	struct pim_nexthop nexthop; -	char nexthop_addr_str[PREFIX_STRLEN]; -	char grp_str[PREFIX_STRLEN]; -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); +	struct vrf *v; -	if (!vrf) -		return CMD_WARNING; +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); -	argv_find(argv, argc, "A.B.C.D", &idx); -	addr_str = argv[idx]->arg; -	result = inet_pton(AF_INET, addr_str, &src_addr); -	if (result <= 0) { -		vty_out(vty, "Bad unicast address %s: errno=%d: %s\n", addr_str, -			errno, safe_strerror(errno)); +	if (!v)  		return CMD_WARNING; -	} -	if (pim_is_group_224_4(src_addr)) { +	if (pim_is_group_224_4(source)) {  		vty_out(vty,  			"Invalid argument. 
Expected Valid Source Address.\n");  		return CMD_WARNING;  	} -	addr_str1 = argv[idx + 1]->arg; -	result = inet_pton(AF_INET, addr_str1, &grp_addr); -	if (result <= 0) { -		vty_out(vty, "Bad unicast address %s: errno=%d: %s\n", addr_str, -			errno, safe_strerror(errno)); -		return CMD_WARNING; -	} - -	if (!pim_is_group_224_4(grp_addr)) { +	if (!pim_is_group_224_4(group)) {  		vty_out(vty,  			"Invalid argument. Expected Valid Multicast Group Address.\n");  		return CMD_WARNING;  	} -	if (!pim_rp_set_upstream_addr(vrf->info, &vif_source, src_addr, -				      grp_addr)) +	if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group))  		return CMD_SUCCESS; -	nht_p.family = AF_INET; -	nht_p.prefixlen = IPV4_MAX_BITLEN; -	nht_p.u.prefix4 = vif_source; -	grp.family = AF_INET; -	grp.prefixlen = IPV4_MAX_BITLEN; -	grp.u.prefix4 = grp_addr; +	pim_addr_to_prefix(&nht_p, vif_source); +	pim_addr_to_prefix(&grp, group);  	memset(&nexthop, 0, sizeof(nexthop)); -	result = pim_ecmp_nexthop_lookup(vrf->info, &nexthop, &nht_p, &grp, 0); +	result = pim_ecmp_nexthop_lookup(v->info, &nexthop, &nht_p, &grp, 0);  	if (!result) {  		vty_out(vty, @@ -5622,11 +3535,8 @@ DEFUN (show_ip_pim_nexthop_lookup,  		return CMD_SUCCESS;  	} -	pim_addr_dump("<grp?>", &grp, grp_str, sizeof(grp_str)); -	pim_addr_dump("<nexthop?>", &nexthop.mrib_nexthop_addr, -		      nexthop_addr_str, sizeof(nexthop_addr_str)); -	vty_out(vty, "Group %s --- Nexthop %s Interface %s \n", grp_str, -		nexthop_addr_str, nexthop.interface->name); +	vty_out(vty, "Group %s --- Nexthop %pPAs Interface %s \n", group_str, +		&nexthop.mrib_nexthop_addr, nexthop.interface->name);  	return CMD_SUCCESS;  } @@ -5702,9 +3612,9 @@ DEFUN (show_ip_pim_bsrp,  	return CMD_SUCCESS;  } -DEFUN (show_ip_pim_statistics, +DEFPY (show_ip_pim_statistics,         show_ip_pim_statistics_cmd, -       "show ip pim [vrf NAME] statistics [interface WORD] [json]", +       "show ip pim [vrf NAME] statistics [interface WORD$word] [json$json]",         SHOW_STR         IP_STR         PIM_STR @@ -5714,619 +3624,145 @@ DEFUN (show_ip_pim_statistics,         "PIM interface\n"         JSON_STR)  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); - -	if (!vrf) -		return CMD_WARNING; - -	if (argv_find(argv, argc, "WORD", &idx)) -		pim_show_statistics(vrf->info, vty, argv[idx]->arg, uj); -	else -		pim_show_statistics(vrf->info, vty, NULL, uj); - -	return CMD_SUCCESS; -} - -static void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty, -				      bool uj) -{ -	struct interface *ifp; -	char buf[PREFIX_STRLEN]; -	json_object *json = NULL; -	json_object *json_row = NULL; - -	vty_out(vty, "\n"); - -	if (uj) -		json = json_object_new_object(); -	else -		vty_out(vty, -			"Interface        Address            ifi Vif  PktsIn PktsOut    BytesIn   BytesOut\n"); - -	FOR_ALL_INTERFACES (pim->vrf, ifp) { -		struct pim_interface *pim_ifp; -		struct in_addr ifaddr; -		struct sioc_vif_req vreq; - -		pim_ifp = ifp->info; +	struct pim_instance *pim; +	struct vrf *v; +	bool uj = !!json; -		if (!pim_ifp) -			continue; +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); -		memset(&vreq, 0, sizeof(vreq)); -		vreq.vifi = pim_ifp->mroute_vif_index; +	if (!v) +		return CMD_WARNING; -		if (ioctl(pim->mroute_socket, SIOCGETVIFCNT, &vreq)) { -			zlog_warn( -				"ioctl(SIOCGETVIFCNT=%lu) failure for interface %s vif_index=%d: errno=%d: %s", -				(unsigned long)SIOCGETVIFCNT, ifp->name, -				pim_ifp->mroute_vif_index, errno, -				safe_strerror(errno)); -		} +	pim = pim_get_pim_instance(v->vrf_id); -		ifaddr = pim_ifp->primary_address; -		if (uj) { -			json_row = json_object_new_object(); -			json_object_string_add(json_row, "name", ifp->name); -			json_object_string_add(json_row, "state", -					       if_is_up(ifp) ? "up" : "down"); -			json_object_string_addf(json_row, "address", "%pI4", -						&pim_ifp->primary_address); -			json_object_int_add(json_row, "ifIndex", ifp->ifindex); -			json_object_int_add(json_row, "vif", -					    pim_ifp->mroute_vif_index); -			json_object_int_add(json_row, "pktsIn", -					    (unsigned long)vreq.icount); -			json_object_int_add(json_row, "pktsOut", -					    (unsigned long)vreq.ocount); -			json_object_int_add(json_row, "bytesIn", -					    (unsigned long)vreq.ibytes); -			json_object_int_add(json_row, "bytesOut", -					    (unsigned long)vreq.obytes); -			json_object_object_add(json, ifp->name, json_row); -		} else { -			vty_out(vty, -				"%-16s %-15s %3d %3d %7lu %7lu %10lu %10lu\n", -				ifp->name, -				inet_ntop(AF_INET, &ifaddr, buf, sizeof(buf)), -				ifp->ifindex, pim_ifp->mroute_vif_index, -				(unsigned long)vreq.icount, -				(unsigned long)vreq.ocount, -				(unsigned long)vreq.ibytes, -				(unsigned long)vreq.obytes); -		} +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING;  	} -	if (uj) -		vty_json(vty, json); -} - -static void pim_cmd_show_ip_multicast_helper(struct pim_instance *pim, -					     struct vty *vty) -{ -	struct vrf *vrf = pim->vrf; -	time_t now = pim_time_monotonic_sec(); -	char uptime[10]; -	char mlag_role[80]; - -	pim = vrf->info; - -	vty_out(vty, "Router MLAG Role: %s\n", -		mlag_role2str(router->mlag_role, mlag_role, sizeof(mlag_role))); -	vty_out(vty, "Mroute socket descriptor:"); - -	vty_out(vty, " %d(%s)\n", pim->mroute_socket, vrf->name); - -	pim_time_uptime(uptime, sizeof(uptime), -			now - pim->mroute_socket_creation); -	vty_out(vty, "Mroute socket uptime: %s\n", uptime); - -	vty_out(vty, "\n"); - -	pim_zebra_zclient_update(vty); -	pim_zlookup_show_ip_multicast(vty); - -	vty_out(vty, "\n"); -	vty_out(vty, "Maximum highest VifIndex: %d\n", PIM_MAX_USABLE_VIFS); - -	vty_out(vty, "\n"); -	vty_out(vty, "Upstream Join Timer: %d secs\n", router->t_periodic); -	vty_out(vty, "Join/Prune Holdtime: %d secs\n", PIM_JP_HOLDTIME); -	vty_out(vty, "PIM ECMP: %s\n", pim->ecmp_enable ? "Enable" : "Disable"); -	vty_out(vty, "PIM ECMP Rebalance: %s\n", -		pim->ecmp_rebalance_enable ? 
"Enable" : "Disable"); - -	vty_out(vty, "\n"); - -	show_rpf_refresh_stats(vty, pim, now, NULL); - -	vty_out(vty, "\n"); - -	show_scan_oil_stats(pim, vty, now); +	if (word) +		pim_show_statistics(pim, vty, word, uj); +	else +		pim_show_statistics(pim, vty, NULL, uj); -	show_multicast_interfaces(pim, vty, false); +	return CMD_SUCCESS;  } -DEFUN (show_ip_multicast, +DEFPY (show_ip_multicast,         show_ip_multicast_cmd,         "show ip multicast [vrf NAME]",         SHOW_STR         IP_STR -       VRF_CMD_HELP_STR -       "Multicast global information\n") +       "Multicast global information\n" +       VRF_CMD_HELP_STR)  { -	int idx = 2; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); +	struct vrf *v; +	struct pim_instance *pim; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n");  		return CMD_WARNING; +	} -	pim_cmd_show_ip_multicast_helper(vrf->info, vty); +	pim_cmd_show_ip_multicast_helper(pim, vty);  	return CMD_SUCCESS;  } -DEFUN (show_ip_multicast_vrf_all, +DEFPY (show_ip_multicast_vrf_all,         show_ip_multicast_vrf_all_cmd,         "show ip multicast vrf all",         SHOW_STR         IP_STR -       VRF_CMD_HELP_STR -       "Multicast global information\n") +       "Multicast global information\n" +       VRF_CMD_HELP_STR)  { -	bool uj = use_json(argc, argv);  	struct vrf *vrf; -	bool first = true; -	if (uj) -		vty_out(vty, "{ ");  	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else -			vty_out(vty, "VRF: %s\n", vrf->name); +		vty_out(vty, "VRF: %s\n", vrf->name);  		pim_cmd_show_ip_multicast_helper(vrf->info, vty);  	} -	if (uj) -		vty_out(vty, "}\n");  	return CMD_SUCCESS;  } -DEFUN(show_ip_multicast_count, -      show_ip_multicast_count_cmd, -      "show ip multicast count [vrf NAME] [json]", -      SHOW_STR IP_STR -      "Multicast global information\n" -      "Data packet count\n" -      VRF_CMD_HELP_STR JSON_STR) +DEFPY (show_ip_multicast_count, +       show_ip_multicast_count_cmd, +       "show ip multicast count [vrf NAME] [json$json]", +       SHOW_STR +       IP_STR +       "Multicast global information\n" +       "Data packet count\n" +       VRF_CMD_HELP_STR +       JSON_STR)  { -	int idx = 3; -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	bool uj = use_json(argc, argv); - -	if (!vrf) -		return CMD_WARNING; - -	show_multicast_interfaces(vrf->info, vty, uj); +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; -	return CMD_SUCCESS; -} +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); -DEFUN(show_ip_multicast_count_vrf_all, -      show_ip_multicast_count_vrf_all_cmd, -      "show ip multicast count vrf all [json]", -      SHOW_STR IP_STR -      "Multicast global information\n" -      "Data packet count\n" -      VRF_CMD_HELP_STR JSON_STR) -{ -	bool uj = use_json(argc, argv); -	struct vrf *vrf; -	bool first = true; +	if (!v) +		return CMD_WARNING; -	if (uj) -		vty_out(vty, "{ "); +	pim = pim_get_pim_instance(v->vrf_id); -	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else -			vty_out(vty, "VRF: %s\n", vrf->name); +	if (json) +		json_parent = json_object_new_object(); -		show_multicast_interfaces(vrf->info, vty, uj); -	} +	show_multicast_interfaces(pim, vty, json_parent); -	if (uj) -		vty_out(vty, "}\n"); +	if (json) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } -static void show_mroute(struct pim_instance *pim, struct vty *vty, -			pim_sgaddr *sg, bool fill, bool uj) +DEFPY (show_ip_multicast_count_vrf_all, +       show_ip_multicast_count_vrf_all_cmd, +       "show ip multicast count vrf all [json$json]", +       SHOW_STR +       IP_STR +       "Multicast global information\n" +       "Data packet count\n" +       VRF_CMD_HELP_STR +       JSON_STR)  { -	struct listnode *node; -	struct channel_oil *c_oil; -	struct static_route *s_route; -	time_t now; -	json_object *json = NULL; -	json_object *json_group = NULL; -	json_object *json_source = NULL; -	json_object *json_oil = NULL; -	json_object *json_ifp_out = NULL; -	int found_oif; -	int first; -	char grp_str[INET_ADDRSTRLEN]; -	char src_str[INET_ADDRSTRLEN]; -	char in_ifname[INTERFACE_NAMSIZ + 1]; -	char out_ifname[INTERFACE_NAMSIZ + 1]; -	int oif_vif_index; -	struct interface *ifp_in; -	char proto[100]; -	char state_str[PIM_REG_STATE_STR_LEN]; -	char mroute_uptime[10]; - -	if (uj) { -		json = json_object_new_object(); -	} else { -		vty_out(vty, "IP Multicast Routing Table\n"); -		vty_out(vty, "Flags: S - Sparse, C - Connected, P - Pruned\n"); -		vty_out(vty, -			"       R - SGRpt Pruned, F - Register flag, T - SPT-bit set\n"); -		vty_out(vty, -			"\nSource          Group           Flags    Proto  Input            Output           TTL  Uptime\n"); -	} - -	now = pim_time_monotonic_sec(); - -	/* print list of PIM and IGMP routes */ -	frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) { -		found_oif = 0; -		first = 1; -		if (!c_oil->installed) -			continue; - -		if (!pim_addr_is_any(sg->grp) && -		    pim_addr_cmp(sg->grp, c_oil->oil.mfcc_mcastgrp)) -			continue; -		if (!pim_addr_is_any(sg->src) && -		    pim_addr_cmp(sg->src, c_oil->oil.mfcc_origin)) -			continue; - -		pim_inet4_dump("<group?>", c_oil->oil.mfcc_mcastgrp, grp_str, -			       sizeof(grp_str)); -		pim_inet4_dump("<source?>", c_oil->oil.mfcc_origin, src_str, -			       sizeof(src_str)); - -		strlcpy(state_str, "S", sizeof(state_str)); -		/* When a non DR receives a igmp join, it creates a (*,G) -		 * channel_oil without any upstream creation */ -		if (c_oil->up) { -			if (PIM_UPSTREAM_FLAG_TEST_SRC_IGMP(c_oil->up->flags)) -				strlcat(state_str, "C", sizeof(state_str)); -			if (pim_upstream_is_sg_rpt(c_oil->up)) -				strlcat(state_str, "R", sizeof(state_str)); -			if (PIM_UPSTREAM_FLAG_TEST_FHR(c_oil->up->flags)) -				strlcat(state_str, "F", sizeof(state_str)); -			if (c_oil->up->sptbit == 
PIM_UPSTREAM_SPTBIT_TRUE) -				strlcat(state_str, "T", sizeof(state_str)); -		} -		if (pim_channel_oil_empty(c_oil)) -			strlcat(state_str, "P", sizeof(state_str)); - -		ifp_in = pim_if_find_by_vif_index(pim, c_oil->oil.mfcc_parent); - -		if (ifp_in) -			strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname)); -		else -			strlcpy(in_ifname, "<iif?>", sizeof(in_ifname)); - - -		pim_time_uptime(mroute_uptime, sizeof(mroute_uptime), -				now - c_oil->mroute_creation); - -		if (uj) { - -			/* Find the group, create it if it doesn't exist */ -			json_object_object_get_ex(json, grp_str, &json_group); - -			if (!json_group) { -				json_group = json_object_new_object(); -				json_object_object_add(json, grp_str, -						       json_group); -			} - -			/* Find the source nested under the group, create it if -			 * it doesn't exist -			 */ -			json_object_object_get_ex(json_group, src_str, -						  &json_source); - -			if (!json_source) { -				json_source = json_object_new_object(); -				json_object_object_add(json_group, src_str, -						       json_source); -			} - -			/* Find the inbound interface nested under the source, -			 * create it if it doesn't exist */ -			json_object_string_add(json_source, "source", -					       src_str); -			json_object_string_add(json_source, "group", -					       grp_str); -			json_object_int_add(json_source, "installed", -					    c_oil->installed); -			json_object_int_add(json_source, "refCount", -					    c_oil->oil_ref_count); -			json_object_int_add(json_source, "oilSize", -					    c_oil->oil_size); -			json_object_int_add(json_source, "OilInheritedRescan", -					    c_oil->oil_inherited_rescan); -			json_object_int_add(json_source, "oilInheritedRescan", -					    c_oil->oil_inherited_rescan); -			json_object_string_add(json_source, "iif", in_ifname); -			json_object_string_add(json_source, "upTime", -					       mroute_uptime); -			json_oil = NULL; -		} - -		for (oif_vif_index = 0; oif_vif_index < MAXVIFS; -		     ++oif_vif_index) { -			struct interface *ifp_out; -			int ttl; - -			ttl = c_oil->oil.mfcc_ttls[oif_vif_index]; -			if (ttl < 1) -				continue; - -			/* do not display muted OIFs */ -			if (c_oil->oif_flags[oif_vif_index] -			    & PIM_OIF_FLAG_MUTE) -				continue; - -			if (c_oil->oil.mfcc_parent == oif_vif_index && -			    !pim_mroute_allow_iif_in_oil(c_oil, -							 oif_vif_index)) -				continue; - -			ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index); -			found_oif = 1; - -			if (ifp_out) -				strlcpy(out_ifname, ifp_out->name, sizeof(out_ifname)); -			else -				strlcpy(out_ifname, "<oif?>", sizeof(out_ifname)); - -			if (uj) { -				json_ifp_out = json_object_new_object(); -				json_object_string_add(json_ifp_out, "source", -						       src_str); -				json_object_string_add(json_ifp_out, "group", -						       grp_str); - -				if (c_oil->oif_flags[oif_vif_index] -				    & PIM_OIF_FLAG_PROTO_PIM) -					json_object_boolean_true_add( -						json_ifp_out, "protocolPim"); - -				if (c_oil->oif_flags[oif_vif_index] -				    & PIM_OIF_FLAG_PROTO_IGMP) -					json_object_boolean_true_add( -						json_ifp_out, "protocolIgmp"); - -				if (c_oil->oif_flags[oif_vif_index] -				    & PIM_OIF_FLAG_PROTO_VXLAN) -					json_object_boolean_true_add( -						json_ifp_out, "protocolVxlan"); - -				if (c_oil->oif_flags[oif_vif_index] -				    & PIM_OIF_FLAG_PROTO_STAR) -					json_object_boolean_true_add( -						json_ifp_out, -						"protocolInherited"); - -				json_object_string_add(json_ifp_out, -						       "inboundInterface", -						       in_ifname); -				
json_object_int_add(json_ifp_out, "iVifI", -						    c_oil->oil.mfcc_parent); -				json_object_string_add(json_ifp_out, -						       "outboundInterface", -						       out_ifname); -				json_object_int_add(json_ifp_out, "oVifI", -						    oif_vif_index); -				json_object_int_add(json_ifp_out, "ttl", ttl); -				json_object_string_add(json_ifp_out, "upTime", -						       mroute_uptime); -				json_object_string_add(json_source, "flags", -						       state_str); -				if (!json_oil) { -					json_oil = json_object_new_object(); -					json_object_object_add(json_source, -							       "oil", json_oil); -				} -				json_object_object_add(json_oil, out_ifname, -						       json_ifp_out); -			} else { -				proto[0] = '\0'; -				if (c_oil->oif_flags[oif_vif_index] -				    & PIM_OIF_FLAG_PROTO_PIM) { -					strlcpy(proto, "PIM", sizeof(proto)); -				} - -				if (c_oil->oif_flags[oif_vif_index] -				    & PIM_OIF_FLAG_PROTO_IGMP) { -					strlcpy(proto, "IGMP", sizeof(proto)); -				} - -				if (c_oil->oif_flags[oif_vif_index] -				    & PIM_OIF_FLAG_PROTO_VXLAN) { -					strlcpy(proto, "VxLAN", sizeof(proto)); -				} - -				if (c_oil->oif_flags[oif_vif_index] -				    & PIM_OIF_FLAG_PROTO_STAR) { -					strlcpy(proto, "STAR", sizeof(proto)); -				} - -				vty_out(vty, -					"%-15s %-15s %-8s %-6s %-16s %-16s %-3d  %8s\n", -					src_str, grp_str, state_str, proto, -					in_ifname, out_ifname, ttl, -					mroute_uptime); - -				if (first) { -					src_str[0] = '\0'; -					grp_str[0] = '\0'; -					in_ifname[0] = '\0'; -					state_str[0] = '\0'; -					mroute_uptime[0] = '\0'; -					first = 0; -				} -			} -		} - -		if (!uj && !found_oif) { -			vty_out(vty, -				"%-15s %-15s %-8s %-6s %-16s %-16s %-3d  %8s\n", -				src_str, grp_str, state_str, "none", in_ifname, -				"none", 0, "--:--:--"); -		} -	} - -	/* Print list of static routes */ -	for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) { -		first = 1; - -		if (!s_route->c_oil.installed) -			continue; +	struct vrf *vrf; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; -		pim_inet4_dump("<group?>", s_route->group, grp_str, -			       sizeof(grp_str)); -		pim_inet4_dump("<source?>", s_route->source, src_str, -			       sizeof(src_str)); -		ifp_in = pim_if_find_by_vif_index(pim, s_route->iif); -		found_oif = 0; +	if (json) +		json_parent = json_object_new_object(); -		if (ifp_in) -			strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname)); +	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { +		if (!json) +			vty_out(vty, "VRF: %s\n", vrf->name);  		else -			strlcpy(in_ifname, "<iif?>", sizeof(in_ifname)); - -		if (uj) { - -			/* Find the group, create it if it doesn't exist */ -			json_object_object_get_ex(json, grp_str, &json_group); - -			if (!json_group) { -				json_group = json_object_new_object(); -				json_object_object_add(json, grp_str, -						       json_group); -			} - -			/* Find the source nested under the group, create it if -			 * it doesn't exist */ -			json_object_object_get_ex(json_group, src_str, -						  &json_source); - -			if (!json_source) { -				json_source = json_object_new_object(); -				json_object_object_add(json_group, src_str, -						       json_source); -			} - -			json_object_string_add(json_source, "iif", in_ifname); -			json_oil = NULL; -		} else { -			strlcpy(proto, "STATIC", sizeof(proto)); -		} - -		for (oif_vif_index = 0; oif_vif_index < MAXVIFS; -		     ++oif_vif_index) { -			struct interface *ifp_out; -			char oif_uptime[10]; -			int ttl; - -			ttl = s_route->oif_ttls[oif_vif_index]; -			if (ttl 
< 1) -				continue; - -			ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index); -			pim_time_uptime( -				oif_uptime, sizeof(oif_uptime), -				now -				- s_route->c_oil -				.oif_creation[oif_vif_index]); -			found_oif = 1; - -			if (ifp_out) -				strlcpy(out_ifname, ifp_out->name, sizeof(out_ifname)); -			else -				strlcpy(out_ifname, "<oif?>", sizeof(out_ifname)); - -			if (uj) { -				json_ifp_out = json_object_new_object(); -				json_object_string_add(json_ifp_out, "source", -						       src_str); -				json_object_string_add(json_ifp_out, "group", -						       grp_str); -				json_object_boolean_true_add(json_ifp_out, -							     "protocolStatic"); -				json_object_string_add(json_ifp_out, -						       "inboundInterface", -						       in_ifname); -				json_object_int_add( -					json_ifp_out, "iVifI", -					s_route->c_oil.oil.mfcc_parent); -				json_object_string_add(json_ifp_out, -						       "outboundInterface", -						       out_ifname); -				json_object_int_add(json_ifp_out, "oVifI", -						    oif_vif_index); -				json_object_int_add(json_ifp_out, "ttl", ttl); -				json_object_string_add(json_ifp_out, "upTime", -						       oif_uptime); -				if (!json_oil) { -					json_oil = json_object_new_object(); -					json_object_object_add(json_source, -							       "oil", json_oil); -				} -				json_object_object_add(json_oil, out_ifname, -						       json_ifp_out); -			} else { -				vty_out(vty, -					"%-15s %-15s %-8s %-6s %-16s %-16s %-3d  %8s\n", -					src_str, grp_str, "-", proto, in_ifname, -					out_ifname, ttl, oif_uptime); -				if (first && !fill) { -					src_str[0] = '\0'; -					grp_str[0] = '\0'; -					in_ifname[0] = '\0'; -					first = 0; -				} -			} -		} +			json_vrf = json_object_new_object(); -		if (!uj && !found_oif) { -			vty_out(vty, -				"%-15s %-15s %-8s %-6s %-16s %-16s %-3d  %8s\n", -				src_str, grp_str, "-", proto, in_ifname, "none", -				0, "--:--:--"); -		} +		show_multicast_interfaces(vrf->info, vty, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf);  	} +	if (json) +		vty_json(vty, json_parent); -	if (uj) -		vty_json(vty, json); +	return CMD_SUCCESS;  }  DEFPY (show_ip_mroute, @@ -6344,13 +3780,13 @@ DEFPY (show_ip_mroute,  	pim_sgaddr sg = {0};  	struct pim_instance *pim;  	struct vrf *v; +	json_object *json_parent = NULL;  	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); -	if (!v) { -		vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf); +	if (!v)  		return CMD_WARNING; -	} +  	pim = pim_get_pim_instance(v->vrf_id);  	if (!pim) { @@ -6358,6 +3794,9 @@ DEFPY (show_ip_mroute,  		return CMD_WARNING;  	} +	if (json) +		json_parent = json_object_new_object(); +  	if (s_or_g.s_addr != INADDR_ANY) {  		if (g.s_addr != INADDR_ANY) {  			sg.src = s_or_g; @@ -6365,13 +3804,18 @@ DEFPY (show_ip_mroute,  		} else  			sg.grp = s_or_g;  	} -	show_mroute(pim, vty, &sg, !!fill, !!json); + +	show_mroute(pim, vty, &sg, !!fill, json_parent); + +	if (json) +		vty_json(vty, json_parent); +  	return CMD_SUCCESS;  } -DEFUN (show_ip_mroute_vrf_all, +DEFPY (show_ip_mroute_vrf_all,         show_ip_mroute_vrf_all_cmd, -       "show ip mroute vrf all [fill] [json]", +       "show ip mroute vrf all [fill$fill] [json$json]",         SHOW_STR         IP_STR         MROUTE_STR @@ -6380,29 +3824,25 @@ DEFUN (show_ip_mroute_vrf_all,         JSON_STR)  {  	pim_sgaddr sg = {0}; -	bool uj = use_json(argc, argv); -	int idx = 4;  	struct vrf *vrf; -	bool first = true; -	bool fill = false; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; -	if (argv_find(argv, argc, "fill", &idx)) -		fill = true; +	if (json) +		json_parent = json_object_new_object(); -	if (uj) -		vty_out(vty, "{ ");  	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else +		if (!json)  			vty_out(vty, "VRF: %s\n", vrf->name); -		show_mroute(vrf->info, vty, &sg, fill, uj); +		else +			json_vrf = json_object_new_object(); +		show_mroute(vrf->info, vty, &sg, !!fill, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf);  	} -	if (uj) -		vty_out(vty, "}\n"); +	if (json) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } @@ -6450,81 +3890,9 @@ DEFUN (clear_ip_mroute_count,  	return CMD_SUCCESS;  } -static void show_mroute_count_per_channel_oil(struct channel_oil *c_oil, -					      json_object *json, -					      struct vty *vty) -{ -	char group_str[INET_ADDRSTRLEN]; -	char source_str[INET_ADDRSTRLEN]; -	json_object *json_group = NULL; -	json_object *json_source = NULL; - -	if (!c_oil->installed) -		return; - -	pim_mroute_update_counters(c_oil); - -	pim_inet4_dump("<group?>", c_oil->oil.mfcc_mcastgrp, group_str, -		       sizeof(group_str)); -	pim_inet4_dump("<source?>", c_oil->oil.mfcc_origin, source_str, -		       sizeof(source_str)); - -	if (json) { -		json_object_object_get_ex(json, group_str, &json_group); - -		if (!json_group) { -			json_group = json_object_new_object(); -			json_object_object_add(json, group_str, json_group); -		} - -		json_source = json_object_new_object(); -		json_object_object_add(json_group, source_str, json_source); -		json_object_int_add(json_source, "lastUsed", -				    c_oil->cc.lastused / 100); -		json_object_int_add(json_source, "packets", c_oil->cc.pktcnt); -		json_object_int_add(json_source, "bytes", c_oil->cc.bytecnt); -		json_object_int_add(json_source, "wrongIf", c_oil->cc.wrong_if); - -	} else { -		vty_out(vty, "%-15s %-15s %-8llu %-7ld %-10ld %-7ld\n", -			source_str, group_str, c_oil->cc.lastused / 100, -			c_oil->cc.pktcnt - c_oil->cc.origpktcnt, -			c_oil->cc.bytecnt - c_oil->cc.origbytecnt, -			c_oil->cc.wrong_if - c_oil->cc.origwrong_if); -	} -} - -static void show_mroute_count(struct pim_instance *pim, struct vty *vty, -			      bool uj) -{ -	struct listnode *node; -	struct 
channel_oil *c_oil; -	struct static_route *sr; -	json_object *json = NULL; - -	if (uj) -		json = json_object_new_object(); -	else { -		vty_out(vty, "\n"); - -		vty_out(vty, -			"Source          Group           LastUsed Packets Bytes WrongIf  \n"); -	} - -	/* Print PIM and IGMP route counts */ -	frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) -		show_mroute_count_per_channel_oil(c_oil, json, vty); - -	for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sr)) -		show_mroute_count_per_channel_oil(&sr->c_oil, json, vty); - -	if (uj) -		vty_json(vty, json); -} - -DEFUN (show_ip_mroute_count, +DEFPY (show_ip_mroute_count,         show_ip_mroute_count_cmd, -       "show ip mroute [vrf NAME] count [json]", +       "show ip mroute [vrf NAME] count [json$json]",         SHOW_STR         IP_STR         MROUTE_STR @@ -6532,20 +3900,36 @@ DEFUN (show_ip_mroute_count,         "Route and packet count data\n"         JSON_STR)  { -	int idx = 2; -	bool uj = use_json(argc, argv); -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; -	if (!vrf) +	v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + +	if (!v) +		return CMD_WARNING; + +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n");  		return CMD_WARNING; +	} + +	if (json) +		json_parent = json_object_new_object(); + +	show_mroute_count(pim, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent); -	show_mroute_count(vrf->info, vty, uj);  	return CMD_SUCCESS;  } -DEFUN (show_ip_mroute_count_vrf_all, +DEFPY (show_ip_mroute_count_vrf_all,         show_ip_mroute_count_vrf_all_cmd, -       "show ip mroute vrf all count [json]", +       "show ip mroute vrf all count [json$json]",         SHOW_STR         IP_STR         MROUTE_STR @@ -6553,112 +3937,34 @@ DEFUN (show_ip_mroute_count_vrf_all,         "Route and packet count data\n"         JSON_STR)  { -	bool uj = use_json(argc, argv);  	struct vrf *vrf; -	bool first = true; +	json_object *json_parent = NULL; +	json_object *json_vrf = NULL; + +	if (json) +		json_parent = json_object_new_object(); -	if (uj) -		vty_out(vty, "{ ");  	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) { -			if (!first) -				vty_out(vty, ", "); -			vty_out(vty, " \"%s\": ", vrf->name); -			first = false; -		} else +		if (!json)  			vty_out(vty, "VRF: %s\n", vrf->name); -		show_mroute_count(vrf->info, vty, uj); -	} -	if (uj) -		vty_out(vty, "}\n"); - -	return CMD_SUCCESS; -} +		else +			json_vrf = json_object_new_object(); -static void show_mroute_summary(struct pim_instance *pim, struct vty *vty, -				json_object *json) -{ -	struct listnode *node; -	struct channel_oil *c_oil; -	struct static_route *s_route; -	uint32_t starg_sw_mroute_cnt = 0; -	uint32_t sg_sw_mroute_cnt = 0; -	uint32_t starg_hw_mroute_cnt = 0; -	uint32_t sg_hw_mroute_cnt = 0; -	json_object *json_starg = NULL; -	json_object *json_sg = NULL; - -	if (!json) -		vty_out(vty, "Mroute Type    Installed/Total\n"); - -	frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) { -		if (!c_oil->installed) { -			if (c_oil->oil.mfcc_origin.s_addr == INADDR_ANY) -				starg_sw_mroute_cnt++; -			else -				sg_sw_mroute_cnt++; -		} else { -			if (c_oil->oil.mfcc_origin.s_addr == INADDR_ANY) -				starg_hw_mroute_cnt++; -			else -				sg_hw_mroute_cnt++; -		} -	} +		show_mroute_count(vrf->info, vty, json_vrf); -	for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) { -		if (!s_route->c_oil.installed) { -			if 
(s_route->c_oil.oil.mfcc_origin.s_addr == INADDR_ANY) -				starg_sw_mroute_cnt++; -			else -				sg_sw_mroute_cnt++; -		} else { -			if (s_route->c_oil.oil.mfcc_origin.s_addr == INADDR_ANY) -				starg_hw_mroute_cnt++; -			else -				sg_hw_mroute_cnt++; -		} +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf);  	} +	if (json) +		vty_json(vty, json_parent); -	if (!json) { -		vty_out(vty, "%-20s %u/%u\n", "(*, G)", starg_hw_mroute_cnt, -			starg_sw_mroute_cnt + starg_hw_mroute_cnt); -		vty_out(vty, "%-20s %u/%u\n", "(S, G)", sg_hw_mroute_cnt, -			sg_sw_mroute_cnt + sg_hw_mroute_cnt); -		vty_out(vty, "------\n"); -		vty_out(vty, "%-20s %u/%u\n", "Total", -			(starg_hw_mroute_cnt + sg_hw_mroute_cnt), -			(starg_sw_mroute_cnt + starg_hw_mroute_cnt -			 + sg_sw_mroute_cnt + sg_hw_mroute_cnt)); -	} else { -		/* (*,G) route details */ -		json_starg = json_object_new_object(); -		json_object_object_add(json, "wildcardGroup", json_starg); - -		json_object_int_add(json_starg, "installed", -				    starg_hw_mroute_cnt); -		json_object_int_add(json_starg, "total", -				    starg_sw_mroute_cnt + starg_hw_mroute_cnt); - -		/* (S, G) route details */ -		json_sg = json_object_new_object(); -		json_object_object_add(json, "sourceGroup", json_sg); - -		json_object_int_add(json_sg, "installed", sg_hw_mroute_cnt); -		json_object_int_add(json_sg, "total", -				    sg_sw_mroute_cnt + sg_hw_mroute_cnt); - -		json_object_int_add(json, "totalNumOfInstalledMroutes", -				    starg_hw_mroute_cnt + sg_hw_mroute_cnt); -		json_object_int_add(json, "totalNumOfMroutes", -				    starg_sw_mroute_cnt + starg_hw_mroute_cnt -				    + sg_sw_mroute_cnt -				    + sg_hw_mroute_cnt); -	} +	return CMD_SUCCESS;  } -DEFUN (show_ip_mroute_summary, +DEFPY (show_ip_mroute_summary,         show_ip_mroute_summary_cmd, -       "show ip mroute [vrf NAME] summary [json]", +       "show ip mroute [vrf NAME] summary [json$json]",         SHOW_STR         IP_STR         MROUTE_STR @@ -6666,27 +3972,36 @@ DEFUN (show_ip_mroute_summary,         "Summary of all mroutes\n"         JSON_STR)  { -	int idx = 2; -	bool uj = use_json(argc, argv); -	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); -	json_object *json = NULL; +	struct pim_instance *pim; +	struct vrf *v; +	json_object *json_parent = NULL; -	if (uj) -		json = json_object_new_object(); +	v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); -	if (!vrf) +	if (!v)  		return CMD_WARNING; -	show_mroute_summary(vrf->info, vty, json); +	pim = pim_get_pim_instance(v->vrf_id); + +	if (!pim) { +		vty_out(vty, "%% Unable to find pim instance\n"); +		return CMD_WARNING; +	} + +	if (json) +		json_parent = json_object_new_object(); + +	show_mroute_summary(pim, vty, json_parent); + +	if (json) +		vty_json(vty, json_parent); -	if (uj) -		vty_json(vty, json);  	return CMD_SUCCESS;  } -DEFUN (show_ip_mroute_summary_vrf_all, +DEFPY (show_ip_mroute_summary_vrf_all,         show_ip_mroute_summary_vrf_all_cmd, -       "show ip mroute vrf all summary [json]", +       "show ip mroute vrf all summary [json$json]",         SHOW_STR         IP_STR         MROUTE_STR @@ -6695,27 +4010,27 @@ DEFUN (show_ip_mroute_summary_vrf_all,         JSON_STR)  {  	struct vrf *vrf; -	bool uj = use_json(argc, argv); -	json_object *json = NULL; +	json_object *json_parent = NULL;  	json_object *json_vrf = NULL; -	if (uj) -		json = json_object_new_object(); +	if (json) +		json_parent = json_object_new_object();  	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if (uj) -			json_vrf = json_object_new_object(); -		else +		if (!json)  			vty_out(vty, "VRF: %s\n", vrf->name); +		else +			json_vrf = json_object_new_object();  		show_mroute_summary(vrf->info, vty, json_vrf); -		if (uj) -			json_object_object_add(json, vrf->name, json_vrf); +		if (json) +			json_object_object_add(json_parent, vrf->name, +					       json_vrf);  	} -	if (uj) -		vty_json(vty, json); +	if (json) +		vty_json(vty, json_parent);  	return CMD_SUCCESS;  } @@ -6734,7 +4049,6 @@ DEFUN (show_ip_rib,  	struct in_addr addr;  	const char *addr_str;  	struct pim_nexthop nexthop; -	char nexthop_addr_str[PREFIX_STRLEN];  	int result;  	if (!vrf) @@ -6760,10 +4074,8 @@ DEFUN (show_ip_rib,  	vty_out(vty,  		"Address         NextHop         Interface Metric Preference\n"); -	pim_addr_dump("<nexthop?>", &nexthop.mrib_nexthop_addr, -		      nexthop_addr_str, sizeof(nexthop_addr_str)); - -	vty_out(vty, "%-15s %-15s %-9s %6d %10d\n", addr_str, nexthop_addr_str, +	vty_out(vty, "%-15s %-15pPAs %-9s %6d %10d\n", addr_str, +		&nexthop.mrib_nexthop_addr,  		nexthop.interface ? 
nexthop.interface->name : "<ifname?>",  		nexthop.mrib_route_metric, nexthop.mrib_metric_preference); @@ -6963,7 +4275,7 @@ DEFPY (ip_pim_rp_keep_alive,         "ip pim rp keep-alive-timer (1-65535)$kat",         IP_STR         "pim multicast routing\n" -       "Rendevous Point\n" +       "Rendezvous Point\n"         "Keep alive Timer\n"         "Seconds\n")  { @@ -6976,7 +4288,7 @@ DEFUN (no_ip_pim_rp_keep_alive,         NO_STR         IP_STR         "pim multicast routing\n" -       "Rendevous Point\n" +       "Rendezvous Point\n"         "Keep alive Timer\n"         IGNORED_IN_NO_STR)  { @@ -7029,22 +4341,22 @@ DEFUN (no_ip_pim_packets,  	return pim_process_no_pim_packet_cmd(vty);  } -DEFPY (igmp_group_watermark, -       igmp_group_watermark_cmd, +DEFPY (ip_igmp_group_watermark, +       ip_igmp_group_watermark_cmd,         "ip igmp watermark-warn (1-65535)$limit",         IP_STR         IGMP_STR         "Configure group limit for watermark warning\n"         "Group count to generate watermark warning\n")  { -	PIM_DECLVAR_CONTEXT(vrf, pim); +	PIM_DECLVAR_CONTEXT_VRF(vrf, pim);  	pim->igmp_watermark_limit = limit;  	return CMD_SUCCESS;  } -DEFPY (no_igmp_group_watermark, -       no_igmp_group_watermark_cmd, +DEFPY (no_ip_igmp_group_watermark, +       no_ip_igmp_group_watermark_cmd,         "no ip igmp watermark-warn [(1-65535)$limit]",         NO_STR         IP_STR @@ -7052,7 +4364,7 @@ DEFPY (no_igmp_group_watermark,         "Unconfigure group limit for watermark warning\n"         IGNORED_IN_NO_STR)  { -	PIM_DECLVAR_CONTEXT(vrf, pim); +	PIM_DECLVAR_CONTEXT_VRF(vrf, pim);  	pim->igmp_watermark_limit = 0;  	return CMD_SUCCESS; @@ -7116,7 +4428,7 @@ DEFPY (ip_pim_rp,         "ip pim rp A.B.C.D$rp [A.B.C.D/M]$gp",         IP_STR         "pim multicast routing\n" -       "Rendevous Point\n" +       "Rendezvous Point\n"         "ip address of RP\n"         "Group Address range to cover\n")  { @@ -7144,7 +4456,7 @@ DEFPY (no_ip_pim_rp,         NO_STR         IP_STR         "pim multicast routing\n" -       "Rendevous Point\n" +       "Rendezvous Point\n"         "ip address of RP\n"         "Group Address range to cover\n")  { @@ -7265,22 +4577,6 @@ DEFUN (no_ip_pim_ssm_prefix_list_name,  	return CMD_WARNING_CONFIG_FAILED;  } -static void ip_pim_ssm_show_group_range(struct pim_instance *pim, -					struct vty *vty, bool uj) -{ -	struct pim_ssm *ssm = pim->ssm_info; -	const char *range_str = -		ssm->plist_name ? ssm->plist_name : PIM_SSM_STANDARD_RANGE; - -	if (uj) { -		json_object *json; -		json = json_object_new_object(); -		json_object_string_add(json, "ssmGroups", range_str); -		vty_json(vty, json); -	} else -		vty_out(vty, "SSM group range : %s\n", range_str); -} -  DEFUN (show_ip_pim_ssm_range,         show_ip_pim_ssm_range_cmd,         "show ip pim [vrf NAME] group-type [json]", @@ -7384,24 +4680,9 @@ DEFUN (ip_ssmpingd,         "Source address\n")  {  	int idx_ipv4 = 2; -	const char *source_str = (argc == 3) ? argv[idx_ipv4]->arg : "0.0.0.0"; -	const char *vrfname; -	char ssmpingd_ip_xpath[XPATH_MAXLEN]; - -	vrfname = pim_cli_get_vrf_name(vty); -	if (vrfname == NULL) -		return CMD_WARNING_CONFIG_FAILED; - -	snprintf(ssmpingd_ip_xpath, sizeof(ssmpingd_ip_xpath), -		 FRR_PIM_VRF_XPATH, -		 "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4"); -	strlcat(ssmpingd_ip_xpath, "/ssm-pingd-source-ip", -		sizeof(ssmpingd_ip_xpath)); +	const char *src_str = (argc == 3) ? 
argv[idx_ipv4]->arg : "0.0.0.0"; -	nb_cli_enqueue_change(vty, ssmpingd_ip_xpath, NB_OP_CREATE, -			      source_str); - -	return nb_cli_apply_changes(vty, NULL); +	return pim_process_ssmpingd_cmd(vty, NB_OP_CREATE, src_str);  }  DEFUN (no_ip_ssmpingd, @@ -7412,25 +4693,10 @@ DEFUN (no_ip_ssmpingd,         CONF_SSMPINGD_STR         "Source address\n")  { -	const char *vrfname;  	int idx_ipv4 = 3; -	const char *source_str = (argc == 4) ? argv[idx_ipv4]->arg : "0.0.0.0"; -	char ssmpingd_ip_xpath[XPATH_MAXLEN]; - -	vrfname = pim_cli_get_vrf_name(vty); -	if (vrfname == NULL) -		return CMD_WARNING_CONFIG_FAILED; - -	snprintf(ssmpingd_ip_xpath, sizeof(ssmpingd_ip_xpath), -		 FRR_PIM_VRF_XPATH, -		 "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4"); -	strlcat(ssmpingd_ip_xpath, "/ssm-pingd-source-ip", -		sizeof(ssmpingd_ip_xpath)); - -	nb_cli_enqueue_change(vty, ssmpingd_ip_xpath, NB_OP_DESTROY, -			      source_str); +	const char *src_str = (argc == 4) ? argv[idx_ipv4]->arg : "0.0.0.0"; -	return nb_cli_apply_changes(vty, NULL); +	return pim_process_ssmpingd_cmd(vty, NB_OP_DESTROY, src_str);  }  DEFUN (ip_pim_ecmp, @@ -7734,35 +5000,15 @@ DEFUN (interface_no_ip_igmp_version,  				    "frr-routing:ipv4");  } -DEFUN (interface_ip_igmp_query_max_response_time, +DEFPY (interface_ip_igmp_query_max_response_time,         interface_ip_igmp_query_max_response_time_cmd, -       "ip igmp query-max-response-time (1-65535)", +       "ip igmp query-max-response-time (1-65535)$qmrt",         IP_STR         IFACE_IGMP_STR         IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_STR         "Query response value in deci-seconds\n")  { -	const struct lyd_node *pim_enable_dnode; - -	pim_enable_dnode = -		yang_dnode_getf(vty->candidate_config->dnode, -				FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH, -				"frr-routing:ipv4"); - -	if (!pim_enable_dnode) { -		nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, -				      "true"); -	} else { -		if (!yang_dnode_get_bool(pim_enable_dnode, ".")) -			nb_cli_enqueue_change(vty, "./enable", -					      NB_OP_MODIFY, "true"); -	} - -	nb_cli_enqueue_change(vty, "./query-max-response-time", NB_OP_MODIFY, -			      argv[3]->arg); - -	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, -				    "frr-routing:ipv4"); +	return gm_process_query_max_response_time_cmd(vty, qmrt_str);  }  DEFUN (interface_no_ip_igmp_query_max_response_time, @@ -7774,10 +5020,7 @@ DEFUN (interface_no_ip_igmp_query_max_response_time,         IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_STR         IGNORED_IN_NO_STR)  { -	nb_cli_enqueue_change(vty, "./query-max-response-time", NB_OP_DESTROY, -			      NULL); -	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, -				    "frr-routing:ipv4"); +	return gm_process_no_query_max_response_time_cmd(vty);  }  DEFUN_HIDDEN (interface_ip_igmp_query_max_response_time_dsec, @@ -7826,34 +5069,15 @@ DEFUN_HIDDEN (interface_no_ip_igmp_query_max_response_time_dsec,  				    "frr-routing:ipv4");  } -DEFUN (interface_ip_igmp_last_member_query_count, +DEFPY (interface_ip_igmp_last_member_query_count,         interface_ip_igmp_last_member_query_count_cmd, -       "ip igmp last-member-query-count (1-255)", +       "ip igmp last-member-query-count (1-255)$lmqc",         IP_STR         IFACE_IGMP_STR         IFACE_IGMP_LAST_MEMBER_QUERY_COUNT_STR         "Last member query count\n")  { -	const struct lyd_node *pim_enable_dnode; - -	pim_enable_dnode = -		yang_dnode_getf(vty->candidate_config->dnode, -				FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH, -				"frr-routing:ipv4"); -	if (!pim_enable_dnode) { -		
nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, -				      "true"); -	} else { -		if (!yang_dnode_get_bool(pim_enable_dnode, ".")) -			nb_cli_enqueue_change(vty, "./enable", -					      NB_OP_MODIFY, "true"); -	} - -	nb_cli_enqueue_change(vty, "./robustness-variable", NB_OP_MODIFY, -			      argv[3]->arg); - -	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, -				    "frr-routing:ipv4"); +	return gm_process_last_member_query_count_cmd(vty, lmqc_str);  }  DEFUN (interface_no_ip_igmp_last_member_query_count, @@ -7865,41 +5089,18 @@ DEFUN (interface_no_ip_igmp_last_member_query_count,         IFACE_IGMP_LAST_MEMBER_QUERY_COUNT_STR         IGNORED_IN_NO_STR)  { -	nb_cli_enqueue_change(vty, "./robustness-variable", NB_OP_DESTROY, -			      NULL); - -	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, -				    "frr-routing:ipv4"); +	return gm_process_no_last_member_query_count_cmd(vty);  } -DEFUN (interface_ip_igmp_last_member_query_interval, +DEFPY (interface_ip_igmp_last_member_query_interval,         interface_ip_igmp_last_member_query_interval_cmd, -       "ip igmp last-member-query-interval (1-65535)", +       "ip igmp last-member-query-interval (1-65535)$lmqi",         IP_STR         IFACE_IGMP_STR         IFACE_IGMP_LAST_MEMBER_QUERY_INTERVAL_STR         "Last member query interval in deciseconds\n")  { -	const struct lyd_node *pim_enable_dnode; - -	pim_enable_dnode = -		yang_dnode_getf(vty->candidate_config->dnode, -				FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH, -				"frr-routing:ipv4"); -	if (!pim_enable_dnode) { -		nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, -				      "true"); -	} else { -		if (!yang_dnode_get_bool(pim_enable_dnode, ".")) -			nb_cli_enqueue_change(vty, "./enable", -					      NB_OP_MODIFY, "true"); -	} - -	nb_cli_enqueue_change(vty, "./last-member-query-interval", NB_OP_MODIFY, -			      argv[3]->arg); - -	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, -				    "frr-routing:ipv4"); +	return gm_process_last_member_query_interval_cmd(vty, lmqi_str);  }  DEFUN (interface_no_ip_igmp_last_member_query_interval, @@ -7911,11 +5112,7 @@ DEFUN (interface_no_ip_igmp_last_member_query_interval,         IFACE_IGMP_LAST_MEMBER_QUERY_INTERVAL_STR         IGNORED_IN_NO_STR)  { -	nb_cli_enqueue_change(vty, "./last-member-query-interval", -			      NB_OP_DESTROY, NULL); - -	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, -				    "frr-routing:ipv4"); +	return gm_process_no_last_member_query_interval_cmd(vty);  }  DEFUN (interface_ip_pim_drprio, @@ -8308,6 +5505,32 @@ DEFUN (no_debug_igmp_trace,  } +DEFUN (debug_igmp_trace_detail, +       debug_igmp_trace_detail_cmd, +       "debug igmp trace detail", +       DEBUG_STR +       DEBUG_IGMP_STR +       DEBUG_IGMP_TRACE_STR +       "detailed\n") +{ +	PIM_DO_DEBUG_IGMP_TRACE_DETAIL; +	return CMD_SUCCESS; +} + +DEFUN (no_debug_igmp_trace_detail, +       no_debug_igmp_trace_detail_cmd, +       "no debug igmp trace detail", +       NO_STR +       DEBUG_STR +       DEBUG_IGMP_STR +       DEBUG_IGMP_TRACE_STR +       "detailed\n") +{ +	PIM_DONT_DEBUG_IGMP_TRACE_DETAIL; +	return CMD_SUCCESS; +} + +  DEFUN (debug_mroute,         debug_mroute_cmd,         "debug mroute", @@ -10591,10 +7814,10 @@ void pim_cmd_init(void)  	install_element(VRF_NODE, &no_ip_pim_ecmp_rebalance_cmd);  	install_element(CONFIG_NODE, &ip_pim_mlag_cmd);  	install_element(CONFIG_NODE, &no_ip_pim_mlag_cmd); -	install_element(CONFIG_NODE, &igmp_group_watermark_cmd); -	install_element(VRF_NODE, &igmp_group_watermark_cmd); -	
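/*
 * Illustrative sketch only, not part of the patch: the new
 * "debug igmp trace detail" commands above flip a single bit through
 * PIM_DO_/PIM_DONT_ style macros. A generic version of that pattern, with
 * hypothetical names (debug_mask, DBG_*), looks like this; the real pimd
 * macro and flag names may differ.
 */
#include <stdio.h>
#include <stdint.h>

#define DBG_IGMP_TRACE        (1u << 0)
#define DBG_IGMP_TRACE_DETAIL (1u << 1)

static uint32_t debug_mask;

#define DO_DEBUG(flag)   (debug_mask |= (flag))
#define DONT_DEBUG(flag) (debug_mask &= ~(flag))
#define IS_DEBUG(flag)   (debug_mask & (flag))

int main(void)
{
	DO_DEBUG(DBG_IGMP_TRACE_DETAIL);
	if (IS_DEBUG(DBG_IGMP_TRACE_DETAIL))
		printf("detailed IGMP trace logging enabled\n");

	DONT_DEBUG(DBG_IGMP_TRACE_DETAIL);
	if (!IS_DEBUG(DBG_IGMP_TRACE_DETAIL))
		printf("detailed IGMP trace logging disabled\n");
	return 0;
}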
install_element(CONFIG_NODE, &no_igmp_group_watermark_cmd); -	install_element(VRF_NODE, &no_igmp_group_watermark_cmd); +	install_element(CONFIG_NODE, &ip_igmp_group_watermark_cmd); +	install_element(VRF_NODE, &ip_igmp_group_watermark_cmd); +	install_element(CONFIG_NODE, &no_ip_igmp_group_watermark_cmd); +	install_element(VRF_NODE, &no_ip_igmp_group_watermark_cmd);  	install_element(INTERFACE_NODE, &interface_ip_igmp_cmd);  	install_element(INTERFACE_NODE, &interface_no_ip_igmp_cmd); @@ -10717,6 +7940,8 @@ void pim_cmd_init(void)  	install_element(ENABLE_NODE, &no_debug_igmp_packets_cmd);  	install_element(ENABLE_NODE, &debug_igmp_trace_cmd);  	install_element(ENABLE_NODE, &no_debug_igmp_trace_cmd); +	install_element(ENABLE_NODE, &debug_igmp_trace_detail_cmd); +	install_element(ENABLE_NODE, &no_debug_igmp_trace_detail_cmd);  	install_element(ENABLE_NODE, &debug_mroute_cmd);  	install_element(ENABLE_NODE, &debug_mroute_detail_cmd);  	install_element(ENABLE_NODE, &no_debug_mroute_cmd); @@ -10770,6 +7995,8 @@ void pim_cmd_init(void)  	install_element(CONFIG_NODE, &no_debug_igmp_packets_cmd);  	install_element(CONFIG_NODE, &debug_igmp_trace_cmd);  	install_element(CONFIG_NODE, &no_debug_igmp_trace_cmd); +	install_element(CONFIG_NODE, &debug_igmp_trace_detail_cmd); +	install_element(CONFIG_NODE, &no_debug_igmp_trace_detail_cmd);  	install_element(CONFIG_NODE, &debug_mroute_cmd);  	install_element(CONFIG_NODE, &debug_mroute_detail_cmd);  	install_element(CONFIG_NODE, &no_debug_mroute_cmd); diff --git a/pimd/pim_cmd.h b/pimd/pim_cmd.h index 89a4e6e699..8022eeea0e 100644 --- a/pimd/pim_cmd.h +++ b/pimd/pim_cmd.h @@ -73,13 +73,4 @@  void pim_cmd_init(void); -/* - * Special Macro to allow us to get the correct pim_instance; - */ -#define PIM_DECLVAR_CONTEXT(A, B)                                              \ -	struct vrf *A = VTY_GET_CONTEXT(vrf);                                  \ -	struct pim_instance *B =                                               \ -		(vrf) ? vrf->info : pim_get_pim_instance(VRF_DEFAULT);         \ -	vrf = (vrf) ? vrf : pim->vrf; -  #endif /* PIM_CMD_H */ diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c index c5d89f8065..d69b94ab12 100644 --- a/pimd/pim_cmd_common.c +++ b/pimd/pim_cmd_common.c @@ -31,13 +31,28 @@  #include "vrf.h"  #include "ferr.h"  #include "lib/srcdest_table.h" +#include "lib/linklist.h"  #include "pimd.h"  #include "pim_vty.h"  #include "lib/northbound_cli.h"  #include "pim_errors.h"  #include "pim_nb.h" +#include "pim_mroute.h" +#include "pim_cmd.h" +#include "pim6_cmd.h"  #include "pim_cmd_common.h" +#include "pim_time.h" +#include "pim_zebra.h" +#include "pim_zlookup.h" +#include "pim_iface.h" +#include "pim_macro.h" +#include "pim_neighbor.h" +#include "pim_nht.h" +#include "pim_sock.h" +#include "pim_ssm.h" +#include "pim_addr.h" +#include "pim_static.h"  /**   * Get current node VRF name. @@ -653,3 +668,2874 @@ int pim_process_no_rp_plist_cmd(struct vty *vty, const char *rp_str,  	return nb_cli_apply_changes(vty, NULL);  } + +bool pim_sgaddr_match(pim_sgaddr item, pim_sgaddr match) +{ +	return (pim_addr_is_any(match.grp) || +		!pim_addr_cmp(match.grp, item.grp)) && +	       (pim_addr_is_any(match.src) || +		!pim_addr_cmp(match.src, item.src)); +} + +void json_object_pim_ifp_add(struct json_object *json, struct interface *ifp) +{ +	struct pim_interface *pim_ifp; + +	pim_ifp = ifp->info; +	json_object_string_add(json, "name", ifp->name); +	json_object_string_add(json, "state", if_is_up(ifp) ? 
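/*
 * Illustrative sketch only, not part of the patch: pim_sgaddr_match() above
 * treats an unspecified ("any") group or source as a wildcard, so the same
 * comparison serves "show ... <G>" and "show ... <S> <G>" filters. Below is a
 * standalone IPv4 analogue with a hypothetical sg_filter type; it is not the
 * pim_addr API.
 */
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

struct sg_filter {
	struct in_addr src;	/* INADDR_ANY == match every source */
	struct in_addr grp;	/* INADDR_ANY == match every group  */
};

static int sg_match(struct sg_filter item, struct sg_filter match)
{
	return (match.grp.s_addr == INADDR_ANY ||
		match.grp.s_addr == item.grp.s_addr) &&
	       (match.src.s_addr == INADDR_ANY ||
		match.src.s_addr == item.src.s_addr);
}

int main(void)
{
	struct sg_filter entry, filter = { .src = { INADDR_ANY } };

	inet_pton(AF_INET, "10.0.0.1", &entry.src);
	inet_pton(AF_INET, "239.1.1.1", &entry.grp);
	inet_pton(AF_INET, "239.1.1.1", &filter.grp);

	printf("match: %s\n", sg_match(entry, filter) ? "yes" : "no");
	return 0;
}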
"up" : "down"); +	json_object_string_addf(json, "address", "%pPA", +				&pim_ifp->primary_address); +	json_object_int_add(json, "index", ifp->ifindex); + +	if (if_is_multicast(ifp)) +		json_object_boolean_true_add(json, "flagMulticast"); + +	if (if_is_broadcast(ifp)) +		json_object_boolean_true_add(json, "flagBroadcast"); + +	if (ifp->flags & IFF_ALLMULTI) +		json_object_boolean_true_add(json, "flagAllMulticast"); + +	if (ifp->flags & IFF_PROMISC) +		json_object_boolean_true_add(json, "flagPromiscuous"); + +	if (PIM_IF_IS_DELETED(ifp)) +		json_object_boolean_true_add(json, "flagDeleted"); + +	if (pim_if_lan_delay_enabled(ifp)) +		json_object_boolean_true_add(json, "lanDelayEnabled"); +} + +void pim_print_ifp_flags(struct vty *vty, struct interface *ifp) +{ +	vty_out(vty, "Flags\n"); +	vty_out(vty, "-----\n"); +	vty_out(vty, "All Multicast   : %s\n", +		(ifp->flags & IFF_ALLMULTI) ? "yes" : "no"); +	vty_out(vty, "Broadcast       : %s\n", +		if_is_broadcast(ifp) ? "yes" : "no"); +	vty_out(vty, "Deleted         : %s\n", +		PIM_IF_IS_DELETED(ifp) ? "yes" : "no"); +	vty_out(vty, "Interface Index : %d\n", ifp->ifindex); +	vty_out(vty, "Multicast       : %s\n", +		if_is_multicast(ifp) ? "yes" : "no"); +	vty_out(vty, "Promiscuous     : %s\n", +		(ifp->flags & IFF_PROMISC) ? "yes" : "no"); +	vty_out(vty, "\n"); +	vty_out(vty, "\n"); +} + +void json_object_pim_upstream_add(json_object *json, struct pim_upstream *up) +{ +	json_object_boolean_add( +		json, "drJoinDesired", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED)); +	json_object_boolean_add( +		json, "drJoinDesiredUpdated", +		CHECK_FLAG(up->flags, +			   PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED_UPDATED)); +	json_object_boolean_add( +		json, "firstHopRouter", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_FHR)); +	json_object_boolean_add( +		json, "sourceIgmp", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_IGMP)); +	json_object_boolean_add( +		json, "sourcePim", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_PIM)); +	json_object_boolean_add( +		json, "sourceStream", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_STREAM)); +	/* XXX: need to print ths flag in the plain text display as well */ +	json_object_boolean_add( +		json, "sourceMsdp", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_MSDP)); +	json_object_boolean_add( +		json, "sendSGRptPrune", +		CHECK_FLAG(up->flags, +			   PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE)); +	json_object_boolean_add( +		json, "lastHopRouter", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_LHR)); +	json_object_boolean_add( +		json, "disableKATExpiry", +		CHECK_FLAG(up->flags, +			   PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY)); +	json_object_boolean_add( +		json, "staticIncomingInterface", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_STATIC_IIF)); +	json_object_boolean_add( +		json, "allowIncomingInterfaceinOil", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL)); +	json_object_boolean_add( +		json, "noPimRegistrationData", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA)); +	json_object_boolean_add( +		json, "forcePimRegistration", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG)); +	json_object_boolean_add( +		json, "sourceVxlanOrigination", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG)); +	json_object_boolean_add( +		json, "sourceVxlanTermination", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM)); +	json_object_boolean_add( +		json, "mlagVxlan", +		CHECK_FLAG(up->flags, 
PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN)); +	json_object_boolean_add( +		json, "mlagNonDesignatedForwarder", +		CHECK_FLAG(up->flags, PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF)); +} + +static const char * +pim_upstream_state2brief_str(enum pim_upstream_state join_state, +			     char *state_str, size_t state_str_len) +{ +	switch (join_state) { +	case PIM_UPSTREAM_NOTJOINED: +		strlcpy(state_str, "NotJ", state_str_len); +		break; +	case PIM_UPSTREAM_JOINED: +		strlcpy(state_str, "J", state_str_len); +		break; +	default: +		strlcpy(state_str, "Unk", state_str_len); +	} +	return state_str; +} + +static const char *pim_reg_state2brief_str(enum pim_reg_state reg_state, +					   char *state_str, +					   size_t state_str_len) +{ +	switch (reg_state) { +	case PIM_REG_NOINFO: +		strlcpy(state_str, "RegNI", state_str_len); +		break; +	case PIM_REG_JOIN: +		strlcpy(state_str, "RegJ", state_str_len); +		break; +	case PIM_REG_JOIN_PENDING: +	case PIM_REG_PRUNE: +		strlcpy(state_str, "RegP", state_str_len); +		break; +	} +	return state_str; +} + +void pim_show_rpf_refresh_stats(struct vty *vty, struct pim_instance *pim, +				time_t now, json_object *json) +{ +	char refresh_uptime[10]; + +	pim_time_uptime_begin(refresh_uptime, sizeof(refresh_uptime), now, +			      pim->rpf_cache_refresh_last); + +	if (json) { +		json_object_int_add(json, "rpfCacheRefreshDelayMsecs", +				    router->rpf_cache_refresh_delay_msec); +		json_object_int_add( +			json, "rpfCacheRefreshTimer", +			pim_time_timer_remain_msec(pim->rpf_cache_refresher)); +		json_object_int_add(json, "rpfCacheRefreshRequests", +				    pim->rpf_cache_refresh_requests); +		json_object_int_add(json, "rpfCacheRefreshEvents", +				    pim->rpf_cache_refresh_events); +		json_object_string_add(json, "rpfCacheRefreshLast", +				       refresh_uptime); +		json_object_int_add(json, "nexthopLookups", +				    pim->nexthop_lookups); +		json_object_int_add(json, "nexthopLookupsAvoided", +				    pim->nexthop_lookups_avoided); +	} else { +		vty_out(vty, +			"RPF Cache Refresh Delay:    %ld msecs\n" +			"RPF Cache Refresh Timer:    %ld msecs\n" +			"RPF Cache Refresh Requests: %lld\n" +			"RPF Cache Refresh Events:   %lld\n" +			"RPF Cache Refresh Last:     %s\n" +			"Nexthop Lookups:            %lld\n" +			"Nexthop Lookups Avoided:    %lld\n", +			router->rpf_cache_refresh_delay_msec, +			pim_time_timer_remain_msec(pim->rpf_cache_refresher), +			(long long)pim->rpf_cache_refresh_requests, +			(long long)pim->rpf_cache_refresh_events, +			refresh_uptime, (long long)pim->nexthop_lookups, +			(long long)pim->nexthop_lookups_avoided); +	} +} + +void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json) +{ +	struct pim_upstream *up; +	time_t now = pim_time_monotonic_sec(); +	json_object *json_group = NULL; +	json_object *json_row = NULL; + +	pim_show_rpf_refresh_stats(vty, pim, now, json); + +	if (!json) { +		vty_out(vty, "\n"); +		vty_out(vty, +			"Source          Group           RpfIface         RpfAddress      RibNextHop      Metric Pref\n"); +	} + +	frr_each (rb_pim_upstream, &pim->upstream_head, up) { +		char rpf_addr_str[PREFIX_STRLEN]; +		const char *rpf_ifname; +		struct pim_rpf *rpf = &up->rpf; + +		pim_addr_dump("<rpf?>", &rpf->rpf_addr, rpf_addr_str, +			      sizeof(rpf_addr_str)); + +		rpf_ifname = +			rpf->source_nexthop.interface ? 
rpf->source_nexthop +								.interface->name +						      : "<ifname?>"; + +		if (json) { +			char grp_str[PIM_ADDRSTRLEN]; +			char src_str[PIM_ADDRSTRLEN]; + +			snprintfrr(grp_str, sizeof(grp_str), "%pPAs", +				   &up->sg.grp); +			snprintfrr(src_str, sizeof(src_str), "%pPAs", +				   &up->sg.src); + +			json_object_object_get_ex(json, grp_str, &json_group); + +			if (!json_group) { +				json_group = json_object_new_object(); +				json_object_object_add(json, grp_str, +						       json_group); +			} + +			json_row = json_object_new_object(); +			json_object_string_add(json_row, "source", src_str); +			json_object_string_add(json_row, "group", grp_str); +			json_object_string_add(json_row, "rpfInterface", +					       rpf_ifname); +			json_object_string_add(json_row, "rpfAddress", +					       rpf_addr_str); +			json_object_string_addf( +				json_row, "ribNexthop", "%pPAs", +				&rpf->source_nexthop.mrib_nexthop_addr); +			json_object_int_add( +				json_row, "routeMetric", +				rpf->source_nexthop.mrib_route_metric); +			json_object_int_add( +				json_row, "routePreference", +				rpf->source_nexthop.mrib_metric_preference); +			json_object_object_add(json_group, src_str, json_row); + +		} else { +			vty_out(vty, +				"%-15pPAs %-15pPAs %-16s %-15s %-15pPAs %6d %4d\n", +				&up->sg.src, &up->sg.grp, rpf_ifname, +				rpf_addr_str, +				&rpf->source_nexthop.mrib_nexthop_addr, +				rpf->source_nexthop.mrib_route_metric, +				rpf->source_nexthop.mrib_metric_preference); +		} +	} +} + +void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty) +{ +	struct interface *ifp; + +	vty_out(vty, +		"Interface        Address         Neighbor        Secondary      \n"); + +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		struct pim_interface *pim_ifp; +		pim_addr ifaddr; +		struct listnode *neighnode; +		struct pim_neighbor *neigh; + +		pim_ifp = ifp->info; + +		if (!pim_ifp) +			continue; + +		if (pim_ifp->pim_sock_fd < 0) +			continue; + +		ifaddr = pim_ifp->primary_address; + +		for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode, +					  neigh)) { +			struct listnode *prefix_node; +			struct prefix *p; + +			if (!neigh->prefix_list) +				continue; + +			for (ALL_LIST_ELEMENTS_RO(neigh->prefix_list, +						  prefix_node, p)) +				vty_out(vty, +					"%-16s %-15pPAs %-15pPAs %-15pFX\n", +					ifp->name, &ifaddr, &neigh->source_addr, +					p); +		} +	} +} + +void pim_show_state(struct pim_instance *pim, struct vty *vty, +		    const char *src_or_group, const char *group, +		    json_object *json) +{ +	struct channel_oil *c_oil; +	json_object *json_group = NULL; +	json_object *json_ifp_in = NULL; +	json_object *json_ifp_out = NULL; +	json_object *json_source = NULL; +	time_t now; +	int first_oif; + +	now = pim_time_monotonic_sec(); + +	if (!json) { +		vty_out(vty, +			"Codes: J -> Pim Join, I -> IGMP Report, S -> Source, * -> Inherited from (*,G), V -> VxLAN, M -> Muted"); +		vty_out(vty, +			"\nActive Source           Group            RPT  IIF               OIL\n"); +	} + +	frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) { +		char src_str[PIM_ADDRSTRLEN]; +		char grp_str[PIM_ADDRSTRLEN]; +		char in_ifname[INTERFACE_NAMSIZ + 1]; +		char out_ifname[INTERFACE_NAMSIZ + 1]; +		int oif_vif_index; +		struct interface *ifp_in; +		bool isRpt; + +		first_oif = 1; + +		if ((c_oil->up && +		     PIM_UPSTREAM_FLAG_TEST_USE_RPT(c_oil->up->flags)) || +		    pim_addr_is_any(*oil_origin(c_oil))) +			isRpt = true; +		else +			isRpt = false; + +		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", 
+			   oil_mcastgrp(c_oil)); +		snprintfrr(src_str, sizeof(src_str), "%pPAs", +			   oil_origin(c_oil)); +		ifp_in = pim_if_find_by_vif_index(pim, *oil_parent(c_oil)); + +		if (ifp_in) +			strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname)); +		else +			strlcpy(in_ifname, "<iif?>", sizeof(in_ifname)); + +		if (src_or_group) { +			if (strcmp(src_or_group, src_str) && +			    strcmp(src_or_group, grp_str)) +				continue; + +			if (group && strcmp(group, grp_str)) +				continue; +		} + +		if (json) { + +			/* Find the group, create it if it doesn't exist */ +			json_object_object_get_ex(json, grp_str, &json_group); + +			if (!json_group) { +				json_group = json_object_new_object(); +				json_object_object_add(json, grp_str, +						       json_group); +			} + +			/* Find the source nested under the group, create it if +			 * it doesn't exist +			 */ +			json_object_object_get_ex(json_group, src_str, +						  &json_source); + +			if (!json_source) { +				json_source = json_object_new_object(); +				json_object_object_add(json_group, src_str, +						       json_source); +			} + +			/* Find the inbound interface nested under the source, +			 * create it if it doesn't exist +			 */ +			json_object_object_get_ex(json_source, in_ifname, +						  &json_ifp_in); + +			if (!json_ifp_in) { +				json_ifp_in = json_object_new_object(); +				json_object_object_add(json_source, in_ifname, +						       json_ifp_in); +				json_object_int_add(json_source, "Installed", +						    c_oil->installed); +				json_object_int_add(json_source, "installed", +						    c_oil->installed); +				json_object_boolean_add(json_source, "isRpt", +							isRpt); +				json_object_int_add(json_source, "RefCount", +						    c_oil->oil_ref_count); +				json_object_int_add(json_source, "refCount", +						    c_oil->oil_ref_count); +				json_object_int_add(json_source, "OilListSize", +						    c_oil->oil_size); +				json_object_int_add(json_source, "oilListSize", +						    c_oil->oil_size); +				json_object_int_add( +					json_source, "OilRescan", +					c_oil->oil_inherited_rescan); +				json_object_int_add( +					json_source, "oilRescan", +					c_oil->oil_inherited_rescan); +				json_object_int_add(json_source, "LastUsed", +						    c_oil->cc.lastused); +				json_object_int_add(json_source, "lastUsed", +						    c_oil->cc.lastused); +				json_object_int_add(json_source, "PacketCount", +						    c_oil->cc.pktcnt); +				json_object_int_add(json_source, "packetCount", +						    c_oil->cc.pktcnt); +				json_object_int_add(json_source, "ByteCount", +						    c_oil->cc.bytecnt); +				json_object_int_add(json_source, "byteCount", +						    c_oil->cc.bytecnt); +				json_object_int_add(json_source, +						    "WrongInterface", +						    c_oil->cc.wrong_if); +				json_object_int_add(json_source, +						    "wrongInterface", +						    c_oil->cc.wrong_if); +			} +		} else +			vty_out(vty, "%-6d %-15pPAs  %-15pPAs  %-3s  %-16s  ", +				c_oil->installed, oil_origin(c_oil), +				oil_mcastgrp(c_oil), isRpt ? 
"y" : "n", +				in_ifname); + +		for (oif_vif_index = 0; oif_vif_index < MAXVIFS; +		     ++oif_vif_index) { +			struct interface *ifp_out; +			char oif_uptime[10]; +			int ttl; + +			ttl = oil_if_has(c_oil, oif_vif_index); +			if (ttl < 1) +				continue; + +			ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index); +			pim_time_uptime( +				oif_uptime, sizeof(oif_uptime), +				now - c_oil->oif_creation[oif_vif_index]); + +			if (ifp_out) +				strlcpy(out_ifname, ifp_out->name, +					sizeof(out_ifname)); +			else +				strlcpy(out_ifname, "<oif?>", +					sizeof(out_ifname)); + +			if (json) { +				json_ifp_out = json_object_new_object(); +				json_object_string_add(json_ifp_out, "source", +						       src_str); +				json_object_string_add(json_ifp_out, "group", +						       grp_str); +				json_object_string_add(json_ifp_out, +						       "inboundInterface", +						       in_ifname); +				json_object_string_add(json_ifp_out, +						       "outboundInterface", +						       out_ifname); +				json_object_int_add(json_ifp_out, "installed", +						    c_oil->installed); + +				json_object_object_add(json_ifp_in, out_ifname, +						       json_ifp_out); +			} else { +				if (first_oif) { +					first_oif = 0; +					vty_out(vty, "%s(%c%c%c%c%c)", +						out_ifname, +						(c_oil->oif_flags +							 [oif_vif_index] & +						 PIM_OIF_FLAG_PROTO_GM) +							? 'I' +							: ' ', +						(c_oil->oif_flags +							 [oif_vif_index] & +						 PIM_OIF_FLAG_PROTO_PIM) +							? 'J' +							: ' ', +						(c_oil->oif_flags +							 [oif_vif_index] & +						 PIM_OIF_FLAG_PROTO_VXLAN) +							? 'V' +							: ' ', +						(c_oil->oif_flags +							 [oif_vif_index] & +						 PIM_OIF_FLAG_PROTO_STAR) +							? '*' +							: ' ', +						(c_oil->oif_flags +							 [oif_vif_index] & +						 PIM_OIF_FLAG_MUTE) +							? 'M' +							: ' '); +				} else +					vty_out(vty, ", %s(%c%c%c%c%c)", +						out_ifname, +						(c_oil->oif_flags +							 [oif_vif_index] & +						 PIM_OIF_FLAG_PROTO_GM) +							? 'I' +							: ' ', +						(c_oil->oif_flags +							 [oif_vif_index] & +						 PIM_OIF_FLAG_PROTO_PIM) +							? 'J' +							: ' ', +						(c_oil->oif_flags +							 [oif_vif_index] & +						 PIM_OIF_FLAG_PROTO_VXLAN) +							? 'V' +							: ' ', +						(c_oil->oif_flags +							 [oif_vif_index] & +						 PIM_OIF_FLAG_PROTO_STAR) +							? '*' +							: ' ', +						(c_oil->oif_flags +							 [oif_vif_index] & +						 PIM_OIF_FLAG_MUTE) +							? 'M' +							: ' '); +			} +		} + +		if (!json) +			vty_out(vty, "\n"); +	} + +	if (!json) +		vty_out(vty, "\n"); +} + +/* pim statistics - just adding only bsm related now. + * We can continue to add all pim related stats here. 
+ */ +void pim_show_statistics(struct pim_instance *pim, struct vty *vty, +			 const char *ifname, bool uj) +{ +	json_object *json = NULL; +	struct interface *ifp; + +	if (uj) { +		json = json_object_new_object(); +		json_object_int_add(json, "bsmRx", pim->bsm_rcvd); +		json_object_int_add(json, "bsmTx", pim->bsm_sent); +		json_object_int_add(json, "bsmDropped", pim->bsm_dropped); +	} else { +		vty_out(vty, "BSM Statistics :\n"); +		vty_out(vty, "----------------\n"); +		vty_out(vty, "Number of Received BSMs : %" PRIu64 "\n", +			pim->bsm_rcvd); +		vty_out(vty, "Number of Forwared BSMs : %" PRIu64 "\n", +			pim->bsm_sent); +		vty_out(vty, "Number of Dropped BSMs  : %" PRIu64 "\n", +			pim->bsm_dropped); +	} + +	vty_out(vty, "\n"); + +	/* scan interfaces */ +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		struct pim_interface *pim_ifp = ifp->info; + +		if (ifname && strcmp(ifname, ifp->name)) +			continue; + +		if (!pim_ifp) +			continue; + +		if (!uj) { +			vty_out(vty, "Interface : %s\n", ifp->name); +			vty_out(vty, "-------------------\n"); +			vty_out(vty, +				"Number of BSMs dropped due to config miss : %u\n", +				pim_ifp->pim_ifstat_bsm_cfg_miss); +			vty_out(vty, "Number of unicast BSMs dropped : %u\n", +				pim_ifp->pim_ifstat_ucast_bsm_cfg_miss); +			vty_out(vty, +				"Number of BSMs dropped due to invalid scope zone : %u\n", +				pim_ifp->pim_ifstat_bsm_invalid_sz); +		} else { + +			json_object *json_row = NULL; + +			json_row = json_object_new_object(); + +			json_object_string_add(json_row, "If Name", ifp->name); +			json_object_int_add(json_row, "bsmDroppedConfig", +					    pim_ifp->pim_ifstat_bsm_cfg_miss); +			json_object_int_add( +				json_row, "bsmDroppedUnicast", +				pim_ifp->pim_ifstat_ucast_bsm_cfg_miss); +			json_object_int_add(json_row, +					    "bsmDroppedInvalidScopeZone", +					    pim_ifp->pim_ifstat_bsm_invalid_sz); +			json_object_object_add(json, ifp->name, json_row); +		} +		vty_out(vty, "\n"); +	} + +	if (uj) +		vty_json(vty, json); +} + +void pim_show_upstream(struct pim_instance *pim, struct vty *vty, +		       pim_sgaddr *sg, json_object *json) +{ +	struct pim_upstream *up; +	time_t now; +	json_object *json_group = NULL; +	json_object *json_row = NULL; + +	now = pim_time_monotonic_sec(); + +	if (!json) +		vty_out(vty, +			"Iif             Source          Group           State       Uptime   JoinTimer RSTimer   KATimer   RefCnt\n"); + +	frr_each (rb_pim_upstream, &pim->upstream_head, up) { +		char uptime[10]; +		char join_timer[10]; +		char rs_timer[10]; +		char ka_timer[10]; +		char msdp_reg_timer[10]; +		char state_str[PIM_REG_STATE_STR_LEN]; + +		if (!pim_sgaddr_match(up->sg, *sg)) +			continue; + +		pim_time_uptime(uptime, sizeof(uptime), +				now - up->state_transition); +		pim_time_timer_to_hhmmss(join_timer, sizeof(join_timer), +					 up->t_join_timer); + +		/* +		 * If the upstream is not dummy and it has a J/P timer for the +		 * neighbor display that +		 */ +		if (!up->t_join_timer && up->rpf.source_nexthop.interface) { +			struct pim_neighbor *nbr; + +			nbr = pim_neighbor_find_prefix( +				up->rpf.source_nexthop.interface, +				&up->rpf.rpf_addr); +			if (nbr) +				pim_time_timer_to_hhmmss(join_timer, +							 sizeof(join_timer), +							 nbr->jp_timer); +		} + +		pim_time_timer_to_hhmmss(rs_timer, sizeof(rs_timer), +					 up->t_rs_timer); +		pim_time_timer_to_hhmmss(ka_timer, sizeof(ka_timer), +					 up->t_ka_timer); +		pim_time_timer_to_hhmmss(msdp_reg_timer, sizeof(msdp_reg_timer), +					 up->t_msdp_reg_timer); + +		
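/*
 * Illustrative sketch only, not part of the patch: the upstream display above
 * leans on helpers such as pim_time_uptime() and pim_time_timer_to_hhmmss()
 * to render second counts as fixed-width timer strings. A minimal standalone
 * analogue (hypothetical name fmt_hhmmss, not the pim_time API) could look
 * like this:
 */
#include <stdio.h>

static const char *fmt_hhmmss(char *buf, size_t len, long secs)
{
	if (secs < 0) {
		/* timer not running */
		snprintf(buf, len, "--:--:--");
		return buf;
	}
	snprintf(buf, len, "%02ld:%02ld:%02ld", secs / 3600, (secs / 60) % 60,
		 secs % 60);
	return buf;
}

int main(void)
{
	char buf[16];

	/* e.g. an upstream that transitioned 3723 seconds ago */
	printf("uptime     %s\n", fmt_hhmmss(buf, sizeof(buf), 3723));
	/* e.g. a join timer that is not armed */
	printf("join timer %s\n", fmt_hhmmss(buf, sizeof(buf), -1));
	return 0;
}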
pim_upstream_state2brief_str(up->join_state, state_str, +					     sizeof(state_str)); +		if (up->reg_state != PIM_REG_NOINFO) { +			char tmp_str[PIM_REG_STATE_STR_LEN]; +			char tmp[sizeof(state_str) + 1]; + +			snprintf(tmp, sizeof(tmp), ",%s", +				 pim_reg_state2brief_str(up->reg_state, tmp_str, +							 sizeof(tmp_str))); +			strlcat(state_str, tmp, sizeof(state_str)); +		} + +		if (json) { +			char grp_str[PIM_ADDRSTRLEN]; +			char src_str[PIM_ADDRSTRLEN]; + +			snprintfrr(grp_str, sizeof(grp_str), "%pPAs", +				   &up->sg.grp); +			snprintfrr(src_str, sizeof(src_str), "%pPAs", +				   &up->sg.src); + +			json_object_object_get_ex(json, grp_str, &json_group); + +			if (!json_group) { +				json_group = json_object_new_object(); +				json_object_object_add(json, grp_str, +						       json_group); +			} + +			json_row = json_object_new_object(); +			json_object_pim_upstream_add(json_row, up); +			json_object_string_add( +				json_row, "inboundInterface", +				up->rpf.source_nexthop.interface +				? up->rpf.source_nexthop.interface->name +				: "Unknown"); + +			/* +			 * The RPF address we use is slightly different +			 * based upon what we are looking up. +			 * If we have a S, list that unless +			 * we are the FHR, else we just put +			 * the RP as the rpfAddress +			 */ +			if (up->flags & PIM_UPSTREAM_FLAG_MASK_FHR || +			    pim_addr_is_any(up->sg.src)) { +				struct pim_rpf *rpg; + +				rpg = RP(pim, up->sg.grp); +				json_object_string_addf(json_row, "rpfAddress", +							"%pFX", &rpg->rpf_addr); +			} else { +				json_object_string_add(json_row, "rpfAddress", +						       src_str); +			} + +			json_object_string_add(json_row, "source", src_str); +			json_object_string_add(json_row, "group", grp_str); +			json_object_string_add(json_row, "state", state_str); +			json_object_string_add( +				json_row, "joinState", +				pim_upstream_state2str(up->join_state)); +			json_object_string_add( +				json_row, "regState", +				pim_reg_state2str(up->reg_state, state_str, +						  sizeof(state_str))); +			json_object_string_add(json_row, "upTime", uptime); +			json_object_string_add(json_row, "joinTimer", +					       join_timer); +			json_object_string_add(json_row, "resetTimer", +					       rs_timer); +			json_object_string_add(json_row, "keepaliveTimer", +					       ka_timer); +			json_object_string_add(json_row, "msdpRegTimer", +					       msdp_reg_timer); +			json_object_int_add(json_row, "refCount", +					    up->ref_count); +			json_object_int_add(json_row, "sptBit", up->sptbit); +			json_object_object_add(json_group, src_str, json_row); +		} else { +			vty_out(vty, +				"%-16s%-15pPAs %-15pPAs %-11s %-8s %-9s %-9s %-9s %6d\n", +				up->rpf.source_nexthop.interface +				? 
up->rpf.source_nexthop.interface->name +				: "Unknown", +				&up->sg.src, &up->sg.grp, state_str, uptime, +				join_timer, rs_timer, ka_timer, up->ref_count); +		} +	} +} + +static void pim_show_join_desired_helper(struct pim_instance *pim, +					 struct vty *vty, +					 struct pim_upstream *up, +					 json_object *json, bool uj) +{ +	json_object *json_group = NULL; +	json_object *json_row = NULL; + +	if (uj) { +		char grp_str[PIM_ADDRSTRLEN]; +		char src_str[PIM_ADDRSTRLEN]; + +		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &up->sg.grp); +		snprintfrr(src_str, sizeof(src_str), "%pPAs", &up->sg.src); + +		json_object_object_get_ex(json, grp_str, &json_group); + +		if (!json_group) { +			json_group = json_object_new_object(); +			json_object_object_add(json, grp_str, json_group); +		} + +		json_row = json_object_new_object(); +		json_object_pim_upstream_add(json_row, up); +		json_object_string_add(json_row, "source", src_str); +		json_object_string_add(json_row, "group", grp_str); + +		if (pim_upstream_evaluate_join_desired(pim, up)) +			json_object_boolean_true_add(json_row, +						     "evaluateJoinDesired"); + +		json_object_object_add(json_group, src_str, json_row); + +	} else { +		vty_out(vty, "%-15pPAs %-15pPAs %-6s\n", &up->sg.src, +			&up->sg.grp, +			pim_upstream_evaluate_join_desired(pim, up) ? "yes" +								    : "no"); +	} +} + +void pim_show_join_desired(struct pim_instance *pim, struct vty *vty, bool uj) +{ +	struct pim_upstream *up; + +	json_object *json = NULL; + +	if (uj) +		json = json_object_new_object(); +	else +		vty_out(vty, "Source          Group           EvalJD\n"); + +	frr_each (rb_pim_upstream, &pim->upstream_head, up) { +		/* scan all interfaces */ +		pim_show_join_desired_helper(pim, vty, up, json, uj); +	} + +	if (uj) +		vty_json(vty, json); +} + +void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj) +{ +	struct pim_upstream *up; +	json_object *json = NULL; +	json_object *json_group = NULL; +	json_object *json_row = NULL; + +	if (uj) +		json = json_object_new_object(); +	else +		vty_out(vty, +			"Source          Group           RpfIface         RibNextHop      RpfAddress     \n"); + +	frr_each (rb_pim_upstream, &pim->upstream_head, up) { +		char rpf_addr_str[PREFIX_STRLEN]; +		struct pim_rpf *rpf; +		const char *rpf_ifname; + +		rpf = &up->rpf; + +		pim_addr_dump("<rpf?>", &rpf->rpf_addr, rpf_addr_str, +			      sizeof(rpf_addr_str)); + +		rpf_ifname = +			rpf->source_nexthop.interface ? 
rpf->source_nexthop +								.interface->name +						      : "<ifname?>"; + +		if (uj) { +			char grp_str[PIM_ADDRSTRLEN]; +			char src_str[PIM_ADDRSTRLEN]; + +			snprintfrr(grp_str, sizeof(grp_str), "%pPAs", +				   &up->sg.grp); +			snprintfrr(src_str, sizeof(src_str), "%pPAs", +				   &up->sg.src); +			json_object_object_get_ex(json, grp_str, &json_group); + +			if (!json_group) { +				json_group = json_object_new_object(); +				json_object_object_add(json, grp_str, +						       json_group); +			} + +			json_row = json_object_new_object(); +			json_object_pim_upstream_add(json_row, up); +			json_object_string_add(json_row, "source", src_str); +			json_object_string_add(json_row, "group", grp_str); +			json_object_string_add(json_row, "rpfInterface", +					       rpf_ifname); +			json_object_string_addf( +				json_row, "ribNexthop", "%pPAs", +				&rpf->source_nexthop.mrib_nexthop_addr); +			json_object_string_add(json_row, "rpfAddress", +					       rpf_addr_str); +			json_object_object_add(json_group, src_str, json_row); +		} else { +			vty_out(vty, "%-15pPAs %-15pPAs %-16s %-15pPA %-15s\n", +				&up->sg.src, &up->sg.grp, rpf_ifname, +				&rpf->source_nexthop.mrib_nexthop_addr, +				rpf_addr_str); +		} +	} + +	if (uj) +		vty_json(vty, json); +} + +static void pim_show_join_helper(struct vty *vty, struct pim_interface *pim_ifp, +				 struct pim_ifchannel *ch, json_object *json, +				 time_t now) +{ +	json_object *json_iface = NULL; +	json_object *json_row = NULL; +	json_object *json_grp = NULL; +	pim_addr ifaddr; +	char uptime[10]; +	char expire[10]; +	char prune[10]; + +	ifaddr = pim_ifp->primary_address; + +	pim_time_uptime_begin(uptime, sizeof(uptime), now, ch->ifjoin_creation); +	pim_time_timer_to_mmss(expire, sizeof(expire), +			       ch->t_ifjoin_expiry_timer); +	pim_time_timer_to_mmss(prune, sizeof(prune), +			       ch->t_ifjoin_prune_pending_timer); + +	if (json) { +		char ch_grp_str[PIM_ADDRSTRLEN]; + +		json_object_object_get_ex(json, ch->interface->name, +					  &json_iface); + +		if (!json_iface) { +			json_iface = json_object_new_object(); +			json_object_pim_ifp_add(json_iface, ch->interface); +			json_object_object_add(json, ch->interface->name, +					       json_iface); +		} + +		json_row = json_object_new_object(); +		json_object_string_addf(json_row, "source", "%pPAs", +					&ch->sg.src); +		json_object_string_addf(json_row, "group", "%pPAs", +					&ch->sg.grp); +		json_object_string_add(json_row, "upTime", uptime); +		json_object_string_add(json_row, "expire", expire); +		json_object_string_add(json_row, "prune", prune); +		json_object_string_add( +			json_row, "channelJoinName", +			pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags)); +		if (PIM_IF_FLAG_TEST_S_G_RPT(ch->flags)) { +#if CONFDATE > 20230131 +			CPP_NOTICE( +				"Remove JSON object commands with keys starting with capital") +#endif +			json_object_int_add(json_row, "SGRpt", 1); +			json_object_int_add(json_row, "sgRpt", 1); +		} +		if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags)) +			json_object_int_add(json_row, "protocolPim", 1); +		if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags)) +			json_object_int_add(json_row, "protocolIgmp", 1); +		snprintfrr(ch_grp_str, sizeof(ch_grp_str), "%pPAs", +			   &ch->sg.grp); +		json_object_object_get_ex(json_iface, ch_grp_str, &json_grp); +		if (!json_grp) { +			json_grp = json_object_new_object(); +			json_object_object_addf(json_grp, json_row, "%pPAs", +						&ch->sg.src); +			json_object_object_addf(json_iface, json_grp, "%pPAs", +						&ch->sg.grp); +		} else +			
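/*
 * Illustrative sketch only, not part of the patch: the join/ifchannel JSON
 * above is built with a recurring "look up the parent object, create it if
 * missing" step (json_object_object_get_ex() + json_object_new_object()).
 * The same pattern with the plain json-c API; FRR's json_object_*_add()
 * helpers are, to my understanding, thin wrappers around these calls.
 */
#include <stdio.h>
#include <json-c/json.h>

static struct json_object *get_or_create(struct json_object *parent,
					 const char *key)
{
	struct json_object *child = NULL;

	if (!json_object_object_get_ex(parent, key, &child)) {
		child = json_object_new_object();
		json_object_object_add(parent, key, child);
	}
	return child;
}

int main(void)
{
	struct json_object *json = json_object_new_object();
	struct json_object *grp = get_or_create(json, "239.1.1.1");

	/* nest one source row under the group, as the helpers above do */
	json_object_object_add(grp, "10.0.0.1",
			       json_object_new_string("Joined"));

	printf("%s\n", json_object_to_json_string(json));
	json_object_put(json);	/* drops the whole tree */
	return 0;
}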
json_object_object_addf(json_grp, json_row, "%pPAs", +						&ch->sg.src); +	} else { +		vty_out(vty, +			"%-16s %-15pPAs %-15pPAs %-15pPAs %-10s %8s %-6s %5s\n", +			ch->interface->name, &ifaddr, &ch->sg.src, &ch->sg.grp, +			pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags), +			uptime, expire, prune); +	} +} + +void pim_show_join(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg, +		   json_object *json) +{ +	struct pim_interface *pim_ifp; +	struct pim_ifchannel *ch; +	struct interface *ifp; +	time_t now; + +	now = pim_time_monotonic_sec(); + +	if (!json) +		vty_out(vty, +			"Interface        Address         Source          Group           State      Uptime   Expire Prune\n"); + +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		pim_ifp = ifp->info; +		if (!pim_ifp) +			continue; + +		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) { +			if (!pim_sgaddr_match(ch->sg, *sg)) +				continue; + +			pim_show_join_helper(vty, pim_ifp, ch, json, now); +		} /* scan interface channels */ +	} +} + +static void pim_show_jp_agg_helper(struct vty *vty, struct interface *ifp, +				   struct pim_neighbor *neigh, +				   struct pim_upstream *up, int is_join) +{ +	vty_out(vty, "%-16s %-15pPAs %-15pPAs %-15pPAs %5s\n", ifp->name, +		&neigh->source_addr, &up->sg.src, &up->sg.grp, +		is_join ? "J" : "P"); +} + +void pim_show_jp_agg_list(struct pim_instance *pim, struct vty *vty) +{ +	struct interface *ifp; +	struct pim_interface *pim_ifp; +	struct listnode *n_node; +	struct pim_neighbor *neigh; +	struct listnode *jag_node; +	struct pim_jp_agg_group *jag; +	struct listnode *js_node; +	struct pim_jp_sources *js; + +	vty_out(vty, +		"Interface        RPF Nbr         Source          Group           State\n"); + +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		pim_ifp = ifp->info; +		if (!pim_ifp) +			continue; + +		for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, n_node, +					  neigh)) { +			for (ALL_LIST_ELEMENTS_RO(neigh->upstream_jp_agg, +						  jag_node, jag)) { +				for (ALL_LIST_ELEMENTS_RO(jag->sources, js_node, +							  js)) { +					pim_show_jp_agg_helper(vty, ifp, neigh, +							       js->up, +							       js->is_join); +				} +			} +		} +	} +} + +static void pim_show_membership_helper(struct vty *vty, +				       struct pim_interface *pim_ifp, +				       struct pim_ifchannel *ch, +				       struct json_object *json) +{ +	json_object *json_iface = NULL; +	json_object *json_row = NULL; + +	json_object_object_get_ex(json, ch->interface->name, &json_iface); +	if (!json_iface) { +		json_iface = json_object_new_object(); +		json_object_pim_ifp_add(json_iface, ch->interface); +		json_object_object_add(json, ch->interface->name, json_iface); +	} + +	json_row = json_object_new_object(); +	json_object_string_addf(json_row, "source", "%pPAs", &ch->sg.src); +	json_object_string_addf(json_row, "group", "%pPAs", &ch->sg.grp); +	json_object_string_add(json_row, "localMembership", +			       ch->local_ifmembership == PIM_IFMEMBERSHIP_NOINFO +				       ? 
"NOINFO" +				       : "INCLUDE"); +	json_object_object_addf(json_iface, json_row, "%pPAs", &ch->sg.grp); +} + +void pim_show_membership(struct pim_instance *pim, struct vty *vty, bool uj) +{ +	struct pim_interface *pim_ifp; +	struct pim_ifchannel *ch; +	struct interface *ifp; +	enum json_type type; +	json_object *json = NULL; +	json_object *json_tmp = NULL; + +	json = json_object_new_object(); + +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		pim_ifp = ifp->info; +		if (!pim_ifp) +			continue; + +		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) { +			pim_show_membership_helper(vty, pim_ifp, ch, json); +		} /* scan interface channels */ +	} + +	if (uj) { +		vty_json(vty, json); +	} else { +		vty_out(vty, +			"Interface         Address          Source           Group            Membership\n"); + +		/* +		 * Example of the json data we are traversing +		 * +		 * { +		 *   "swp3":{ +		 *     "name":"swp3", +		 *     "state":"up", +		 *     "address":"10.1.20.1", +		 *     "index":5, +		 *     "flagMulticast":true, +		 *     "flagBroadcast":true, +		 *     "lanDelayEnabled":true, +		 *     "226.10.10.10":{ +		 *       "source":"*", +		 *       "group":"226.10.10.10", +		 *       "localMembership":"INCLUDE" +		 *     } +		 *   } +		 * } +		 */ + +		/* foreach interface */ +		json_object_object_foreach(json, key, val) +		{ + +			/* Find all of the keys where the val is an object. In +			 * the example +			 * above the only one is 226.10.10.10 +			 */ +			json_object_object_foreach(val, if_field_key, +						   if_field_val) +			{ +				type = json_object_get_type(if_field_val); + +				if (type == json_type_object) { +					vty_out(vty, "%-16s  ", key); + +					json_object_object_get_ex( +						val, "address", &json_tmp); +					vty_out(vty, "%-15s  ", +						json_object_get_string( +							json_tmp)); + +					json_object_object_get_ex(if_field_val, +								  "source", +								  &json_tmp); +					vty_out(vty, "%-15s  ", +						json_object_get_string( +							json_tmp)); + +					/* Group */ +					vty_out(vty, "%-15s  ", if_field_key); + +					json_object_object_get_ex( +						if_field_val, "localMembership", +						&json_tmp); +					vty_out(vty, "%-10s\n", +						json_object_get_string( +							json_tmp)); +				} +			} +		} +		json_object_free(json); +	} +} + +static void pim_show_channel_helper(struct pim_instance *pim, struct vty *vty, +				    struct pim_interface *pim_ifp, +				    struct pim_ifchannel *ch, json_object *json, +				    bool uj) +{ +	struct pim_upstream *up = ch->upstream; +	json_object *json_group = NULL; +	json_object *json_row = NULL; + +	if (uj) { +		char grp_str[PIM_ADDRSTRLEN]; + +		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &up->sg.grp); +		json_object_object_get_ex(json, grp_str, &json_group); + +		if (!json_group) { +			json_group = json_object_new_object(); +			json_object_object_add(json, grp_str, json_group); +		} + +		json_row = json_object_new_object(); +		json_object_pim_upstream_add(json_row, up); +		json_object_string_add(json_row, "interface", +				       ch->interface->name); +		json_object_string_addf(json_row, "source", "%pPAs", +					&up->sg.src); +		json_object_string_addf(json_row, "group", "%pPAs", +					&up->sg.grp); + +		if (pim_macro_ch_lost_assert(ch)) +			json_object_boolean_true_add(json_row, "lostAssert"); + +		if (pim_macro_chisin_joins(ch)) +			json_object_boolean_true_add(json_row, "joins"); + +		if (pim_macro_chisin_pim_include(ch)) +			json_object_boolean_true_add(json_row, "pimInclude"); + +		if (pim_upstream_evaluate_join_desired(pim, 
up)) +			json_object_boolean_true_add(json_row, +						     "evaluateJoinDesired"); + +		json_object_object_addf(json_group, json_row, "%pPAs", +					&up->sg.src); + +	} else { +		vty_out(vty, +			"%-16s %-15pPAs %-15pPAs %-10s %-5s %-10s %-11s %-6s\n", +			ch->interface->name, &up->sg.src, &up->sg.grp, +			pim_macro_ch_lost_assert(ch) ? "yes" : "no", +			pim_macro_chisin_joins(ch) ? "yes" : "no", +			pim_macro_chisin_pim_include(ch) ? "yes" : "no", +			PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(up->flags) +				? "yes" +				: "no", +			pim_upstream_evaluate_join_desired(pim, up) ? "yes" +								    : "no"); +	} +} + +void pim_show_channel(struct pim_instance *pim, struct vty *vty, bool uj) +{ +	struct pim_interface *pim_ifp; +	struct pim_ifchannel *ch; +	struct interface *ifp; + +	json_object *json = NULL; + +	if (uj) +		json = json_object_new_object(); +	else +		vty_out(vty, +			"Interface        Source          Group           LostAssert Joins PimInclude JoinDesired EvalJD\n"); + +	/* scan per-interface (S,G) state */ +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		pim_ifp = ifp->info; +		if (!pim_ifp) +			continue; + + +		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) { +			/* scan all interfaces */ +			pim_show_channel_helper(pim, vty, pim_ifp, ch, json, +						uj); +		} +	} + +	if (uj) +		vty_json(vty, json); +} + +void pim_show_interfaces(struct pim_instance *pim, struct vty *vty, bool mlag, +			 json_object *json) +{ +	struct interface *ifp; +	struct pim_interface *pim_ifp; +	struct pim_upstream *up; +	int fhr = 0; +	int pim_nbrs = 0; +	int pim_ifchannels = 0; +	bool uj = true; +	json_object *json_row = NULL; +	json_object *json_tmp; + +	if (!json) { +		uj = false; +		json = json_object_new_object(); +	} + +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		pim_ifp = ifp->info; + +		if (!pim_ifp) +			continue; + +		if (mlag == true && pim_ifp->activeactive == false) +			continue; + +		pim_nbrs = pim_ifp->pim_neighbor_list->count; +		pim_ifchannels = pim_if_ifchannel_count(pim_ifp); +		fhr = 0; + +		frr_each (rb_pim_upstream, &pim->upstream_head, up) +			if (ifp == up->rpf.source_nexthop.interface) +				if (up->flags & PIM_UPSTREAM_FLAG_MASK_FHR) +					fhr++; + +		json_row = json_object_new_object(); +		json_object_pim_ifp_add(json_row, ifp); +		json_object_int_add(json_row, "pimNeighbors", pim_nbrs); +		json_object_int_add(json_row, "pimIfChannels", pim_ifchannels); +		json_object_int_add(json_row, "firstHopRouterCount", fhr); +		json_object_string_addf(json_row, "pimDesignatedRouter", +					"%pPAs", &pim_ifp->pim_dr_addr); + +		if (pim_addr_cmp(pim_ifp->pim_dr_addr, +				 pim_ifp->primary_address)) +			json_object_boolean_true_add( +				json_row, "pimDesignatedRouterLocal"); + +		json_object_object_add(json, ifp->name, json_row); +	} + +	if (!uj) { +		vty_out(vty, +			"Interface         State          Address  PIM Nbrs           PIM DR  FHR IfChannels\n"); + +		json_object_object_foreach(json, key, val) +		{ +			vty_out(vty, "%-16s  ", key); + +			json_object_object_get_ex(val, "state", &json_tmp); +			vty_out(vty, "%5s  ", json_object_get_string(json_tmp)); + +			json_object_object_get_ex(val, "address", &json_tmp); +			vty_out(vty, "%15s  ", +				json_object_get_string(json_tmp)); + +			json_object_object_get_ex(val, "pimNeighbors", +						  &json_tmp); +			vty_out(vty, "%8d  ", json_object_get_int(json_tmp)); + +			if (json_object_object_get_ex( +				    val, "pimDesignatedRouterLocal", +				    &json_tmp)) { +				vty_out(vty, "%15s  ", "local"); +			} else { +				
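/*
 * Illustrative sketch only, not part of the patch: pim_show_interfaces()
 * above always builds the JSON tree and, for the plain-text case, walks that
 * same tree with json_object_object_foreach() to print the table, so both
 * output modes share one data source. A standalone version of that pattern
 * using the json-c API:
 */
#include <stdio.h>
#include <stdbool.h>
#include <json-c/json.h>

static void show_ifaces(bool use_json)
{
	struct json_object *json = json_object_new_object();
	struct json_object *row = json_object_new_object();

	json_object_object_add(row, "state", json_object_new_string("up"));
	json_object_object_add(row, "pimNeighbors", json_object_new_int(2));
	json_object_object_add(json, "eth0", row);

	if (use_json) {
		printf("%s\n", json_object_to_json_string(json));
	} else {
		printf("Interface  State  Nbrs\n");
		json_object_object_foreach(json, key, val) {
			struct json_object *tmp;

			printf("%-10s ", key);
			json_object_object_get_ex(val, "state", &tmp);
			printf("%-6s ", json_object_get_string(tmp));
			json_object_object_get_ex(val, "pimNeighbors", &tmp);
			printf("%5d\n", json_object_get_int(tmp));
		}
	}
	json_object_put(json);
}

int main(void)
{
	show_ifaces(false);
	show_ifaces(true);
	return 0;
}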
json_object_object_get_ex( +					val, "pimDesignatedRouter", &json_tmp); +				vty_out(vty, "%15s  ", +					json_object_get_string(json_tmp)); +			} + +			json_object_object_get_ex(val, "firstHopRouter", +						  &json_tmp); +			vty_out(vty, "%3d  ", json_object_get_int(json_tmp)); + +			json_object_object_get_ex(val, "pimIfChannels", +						  &json_tmp); +			vty_out(vty, "%9d\n", json_object_get_int(json_tmp)); +		} +	} +} + +void pim_show_interfaces_single(struct pim_instance *pim, struct vty *vty, +				const char *ifname, bool mlag, +				json_object *json) +{ +	pim_addr ifaddr; +	struct interface *ifp; +	struct listnode *neighnode; +	struct pim_interface *pim_ifp; +	struct pim_neighbor *neigh; +	struct pim_upstream *up; +	time_t now; +	char dr_str[PIM_ADDRSTRLEN]; +	char dr_uptime[10]; +	char expire[10]; +	char grp_str[PIM_ADDRSTRLEN]; +	char hello_period[10]; +	char hello_timer[10]; +	char neigh_src_str[PIM_ADDRSTRLEN]; +	char src_str[PIM_ADDRSTRLEN]; +	char stat_uptime[10]; +	char uptime[10]; +	int found_ifname = 0; +	int print_header; +	json_object *json_row = NULL; +	json_object *json_pim_neighbor = NULL; +	json_object *json_pim_neighbors = NULL; +	json_object *json_group = NULL; +	json_object *json_group_source = NULL; +	json_object *json_fhr_sources = NULL; +	struct pim_secondary_addr *sec_addr; +	struct listnode *sec_node; + +	now = pim_time_monotonic_sec(); + +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		pim_ifp = ifp->info; + +		if (!pim_ifp) +			continue; + +		if (mlag == true && pim_ifp->activeactive == false) +			continue; + +		if (strcmp(ifname, "detail") && strcmp(ifname, ifp->name)) +			continue; + +		found_ifname = 1; +		ifaddr = pim_ifp->primary_address; +		snprintfrr(dr_str, sizeof(dr_str), "%pPAs", +			   &pim_ifp->pim_dr_addr); +		pim_time_uptime_begin(dr_uptime, sizeof(dr_uptime), now, +				      pim_ifp->pim_dr_election_last); +		pim_time_timer_to_hhmmss(hello_timer, sizeof(hello_timer), +					 pim_ifp->t_pim_hello_timer); +		pim_time_mmss(hello_period, sizeof(hello_period), +			      pim_ifp->pim_hello_period); +		pim_time_uptime(stat_uptime, sizeof(stat_uptime), +				now - pim_ifp->pim_ifstat_start); + +		if (json) { +			json_row = json_object_new_object(); +			json_object_pim_ifp_add(json_row, ifp); + +			if (!pim_addr_is_any(pim_ifp->update_source)) { +				json_object_string_addf( +					json_row, "useSource", "%pPAs", +					&pim_ifp->update_source); +			} +			if (pim_ifp->sec_addr_list) { +				json_object *sec_list = NULL; + +				sec_list = json_object_new_array(); +				for (ALL_LIST_ELEMENTS_RO( +					     pim_ifp->sec_addr_list, sec_node, +					     sec_addr)) { +					json_object_array_add( +						sec_list, +						json_object_new_stringf( +							"%pFXh", +							&sec_addr->addr)); +				} +				json_object_object_add(json_row, +						       "secondaryAddressList", +						       sec_list); +			} + +			/* PIM neighbors */ +			if (pim_ifp->pim_neighbor_list->count) { +				json_pim_neighbors = json_object_new_object(); + +				for (ALL_LIST_ELEMENTS_RO( +					     pim_ifp->pim_neighbor_list, +					     neighnode, neigh)) { +					json_pim_neighbor = +						json_object_new_object(); +					snprintfrr(neigh_src_str, +						   sizeof(neigh_src_str), +						   "%pPAs", +						   &neigh->source_addr); +					pim_time_uptime(uptime, sizeof(uptime), +							now - neigh->creation); +					pim_time_timer_to_hhmmss( +						expire, sizeof(expire), +						neigh->t_expire_timer); + +					json_object_string_add( +						json_pim_neighbor, "address", +						neigh_src_str); +					
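/*
 * Illustrative sketch only, not part of the patch: the per-interface detail
 * above collects secondary addresses into a JSON array
 * (json_object_new_array() + json_object_array_add()) before attaching it to
 * the interface row. The same shape with plain json-c, using made-up
 * documentation addresses:
 */
#include <stdio.h>
#include <json-c/json.h>

int main(void)
{
	const char *secondary[] = { "192.0.2.10/24", "198.51.100.7/24" };
	struct json_object *row = json_object_new_object();
	struct json_object *list = json_object_new_array();
	size_t i;

	for (i = 0; i < sizeof(secondary) / sizeof(secondary[0]); i++)
		json_object_array_add(list,
				      json_object_new_string(secondary[i]));

	json_object_object_add(row, "secondaryAddressList", list);
	printf("%s\n", json_object_to_json_string(row));
	json_object_put(row);
	return 0;
}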
json_object_string_add( +						json_pim_neighbor, "upTime", +						uptime); +					json_object_string_add( +						json_pim_neighbor, "holdtime", +						expire); + +					json_object_object_add( +						json_pim_neighbors, +						neigh_src_str, +						json_pim_neighbor); +				} + +				json_object_object_add(json_row, "neighbors", +						       json_pim_neighbors); +			} + +			json_object_string_add(json_row, "drAddress", dr_str); +			json_object_int_add(json_row, "drPriority", +					    pim_ifp->pim_dr_priority); +			json_object_string_add(json_row, "drUptime", dr_uptime); +			json_object_int_add(json_row, "drElections", +					    pim_ifp->pim_dr_election_count); +			json_object_int_add(json_row, "drChanges", +					    pim_ifp->pim_dr_election_changes); + +			/* FHR */ +			frr_each (rb_pim_upstream, &pim->upstream_head, up) { +				if (ifp != up->rpf.source_nexthop.interface) +					continue; + +				if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_FHR)) +					continue; + +				if (!json_fhr_sources) +					json_fhr_sources = +						json_object_new_object(); + +				snprintfrr(grp_str, sizeof(grp_str), "%pPAs", +					   &up->sg.grp); +				snprintfrr(src_str, sizeof(src_str), "%pPAs", +					   &up->sg.src); +				pim_time_uptime(uptime, sizeof(uptime), +						now - up->state_transition); + +				/* +				 * Does this group live in json_fhr_sources? +				 * If not create it. +				 */ +				json_object_object_get_ex(json_fhr_sources, +							  grp_str, &json_group); + +				if (!json_group) { +					json_group = json_object_new_object(); +					json_object_object_add(json_fhr_sources, +							       grp_str, +							       json_group); +				} + +				json_group_source = json_object_new_object(); +				json_object_string_add(json_group_source, +						       "source", src_str); +				json_object_string_add(json_group_source, +						       "group", grp_str); +				json_object_string_add(json_group_source, +						       "upTime", uptime); +				json_object_object_add(json_group, src_str, +						       json_group_source); +			} + +			if (json_fhr_sources) { +				json_object_object_add(json_row, +						       "firstHopRouter", +						       json_fhr_sources); +			} + +			json_object_int_add(json_row, "helloPeriod", +					    pim_ifp->pim_hello_period); +			json_object_int_add(json_row, "holdTime", +					    PIM_IF_DEFAULT_HOLDTIME(pim_ifp)); +			json_object_string_add(json_row, "helloTimer", +					       hello_timer); +			json_object_string_add(json_row, "helloStatStart", +					       stat_uptime); +			json_object_int_add(json_row, "helloReceived", +					    pim_ifp->pim_ifstat_hello_recv); +			json_object_int_add(json_row, "helloReceivedFailed", +					    pim_ifp->pim_ifstat_hello_recvfail); +			json_object_int_add(json_row, "helloSend", +					    pim_ifp->pim_ifstat_hello_sent); +			json_object_int_add(json_row, "hellosendFailed", +					    pim_ifp->pim_ifstat_hello_sendfail); +			json_object_int_add(json_row, "helloGenerationId", +					    pim_ifp->pim_generation_id); + +			json_object_int_add( +				json_row, "effectivePropagationDelay", +				pim_if_effective_propagation_delay_msec(ifp)); +			json_object_int_add( +				json_row, "effectiveOverrideInterval", +				pim_if_effective_override_interval_msec(ifp)); +			json_object_int_add( +				json_row, "joinPruneOverrideInterval", +				pim_if_jp_override_interval_msec(ifp)); + +			json_object_int_add( +				json_row, "propagationDelay", +				pim_ifp->pim_propagation_delay_msec); +			json_object_int_add( +				json_row, "propagationDelayHighest", +				
pim_ifp->pim_neighbors_highest_propagation_delay_msec); +			json_object_int_add( +				json_row, "overrideInterval", +				pim_ifp->pim_override_interval_msec); +			json_object_int_add( +				json_row, "overrideIntervalHighest", +				pim_ifp->pim_neighbors_highest_override_interval_msec); +			if (pim_ifp->bsm_enable) +				json_object_boolean_true_add(json_row, +							     "bsmEnabled"); +			if (pim_ifp->ucast_bsm_accept) +				json_object_boolean_true_add(json_row, +							     "ucastBsmEnabled"); +			json_object_object_add(json, ifp->name, json_row); + +		} else { +			vty_out(vty, "Interface  : %s\n", ifp->name); +			vty_out(vty, "State      : %s\n", +				if_is_up(ifp) ? "up" : "down"); +			if (!pim_addr_is_any(pim_ifp->update_source)) { +				vty_out(vty, "Use Source : %pPAs\n", +					&pim_ifp->update_source); +			} +			if (pim_ifp->sec_addr_list) { +				vty_out(vty, "Address    : %pPAs (primary)\n", +					&ifaddr); +				for (ALL_LIST_ELEMENTS_RO( +					     pim_ifp->sec_addr_list, sec_node, +					     sec_addr)) +					vty_out(vty, "             %pFX\n", +						&sec_addr->addr); +			} else { +				vty_out(vty, "Address    : %pPAs\n", &ifaddr); +			} +			vty_out(vty, "\n"); + +			/* PIM neighbors */ +			print_header = 1; + +			for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, +						  neighnode, neigh)) { + +				if (print_header) { +					vty_out(vty, "PIM Neighbors\n"); +					vty_out(vty, "-------------\n"); +					print_header = 0; +				} + +				snprintfrr(neigh_src_str, sizeof(neigh_src_str), +					   "%pPAs", &neigh->source_addr); +				pim_time_uptime(uptime, sizeof(uptime), +						now - neigh->creation); +				pim_time_timer_to_hhmmss(expire, sizeof(expire), +							 neigh->t_expire_timer); +				vty_out(vty, +					"%-15s : up for %s, holdtime expires in %s\n", +					neigh_src_str, uptime, expire); +			} + +			if (!print_header) { +				vty_out(vty, "\n"); +				vty_out(vty, "\n"); +			} + +			vty_out(vty, "Designated Router\n"); +			vty_out(vty, "-----------------\n"); +			vty_out(vty, "Address   : %s\n", dr_str); +			vty_out(vty, "Priority  : %u(%d)\n", +				pim_ifp->pim_dr_priority, +				pim_ifp->pim_dr_num_nondrpri_neighbors); +			vty_out(vty, "Uptime    : %s\n", dr_uptime); +			vty_out(vty, "Elections : %d\n", +				pim_ifp->pim_dr_election_count); +			vty_out(vty, "Changes   : %d\n", +				pim_ifp->pim_dr_election_changes); +			vty_out(vty, "\n"); +			vty_out(vty, "\n"); + +			/* FHR */ +			print_header = 1; +			frr_each (rb_pim_upstream, &pim->upstream_head, up) { +				if (!up->rpf.source_nexthop.interface) +					continue; + +				if (strcmp(ifp->name, +					   up->rpf.source_nexthop +						   .interface->name) != 0) +					continue; + +				if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_FHR)) +					continue; + +				if (print_header) { +					vty_out(vty, +						"FHR - First Hop Router\n"); +					vty_out(vty, +						"----------------------\n"); +					print_header = 0; +				} + +				pim_time_uptime(uptime, sizeof(uptime), +						now - up->state_transition); +				vty_out(vty, +					"%pPAs : %pPAs is a source, uptime is %s\n", +					&up->sg.grp, &up->sg.src, uptime); +			} + +			if (!print_header) { +				vty_out(vty, "\n"); +				vty_out(vty, "\n"); +			} + +			vty_out(vty, "Hellos\n"); +			vty_out(vty, "------\n"); +			vty_out(vty, "Period         : %d\n", +				pim_ifp->pim_hello_period); +			vty_out(vty, "HoldTime       : %d\n", +				PIM_IF_DEFAULT_HOLDTIME(pim_ifp)); +			vty_out(vty, "Timer          : %s\n", hello_timer); +			vty_out(vty, "StatStart      : %s\n", stat_uptime); +			vty_out(vty, 
"Receive        : %d\n", +				pim_ifp->pim_ifstat_hello_recv); +			vty_out(vty, "Receive Failed : %d\n", +				pim_ifp->pim_ifstat_hello_recvfail); +			vty_out(vty, "Send           : %d\n", +				pim_ifp->pim_ifstat_hello_sent); +			vty_out(vty, "Send Failed    : %d\n", +				pim_ifp->pim_ifstat_hello_sendfail); +			vty_out(vty, "Generation ID  : %08x\n", +				pim_ifp->pim_generation_id); +			vty_out(vty, "\n"); +			vty_out(vty, "\n"); + +			pim_print_ifp_flags(vty, ifp); + +			vty_out(vty, "Join Prune Interval\n"); +			vty_out(vty, "-------------------\n"); +			vty_out(vty, "LAN Delay                    : %s\n", +				pim_if_lan_delay_enabled(ifp) ? "yes" : "no"); +			vty_out(vty, "Effective Propagation Delay  : %d msec\n", +				pim_if_effective_propagation_delay_msec(ifp)); +			vty_out(vty, "Effective Override Interval  : %d msec\n", +				pim_if_effective_override_interval_msec(ifp)); +			vty_out(vty, "Join Prune Override Interval : %d msec\n", +				pim_if_jp_override_interval_msec(ifp)); +			vty_out(vty, "\n"); +			vty_out(vty, "\n"); + +			vty_out(vty, "LAN Prune Delay\n"); +			vty_out(vty, "---------------\n"); +			vty_out(vty, "Propagation Delay           : %d msec\n", +				pim_ifp->pim_propagation_delay_msec); +			vty_out(vty, "Propagation Delay (Highest) : %d msec\n", +				pim_ifp->pim_neighbors_highest_propagation_delay_msec); +			vty_out(vty, "Override Interval           : %d msec\n", +				pim_ifp->pim_override_interval_msec); +			vty_out(vty, "Override Interval (Highest) : %d msec\n", +				pim_ifp->pim_neighbors_highest_override_interval_msec); +			vty_out(vty, "\n"); +			vty_out(vty, "\n"); + +			vty_out(vty, "BSM Status\n"); +			vty_out(vty, "----------\n"); +			vty_out(vty, "Bsm Enabled          : %s\n", +				pim_ifp->bsm_enable ? "yes" : "no"); +			vty_out(vty, "Unicast Bsm Enabled  : %s\n", +				pim_ifp->ucast_bsm_accept ? "yes" : "no"); +			vty_out(vty, "\n"); +			vty_out(vty, "\n"); +		} +	} + +	if (!found_ifname) +		vty_out(vty, "%% No such interface\n"); +} + +void ip_pim_ssm_show_group_range(struct pim_instance *pim, struct vty *vty, +				 bool uj) +{ +	struct pim_ssm *ssm = pim->ssm_info; +	const char *range_str = +		ssm->plist_name ? ssm->plist_name : PIM_SSM_STANDARD_RANGE; + +	if (uj) { +		json_object *json; + +		json = json_object_new_object(); +		json_object_string_add(json, "ssmGroups", range_str); +		vty_json(vty, json); +	} else +		vty_out(vty, "SSM group range : %s\n", range_str); +} + +struct pnc_cache_walk_data { +	struct vty *vty; +	struct pim_instance *pim; +}; + +static int pim_print_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg) +{ +	struct pim_nexthop_cache *pnc = bucket->data; +	struct pnc_cache_walk_data *cwd = arg; +	struct vty *vty = cwd->vty; +	struct pim_instance *pim = cwd->pim; +	struct nexthop *nh_node = NULL; +	ifindex_t first_ifindex; +	struct interface *ifp = NULL; +	char buf[PREFIX_STRLEN]; + +	for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) { +		first_ifindex = nh_node->ifindex; +		ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id); + +		vty_out(vty, "%-15s ", +			inet_ntop(AF_INET, &pnc->rpf.rpf_addr.u.prefix4, buf, +				  sizeof(buf))); +		vty_out(vty, "%-16s ", ifp ? 
ifp->name : "NULL"); +		vty_out(vty, "%pI4 ", &nh_node->gate.ipv4); +		vty_out(vty, "\n"); +	} +	return CMD_SUCCESS; +} + +void pim_show_nexthop(struct pim_instance *pim, struct vty *vty) +{ +	struct pnc_cache_walk_data cwd; + +	cwd.vty = vty; +	cwd.pim = pim; +	vty_out(vty, "Number of registered addresses: %lu\n", +		pim->rpf_hash->count); +	vty_out(vty, "Address         Interface        Nexthop\n"); +	vty_out(vty, "---------------------------------------------\n"); + +	hash_walk(pim->rpf_hash, pim_print_pnc_cache_walkcb, &cwd); +} + +void pim_show_neighbors_single(struct pim_instance *pim, struct vty *vty, +			       const char *neighbor, json_object *json) +{ +	struct listnode *neighnode; +	struct interface *ifp; +	struct pim_interface *pim_ifp; +	struct pim_neighbor *neigh; +	time_t now; +	int found_neighbor = 0; +	int option_address_list; +	int option_dr_priority; +	int option_generation_id; +	int option_holdtime; +	int option_lan_prune_delay; +	int option_t_bit; +	char uptime[10]; +	char expire[10]; +	char neigh_src_str[PIM_ADDRSTRLEN]; + +	json_object *json_ifp = NULL; +	json_object *json_row = NULL; + +	now = pim_time_monotonic_sec(); + +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		pim_ifp = ifp->info; + +		if (!pim_ifp) +			continue; + +		if (pim_ifp->pim_sock_fd < 0) +			continue; + +		for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode, +					  neigh)) { +			snprintfrr(neigh_src_str, sizeof(neigh_src_str), +				   "%pPAs", &neigh->source_addr); + +			/* +			 * The user can specify either the interface name or the +			 * PIM neighbor IP. +			 * If this pim_ifp matches neither then skip. +			 */ +			if (strcmp(neighbor, "detail") && +			    strcmp(neighbor, ifp->name) && +			    strcmp(neighbor, neigh_src_str)) +				continue; + +			found_neighbor = 1; +			pim_time_uptime(uptime, sizeof(uptime), +					now - neigh->creation); +			pim_time_timer_to_hhmmss(expire, sizeof(expire), +						 neigh->t_expire_timer); + +			option_address_list = 0; +			option_dr_priority = 0; +			option_generation_id = 0; +			option_holdtime = 0; +			option_lan_prune_delay = 0; +			option_t_bit = 0; + +			if (PIM_OPTION_IS_SET(neigh->hello_options, +					      PIM_OPTION_MASK_ADDRESS_LIST)) +				option_address_list = 1; + +			if (PIM_OPTION_IS_SET(neigh->hello_options, +					      PIM_OPTION_MASK_DR_PRIORITY)) +				option_dr_priority = 1; + +			if (PIM_OPTION_IS_SET(neigh->hello_options, +					      PIM_OPTION_MASK_GENERATION_ID)) +				option_generation_id = 1; + +			if (PIM_OPTION_IS_SET(neigh->hello_options, +					      PIM_OPTION_MASK_HOLDTIME)) +				option_holdtime = 1; + +			if (PIM_OPTION_IS_SET(neigh->hello_options, +					      PIM_OPTION_MASK_LAN_PRUNE_DELAY)) +				option_lan_prune_delay = 1; + +			if (PIM_OPTION_IS_SET( +				    neigh->hello_options, +				    PIM_OPTION_MASK_CAN_DISABLE_JOIN_SUPPRESSION)) +				option_t_bit = 1; + +			if (json) { + +				/* Does this ifp live in json? 
If not create it +				 */ +				json_object_object_get_ex(json, ifp->name, +							  &json_ifp); + +				if (!json_ifp) { +					json_ifp = json_object_new_object(); +					json_object_pim_ifp_add(json_ifp, ifp); +					json_object_object_add(json, ifp->name, +							       json_ifp); +				} + +				json_row = json_object_new_object(); +				json_object_string_add(json_row, "interface", +						       ifp->name); +				json_object_string_add(json_row, "address", +						       neigh_src_str); +				json_object_string_add(json_row, "upTime", +						       uptime); +				json_object_string_add(json_row, "holdtime", +						       expire); +				json_object_int_add(json_row, "drPriority", +						    neigh->dr_priority); +				json_object_int_add(json_row, "generationId", +						    neigh->generation_id); + +				if (option_address_list) +					json_object_boolean_true_add( +						json_row, +						"helloOptionAddressList"); + +				if (option_dr_priority) +					json_object_boolean_true_add( +						json_row, +						"helloOptionDrPriority"); + +				if (option_generation_id) +					json_object_boolean_true_add( +						json_row, +						"helloOptionGenerationId"); + +				if (option_holdtime) +					json_object_boolean_true_add( +						json_row, +						"helloOptionHoldtime"); + +				if (option_lan_prune_delay) +					json_object_boolean_true_add( +						json_row, +						"helloOptionLanPruneDelay"); + +				if (option_t_bit) +					json_object_boolean_true_add( +						json_row, "helloOptionTBit"); + +				json_object_object_add(json_ifp, neigh_src_str, +						       json_row); + +			} else { +				vty_out(vty, "Interface : %s\n", ifp->name); +				vty_out(vty, "Neighbor  : %s\n", neigh_src_str); +				vty_out(vty, +					"    Uptime                         : %s\n", +					uptime); +				vty_out(vty, +					"    Holdtime                       : %s\n", +					expire); +				vty_out(vty, +					"    DR Priority                    : %d\n", +					neigh->dr_priority); +				vty_out(vty, +					"    Generation ID                  : %08x\n", +					neigh->generation_id); +				vty_out(vty, +					"    Override Interval (msec)       : %d\n", +					neigh->override_interval_msec); +				vty_out(vty, +					"    Propagation Delay (msec)       : %d\n", +					neigh->propagation_delay_msec); +				vty_out(vty, +					"    Hello Option - Address List    : %s\n", +					option_address_list ? "yes" : "no"); +				vty_out(vty, +					"    Hello Option - DR Priority     : %s\n", +					option_dr_priority ? "yes" : "no"); +				vty_out(vty, +					"    Hello Option - Generation ID   : %s\n", +					option_generation_id ? "yes" : "no"); +				vty_out(vty, +					"    Hello Option - Holdtime        : %s\n", +					option_holdtime ? "yes" : "no"); +				vty_out(vty, +					"    Hello Option - LAN Prune Delay : %s\n", +					option_lan_prune_delay ? "yes" : "no"); +				vty_out(vty, +					"    Hello Option - T-bit           : %s\n", +					option_t_bit ? 
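[Editorial sketch] The json_object_object_get_ex() dance above is the usual get-or-create idiom for nested JSON containers, so several neighbor rows on one interface land under a single per-interface object. The same idiom factored out as a standalone sketch (helper name is illustrative):

	#include "json.h"

	static json_object *example_get_or_create(json_object *parent,
						  const char *key)
	{
		json_object *child = NULL;

		/* Fetch the existing child via the out-parameter if the key
		 * is already present; otherwise create and attach a new one.
		 */
		json_object_object_get_ex(parent, key, &child);
		if (!child) {
			child = json_object_new_object();
			json_object_object_add(parent, key, child);
		}
		return child;
	}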
"yes" : "no"); +				bfd_sess_show(vty, json_ifp, +					      neigh->bfd_session); +				vty_out(vty, "\n"); +			} +		} +	} + +	if (!found_neighbor) +		vty_out(vty, "%% No such interface or neighbor\n"); +} + +void pim_show_neighbors(struct pim_instance *pim, struct vty *vty, +			json_object *json) +{ +	struct listnode *neighnode; +	struct interface *ifp; +	struct pim_interface *pim_ifp; +	struct pim_neighbor *neigh; +	time_t now; +	char uptime[10]; +	char expire[10]; +	char neigh_src_str[PIM_ADDRSTRLEN]; +	json_object *json_ifp_rows = NULL; +	json_object *json_row = NULL; + +	now = pim_time_monotonic_sec(); + +	if (!json) { +		vty_out(vty, +			"Interface                Neighbor    Uptime  Holdtime  DR Pri\n"); +	} + +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		pim_ifp = ifp->info; + +		if (!pim_ifp) +			continue; + +		if (pim_ifp->pim_sock_fd < 0) +			continue; + +		if (json) +			json_ifp_rows = json_object_new_object(); + +		for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode, +					  neigh)) { +			snprintfrr(neigh_src_str, sizeof(neigh_src_str), +				   "%pPAs", &neigh->source_addr); +			pim_time_uptime(uptime, sizeof(uptime), +					now - neigh->creation); +			pim_time_timer_to_hhmmss(expire, sizeof(expire), +						 neigh->t_expire_timer); + +			if (json) { +				json_row = json_object_new_object(); +				json_object_string_add(json_row, "interface", +						       ifp->name); +				json_object_string_add(json_row, "neighbor", +						       neigh_src_str); +				json_object_string_add(json_row, "upTime", +						       uptime); +				json_object_string_add(json_row, "holdTime", +						       expire); +				json_object_int_add(json_row, "holdTimeMax", +						    neigh->holdtime); +				json_object_int_add(json_row, "drPriority", +						    neigh->dr_priority); +				json_object_object_add(json_ifp_rows, +						       neigh_src_str, json_row); + +			} else { +				vty_out(vty, "%-16s  %15s  %8s  %8s  %6d\n", +					ifp->name, neigh_src_str, uptime, +					expire, neigh->dr_priority); +			} +		} + +		if (json) { +			json_object_object_add(json, ifp->name, json_ifp_rows); +			json_ifp_rows = NULL; +		} +	} +} + +int gm_process_query_max_response_time_cmd(struct vty *vty, +					   const char *qmrt_str) +{ +	const struct lyd_node *pim_enable_dnode; + +	pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode, +					   FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH, +					   FRR_PIM_AF_XPATH_VAL); + +	if (!pim_enable_dnode) { +		nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true"); +	} else { +		if (!yang_dnode_get_bool(pim_enable_dnode, ".")) +			nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, +					      "true"); +	} + +	nb_cli_enqueue_change(vty, "./query-max-response-time", NB_OP_MODIFY, +			      qmrt_str); +	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, +				    FRR_PIM_AF_XPATH_VAL); +} + +int gm_process_no_query_max_response_time_cmd(struct vty *vty) +{ +	nb_cli_enqueue_change(vty, "./query-max-response-time", NB_OP_DESTROY, +			      NULL); +	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, +				    FRR_PIM_AF_XPATH_VAL); +} + +int gm_process_last_member_query_count_cmd(struct vty *vty, +					   const char *lmqc_str) +{ +	const struct lyd_node *pim_enable_dnode; + +	pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode, +					   FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH, +					   FRR_PIM_AF_XPATH_VAL); +	if (!pim_enable_dnode) { +		nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true"); +	} else { +		if (!yang_dnode_get_bool(pim_enable_dnode, ".")) 
+			nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, +					      "true"); +	} + +	nb_cli_enqueue_change(vty, "./robustness-variable", NB_OP_MODIFY, +			      lmqc_str); +	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, +				    FRR_PIM_AF_XPATH_VAL); +} + +int gm_process_no_last_member_query_count_cmd(struct vty *vty) +{ +	nb_cli_enqueue_change(vty, "./robustness-variable", NB_OP_DESTROY, +			      NULL); +	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, +				    FRR_PIM_AF_XPATH_VAL); +} + +int gm_process_last_member_query_interval_cmd(struct vty *vty, +					      const char *lmqi_str) +{ +	const struct lyd_node *pim_enable_dnode; + +	pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode, +					   FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH, +					   FRR_PIM_AF_XPATH_VAL); +	if (!pim_enable_dnode) { +		nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true"); +	} else { +		if (!yang_dnode_get_bool(pim_enable_dnode, ".")) +			nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, +					      "true"); +	} + +	nb_cli_enqueue_change(vty, "./last-member-query-interval", NB_OP_MODIFY, +			      lmqi_str); +	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, +				    FRR_PIM_AF_XPATH_VAL); +} + +int gm_process_no_last_member_query_interval_cmd(struct vty *vty) +{ +	nb_cli_enqueue_change(vty, "./last-member-query-interval", +			      NB_OP_DESTROY, NULL); +	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, +				    FRR_PIM_AF_XPATH_VAL); +} + +int pim_process_ssmpingd_cmd(struct vty *vty, enum nb_operation operation, +			     const char *src_str) +{ +	const char *vrfname; +	char ssmpingd_ip_xpath[XPATH_MAXLEN]; + +	vrfname = pim_cli_get_vrf_name(vty); +	if (vrfname == NULL) +		return CMD_WARNING_CONFIG_FAILED; + +	snprintf(ssmpingd_ip_xpath, sizeof(ssmpingd_ip_xpath), +		 FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim", vrfname, +		 FRR_PIM_AF_XPATH_VAL); +	strlcat(ssmpingd_ip_xpath, "/ssm-pingd-source-ip", +		sizeof(ssmpingd_ip_xpath)); + +	nb_cli_enqueue_change(vty, ssmpingd_ip_xpath, operation, src_str); + +	return nb_cli_apply_changes(vty, NULL); +} + +static void show_scan_oil_stats(struct pim_instance *pim, struct vty *vty, +				time_t now) +{ +	char uptime_scan_oil[10]; +	char uptime_mroute_add[10]; +	char uptime_mroute_del[10]; + +	pim_time_uptime_begin(uptime_scan_oil, sizeof(uptime_scan_oil), now, +			      pim->scan_oil_last); +	pim_time_uptime_begin(uptime_mroute_add, sizeof(uptime_mroute_add), now, +			      pim->mroute_add_last); +	pim_time_uptime_begin(uptime_mroute_del, sizeof(uptime_mroute_del), now, +			      pim->mroute_del_last); + +	vty_out(vty, +		"Scan OIL - Last: %s  Events: %lld\n" +		"MFC Add  - Last: %s  Events: %lld\n" +		"MFC Del  - Last: %s  Events: %lld\n", +		uptime_scan_oil, (long long)pim->scan_oil_events, +		uptime_mroute_add, (long long)pim->mroute_add_events, +		uptime_mroute_del, (long long)pim->mroute_del_events); +} + +void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty, +			       json_object *json) +{ +	struct interface *ifp; +	json_object *json_row = NULL; + +	vty_out(vty, "\n"); + +	if (!json) +		vty_out(vty, +			"Interface        Address            ifi Vif  PktsIn PktsOut    BytesIn   BytesOut\n"); + +	FOR_ALL_INTERFACES (pim->vrf, ifp) { +		struct pim_interface *pim_ifp; +#if PIM_IPV == 4 +		struct sioc_vif_req vreq; +#else +		struct sioc_mif_req6 vreq; +#endif + +		pim_ifp = ifp->info; + +		if (!pim_ifp) +			continue; + +		memset(&vreq, 0, sizeof(vreq)); +#if PIM_IPV == 4 +		vreq.vifi = 
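[Editorial sketch] pim_process_ssmpingd_cmd above builds an absolute xpath with snprintf()/strlcat() instead of a relative one, so the final apply passes NULL rather than a base format. The same shape reduced to its essentials ("example-leaf" is illustrative; FRR_PIM_VRF_XPATH and FRR_PIM_AF_XPATH_VAL are the macros used above):

	#include "northbound_cli.h"

	static int example_set_vrf_leaf(struct vty *vty, const char *vrfname,
					const char *value)
	{
		char xpath[XPATH_MAXLEN];

		snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
			 "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
		strlcat(xpath, "/example-leaf", sizeof(xpath));

		nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, value);

		/* NULL base: the xpath queued above is already absolute. */
		return nb_cli_apply_changes(vty, NULL);
	}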
pim_ifp->mroute_vif_index; +		if (ioctl(pim->mroute_socket, SIOCGETVIFCNT, &vreq)) { +			zlog_warn( +				"ioctl(SIOCGETVIFCNT=%lu) failure for interface %s vif_index=%d: errno=%d: %s", +				(unsigned long)SIOCGETVIFCNT, ifp->name, +				pim_ifp->mroute_vif_index, errno, +				safe_strerror(errno)); +		} +#else +		vreq.mifi = pim_ifp->mroute_vif_index; +		if (ioctl(pim->mroute_socket, SIOCGETMIFCNT_IN6, &vreq)) { +			zlog_warn( +				"ioctl(SIOCGETMIFCNT_IN6=%lu) failure for interface %s vif_index=%d: errno=%d: %s", +				(unsigned long)SIOCGETMIFCNT_IN6, ifp->name, +				pim_ifp->mroute_vif_index, errno, +				safe_strerror(errno)); +		} +#endif + +		if (json) { +			json_row = json_object_new_object(); +			json_object_string_add(json_row, "name", ifp->name); +			json_object_string_add(json_row, "state", +					       if_is_up(ifp) ? "up" : "down"); +			json_object_string_addf(json_row, "address", "%pPA", +						&pim_ifp->primary_address); +			json_object_int_add(json_row, "ifIndex", ifp->ifindex); +			json_object_int_add(json_row, "vif", +					    pim_ifp->mroute_vif_index); +			json_object_int_add(json_row, "pktsIn", +					    (unsigned long)vreq.icount); +			json_object_int_add(json_row, "pktsOut", +					    (unsigned long)vreq.ocount); +			json_object_int_add(json_row, "bytesIn", +					    (unsigned long)vreq.ibytes); +			json_object_int_add(json_row, "bytesOut", +					    (unsigned long)vreq.obytes); +			json_object_object_add(json, ifp->name, json_row); +		} else { +			vty_out(vty, +				"%-16s %-15pPAs %3d %3d %7lu %7lu %10lu %10lu\n", +				ifp->name, &pim_ifp->primary_address, +				ifp->ifindex, pim_ifp->mroute_vif_index, +				(unsigned long)vreq.icount, +				(unsigned long)vreq.ocount, +				(unsigned long)vreq.ibytes, +				(unsigned long)vreq.obytes); +		} +	} +} + +void pim_cmd_show_ip_multicast_helper(struct pim_instance *pim, struct vty *vty) +{ +	struct vrf *vrf = pim->vrf; +	time_t now = pim_time_monotonic_sec(); +	char uptime[10]; +	char mlag_role[80]; + +	pim = vrf->info; + +	vty_out(vty, "Router MLAG Role: %s\n", +		mlag_role2str(router->mlag_role, mlag_role, sizeof(mlag_role))); +	vty_out(vty, "Mroute socket descriptor:"); + +	vty_out(vty, " %d(%s)\n", pim->mroute_socket, vrf->name); + +	pim_time_uptime(uptime, sizeof(uptime), +			now - pim->mroute_socket_creation); +	vty_out(vty, "Mroute socket uptime: %s\n", uptime); + +	vty_out(vty, "\n"); + +	pim_zebra_zclient_update(vty); +#if PIM_IPV == 4 +	pim_zlookup_show_ip_multicast(vty); +#else +	/* TBD */ +#endif + +	vty_out(vty, "\n"); +	vty_out(vty, "Maximum highest VifIndex: %d\n", PIM_MAX_USABLE_VIFS); + +	vty_out(vty, "\n"); +	vty_out(vty, "Upstream Join Timer: %d secs\n", router->t_periodic); +	vty_out(vty, "Join/Prune Holdtime: %d secs\n", PIM_JP_HOLDTIME); +	vty_out(vty, "PIM ECMP: %s\n", pim->ecmp_enable ? "Enable" : "Disable"); +	vty_out(vty, "PIM ECMP Rebalance: %s\n", +		pim->ecmp_rebalance_enable ? 
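[Editorial sketch] show_multicast_interfaces pulls its per-VIF packet and byte counters straight from the kernel with the SIOCGETVIFCNT ioctl on the mroute socket (SIOCGETMIFCNT_IN6 for IPv6). A standalone sketch of the IPv4 variant, assuming Linux headers; mroute_sock and vif_index would come from the pim_instance and pim_interface in the real code:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/mroute.h>	/* struct sioc_vif_req, SIOCGETVIFCNT */

	static int example_vif_counters(int mroute_sock, int vif_index)
	{
		struct sioc_vif_req vreq;

		memset(&vreq, 0, sizeof(vreq));
		vreq.vifi = vif_index;

		if (ioctl(mroute_sock, SIOCGETVIFCNT, &vreq) < 0)
			return -1;

		printf("in %lu pkts / %lu bytes, out %lu pkts / %lu bytes\n",
		       vreq.icount, vreq.ibytes, vreq.ocount, vreq.obytes);
		return 0;
	}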
"Enable" : "Disable"); + +	vty_out(vty, "\n"); + +	pim_show_rpf_refresh_stats(vty, pim, now, NULL); + +	vty_out(vty, "\n"); + +	show_scan_oil_stats(pim, vty, now); + +	show_multicast_interfaces(pim, vty, NULL); +} + +void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg, +		 bool fill, json_object *json) +{ +	struct listnode *node; +	struct channel_oil *c_oil; +	struct static_route *s_route; +	time_t now; +	json_object *json_group = NULL; +	json_object *json_source = NULL; +	json_object *json_oil = NULL; +	json_object *json_ifp_out = NULL; +	int found_oif; +	int first; +	char grp_str[PIM_ADDRSTRLEN]; +	char src_str[PIM_ADDRSTRLEN]; +	char in_ifname[INTERFACE_NAMSIZ + 1]; +	char out_ifname[INTERFACE_NAMSIZ + 1]; +	int oif_vif_index; +	struct interface *ifp_in; +	char proto[100]; +	char state_str[PIM_REG_STATE_STR_LEN]; +	char mroute_uptime[10]; + +	if (!json) { +		vty_out(vty, "IP Multicast Routing Table\n"); +		vty_out(vty, "Flags: S - Sparse, C - Connected, P - Pruned\n"); +		vty_out(vty, +			"       R - SGRpt Pruned, F - Register flag, T - SPT-bit set\n"); +		vty_out(vty, +			"\nSource          Group           Flags    Proto  Input            Output           TTL  Uptime\n"); +	} + +	now = pim_time_monotonic_sec(); + +	/* print list of PIM and IGMP routes */ +	frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) { +		found_oif = 0; +		first = 1; +		if (!c_oil->installed) +			continue; + +		if (!pim_addr_is_any(sg->grp) && +		    pim_addr_cmp(sg->grp, *oil_mcastgrp(c_oil))) +			continue; +		if (!pim_addr_is_any(sg->src) && +		    pim_addr_cmp(sg->src, *oil_origin(c_oil))) +			continue; + +		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", +			   oil_mcastgrp(c_oil)); +		snprintfrr(src_str, sizeof(src_str), "%pPAs", +			   oil_origin(c_oil)); + +		strlcpy(state_str, "S", sizeof(state_str)); +		/* When a non DR receives a igmp join, it creates a (*,G) +		 * channel_oil without any upstream creation +		 */ +		if (c_oil->up) { +			if (PIM_UPSTREAM_FLAG_TEST_SRC_IGMP(c_oil->up->flags)) +				strlcat(state_str, "C", sizeof(state_str)); +			if (pim_upstream_is_sg_rpt(c_oil->up)) +				strlcat(state_str, "R", sizeof(state_str)); +			if (PIM_UPSTREAM_FLAG_TEST_FHR(c_oil->up->flags)) +				strlcat(state_str, "F", sizeof(state_str)); +			if (c_oil->up->sptbit == PIM_UPSTREAM_SPTBIT_TRUE) +				strlcat(state_str, "T", sizeof(state_str)); +		} +		if (pim_channel_oil_empty(c_oil)) +			strlcat(state_str, "P", sizeof(state_str)); + +		ifp_in = pim_if_find_by_vif_index(pim, *oil_parent(c_oil)); + +		if (ifp_in) +			strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname)); +		else +			strlcpy(in_ifname, "<iif?>", sizeof(in_ifname)); + + +		pim_time_uptime(mroute_uptime, sizeof(mroute_uptime), +				now - c_oil->mroute_creation); + +		if (json) { + +			/* Find the group, create it if it doesn't exist */ +			json_object_object_get_ex(json, grp_str, &json_group); + +			if (!json_group) { +				json_group = json_object_new_object(); +				json_object_object_add(json, grp_str, +						       json_group); +			} + +			/* Find the source nested under the group, create it if +			 * it doesn't exist +			 */ +			json_object_object_get_ex(json_group, src_str, +						  &json_source); + +			if (!json_source) { +				json_source = json_object_new_object(); +				json_object_object_add(json_group, src_str, +						       json_source); +			} + +			/* Find the inbound interface nested under the source, +			 * create it if it doesn't exist +			 */ +			json_object_string_add(json_source, "source", src_str); +			
json_object_string_add(json_source, "group", grp_str); +			json_object_int_add(json_source, "installed", +					    c_oil->installed); +			json_object_int_add(json_source, "refCount", +					    c_oil->oil_ref_count); +			json_object_int_add(json_source, "oilSize", +					    c_oil->oil_size); +			json_object_int_add(json_source, "OilInheritedRescan", +					    c_oil->oil_inherited_rescan); +			json_object_int_add(json_source, "oilInheritedRescan", +					    c_oil->oil_inherited_rescan); +			json_object_string_add(json_source, "iif", in_ifname); +			json_object_string_add(json_source, "upTime", +					       mroute_uptime); +			json_oil = NULL; +		} + +		for (oif_vif_index = 0; oif_vif_index < MAXVIFS; +		     ++oif_vif_index) { +			struct interface *ifp_out; +			int ttl; + +			ttl = oil_if_has(c_oil, oif_vif_index); +			if (ttl < 1) +				continue; + +			/* do not display muted OIFs */ +			if (c_oil->oif_flags[oif_vif_index] & PIM_OIF_FLAG_MUTE) +				continue; + +			if (*oil_parent(c_oil) == oif_vif_index && +			    !pim_mroute_allow_iif_in_oil(c_oil, oif_vif_index)) +				continue; + +			ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index); +			found_oif = 1; + +			if (ifp_out) +				strlcpy(out_ifname, ifp_out->name, +					sizeof(out_ifname)); +			else +				strlcpy(out_ifname, "<oif?>", +					sizeof(out_ifname)); + +			if (json) { +				json_ifp_out = json_object_new_object(); +				json_object_string_add(json_ifp_out, "source", +						       src_str); +				json_object_string_add(json_ifp_out, "group", +						       grp_str); + +				if (c_oil->oif_flags[oif_vif_index] & +				    PIM_OIF_FLAG_PROTO_PIM) +					json_object_boolean_true_add( +						json_ifp_out, "protocolPim"); + +				if (c_oil->oif_flags[oif_vif_index] & +				    PIM_OIF_FLAG_PROTO_GM) +#if PIM_IPV == 4 +					json_object_boolean_true_add( +						json_ifp_out, "protocolIgmp"); +#else +					json_object_boolean_true_add( +						json_ifp_out, "protocolMld"); +#endif + +				if (c_oil->oif_flags[oif_vif_index] & +				    PIM_OIF_FLAG_PROTO_VXLAN) +					json_object_boolean_true_add( +						json_ifp_out, "protocolVxlan"); + +				if (c_oil->oif_flags[oif_vif_index] & +				    PIM_OIF_FLAG_PROTO_STAR) +					json_object_boolean_true_add( +						json_ifp_out, +						"protocolInherited"); + +				json_object_string_add(json_ifp_out, +						       "inboundInterface", +						       in_ifname); +				json_object_int_add(json_ifp_out, "iVifI", +						    *oil_parent(c_oil)); +				json_object_string_add(json_ifp_out, +						       "outboundInterface", +						       out_ifname); +				json_object_int_add(json_ifp_out, "oVifI", +						    oif_vif_index); +				json_object_int_add(json_ifp_out, "ttl", ttl); +				json_object_string_add(json_ifp_out, "upTime", +						       mroute_uptime); +				json_object_string_add(json_source, "flags", +						       state_str); +				if (!json_oil) { +					json_oil = json_object_new_object(); +					json_object_object_add(json_source, +							       "oil", json_oil); +				} +				json_object_object_add(json_oil, out_ifname, +						       json_ifp_out); +			} else { +				proto[0] = '\0'; +				if (c_oil->oif_flags[oif_vif_index] & +				    PIM_OIF_FLAG_PROTO_PIM) { +					strlcpy(proto, "PIM", sizeof(proto)); +				} + +				if (c_oil->oif_flags[oif_vif_index] & +				    PIM_OIF_FLAG_PROTO_GM) { +#if PIM_IPV == 4 +					strlcpy(proto, "IGMP", sizeof(proto)); +#else +					strlcpy(proto, "MLD", sizeof(proto)); +#endif +				} + +				if (c_oil->oif_flags[oif_vif_index] & +				    PIM_OIF_FLAG_PROTO_VXLAN) { +					strlcpy(proto, 
"VxLAN", sizeof(proto)); +				} + +				if (c_oil->oif_flags[oif_vif_index] & +				    PIM_OIF_FLAG_PROTO_STAR) { +					strlcpy(proto, "STAR", sizeof(proto)); +				} + +				vty_out(vty, +					"%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d  %8s\n", +					oil_origin(c_oil), oil_mcastgrp(c_oil), +					state_str, proto, in_ifname, out_ifname, +					ttl, mroute_uptime); + +				if (first) { +					src_str[0] = '\0'; +					grp_str[0] = '\0'; +					in_ifname[0] = '\0'; +					state_str[0] = '\0'; +					mroute_uptime[0] = '\0'; +					first = 0; +				} +			} +		} + +		if (!json && !found_oif) { +			vty_out(vty, +				"%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d  %8s\n", +				oil_origin(c_oil), oil_mcastgrp(c_oil), +				state_str, "none", in_ifname, "none", 0, +				"--:--:--"); +		} +	} + +	/* Print list of static routes */ +	for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) { +		first = 1; + +		if (!s_route->c_oil.installed) +			continue; + +		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &s_route->group); +		snprintfrr(src_str, sizeof(src_str), "%pPAs", &s_route->source); +		ifp_in = pim_if_find_by_vif_index(pim, s_route->iif); +		found_oif = 0; + +		if (ifp_in) +			strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname)); +		else +			strlcpy(in_ifname, "<iif?>", sizeof(in_ifname)); + +		if (json) { + +			/* Find the group, create it if it doesn't exist */ +			json_object_object_get_ex(json, grp_str, &json_group); + +			if (!json_group) { +				json_group = json_object_new_object(); +				json_object_object_add(json, grp_str, +						       json_group); +			} + +			/* Find the source nested under the group, create it if +			 * it doesn't exist +			 */ +			json_object_object_get_ex(json_group, src_str, +						  &json_source); + +			if (!json_source) { +				json_source = json_object_new_object(); +				json_object_object_add(json_group, src_str, +						       json_source); +			} + +			json_object_string_add(json_source, "iif", in_ifname); +			json_oil = NULL; +		} else { +			strlcpy(proto, "STATIC", sizeof(proto)); +		} + +		for (oif_vif_index = 0; oif_vif_index < MAXVIFS; +		     ++oif_vif_index) { +			struct interface *ifp_out; +			char oif_uptime[10]; +			int ttl; + +			ttl = s_route->oif_ttls[oif_vif_index]; +			if (ttl < 1) +				continue; + +			ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index); +			pim_time_uptime( +				oif_uptime, sizeof(oif_uptime), +				now - s_route->c_oil +						.oif_creation[oif_vif_index]); +			found_oif = 1; + +			if (ifp_out) +				strlcpy(out_ifname, ifp_out->name, +					sizeof(out_ifname)); +			else +				strlcpy(out_ifname, "<oif?>", +					sizeof(out_ifname)); + +			if (json) { +				json_ifp_out = json_object_new_object(); +				json_object_string_add(json_ifp_out, "source", +						       src_str); +				json_object_string_add(json_ifp_out, "group", +						       grp_str); +				json_object_boolean_true_add(json_ifp_out, +							     "protocolStatic"); +				json_object_string_add(json_ifp_out, +						       "inboundInterface", +						       in_ifname); +				json_object_int_add( +					json_ifp_out, "iVifI", +					*oil_parent(&s_route->c_oil)); +				json_object_string_add(json_ifp_out, +						       "outboundInterface", +						       out_ifname); +				json_object_int_add(json_ifp_out, "oVifI", +						    oif_vif_index); +				json_object_int_add(json_ifp_out, "ttl", ttl); +				json_object_string_add(json_ifp_out, "upTime", +						       oif_uptime); +				if (!json_oil) { +					json_oil = json_object_new_object(); +					json_object_object_add(json_source, +							       
"oil", json_oil); +				} +				json_object_object_add(json_oil, out_ifname, +						       json_ifp_out); +			} else { +				vty_out(vty, +					"%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d  %8s\n", +					&s_route->source, &s_route->group, "-", +					proto, in_ifname, out_ifname, ttl, +					oif_uptime); +				if (first && !fill) { +					src_str[0] = '\0'; +					grp_str[0] = '\0'; +					in_ifname[0] = '\0'; +					first = 0; +				} +			} +		} + +		if (!json && !found_oif) { +			vty_out(vty, +				"%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d  %8s\n", +				&s_route->source, &s_route->group, "-", proto, +				in_ifname, "none", 0, "--:--:--"); +		} +	} +} + +static void show_mroute_count_per_channel_oil(struct channel_oil *c_oil, +					      json_object *json, +					      struct vty *vty) +{ +	json_object *json_group = NULL; +	json_object *json_source = NULL; + +	if (!c_oil->installed) +		return; + +	pim_mroute_update_counters(c_oil); + +	if (json) { +		char group_str[PIM_ADDRSTRLEN]; +		char source_str[PIM_ADDRSTRLEN]; + +		snprintfrr(group_str, sizeof(group_str), "%pPAs", +			   oil_mcastgrp(c_oil)); +		snprintfrr(source_str, sizeof(source_str), "%pPAs", +			   oil_origin(c_oil)); + +		json_object_object_get_ex(json, group_str, &json_group); + +		if (!json_group) { +			json_group = json_object_new_object(); +			json_object_object_add(json, group_str, json_group); +		} + +		json_source = json_object_new_object(); +		json_object_object_add(json_group, source_str, json_source); +		json_object_int_add(json_source, "lastUsed", +				    c_oil->cc.lastused / 100); +		json_object_int_add(json_source, "packets", c_oil->cc.pktcnt); +		json_object_int_add(json_source, "bytes", c_oil->cc.bytecnt); +		json_object_int_add(json_source, "wrongIf", c_oil->cc.wrong_if); + +	} else { +		vty_out(vty, "%-15pPAs %-15pPAs %-8llu %-7ld %-10ld %-7ld\n", +			oil_origin(c_oil), oil_mcastgrp(c_oil), +			c_oil->cc.lastused / 100, +			c_oil->cc.pktcnt - c_oil->cc.origpktcnt, +			c_oil->cc.bytecnt - c_oil->cc.origbytecnt, +			c_oil->cc.wrong_if - c_oil->cc.origwrong_if); +	} +} + +void show_mroute_count(struct pim_instance *pim, struct vty *vty, +		       json_object *json) +{ +	struct listnode *node; +	struct channel_oil *c_oil; +	struct static_route *sr; + +	if (!json) { +		vty_out(vty, "\n"); + +		vty_out(vty, +			"Source          Group           LastUsed Packets Bytes WrongIf  \n"); +	} + +	/* Print PIM and IGMP route counts */ +	frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) +		show_mroute_count_per_channel_oil(c_oil, json, vty); + +	for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sr)) +		show_mroute_count_per_channel_oil(&sr->c_oil, json, vty); +} + +void show_mroute_summary(struct pim_instance *pim, struct vty *vty, +			 json_object *json) +{ +	struct listnode *node; +	struct channel_oil *c_oil; +	struct static_route *s_route; +	uint32_t starg_sw_mroute_cnt = 0; +	uint32_t sg_sw_mroute_cnt = 0; +	uint32_t starg_hw_mroute_cnt = 0; +	uint32_t sg_hw_mroute_cnt = 0; +	json_object *json_starg = NULL; +	json_object *json_sg = NULL; + +	if (!json) +		vty_out(vty, "Mroute Type    Installed/Total\n"); + +	frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) { +		if (!c_oil->installed) { +			if (pim_addr_is_any(*oil_origin(c_oil))) +				starg_sw_mroute_cnt++; +			else +				sg_sw_mroute_cnt++; +		} else { +			if (pim_addr_is_any(*oil_origin(c_oil))) +				starg_hw_mroute_cnt++; +			else +				sg_hw_mroute_cnt++; +		} +	} + +	for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) { +		if (!s_route->c_oil.installed) { 
+			if (pim_addr_is_any(*oil_origin(&s_route->c_oil))) +				starg_sw_mroute_cnt++; +			else +				sg_sw_mroute_cnt++; +		} else { +			if (pim_addr_is_any(*oil_origin(&s_route->c_oil))) +				starg_hw_mroute_cnt++; +			else +				sg_hw_mroute_cnt++; +		} +	} + +	if (!json) { +		vty_out(vty, "%-20s %u/%u\n", "(*, G)", starg_hw_mroute_cnt, +			starg_sw_mroute_cnt + starg_hw_mroute_cnt); +		vty_out(vty, "%-20s %u/%u\n", "(S, G)", sg_hw_mroute_cnt, +			sg_sw_mroute_cnt + sg_hw_mroute_cnt); +		vty_out(vty, "------\n"); +		vty_out(vty, "%-20s %u/%u\n", "Total", +			(starg_hw_mroute_cnt + sg_hw_mroute_cnt), +			(starg_sw_mroute_cnt + starg_hw_mroute_cnt + +			 sg_sw_mroute_cnt + sg_hw_mroute_cnt)); +	} else { +		/* (*,G) route details */ +		json_starg = json_object_new_object(); +		json_object_object_add(json, "wildcardGroup", json_starg); + +		json_object_int_add(json_starg, "installed", +				    starg_hw_mroute_cnt); +		json_object_int_add(json_starg, "total", +				    starg_sw_mroute_cnt + starg_hw_mroute_cnt); + +		/* (S, G) route details */ +		json_sg = json_object_new_object(); +		json_object_object_add(json, "sourceGroup", json_sg); + +		json_object_int_add(json_sg, "installed", sg_hw_mroute_cnt); +		json_object_int_add(json_sg, "total", +				    sg_sw_mroute_cnt + sg_hw_mroute_cnt); + +		json_object_int_add(json, "totalNumOfInstalledMroutes", +				    starg_hw_mroute_cnt + sg_hw_mroute_cnt); +		json_object_int_add(json, "totalNumOfMroutes", +				    starg_sw_mroute_cnt + starg_hw_mroute_cnt + +					    sg_sw_mroute_cnt + +					    sg_hw_mroute_cnt); +	} +} diff --git a/pimd/pim_cmd_common.h b/pimd/pim_cmd_common.h index b7e6b6ac80..8753d1444e 100644 --- a/pimd/pim_cmd_common.h +++ b/pimd/pim_cmd_common.h @@ -58,5 +58,69 @@ int pim_process_ip_mroute_cmd(struct vty *vty, const char *interface,  			      const char *group_str, const char *source_str);  int pim_process_no_ip_mroute_cmd(struct vty *vty, const char *interface,  				 const char *group_str, const char *src_str); +void json_object_pim_upstream_add(json_object *json, struct pim_upstream *up); +void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json); +void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty); +void pim_show_state(struct pim_instance *pim, struct vty *vty, +		    const char *src_or_group, const char *group, +		    json_object *json); +void pim_show_statistics(struct pim_instance *pim, struct vty *vty, +			 const char *ifname, bool uj); +void pim_show_upstream(struct pim_instance *pim, struct vty *vty, +		       pim_sgaddr *sg, json_object *json); +void pim_show_join_desired(struct pim_instance *pim, struct vty *vty, bool uj); +void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj); +void pim_show_rpf_refresh_stats(struct vty *vty, struct pim_instance *pim, +				time_t now, json_object *json); +bool pim_sgaddr_match(pim_sgaddr item, pim_sgaddr match); +void json_object_pim_ifp_add(struct json_object *json, struct interface *ifp); +void pim_print_ifp_flags(struct vty *vty, struct interface *ifp); +void json_object_pim_upstream_add(json_object *json, struct pim_upstream *up); +void pim_show_join(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg, +		   json_object *json); +void pim_show_jp_agg_list(struct pim_instance *pim, struct vty *vty); +void pim_show_membership(struct pim_instance *pim, struct vty *vty, bool uj); +void pim_show_channel(struct pim_instance *pim, struct vty *vty, bool uj); +void pim_show_interfaces(struct pim_instance *pim, struct 
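[Editorial note] For reference, the JSON branch of show_mroute_summary above emits an object of roughly this shape (the counts below are made up, purely for illustration):

	{
	    "wildcardGroup": { "installed": 2, "total": 3 },
	    "sourceGroup":   { "installed": 5, "total": 5 },
	    "totalNumOfInstalledMroutes": 7,
	    "totalNumOfMroutes": 8
	}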
vty *vty, bool mlag, +			 json_object *json); +void pim_show_interfaces_single(struct pim_instance *pim, struct vty *vty, +				const char *ifname, bool mlag, +				json_object *json); +void ip_pim_ssm_show_group_range(struct pim_instance *pim, struct vty *vty, +				 bool uj); +void pim_show_nexthop(struct pim_instance *pim, struct vty *vty); +void pim_show_neighbors_single(struct pim_instance *pim, struct vty *vty, +			       const char *neighbor, json_object *json); +void pim_show_neighbors(struct pim_instance *pim, struct vty *vty, +			json_object *json); +int gm_process_query_max_response_time_cmd(struct vty *vty, +					   const char *qmrt_str); +int gm_process_no_query_max_response_time_cmd(struct vty *vty); +int gm_process_last_member_query_count_cmd(struct vty *vty, +					   const char *lmqc_str); +int gm_process_no_last_member_query_count_cmd(struct vty *vty); +int gm_process_last_member_query_interval_cmd(struct vty *vty, +					      const char *lmqi_str); +int gm_process_no_last_member_query_interval_cmd(struct vty *vty); +int pim_process_ssmpingd_cmd(struct vty *vty, enum nb_operation operation, +			     const char *src_str); +void pim_cmd_show_ip_multicast_helper(struct pim_instance *pim, +				      struct vty *vty); +void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty, +			       json_object *json); +void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg, +		 bool fill, json_object *json); +void show_mroute_count(struct pim_instance *pim, struct vty *vty, +		       json_object *json); +void show_mroute_summary(struct pim_instance *pim, struct vty *vty, +			 json_object *json); + +/* + * Special Macro to allow us to get the correct pim_instance; + */ +#define PIM_DECLVAR_CONTEXT_VRF(vrfptr, pimptr)                                \ +	VTY_DECLVAR_CONTEXT_VRF(vrfptr);                                       \ +	struct pim_instance *pimptr = vrfptr->info;                            \ +	MACRO_REQUIRE_SEMICOLON() /* end */  #endif /* PIM_CMD_COMMON_H */ diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c index 8e0e418a99..175368cb07 100644 --- a/pimd/pim_iface.c +++ b/pimd/pim_iface.c @@ -50,6 +50,8 @@  #include "pim_igmp_join.h"  #include "pim_vxlan.h" +#include "pim6_mld.h" +  #if PIM_IPV == 4  static void pim_if_igmp_join_del_all(struct interface *ifp);  static int igmp_join_sock(const char *ifname, ifindex_t ifindex, @@ -123,11 +125,11 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,  	pim_ifp = XCALLOC(MTYPE_PIM_INTERFACE, sizeof(*pim_ifp)); -	pim_ifp->options = 0;  	pim_ifp->pim = ifp->vrf->info;  	pim_ifp->mroute_vif_index = -1;  	pim_ifp->igmp_version = IGMP_DEFAULT_VERSION; +	pim_ifp->mld_version = MLD_DEFAULT_VERSION;  	pim_ifp->gm_default_robustness_variable =  		IGMP_DEFAULT_ROBUSTNESS_VARIABLE;  	pim_ifp->gm_default_query_interval = IGMP_GENERAL_QUERY_INTERVAL; @@ -136,10 +138,12 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,  	pim_ifp->gm_specific_query_max_response_time_dsec =  		IGMP_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC;  	pim_ifp->gm_last_member_query_count = IGMP_DEFAULT_ROBUSTNESS_VARIABLE; +	pim_ifp->mld_last_query_intv = 30000; +	pim_ifp->mld_max_resp_ms = 5000;  	/* BSM config on interface: true by default */  	pim_ifp->bsm_enable = true; -	pim_ifp->ucast_bsm_accept = true; +	pim_ifp->ucast_bsm_accept = false;  	pim_ifp->am_i_dr = false;  	/* @@ -150,13 +154,9 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,  	
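[Editorial sketch] PIM_DECLVAR_CONTEXT_VRF above wraps the common two-liner of pulling the VRF out of the vty context and then its pim_instance. A hypothetical configuration command showing how it is meant to be used (the command, its syntax and its output are made up for illustration; only the macro itself is from this change):

	DEFPY (pim_example_setting,
	       pim_example_setting_cmd,
	       "pim example-setting (1-100)$value",
	       "PIM information\n"
	       "Illustrative example setting\n"
	       "Value\n")
	{
		PIM_DECLVAR_CONTEXT_VRF(vrf, pim);

		vty_out(vty, "setting %ld on VRF %s (%u mcast interfaces)\n",
			value, vrf->name, pim->mcast_if_count);
		return CMD_SUCCESS;
	}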
assert(pim_ifp->gm_query_max_response_time_dsec <  	       pim_ifp->gm_default_query_interval); -	if (pim) -		PIM_IF_DO_PIM(pim_ifp->options); +	pim_ifp->pim_enable = pim;  #if PIM_IPV == 4 -	if (igmp) -		PIM_IF_DO_IGMP(pim_ifp->options); - -	PIM_IF_DO_IGMP_LISTEN_ALLROUTERS(pim_ifp->options); +	pim_ifp->igmp_enable = igmp;  #endif  	pim_ifp->gm_join_list = NULL; @@ -187,9 +187,7 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,  	ifp->info = pim_ifp; -#if PIM_IPV == 4  	pim_sock_reset(ifp); -#endif  	pim_if_add_vif(ifp, ispimreg, is_vxlan_term);  	pim_ifp->pim->mcast_if_count++; @@ -319,7 +317,7 @@ static int detect_primary_address_change(struct interface *ifp,  	if (changed) {  		/* Before updating pim_ifp send Hello time with 0 hold time */ -		if (PIM_IF_TEST_PIM(pim_ifp->options)) { +		if (pim_ifp->pim_enable) {  			pim_hello_send(ifp, 0 /* zero-sec holdtime */);  		}  		pim_ifp->primary_address = new_prim_addr; @@ -464,7 +462,7 @@ static void detect_address_change(struct interface *ifp, int force_prim_as_any,  	if (changed) { -		if (!PIM_IF_TEST_PIM(pim_ifp->options)) { +		if (!pim_ifp->pim_enable) {  			return;  		} @@ -545,7 +543,7 @@ void pim_if_addr_add(struct connected *ifc)  #if PIM_IPV == 4  	struct in_addr ifaddr = ifc->address->u.prefix4; -	if (PIM_IF_TEST_IGMP(pim_ifp->options)) { +	if (pim_ifp->igmp_enable) {  		struct gm_sock *igmp;  		/* lookup IGMP socket */ @@ -612,7 +610,7 @@ void pim_if_addr_add(struct connected *ifc)  	} /* igmp mtrace only */  #endif -	if (PIM_IF_TEST_PIM(pim_ifp->options)) { +	if (pim_ifp->pim_enable) {  		if (!pim_addr_is_any(pim_ifp->primary_address)) { @@ -657,6 +655,7 @@ void pim_if_addr_add(struct connected *ifc)  		vxlan_term = pim_vxlan_is_term_dev_cfg(pim_ifp->pim, ifp);  		pim_if_add_vif(ifp, false, vxlan_term);  	} +	gm_ifp_update(ifp);  	pim_ifchannel_scan_forward_start(ifp);  } @@ -769,6 +768,8 @@ void pim_if_addr_del(struct connected *ifc, int force_prim_as_any)  				"%s: removed link-local %pI6, lowest now %pI6, highest %pI6",  				ifc->ifp->name, &ifc->address->u.prefix6,  				&pim_ifp->ll_lowest, &pim_ifp->ll_highest); + +		gm_ifp_update(ifp);  	}  #endif @@ -804,7 +805,7 @@ void pim_if_addr_add_all(struct interface *ifp)  	}  	if (!v4_addrs && v6_addrs && !if_is_loopback(ifp)) { -		if (PIM_IF_TEST_PIM(pim_ifp->options)) { +		if (pim_ifp->pim_enable) {  			/* Interface has a valid primary address ? 
*/  			if (!pim_addr_is_any(pim_ifp->primary_address)) { @@ -828,6 +829,7 @@ void pim_if_addr_add_all(struct interface *ifp)  		vxlan_term = pim_vxlan_is_term_dev_cfg(pim_ifp->pim, ifp);  		pim_if_add_vif(ifp, false, vxlan_term);  	} +	gm_ifp_update(ifp);  	pim_ifchannel_scan_forward_start(ifp);  	pim_rp_setup(pim_ifp->pim); @@ -1006,12 +1008,15 @@ int pim_if_add_vif(struct interface *ifp, bool ispimreg, bool is_vxlan_term)  	}  	ifaddr = pim_ifp->primary_address; +#if PIM_IPV != 6 +	/* IPv6 API is always by interface index */  	if (!ispimreg && !is_vxlan_term && pim_addr_is_any(ifaddr)) {  		zlog_warn(  			"%s: could not get address for interface %s ifindex=%d",  			__func__, ifp->name, ifp->ifindex);  		return -4;  	} +#endif  	pim_ifp->mroute_vif_index = pim_iface_next_vif_index(ifp); @@ -1036,9 +1041,10 @@ int pim_if_add_vif(struct interface *ifp, bool ispimreg, bool is_vxlan_term)  	pim_ifp->pim->iface_vif_index[pim_ifp->mroute_vif_index] = 1; +	gm_ifp_update(ifp); +  	/* if the device qualifies as pim_vxlan iif/oif update vxlan entries */  	pim_vxlan_add_vif(ifp); -  	return 0;  } @@ -1056,6 +1062,8 @@ int pim_if_del_vif(struct interface *ifp)  	/* if the device was a pim_vxlan iif/oif update vxlan mroute entries */  	pim_vxlan_del_vif(ifp); +	gm_ifp_teardown(ifp); +  	pim_mroute_del_vif(ifp);  	/* @@ -1064,7 +1072,6 @@ int pim_if_del_vif(struct interface *ifp)  	pim_ifp->pim->iface_vif_index[pim_ifp->mroute_vif_index] = 0;  	pim_ifp->mroute_vif_index = -1; -  	return 0;  } @@ -1213,7 +1220,7 @@ long pim_if_t_suppressed_msec(struct interface *ifp)  	assert(pim_ifp);  	/* join suppression disabled ? */ -	if (PIM_IF_TEST_PIM_CAN_DISABLE_JOIN_SUPPRESSION(pim_ifp->options)) +	if (pim_ifp->pim_can_disable_join_suppression)  		return 0;  	/* t_suppressed = t_periodic * rand(1.1, 1.4) */ @@ -1532,7 +1539,7 @@ void pim_if_update_assert_tracking_desired(struct interface *ifp)  /*   * PIM wants to have an interface pointer for everything it does.   * The pimreg is a special interface that we have that is not - * quite an inteface but a VIF is created for it. + * quite an interface but a VIF is created for it.   
*/  void pim_if_create_pimreg(struct pim_instance *pim)  { @@ -1751,7 +1758,6 @@ static int pim_ifp_down(struct interface *ifp)  		*/  		pim_if_addr_del_all(ifp); -#if PIM_IPV == 4  		/*  		  pim_sock_delete() closes the socket, stops read and timer  		  threads, @@ -1760,7 +1766,6 @@ static int pim_ifp_down(struct interface *ifp)  		if (ifp->info) {  			pim_sock_delete(ifp, "link down");  		} -#endif  	}  	if (ifp->info) { diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h index bab73eae86..bdc7c521d6 100644 --- a/pimd/pim_iface.h +++ b/pimd/pim_iface.h @@ -34,31 +34,8 @@  #include "bfd.h"  #include "pim_str.h" -#define PIM_IF_MASK_PIM                             (1 << 0) -#define PIM_IF_MASK_IGMP                            (1 << 1) -#define PIM_IF_MASK_IGMP_LISTEN_ALLROUTERS          (1 << 2) -#define PIM_IF_MASK_PIM_CAN_DISABLE_JOIN_SUPPRESSION (1 << 3) -  #define PIM_IF_IS_DELETED(ifp) ((ifp)->ifindex == IFINDEX_INTERNAL) -#define PIM_IF_TEST_PIM(options) (PIM_IF_MASK_PIM & (options)) -#define PIM_IF_TEST_IGMP(options) (PIM_IF_MASK_IGMP & (options)) -#define PIM_IF_TEST_IGMP_LISTEN_ALLROUTERS(options) (PIM_IF_MASK_IGMP_LISTEN_ALLROUTERS & (options)) -#define PIM_IF_TEST_PIM_CAN_DISABLE_JOIN_SUPPRESSION(options)                  \ -	(PIM_IF_MASK_PIM_CAN_DISABLE_JOIN_SUPPRESSION & (options)) - -#define PIM_IF_DO_PIM(options) ((options) |= PIM_IF_MASK_PIM) -#define PIM_IF_DO_IGMP(options) ((options) |= PIM_IF_MASK_IGMP) -#define PIM_IF_DO_IGMP_LISTEN_ALLROUTERS(options) ((options) |= PIM_IF_MASK_IGMP_LISTEN_ALLROUTERS) -#define PIM_IF_DO_PIM_CAN_DISABLE_JOIN_SUPPRESSION(options)                    \ -	((options) |= PIM_IF_MASK_PIM_CAN_DISABLE_JOIN_SUPPRESSION) - -#define PIM_IF_DONT_PIM(options) ((options) &= ~PIM_IF_MASK_PIM) -#define PIM_IF_DONT_IGMP(options) ((options) &= ~PIM_IF_MASK_IGMP) -#define PIM_IF_DONT_IGMP_LISTEN_ALLROUTERS(options) ((options) &= ~PIM_IF_MASK_IGMP_LISTEN_ALLROUTERS) -#define PIM_IF_DONT_PIM_CAN_DISABLE_JOIN_SUPPRESSION(options)                  \ -	((options) &= ~PIM_IF_MASK_PIM_CAN_DISABLE_JOIN_SUPPRESSION) -  #define PIM_I_am_DR(pim_ifp)                                                   \  	!pim_addr_cmp((pim_ifp)->pim_dr_addr, (pim_ifp)->primary_address)  #define PIM_I_am_DualActive(pim_ifp) (pim_ifp)->activeactive == true @@ -92,8 +69,14 @@ struct pim_secondary_addr {  	enum pim_secondary_addr_flags flags;  }; +struct gm_if; +  struct pim_interface { -	uint32_t options; /* bit vector */ +	bool pim_enable : 1; +	bool pim_can_disable_join_suppression : 1; + +	bool igmp_enable : 1; +  	ifindex_t mroute_vif_index;  	struct pim_instance *pim; @@ -109,6 +92,7 @@ struct pim_interface {  					 * address of the interface */  	int igmp_version;		     /* IGMP version */ +	int mld_version;  	int gm_default_robustness_variable;  /* IGMP or MLD QRV */  	int gm_default_query_interval;       /* IGMP or MLD secs between general  						  queries */ @@ -125,11 +109,16 @@ struct pim_interface {  	int gm_last_member_query_count;		      /* IGMP or MLD last member  							 query count  						       */ +	int mld_max_resp_ms; +	int mld_last_query_intv; +  	struct list *gm_socket_list; /* list of struct IGMP or MLD sock */  	struct list *gm_join_list;   /* list of struct IGMP or MLD join */  	struct list *gm_group_list;  /* list of struct IGMP or MLD group */  	struct hash *gm_group_hash; +	struct gm_if *mld; +  	int pim_sock_fd;		/* PIM socket file descriptor */  	struct thread *t_pim_sock_read; /* thread for reading PIM socket */  	int64_t pim_sock_creation;      /* timestamp of 
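[Editorial sketch] The pim_iface.h hunk above replaces the options bit vector and its PIM_IF_TEST/DO/DONT macro family with plain one-bit bitfields, so call sites read and assign the flags directly. Illustrative before/after, with the structs trimmed down:

	#include <stdbool.h>
	#include <stdint.h>

	/* Before: flag access went through mask macros on a shared word,
	 * e.g. PIM_IF_TEST_PIM(ifp->options) / PIM_IF_DO_PIM(ifp->options).
	 */
	struct old_style {
		uint32_t options;	/* PIM_IF_MASK_* bits */
	};

	/* After: each flag is its own one-bit field. */
	struct new_style {
		bool pim_enable : 1;
		bool pim_can_disable_join_suppression : 1;
		bool igmp_enable : 1;
	};

	static void example(struct new_style *pim_ifp, bool enable)
	{
		pim_ifp->pim_enable = enable;	/* was PIM_IF_DO_PIM()/DONT_PIM() */
		if (pim_ifp->pim_enable) {	/* was PIM_IF_TEST_PIM() */
			/* ... */
		}
	}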
PIM socket creation */ diff --git a/pimd/pim_ifchannel.c b/pimd/pim_ifchannel.c index f9fb8cf094..40aaf1877c 100644 --- a/pimd/pim_ifchannel.c +++ b/pimd/pim_ifchannel.c @@ -138,7 +138,7 @@ void pim_ifchannel_delete(struct pim_ifchannel *ch)  	if (ch->upstream->channel_oil) {  		uint32_t mask = PIM_OIF_FLAG_PROTO_PIM;  		if (ch->upstream->flags & PIM_UPSTREAM_FLAG_MASK_SRC_IGMP) -			mask |= PIM_OIF_FLAG_PROTO_IGMP; +			mask |= PIM_OIF_FLAG_PROTO_GM;  		/*  		 * A S,G RPT channel can have an empty oil, we also @@ -1155,7 +1155,7 @@ int pim_ifchannel_local_membership_add(struct interface *ifp, pim_sgaddr *sg,  		return 0;  	} -	if (!PIM_IF_TEST_PIM(pim_ifp->options)) { +	if (!pim_ifp->pim_enable) {  		if (PIM_DEBUG_EVENTS)  			zlog_debug("%s:%pSG PIM is not configured on this interface %s",  				   __func__, sg, ifp->name); @@ -1227,14 +1227,13 @@ int pim_ifchannel_local_membership_add(struct interface *ifp, pim_sgaddr *sg,  				    == PREFIX_DENY) {  					pim_channel_add_oif(  						up->channel_oil, pim->regiface, -						PIM_OIF_FLAG_PROTO_IGMP, +						PIM_OIF_FLAG_PROTO_GM,  						__func__);  				}  			}  		} else  			pim_channel_add_oif(up->channel_oil, pim->regiface, -					PIM_OIF_FLAG_PROTO_IGMP, -					__func__); +					    PIM_OIF_FLAG_PROTO_GM, __func__);  	}  	return 1; @@ -1249,7 +1248,7 @@ void pim_ifchannel_local_membership_del(struct interface *ifp, pim_sgaddr *sg)  	pim_ifp = ifp->info;  	if (!pim_ifp)  		return; -	if (!PIM_IF_TEST_PIM(pim_ifp->options)) +	if (!pim_ifp->pim_enable)  		return;  	orig = ch = pim_ifchannel_find(ifp, sg); diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c index 57c4cdc470..0dce8faa90 100644 --- a/pimd/pim_igmp.c +++ b/pimd/pim_igmp.c @@ -72,42 +72,42 @@ void igmp_anysource_forward_stop(struct gm_group *group)  }  static void igmp_source_forward_reevaluate_one(struct pim_instance *pim, -					       struct gm_source *source) +					       struct gm_source *source, +					       int is_grp_ssm)  {  	pim_sgaddr sg;  	struct gm_group *group = source->source_group; -	struct pim_ifchannel *ch; - -	if ((source->source_addr.s_addr != INADDR_ANY) || -	    !IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) -		return;  	memset(&sg, 0, sizeof(sg));  	sg.src = source->source_addr;  	sg.grp = group->group_addr; -	ch = pim_ifchannel_find(group->interface, &sg); -	if (pim_is_grp_ssm(pim, group->group_addr)) { -		/* If SSM group withdraw local membership */ -		if (ch && -		    (ch->local_ifmembership == PIM_IFMEMBERSHIP_INCLUDE)) { -			if (PIM_DEBUG_PIM_EVENTS) -				zlog_debug( -					"local membership del for %pSG as G is now SSM", -					&sg); -			pim_ifchannel_local_membership_del(group->interface, -							   &sg); +	/** if there is no PIM state **/ +	if (IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) { +		if (pim_addr_is_any(source->source_addr)) { +			if (is_grp_ssm) { +				if (PIM_DEBUG_PIM_EVENTS) +					zlog_debug( +						"local membership del for %pSG as G is now SSM", +						&sg); +				igmp_source_forward_stop(source); +			} +		} else { +			if (!is_grp_ssm) { +				if (PIM_DEBUG_PIM_EVENTS) +					zlog_debug( +						"local membership del for %pSG as G is now ASM", +						&sg); +				igmp_source_forward_stop(source); +			}  		}  	} else { -		/* If ASM group add local membership */ -		if (!ch || -		    (ch->local_ifmembership == PIM_IFMEMBERSHIP_NOINFO)) { +		if (!pim_addr_is_any(source->source_addr) && (is_grp_ssm)) {  			if (PIM_DEBUG_PIM_EVENTS)  				zlog_debug( -					"local membership add for %pSG as G is now ASM", +					"local membership add for %pSG as G is now 
SSM",  					&sg); -			pim_ifchannel_local_membership_add( -				group->interface, &sg, false /*is_vxlan*/); +			igmp_source_forward_start(pim, source);  		}  	}  } @@ -118,7 +118,7 @@ void igmp_source_forward_reevaluate_all(struct pim_instance *pim)  	FOR_ALL_INTERFACES (pim->vrf, ifp) {  		struct pim_interface *pim_ifp = ifp->info; -		struct listnode *grpnode; +		struct listnode *grpnode, *grp_nextnode;  		struct gm_group *grp;  		struct pim_ifchannel *ch, *ch_temp; @@ -126,10 +126,11 @@ void igmp_source_forward_reevaluate_all(struct pim_instance *pim)  			continue;  		/* scan igmp groups */ -		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode, -					  grp)) { +		for (ALL_LIST_ELEMENTS(pim_ifp->gm_group_list, grpnode, +				       grp_nextnode, grp)) {  			struct listnode *srcnode;  			struct gm_source *src; +			int is_grp_ssm;  			/*  			 * RFC 4604 @@ -139,16 +140,16 @@ void igmp_source_forward_reevaluate_all(struct pim_instance *pim)  			 * MODE_IS_EXCLUDE and CHANGE_TO_EXCLUDE_MODE  			 * requests in the SSM range.  			 */ -			if (pim_is_grp_ssm(pim, grp->group_addr) && -			    grp->group_filtermode_isexcl) { +			is_grp_ssm = pim_is_grp_ssm(pim, grp->group_addr); +			if (is_grp_ssm && grp->group_filtermode_isexcl) {  				igmp_group_delete(grp);  			} else {  				/* scan group sources */  				for (ALL_LIST_ELEMENTS_RO(  					     grp->group_source_list, srcnode,  					     src)) { -					igmp_source_forward_reevaluate_one(pim, -									   src); +					igmp_source_forward_reevaluate_one( +						pim, src, is_grp_ssm);  				} /* scan group sources */  			}  		} /* scan igmp groups */ @@ -179,6 +180,18 @@ void igmp_source_forward_start(struct pim_instance *pim,  			   IGMP_SOURCE_TEST_FORWARDING(source->source_flags));  	} +	/* +	 * PIM state should not be allowed for ASM group with valid source +	 * address. 
+	 */ +	if ((!pim_is_grp_ssm(pim, source->source_group->group_addr)) && +	    !pim_addr_is_any(source->source_addr)) { +		zlog_warn( +			"%s: (S,G)=%pSG ASM range having source address, not allowed to create PIM state", +			__func__, &sg); +		return; +	} +  	/* Prevent IGMP interface from installing multicast route multiple  	   times */  	if (IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) { @@ -193,8 +206,8 @@ void igmp_source_forward_start(struct pim_instance *pim,  }  /* -  igmp_source_forward_stop: stop fowarding, but keep the source -  igmp_source_delete:       stop fowarding, and delete the source +  igmp_source_forward_stop: stop forwarding, but keep the source +  igmp_source_delete:       stop forwarding, and delete the source   */  void igmp_source_forward_stop(struct gm_source *source)  { @@ -229,8 +242,7 @@ void igmp_source_forward_stop(struct gm_source *source)  /* This socket is used for TXing IGMP packets only, IGMP RX happens   * in pim_mroute_msg()   */ -static int igmp_sock_open(struct in_addr ifaddr, struct interface *ifp, -			  uint32_t pim_options) +static int igmp_sock_open(struct in_addr ifaddr, struct interface *ifp)  {  	int fd;  	int join = 0; @@ -242,17 +254,14 @@ static int igmp_sock_open(struct in_addr ifaddr, struct interface *ifp,  	if (fd < 0)  		return -1; -	if (PIM_IF_TEST_IGMP_LISTEN_ALLROUTERS(pim_options)) { -		if (inet_aton(PIM_ALL_ROUTERS, &group)) { -			if (!pim_socket_join(fd, group, ifaddr, ifp->ifindex, -					     pim_ifp)) -				++join; -		} else { -			zlog_warn( -				"%s %s: IGMP socket fd=%d interface %pI4: could not solve %s to group address: errno=%d: %s", -				__FILE__, __func__, fd, &ifaddr, -				PIM_ALL_ROUTERS, errno, safe_strerror(errno)); -		} +	if (inet_aton(PIM_ALL_ROUTERS, &group)) { +		if (!pim_socket_join(fd, group, ifaddr, ifp->ifindex, pim_ifp)) +			++join; +	} else { +		zlog_warn( +			"%s %s: IGMP socket fd=%d interface %pI4: could not solve %s to group address: errno=%d: %s", +			__FILE__, __func__, fd, &ifaddr, PIM_ALL_ROUTERS, errno, +			safe_strerror(errno));  	}  	/* @@ -659,7 +668,7 @@ static int igmp_v1_recv_report(struct gm_sock *igmp, struct in_addr from,  	if (pim_is_group_filtered(ifp->info, &group_addr))  		return -1; -	/* non-existant group is created as INCLUDE {empty} */ +	/* non-existent group is created as INCLUDE {empty} */  	group = igmp_add_group_by_addr(igmp, group_addr);  	if (!group) {  		return -1; @@ -1244,14 +1253,11 @@ struct gm_sock *pim_igmp_sock_add(struct list *igmp_sock_list,  				  struct in_addr ifaddr, struct interface *ifp,  				  bool mtrace_only)  { -	struct pim_interface *pim_ifp;  	struct gm_sock *igmp;  	struct sockaddr_in sin;  	int fd; -	pim_ifp = ifp->info; - -	fd = igmp_sock_open(ifaddr, ifp, pim_ifp->options); +	fd = igmp_sock_open(ifaddr, ifp);  	if (fd < 0) {  		zlog_warn("Could not open IGMP socket for %pI4 on %s",  			  &ifaddr, ifp->name); diff --git a/pimd/pim_igmp_mtrace.c b/pimd/pim_igmp_mtrace.c index 11bb2db7eb..9148d1050a 100644 --- a/pimd/pim_igmp_mtrace.c +++ b/pimd/pim_igmp_mtrace.c @@ -81,11 +81,9 @@ static bool mtrace_fwd_info_weak(struct pim_instance *pim,  		zlog_debug("mtrace pim_nexthop_lookup OK");  	if (PIM_DEBUG_MTRACE) -		zlog_debug("mtrace next_hop=%pI4", -			   &nexthop.mrib_nexthop_addr.u.prefix4); +		zlog_debug("mtrace next_hop=%pPAs", &nexthop.mrib_nexthop_addr); -	if (nexthop.mrib_nexthop_addr.family == AF_INET) -		nh_addr = nexthop.mrib_nexthop_addr.u.prefix4; +	nh_addr = nexthop.mrib_nexthop_addr;  	ifp_in = nexthop.interface; @@ -134,7 +132,7 @@ 
static bool mtrace_fwd_info(struct pim_instance *pim,  	}  	ifp_in = up->rpf.source_nexthop.interface; -	nh_addr = up->rpf.source_nexthop.mrib_nexthop_addr.u.prefix4; +	nh_addr = up->rpf.source_nexthop.mrib_nexthop_addr;  	total = htonl(MTRACE_UNKNOWN_COUNT);  	if (PIM_DEBUG_MTRACE) diff --git a/pimd/pim_igmpv3.c b/pimd/pim_igmpv3.c index b6114f9ead..5d46383a8e 100644 --- a/pimd/pim_igmpv3.c +++ b/pimd/pim_igmpv3.c @@ -340,8 +340,8 @@ static void source_channel_oil_detach(struct gm_source *source)  }  /* -  igmp_source_delete:       stop fowarding, and delete the source -  igmp_source_forward_stop: stop fowarding, but keep the source +  igmp_source_delete:       stop forwarding, and delete the source +  igmp_source_forward_stop: stop forwarding, but keep the source  */  void igmp_source_delete(struct gm_source *source)  { @@ -465,6 +465,9 @@ struct gm_source *igmp_get_source_by_addr(struct gm_group *group,  	src = XCALLOC(MTYPE_PIM_IGMP_GROUP_SOURCE, sizeof(*src)); +	if (new) +		*new = true; +  	src->t_source_timer = NULL;  	src->source_group = group; /* back pointer */  	src->source_addr = src_addr; @@ -516,7 +519,7 @@ static void allow(struct gm_sock *igmp, struct in_addr from,  		return;  	} -	/* non-existant group is created as INCLUDE {empty} */ +	/* non-existent group is created as INCLUDE {empty} */  	group = igmp_add_group_by_addr(igmp, group_addr);  	if (!group) {  		return; @@ -669,7 +672,7 @@ void igmpv3_report_isex(struct gm_sock *igmp, struct in_addr from,  	if (pim_is_group_filtered(ifp->info, &group_addr))  		return; -	/* non-existant group is created as INCLUDE {empty} */ +	/* non-existent group is created as INCLUDE {empty} */  	group = igmp_add_group_by_addr(igmp, group_addr);  	if (!group) {  		return; @@ -790,7 +793,7 @@ void igmpv3_report_toin(struct gm_sock *igmp, struct in_addr from,  	 * entry is present, the request is ignored.  	 
*/  	if (num_sources) { -		/* non-existant group is created as INCLUDE {empty} */ +		/* non-existent group is created as INCLUDE {empty} */  		group = igmp_add_group_by_addr(igmp, group_addr);  		if (!group) {  			return; @@ -944,7 +947,7 @@ void igmpv3_report_toex(struct gm_sock *igmp, struct in_addr from,  	on_trace(__func__, ifp, from, group_addr, num_sources, sources); -	/* non-existant group is created as INCLUDE {empty} */ +	/* non-existent group is created as INCLUDE {empty} */  	group = igmp_add_group_by_addr(igmp, group_addr);  	if (!group) {  		return; @@ -1465,7 +1468,7 @@ void igmpv3_report_block(struct gm_sock *igmp, struct in_addr from,  	on_trace(__func__, ifp, from, group_addr, num_sources, sources); -	/* non-existant group is created as INCLUDE {empty} */ +	/* non-existent group is created as INCLUDE {empty} */  	group = igmp_add_group_by_addr(igmp, group_addr);  	if (!group) {  		return; diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c index 14f984508d..d54add45ae 100644 --- a/pimd/pim_instance.c +++ b/pimd/pim_instance.c @@ -114,6 +114,8 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf)  	pim->send_v6_secondary = 1; +	pim->gm_socket = -1; +  	pim_rp_init(pim);  	pim_bsm_proc_init(pim); diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h index f8323deda0..b19e8208ba 100644 --- a/pimd/pim_instance.h +++ b/pimd/pim_instance.h @@ -167,6 +167,10 @@ struct pim_instance {  	struct list *ssmpingd_list;  	pim_addr ssmpingd_group_addr; +	unsigned int gm_socket_if_count; +	int gm_socket; +	struct thread *t_gm_recv; +  	unsigned int igmp_group_count;  	unsigned int igmp_watermark_limit;  	unsigned int keep_alive_time; @@ -194,6 +198,8 @@ struct pim_instance {  	int64_t nexthop_lookups;  	int64_t nexthop_lookups_avoided;  	int64_t last_route_change_time; + +	uint64_t gm_rx_drop_sys;  };  void pim_vrf_init(void); diff --git a/pimd/pim_join.c b/pimd/pim_join.c index 2c11d5d13f..88078dd366 100644 --- a/pimd/pim_join.c +++ b/pimd/pim_join.c @@ -488,7 +488,9 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)  		group_size = pim_msg_get_jp_group_size(group->sources);  		if (group_size > packet_left) { -			pim_msg_build_header(pim_msg, packet_size, +			pim_msg_build_header(pim_ifp->primary_address, +					     qpim_all_pim_routers_addr, pim_msg, +					     packet_size,  					     PIM_MSG_TYPE_JOIN_PRUNE, false);  			if (pim_msg_send(pim_ifp->pim_sock_fd,  					 pim_ifp->primary_address, @@ -544,7 +546,9 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)  		grp = (struct pim_jp_groups *)curr_ptr;  		if (packet_left < sizeof(struct pim_jp_groups)  		    || msg->num_groups == 255) { -			pim_msg_build_header(pim_msg, packet_size, +			pim_msg_build_header(pim_ifp->primary_address, +					     qpim_all_pim_routers_addr, pim_msg, +					     packet_size,  					     PIM_MSG_TYPE_JOIN_PRUNE, false);  			if (pim_msg_send(pim_ifp->pim_sock_fd,  					 pim_ifp->primary_address, @@ -564,8 +568,9 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)  	if (!new_packet) {  		// msg->num_groups = htons (msg->num_groups); -		pim_msg_build_header(pim_msg, packet_size, -				     PIM_MSG_TYPE_JOIN_PRUNE, false); +		pim_msg_build_header( +			pim_ifp->primary_address, qpim_all_pim_routers_addr, +			pim_msg, packet_size, PIM_MSG_TYPE_JOIN_PRUNE, false);  		if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,  				 qpim_all_pim_routers_addr, pim_msg,  				 packet_size, diff --git a/pimd/pim_mlag.c b/pimd/pim_mlag.c index 
68f77ad6d9..9763a79a8d 100644 --- a/pimd/pim_mlag.c +++ b/pimd/pim_mlag.c @@ -1015,7 +1015,7 @@ void pim_if_configure_mlag_dualactive(struct pim_interface *pim_ifp)  	if (router->pim_mlag_intf_cnt == 1) {  		/* -		 * atleast one Interface is configured for MLAG, send register +		 * at least one Interface is configured for MLAG, send register  		 * to Zebra for receiving MLAG Updates  		 */  		pim_mlag_register(); diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c index 7fc4f12d27..5f951b4dfc 100644 --- a/pimd/pim_mroute.c +++ b/pimd/pim_mroute.c @@ -437,6 +437,28 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf)  						pim_ifp->primary_address,  						up->upstream_register);  				up->sptbit = PIM_UPSTREAM_SPTBIT_TRUE; +			} else { +				/* +				 * At this point pimd is connected to +				 * the source, it has a parent, we are not +				 * the RP  and the SPTBIT should be set +				 * since we know *the* S,G is on the SPT. +				 * The first time this happens, let's cause +				 * an immediate join to go out so that +				 * the RP can trim this guy immediately +				 * if necessary, instead of waiting +				 * one join/prune send cycle +				 */ +				if (up->sptbit != PIM_UPSTREAM_SPTBIT_TRUE && +				    up->parent && +				    up->rpf.source_nexthop.interface != +					    up->parent->rpf.source_nexthop +						    .interface) { +					up->sptbit = PIM_UPSTREAM_SPTBIT_TRUE; +					pim_jp_agg_single_upstream_send( +						&up->parent->rpf, up->parent, +						true); +				}  			}  			pim_upstream_keep_alive_timer_start(  				up, pim_ifp->pim->keep_alive_time); @@ -549,6 +571,27 @@ int pim_mroute_socket_enable(struct pim_instance *pim)  			return -2;  		} +#if PIM_IPV == 6 +		struct icmp6_filter filter[1]; +		int ret; + +		/* Unlike IPv4, this socket is not used for MLD, so just drop +		 * everything with an empty ICMP6 filter.  Otherwise we get +		 * all kinds of garbage here, possibly even non-multicast +		 * related ICMPv6 traffic (e.g. 
ping) +		 * +		 * (mroute kernel upcall "packets" are injected directly on the +		 * socket, this sockopt -or any other- has no effect on them) +		 */ +		ICMP6_FILTER_SETBLOCKALL(filter); +		ret = setsockopt(fd, SOL_ICMPV6, ICMP6_FILTER, filter, +				 sizeof(filter)); +		if (ret) +			zlog_err( +				"(VRF %s) failed to set mroute control filter: %m", +				pim->vrf->name); +#endif +  #ifdef SO_BINDTODEVICE  		if (pim->vrf->vrf_id != VRF_DEFAULT  		    && setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, @@ -717,8 +760,8 @@ bool pim_mroute_allow_iif_in_oil(struct channel_oil *c_oil,  	pim_ifp = ifp_out->info;  	if (!pim_ifp)  		return false; -	if ((c_oil->oif_flags[oif_index] & PIM_OIF_FLAG_PROTO_IGMP) && -			PIM_I_am_DR(pim_ifp)) +	if ((c_oil->oif_flags[oif_index] & PIM_OIF_FLAG_PROTO_GM) && +	    PIM_I_am_DR(pim_ifp))  		return true;  	return false; @@ -1025,14 +1068,14 @@ void pim_mroute_update_counters(struct channel_oil *c_oil)  	memset(&sgreq, 0, sizeof(sgreq)); +	pim_zlookup_sg_statistics(c_oil); +  #if PIM_IPV == 4  	sgreq.src = *oil_origin(c_oil);  	sgreq.grp = *oil_mcastgrp(c_oil); -	pim_zlookup_sg_statistics(c_oil);  #else  	sgreq.src = c_oil->oil.mf6cc_origin;  	sgreq.grp = c_oil->oil.mf6cc_mcastgrp; -	/* TODO Zlookup_sg_statistics for V6 to be added */  #endif  	if (ioctl(pim->mroute_socket, PIM_SIOCGETSGCNT, &sgreq)) {  		pim_sgaddr sg; diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c index 7c1c80f71a..10e515cb56 100644 --- a/pimd/pim_msdp.c +++ b/pimd/pim_msdp.c @@ -709,8 +709,8 @@ bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp)  	}  	/* check if the MSDP peer is the nexthop for the RP */ -	if (pim_nexthop_lookup(mp->pim, &nexthop, rp, 0) -	    && nexthop.mrib_nexthop_addr.u.prefix4.s_addr == mp->peer.s_addr) { +	if (pim_nexthop_lookup(mp->pim, &nexthop, rp, 0) && +	    nexthop.mrib_nexthop_addr.s_addr == mp->peer.s_addr) {  		return true;  	} diff --git a/pimd/pim_msg.c b/pimd/pim_msg.c index a0653e1a57..1eda51417f 100644 --- a/pimd/pim_msg.c +++ b/pimd/pim_msg.c @@ -38,10 +38,36 @@  #include "pim_jp_agg.h"  #include "pim_oil.h" -void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size, -			  uint8_t pim_msg_type, bool no_fwd) +void pim_msg_build_header(pim_addr src, pim_addr dst, uint8_t *pim_msg, +			  size_t pim_msg_size, uint8_t pim_msg_type, +			  bool no_fwd)  {  	struct pim_msg_header *header = (struct pim_msg_header *)pim_msg; +	struct iovec iov[2], *iovp = iov; + +	/* +	 * The checksum for Registers is done only on the first 8 bytes of the +	 * packet, including the PIM header and the next 4 bytes, excluding the +	 * data packet portion +	 * +	 * for IPv6, the pseudoheader upper-level protocol length is also +	 * truncated, so let's just set it here before everything else. 
+	 */ +	if (pim_msg_type == PIM_MSG_TYPE_REGISTER) +		pim_msg_size = PIM_MSG_REGISTER_LEN; + +#if PIM_IPV == 6 +	struct ipv6_ph phdr = { +		.src = src, +		.dst = dst, +		.ulpl = htonl(pim_msg_size), +		.next_hdr = IPPROTO_PIM, +	}; + +	iovp->iov_base = &phdr; +	iovp->iov_len = sizeof(phdr); +	iovp++; +#endif  	/*  	 * Write header @@ -51,18 +77,12 @@ void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,  	header->Nbit = no_fwd;  	header->reserved = 0; -  	header->checksum = 0; -	/* -	 * The checksum for Registers is done only on the first 8 bytes of the -	 * packet, -	 * including the PIM header and the next 4 bytes, excluding the data -	 * packet portion -	 */ -	if (pim_msg_type == PIM_MSG_TYPE_REGISTER) -		header->checksum = in_cksum(pim_msg, PIM_MSG_REGISTER_LEN); -	else -		header->checksum = in_cksum(pim_msg, pim_msg_size); +	iovp->iov_base = header; +	iovp->iov_len = pim_msg_size; +	iovp++; + +	header->checksum = in_cksumv(iov, iovp - iov);  }  uint8_t *pim_msg_addr_encode_ipv4_ucast(uint8_t *buf, struct in_addr addr) diff --git a/pimd/pim_msg.h b/pimd/pim_msg.h index 3ad958f097..733210af3a 100644 --- a/pimd/pim_msg.h +++ b/pimd/pim_msg.h @@ -216,8 +216,9 @@ static inline pim_sgaddr pim_sgaddr_from_iphdr(const void *iphdr)  }  #endif -void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size, -			  uint8_t pim_msg_type, bool no_fwd); +void pim_msg_build_header(pim_addr src, pim_addr dst, uint8_t *pim_msg, +			  size_t pim_msg_size, uint8_t pim_msg_type, +			  bool no_fwd);  uint8_t *pim_msg_addr_encode_ipv4_ucast(uint8_t *buf, struct in_addr addr);  uint8_t *pim_msg_addr_encode_ipv4_group(uint8_t *buf, struct in_addr addr); diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index 265c0bb189..2f72c956f3 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -36,6 +36,7 @@  #include "log.h"  #include "lib_errors.h"  #include "pim_util.h" +#include "pim6_mld.h"  #if PIM_IPV == 6  #define pim6_msdp_err(funcname, argtype)                                       \ @@ -64,8 +65,7 @@ static void pim_if_membership_clear(struct interface *ifp)  	pim_ifp = ifp->info;  	assert(pim_ifp); -	if (PIM_IF_TEST_PIM(pim_ifp->options) -	    && PIM_IF_TEST_IGMP(pim_ifp->options)) { +	if (pim_ifp->pim_enable && pim_ifp->igmp_enable) {  		return;  	} @@ -91,9 +91,9 @@ static void pim_if_membership_refresh(struct interface *ifp)  	pim_ifp = ifp->info;  	assert(pim_ifp); -	if (!PIM_IF_TEST_PIM(pim_ifp->options)) +	if (!pim_ifp->pim_enable)  		return; -	if (!PIM_IF_TEST_IGMP(pim_ifp->options)) +	if (!pim_ifp->igmp_enable)  		return;  	/* @@ -144,7 +144,7 @@ static int pim_cmd_interface_add(struct interface *ifp)  	if (!pim_ifp)  		pim_ifp = pim_if_new(ifp, false, true, false, false);  	else -		PIM_IF_DO_PIM(pim_ifp->options); +		pim_ifp->pim_enable = true;  	pim_if_addr_add_all(ifp);  	pim_upstream_nh_if_update(pim_ifp->pim, ifp); @@ -161,7 +161,7 @@ static int pim_cmd_interface_delete(struct interface *ifp)  	if (!pim_ifp)  		return 1; -	PIM_IF_DONT_PIM(pim_ifp->options); +	pim_ifp->pim_enable = false;  	pim_if_membership_clear(ifp); @@ -171,7 +171,7 @@ static int pim_cmd_interface_delete(struct interface *ifp)  	 */  	pim_sock_delete(ifp, "pim unconfigured on interface"); -	if (!PIM_IF_TEST_IGMP(pim_ifp->options)) { +	if (!pim_ifp->igmp_enable) {  		pim_if_addr_del_all(ifp);  		pim_upstream_nh_if_update(pim_ifp->pim, ifp);  		pim_if_delete(ifp); @@ -180,6 +180,21 @@ static int pim_cmd_interface_delete(struct interface *ifp)  	return 1;  } +static struct pim_interface 
*pim_ifp_get(const struct lyd_node *dnode, +					 const char *path, +					 struct interface **ifpp) +{ +	struct interface *ifp; + +	ifp = nb_running_get_entry(dnode, path, true); +	if (ifpp) +		*ifpp = ifp; +	if (ifp->info) +		return (struct pim_interface *)ifp->info; + +	return pim_if_new(ifp, false, false, false, false); +} +  static int interface_pim_use_src_cmd_worker(struct interface *ifp,  		pim_addr source_addr, char *errmsg, size_t errmsg_len)  { @@ -363,8 +378,8 @@ static int pim_cmd_igmp_start(struct interface *ifp)  		pim_ifp = pim_if_new(ifp, true, false, false, false);  		need_startup = 1;  	} else { -		if (!PIM_IF_TEST_IGMP(pim_ifp->options)) { -			PIM_IF_DO_IGMP(pim_ifp->options); +		if (!pim_ifp->igmp_enable) { +			pim_ifp->igmp_enable = true;  			need_startup = 1;  		}  	} @@ -412,7 +427,6 @@ static void igmp_sock_query_interval_reconfig(struct gm_sock *igmp)  	 */  	igmp_startup_mode_on(igmp);  } -#endif  static void igmp_sock_query_reschedule(struct gm_sock *igmp)  { @@ -442,6 +456,7 @@ static void igmp_sock_query_reschedule(struct gm_sock *igmp)  		assert(igmp->t_other_querier_timer);  	}  } +#endif /* PIM_IPV == 4 */  #if PIM_IPV == 4  static void change_query_interval(struct pim_interface *pim_ifp, @@ -459,6 +474,7 @@ static void change_query_interval(struct pim_interface *pim_ifp,  }  #endif +#if PIM_IPV == 4  static void change_query_max_response_time(struct pim_interface *pim_ifp,  		int query_max_response_time_dsec)  { @@ -506,6 +522,7 @@ static void change_query_max_response_time(struct pim_interface *pim_ifp,  		}  	}  } +#endif  int routing_control_plane_protocols_name_validate(  	struct nb_cb_create_args *args) @@ -2540,13 +2557,13 @@ int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args)  		if (!pim_ifp)  			return NB_OK; -		PIM_IF_DONT_IGMP(pim_ifp->options); +		pim_ifp->igmp_enable = false;  		pim_if_membership_clear(ifp);  		pim_if_addr_del_all_igmp(ifp); -		if (!PIM_IF_TEST_PIM(pim_ifp->options)) +		if (!pim_ifp->pim_enable)  			pim_if_delete(ifp);  	} @@ -2597,13 +2614,13 @@ int lib_interface_gmp_address_family_enable_modify(  			if (!pim_ifp)  				return NB_ERR_INCONSISTENCY; -			PIM_IF_DONT_IGMP(pim_ifp->options); +			pim_ifp->igmp_enable = false;  			pim_if_membership_clear(ifp);  			pim_if_addr_del_all_igmp(ifp); -			if (!PIM_IF_TEST_PIM(pim_ifp->options)) +			if (!pim_ifp->pim_enable)  				pim_if_delete(ifp);  		}  	} @@ -2678,12 +2695,19 @@ int lib_interface_gmp_address_family_igmp_version_destroy(  int lib_interface_gmp_address_family_mld_version_modify(  	struct nb_cb_modify_args *args)  { +	struct interface *ifp; +	struct pim_interface *pim_ifp; +  	switch (args->event) {  	case NB_EV_VALIDATE:  	case NB_EV_PREPARE:  	case NB_EV_ABORT: +		break;  	case NB_EV_APPLY: -		/* TBD depends on MLD data structure changes */ +		pim_ifp = pim_ifp_get(args->dnode, NULL, &ifp); + +		pim_ifp->mld_version = yang_dnode_get_uint8(args->dnode, NULL); +		gm_ifp_update(ifp);  		break;  	} @@ -2693,14 +2717,6 @@ int lib_interface_gmp_address_family_mld_version_modify(  int lib_interface_gmp_address_family_mld_version_destroy(  	struct nb_cb_destroy_args *args)  { -	switch (args->event) { -	case NB_EV_VALIDATE: -	case NB_EV_PREPARE: -	case NB_EV_ABORT: -	case NB_EV_APPLY: -		break; -	} -  	return NB_OK;  } @@ -2710,10 +2726,10 @@ int lib_interface_gmp_address_family_mld_version_destroy(  int lib_interface_gmp_address_family_query_interval_modify(  	struct nb_cb_modify_args *args)  { -#if PIM_IPV == 4  	struct interface *ifp;  	int 
query_interval; +#if PIM_IPV == 4  	switch (args->event) {  	case NB_EV_VALIDATE:  	case NB_EV_PREPARE: @@ -2725,7 +2741,20 @@ int lib_interface_gmp_address_family_query_interval_modify(  		change_query_interval(ifp->info, query_interval);  	}  #else -	/* TBD Depends on MLD data structure changes */ +	struct pim_interface *pim_ifp; + +	switch (args->event) { +	case NB_EV_VALIDATE: +	case NB_EV_PREPARE: +	case NB_EV_ABORT: +		break; +	case NB_EV_APPLY: +		pim_ifp = pim_ifp_get(args->dnode, NULL, &ifp); + +		query_interval = yang_dnode_get_uint16(args->dnode, NULL); +		pim_ifp->gm_default_query_interval = query_interval; +		gm_ifp_update(ifp); +	}  #endif  	return NB_OK;  } @@ -2737,6 +2766,7 @@ int lib_interface_gmp_address_family_query_max_response_time_modify(  	struct nb_cb_modify_args *args)  {  	struct interface *ifp; +#if PIM_IPV == 4  	int query_max_response_time_dsec;  	switch (args->event) { @@ -2751,7 +2781,24 @@ int lib_interface_gmp_address_family_query_max_response_time_modify(  		change_query_max_response_time(ifp->info,  				query_max_response_time_dsec);  	} +#else +	struct pim_interface *pim_ifp; +	int max_resp_ms; + +	switch (args->event) { +	case NB_EV_VALIDATE: +	case NB_EV_PREPARE: +	case NB_EV_ABORT: +		break; +	case NB_EV_APPLY: +		pim_ifp = pim_ifp_get(args->dnode, NULL, &ifp); +		max_resp_ms = yang_dnode_get_uint16(args->dnode, NULL); +		pim_ifp->mld_max_resp_ms = max_resp_ms; +		gm_ifp_update(ifp); +		break; +	} +#endif  	return NB_OK;  } @@ -2763,6 +2810,7 @@ int lib_interface_gmp_address_family_last_member_query_interval_modify(  {  	struct interface *ifp;  	struct pim_interface *pim_ifp; +#if PIM_IPV == 4  	int last_member_query_interval;  	switch (args->event) { @@ -2780,7 +2828,23 @@ int lib_interface_gmp_address_family_last_member_query_interval_modify(  		break;  	} +#else +	int last_query_intv; + +	switch (args->event) { +	case NB_EV_VALIDATE: +	case NB_EV_PREPARE: +	case NB_EV_ABORT: +		break; +	case NB_EV_APPLY: +		pim_ifp = pim_ifp_get(args->dnode, NULL, &ifp); +		last_query_intv = yang_dnode_get_uint16(args->dnode, NULL); +		pim_ifp->mld_last_query_intv = last_query_intv; +		gm_ifp_update(ifp); +		break; +	} +#endif  	return NB_OK;  } @@ -2792,6 +2856,8 @@ int lib_interface_gmp_address_family_robustness_variable_modify(  {  	struct interface *ifp;  	struct pim_interface *pim_ifp; + +#if PIM_IPV == 4  	int last_member_query_count;  	switch (args->event) { @@ -2808,6 +2874,24 @@ int lib_interface_gmp_address_family_robustness_variable_modify(  		break;  	} +#else +	int robustness; + +	switch (args->event) { +	case NB_EV_VALIDATE: +	case NB_EV_PREPARE: +	case NB_EV_ABORT: +		break; +	case NB_EV_APPLY: +		pim_ifp = pim_ifp_get(args->dnode, NULL, &ifp); + +		robustness = yang_dnode_get_uint8(args->dnode, NULL); +		pim_ifp->gm_default_robustness_variable = robustness; +		pim_ifp->gm_last_member_query_count = robustness; +		gm_ifp_update(ifp); +		break; +	} +#endif  	return NB_OK;  } diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c index f1508f7631..e4f6b5c8a5 100644 --- a/pimd/pim_nht.c +++ b/pimd/pim_nht.c @@ -162,21 +162,16 @@ int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,  	return 0;  } -#if PIM_IPV == 4 -void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr addr) +void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr)  {  	struct pim_nexthop_cache *pnc;  	struct prefix pfx; -	pfx.family = AF_INET; -	pfx.prefixlen = IPV4_MAX_BITLEN; -	pfx.u.prefix4 = addr; - +	pim_addr_to_prefix(&pfx, addr);  	pnc = pim_nht_get(pim, 
&pfx);  	pnc->bsr_count++;  } -#endif /* PIM_IPV == 4 */  static void pim_nht_drop_maybe(struct pim_instance *pim,  			       struct pim_nexthop_cache *pnc) @@ -246,8 +241,7 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,  	pim_nht_drop_maybe(pim, pnc);  } -#if PIM_IPV == 4 -void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr) +void pim_nht_bsr_del(struct pim_instance *pim, pim_addr addr)  {  	struct pim_nexthop_cache *pnc = NULL;  	struct pim_nexthop_cache lookup; @@ -257,28 +251,26 @@ void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr)  	 * is 0.0.0.0 as that the BSR has not been registered  	 * for tracking yet.  	 */ -	if (addr.s_addr == INADDR_ANY) +	if (pim_addr_is_any(addr))  		return; -	lookup.rpf.rpf_addr.family = AF_INET; -	lookup.rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN; -	lookup.rpf.rpf_addr.u.prefix4 = addr; +	pim_addr_to_prefix(&lookup.rpf.rpf_addr, addr);  	pnc = hash_lookup(pim->rpf_hash, &lookup);  	if (!pnc) { -		zlog_warn("attempting to delete nonexistent NHT BSR entry %pI4", +		zlog_warn("attempting to delete nonexistent NHT BSR entry %pPA",  			  &addr);  		return;  	} -	assertf(pnc->bsr_count > 0, "addr=%pI4", &addr); +	assertf(pnc->bsr_count > 0, "addr=%pPA", &addr);  	pnc->bsr_count--;  	pim_nht_drop_maybe(pim, pnc);  } -bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr, +bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,  			   struct interface *src_ifp, pim_addr src_ip)  {  	struct pim_nexthop_cache *pnc = NULL; @@ -287,9 +279,7 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,  	struct nexthop *nh;  	struct interface *ifp; -	lookup.rpf.rpf_addr.family = AF_INET; -	lookup.rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN; -	lookup.rpf.rpf_addr.u.prefix4 = bsr_addr; +	pim_addr_to_prefix(&lookup.rpf.rpf_addr, bsr_addr);  	pnc = hash_lookup(pim->rpf_hash, &lookup);  	if (!pnc || !CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED)) { @@ -331,13 +321,12 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,  			if (if_is_loopback(ifp) && if_is_loopback(src_ifp))  				return true; -			nbr = pim_neighbor_find_prefix(ifp, &znh->nexthop_addr); +			nbr = pim_neighbor_find(ifp, znh->nexthop_addr);  			if (!nbr)  				continue; -			return znh->ifindex == src_ifp->ifindex -			       && znh->nexthop_addr.u.prefix4.s_addr -					  == src_ip.s_addr; +			return znh->ifindex == src_ifp->ifindex && +			       (!pim_addr_cmp(znh->nexthop_addr, src_ip));  		}  		return false;  	} @@ -395,19 +384,16 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,  		nbr = pim_neighbor_find(ifp, nhaddr);  		if (!nbr)  			continue; - -		return nh->ifindex == src_ifp->ifindex -		       && nhaddr.s_addr == src_ip.s_addr; +		return nh->ifindex == src_ifp->ifindex && +		       (!pim_addr_cmp(nhaddr, src_ip));  	}  	return false;  } -#endif /* PIM_IPV == 4 */  void pim_rp_nexthop_del(struct rp_info *rp_info)  {  	rp_info->rp.source_nexthop.interface = NULL; -	pim_addr_to_prefix(&rp_info->rp.source_nexthop.mrib_nexthop_addr, -			   PIMADDR_ANY); +	rp_info->rp.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY;  	rp_info->rp.source_nexthop.mrib_metric_preference =  		router->infinite_assert_metric.metric_preference;  	rp_info->rp.source_nexthop.mrib_route_metric = @@ -541,7 +527,7 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,  	uint32_t hash_val = 0, mod_val = 0;  	uint8_t nh_iter = 0, found = 0;  	uint32_t 
i, num_nbrs = 0; -	pim_addr nh_addr = pim_addr_from_prefix(&(nexthop->mrib_nexthop_addr)); +	pim_addr nh_addr = nexthop->mrib_nexthop_addr;  	pim_addr src_addr = pim_addr_from_prefix(src);  	pim_addr grp_addr = pim_addr_from_prefix(grp); @@ -578,9 +564,9 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,  			if (curr_route_valid &&  			    !pim_if_connected_to_source(nexthop->interface,  							src_addr)) { -				nbr = pim_neighbor_find_prefix( +				nbr = pim_neighbor_find(  					nexthop->interface, -					&nexthop->mrib_nexthop_addr); +					nexthop->mrib_nexthop_addr);  				if (!nbr  				    && !if_is_loopback(nexthop->interface)) {  					if (PIM_DEBUG_PIM_NHT) @@ -701,14 +687,10 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,  		if (nh_iter == mod_val) {  			nexthop->interface = ifp; -			nexthop->mrib_nexthop_addr.family = PIM_AF; -			nexthop->mrib_nexthop_addr.prefixlen = PIM_MAX_BITLEN;  #if PIM_IPV == 4 -			nexthop->mrib_nexthop_addr.u.prefix4 = -				nh_node->gate.ipv4; +			nexthop->mrib_nexthop_addr = nh_node->gate.ipv4;  #else -			nexthop->mrib_nexthop_addr.u.prefix6 = -				nh_node->gate.ipv6; +			nexthop->mrib_nexthop_addr = nh_node->gate.ipv6;  #endif  			nexthop->mrib_metric_preference = pnc->distance;  			nexthop->mrib_route_metric = pnc->metric; @@ -742,9 +724,7 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)  	int i;  	struct pim_rpf rpf;  	struct pim_nexthop_cache *pnc = NULL; -	struct pim_neighbor *nbr = NULL;  	struct interface *ifp = NULL; -	struct interface *ifp1 = NULL;  	struct vrf *vrf = vrf_lookup_by_id(vrf_id);  	struct pim_instance *pim;  	struct zapi_route nhr; @@ -785,11 +765,6 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)  		for (i = 0; i < nhr.nexthop_num; i++) {  			nexthop = nexthop_from_zapi_nexthop(&nhr.nexthops[i]);  			switch (nexthop->type) { -			case NEXTHOP_TYPE_IPV4: -			case NEXTHOP_TYPE_IPV4_IFINDEX: -			case NEXTHOP_TYPE_IPV6: -			case NEXTHOP_TYPE_BLACKHOLE: -				break;  			case NEXTHOP_TYPE_IFINDEX:  				/*  				 * Connected route (i.e. no nexthop), use @@ -806,31 +781,44 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)  					pnc->rpf.rpf_addr.u.prefix6;  #endif  				break; -			case NEXTHOP_TYPE_IPV6_IFINDEX: +#if PIM_IPV == 4 +			/* RFC5549 IPv4-over-IPv6 nexthop handling: +			 * if we get an IPv6 nexthop in IPv4 PIM, hunt down a +			 * PIM neighbor and use that instead. +			 */ +			case NEXTHOP_TYPE_IPV6_IFINDEX: { +				struct interface *ifp1 = NULL; +				struct pim_neighbor *nbr = NULL; +  				ifp1 = if_lookup_by_index(nexthop->ifindex,  							  pim->vrf->vrf_id);  				if (!ifp1)  					nbr = NULL;  				else +					/* FIXME: should really use nbr's +					 * secondary address list here +					 */  					nbr = pim_neighbor_find_if(ifp1); +  				/* Overwrite with Nbr address as NH addr */  				if (nbr) -#if PIM_IPV == 4  					nexthop->gate.ipv4 = nbr->source_addr; -#else -					nexthop->gate.ipv6 = nbr->source_addr; -#endif -				else { +				else  					// Mark nexthop address to 0 until PIM  					// Nbr is resolved. 
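/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * The hunk above covers the RFC 5549 case where zebra returns an IPv6
 * nexthop (NEXTHOP_TYPE_IPV6_IFINDEX) for an IPv4 route: IPv4 PIM cannot
 * join toward an IPv6 gateway, so the patch substitutes the primary IPv4
 * address of the PIM neighbor on that interface, or 0.0.0.0 until such a
 * neighbor is known.  A minimal standalone sketch of that decision follows,
 * with find_nbr_v4() and struct nbr_entry as hypothetical stand-ins for
 * if_lookup_by_index() + pim_neighbor_find_if(); they are not FRR API.
 */
#include <stddef.h>
#include <stdbool.h>
#include <netinet/in.h>

struct nbr_entry {
	int ifindex;
	struct in_addr primary_v4;	/* neighbor's primary IPv4 address */
};

/* hypothetical stub: return true and fill *out if a PIM neighbor exists
 * on the given interface index */
static bool find_nbr_v4(const struct nbr_entry *tbl, size_t n, int ifindex,
			struct in_addr *out)
{
	for (size_t i = 0; i < n; i++) {
		if (tbl[i].ifindex == ifindex) {
			*out = tbl[i].primary_v4;
			return true;
		}
	}
	return false;
}

/* IPv6 gateway learned for an IPv4-PIM route: overwrite it with the PIM
 * neighbor's IPv4 address on that ifindex, else leave INADDR_ANY to mark
 * the nexthop unresolved until a neighbor shows up (mirrors the logic in
 * the hunk above). */
static struct in_addr v4_gate_for_v6_nexthop(const struct nbr_entry *tbl,
					     size_t n, int ifindex)
{
	struct in_addr gate = { .s_addr = INADDR_ANY };

	(void)find_nbr_v4(tbl, n, ifindex, &gate);
	return gate;
}
/* end of editor's illustrative sketch */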
-#if PIM_IPV == 4  					nexthop->gate.ipv4 = PIMADDR_ANY; + +				break; +			}  #else -					nexthop->gate.ipv6 = PIMADDR_ANY; +			case NEXTHOP_TYPE_IPV6_IFINDEX:  #endif -				} - +			case NEXTHOP_TYPE_IPV6: +			case NEXTHOP_TYPE_IPV4: +			case NEXTHOP_TYPE_IPV4_IFINDEX: +			case NEXTHOP_TYPE_BLACKHOLE: +				/* nothing to do for the other nexthop types */  				break;  			} @@ -850,13 +838,18 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)  				continue;  			} -			if (PIM_DEBUG_PIM_NHT) +			if (PIM_DEBUG_PIM_NHT) { +#if PIM_IPV == 4 +				pim_addr nhaddr = nexthop->gate.ipv4; +#else +				pim_addr nhaddr = nexthop->gate.ipv6; +#endif  				zlog_debug( -					"%s: NHT addr %pFX(%s) %d-nhop via %pI4(%s) type %d distance:%u metric:%u ", +					"%s: NHT addr %pFX(%s) %d-nhop via %pPA(%s) type %d distance:%u metric:%u ",  					__func__, &match, pim->vrf->name, i + 1, -					&nexthop->gate.ipv4, ifp->name, -					nexthop->type, nhr.distance, -					nhr.metric); +					&nhaddr, ifp->name, nexthop->type, +					nhr.distance, nhr.metric); +			}  			if (!ifp->info) {  				/* @@ -977,8 +970,8 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,  		ifps[i] = if_lookup_by_index(nexthop_tab[i].ifindex,  					     pim->vrf->vrf_id);  		if (ifps[i]) { -			nbrs[i] = pim_neighbor_find_prefix( -				ifps[i], &nexthop_tab[i].nexthop_addr); +			nbrs[i] = pim_neighbor_find( +				ifps[i], nexthop_tab[i].nexthop_addr);  			if (nbrs[i] ||  			    pim_if_connected_to_source(ifps[i], src_addr))  				num_nbrs++; @@ -1056,7 +1049,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,  					mod_val++;  				if (PIM_DEBUG_PIM_NHT)  					zlog_debug( -						"%s: NBR (%pFXh) not found on input interface %s(%s) (RPF for source %pPA)", +						"%s: NBR (%pPA) not found on input interface %s(%s) (RPF for source %pPA)",  						__func__,  						&nexthop_tab[i].nexthop_addr,  						ifp->name, pim->vrf->name, @@ -1067,19 +1060,13 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,  		}  		if (i == mod_val) { -			if (PIM_DEBUG_PIM_NHT) { -				char nexthop_str[PREFIX_STRLEN]; - -				pim_addr_dump("<nexthop?>", -					      &nexthop_tab[i].nexthop_addr, -					      nexthop_str, sizeof(nexthop_str)); +			if (PIM_DEBUG_PIM_NHT)  				zlog_debug( -					"%s: found nhop %s for addr %pPA interface %s(%s) metric %d dist %d", -					__func__, nexthop_str, &src_addr, -					ifp->name, pim->vrf->name, +					"%s: found nhop %pPA for addr %pPA interface %s(%s) metric %d dist %d", +					__func__, &nexthop_tab[i].nexthop_addr, +					&src_addr, ifp->name, pim->vrf->name,  					nexthop_tab[i].route_metric,  					nexthop_tab[i].protocol_distance); -			}  			/* update nexthop data */  			nexthop->interface = ifp;  			nexthop->mrib_nexthop_addr = diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h index 16ff44291f..549329c86f 100644 --- a/pimd/pim_nht.h +++ b/pimd/pim_nht.h @@ -77,10 +77,10 @@ int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,  void pim_rp_nexthop_del(struct rp_info *rp_info);  /* for RPF check on BSM message receipt */ -void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr bsr_addr); -void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr bsr_addr); +void pim_nht_bsr_add(struct pim_instance *pim, pim_addr bsr_addr); +void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr);  /* RPF(bsr_addr) == src_ip%src_ifp? 
*/ -bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr, +bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,  			   struct interface *src_ifp, pim_addr src_ip);  void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp); diff --git a/pimd/pim_oil.h b/pimd/pim_oil.h index 68b5ef474e..eed4b203cd 100644 --- a/pimd/pim_oil.h +++ b/pimd/pim_oil.h @@ -27,18 +27,18 @@ struct pim_interface;  /*   * Where did we get this (S,G) from?   * - * IGMP - Learned from IGMP + * GM - Learned from IGMP/MLD   * PIM - Learned from PIM   * SOURCE - Learned from Source multicast packet received   * STAR - Inherited   */ -#define PIM_OIF_FLAG_PROTO_IGMP   (1 << 0) +#define PIM_OIF_FLAG_PROTO_GM     (1 << 0)  #define PIM_OIF_FLAG_PROTO_PIM    (1 << 1)  #define PIM_OIF_FLAG_PROTO_STAR   (1 << 2)  #define PIM_OIF_FLAG_PROTO_VXLAN  (1 << 3) -#define PIM_OIF_FLAG_PROTO_ANY                                 \ -	(PIM_OIF_FLAG_PROTO_IGMP | PIM_OIF_FLAG_PROTO_PIM      \ -	 | PIM_OIF_FLAG_PROTO_STAR | PIM_OIF_FLAG_PROTO_VXLAN) +#define PIM_OIF_FLAG_PROTO_ANY                                                 \ +	(PIM_OIF_FLAG_PROTO_GM | PIM_OIF_FLAG_PROTO_PIM |                      \ +	 PIM_OIF_FLAG_PROTO_STAR | PIM_OIF_FLAG_PROTO_VXLAN)  /* OIF is present in the OIL but must not be used for forwarding traffic */  #define PIM_OIF_FLAG_MUTE         (1 << 4) @@ -197,9 +197,6 @@ struct channel_oil *pim_find_channel_oil(struct pim_instance *pim,  					 pim_sgaddr *sg);  struct channel_oil *pim_channel_oil_add(struct pim_instance *pim,  					pim_sgaddr *sg, const char *name); -void pim_channel_oil_change_iif(struct pim_instance *pim, -				struct channel_oil *c_oil, int input_vif_index, -				const char *name);  struct channel_oil *pim_channel_oil_del(struct channel_oil *c_oil,  					const char *name); diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c index 82f735465a..1baa5c38ca 100644 --- a/pimd/pim_pim.c +++ b/pimd/pim_pim.c @@ -152,6 +152,7 @@ static bool pim_pkt_dst_addr_ok(enum pim_msg_type type, pim_addr addr)  int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,  		   pim_sgaddr sg)  { +	struct iovec iov[2], *iovp = iov;  #if PIM_IPV == 4  	struct ip *ip_hdr = (struct ip *)buf;  	size_t ip_hlen; /* ip header length in bytes */ @@ -179,11 +180,26 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,  	pim_msg = buf + ip_hlen;  	pim_msg_len = len - ip_hlen;  #else +	struct ipv6_ph phdr = { +		.src = sg.src, +		.dst = sg.grp, +		.ulpl = htonl(len), +		.next_hdr = IPPROTO_PIM, +	}; + +	iovp->iov_base = &phdr; +	iovp->iov_len = sizeof(phdr); +	iovp++; +  	/* NB: header is not included in IPv6 RX */  	pim_msg = buf;  	pim_msg_len = len;  #endif +	iovp->iov_base = pim_msg; +	iovp->iov_len = pim_msg_len; +	iovp++; +  	header = (struct pim_msg_header *)pim_msg;  	if (pim_msg_len < PIM_PIM_MIN_LEN) {  		if (PIM_DEBUG_PIM_PACKETS) @@ -215,10 +231,21 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,  					   pim_msg_len, PIM_MSG_REGISTER_LEN);  			return -1;  		} + +#if PIM_IPV == 6 +		phdr.ulpl = htonl(PIM_MSG_REGISTER_LEN); +#endif  		/* First 8 byte header checksum */ -		checksum = in_cksum(pim_msg, PIM_MSG_REGISTER_LEN); +		iovp[-1].iov_len = PIM_MSG_REGISTER_LEN; +		checksum = in_cksumv(iov, iovp - iov); +  		if (checksum != pim_checksum) { -			checksum = in_cksum(pim_msg, pim_msg_len); +#if PIM_IPV == 6 +			phdr.ulpl = htonl(pim_msg_len); +#endif +			iovp[-1].iov_len = pim_msg_len; + +			checksum = in_cksumv(iov, iovp - 
iov);  			if (checksum != pim_checksum) {  				if (PIM_DEBUG_PIM_PACKETS)  					zlog_debug( @@ -230,7 +257,7 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,  			}  		}  	} else { -		checksum = in_cksum(pim_msg, pim_msg_len); +		checksum = in_cksumv(iov, iovp - iov);  		if (checksum != pim_checksum) {  			if (PIM_DEBUG_PIM_PACKETS)  				zlog_debug( @@ -499,11 +526,8 @@ void pim_sock_reset(struct interface *ifp)  		PIM_DEFAULT_PROPAGATION_DELAY_MSEC;  	pim_ifp->pim_override_interval_msec =  		PIM_DEFAULT_OVERRIDE_INTERVAL_MSEC; -	if (PIM_DEFAULT_CAN_DISABLE_JOIN_SUPPRESSION) { -		PIM_IF_DO_PIM_CAN_DISABLE_JOIN_SUPPRESSION(pim_ifp->options); -	} else { -		PIM_IF_DONT_PIM_CAN_DISABLE_JOIN_SUPPRESSION(pim_ifp->options); -	} +	pim_ifp->pim_can_disable_join_suppression = +		PIM_DEFAULT_CAN_DISABLE_JOIN_SUPPRESSION;  	/* neighbors without lan_delay */  	pim_ifp->pim_number_of_nonlandelay_neighbors = 0; @@ -522,75 +546,62 @@ void pim_sock_reset(struct interface *ifp)  	pim_ifstat_reset(ifp);  } +#if PIM_IPV == 4  static uint16_t ip_id = 0; - +#endif  static int pim_msg_send_frame(int fd, char *buf, size_t len,  			      struct sockaddr *dst, size_t salen,  			      const char *ifname)  { -	struct ip *ip = (struct ip *)buf; - -	if (sendto(fd, buf, len, MSG_DONTWAIT, dst, salen) < 0) { -		char dst_str[INET_ADDRSTRLEN]; - -		switch (errno) { -		case EMSGSIZE: { -			size_t hdrsize = sizeof(struct ip); -			size_t newlen1 = ((len - hdrsize) / 2) & 0xFFF8; -			size_t sendlen = newlen1 + hdrsize; -			size_t offset = ntohs(ip->ip_off); - -			ip->ip_len = htons(sendlen); -			ip->ip_off = htons(offset | IP_MF); -			if (pim_msg_send_frame(fd, buf, sendlen, dst, salen, -					       ifname) == 0) { -				struct ip *ip2 = (struct ip *)(buf + newlen1); -				size_t newlen2 = len - sendlen; -				sendlen = newlen2 + hdrsize; - -				memcpy(ip2, ip, hdrsize); -				ip2->ip_len = htons(sendlen); -				ip2->ip_off = htons(offset + (newlen1 >> 3)); -				return pim_msg_send_frame(fd, (char *)ip2, -							  sendlen, dst, salen, -							  ifname); -			} -		} +	if (sendto(fd, buf, len, MSG_DONTWAIT, dst, salen) >= 0) +		return 0; -			return -1; -		default: -			if (PIM_DEBUG_PIM_PACKETS) { -				pim_inet4_dump("<dst?>", ip->ip_dst, dst_str, -					       sizeof(dst_str)); -				zlog_warn( -					"%s: sendto() failure to %s: iface=%s fd=%d msg_size=%zd: errno=%d: %s", -					__func__, dst_str, ifname, fd, len, -					errno, safe_strerror(errno)); -			} -			return -1; -		} +#if PIM_IPV == 4 +	if (errno == EMSGSIZE) { +		struct ip *ip = (struct ip *)buf; +		size_t hdrsize = sizeof(struct ip); +		size_t newlen1 = ((len - hdrsize) / 2) & 0xFFF8; +		size_t sendlen = newlen1 + hdrsize; +		size_t offset = ntohs(ip->ip_off); +		int ret; + +		ip->ip_len = htons(sendlen); +		ip->ip_off = htons(offset | IP_MF); + +		ret = pim_msg_send_frame(fd, buf, sendlen, dst, salen, ifname); +		if (ret) +			return ret; + +		struct ip *ip2 = (struct ip *)(buf + newlen1); +		size_t newlen2 = len - sendlen; + +		sendlen = newlen2 + hdrsize; + +		memcpy(ip2, ip, hdrsize); +		ip2->ip_len = htons(sendlen); +		ip2->ip_off = htons(offset + (newlen1 >> 3)); +		return pim_msg_send_frame(fd, (char *)ip2, sendlen, dst, salen, +					  ifname);  	} +#endif -	return 0; +	zlog_warn( +		"%s: sendto() failure to %pSU: iface=%s fd=%d msg_size=%zd: %m", +		__func__, dst, ifname, fd, len); +	return -1;  }  int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,  		 int pim_msg_size, const char *ifname)  { -	struct sockaddr_in to;  	socklen_t 
tolen;  	unsigned char buffer[10000];  	unsigned char *msg_start;  	uint8_t ttl;  	struct pim_msg_header *header; -	struct ip *ip;  	memset(buffer, 0, 10000); -	int sendlen = sizeof(struct ip) + pim_msg_size; - -	msg_start = buffer + sizeof(struct ip); -	memcpy(msg_start, pim_msg, pim_msg_size);  	header = (struct pim_msg_header *)pim_msg;  /* @@ -620,7 +631,11 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,  		break;  	} -	ip = (struct ip *)buffer; +#if PIM_IPV == 4 +	struct ip *ip = (struct ip *)buffer; +	struct sockaddr_in to = {}; +	int sendlen = sizeof(*ip) + pim_msg_size; +  	ip->ip_id = htons(++ip_id);  	ip->ip_hl = 5;  	ip->ip_v = 4; @@ -631,17 +646,34 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,  	ip->ip_ttl = ttl;  	ip->ip_len = htons(sendlen); -	if (PIM_DEBUG_PIM_PACKETS) { -		char dst_str[INET_ADDRSTRLEN]; -		pim_inet4_dump("<dst?>", dst, dst_str, sizeof(dst_str)); -		zlog_debug("%s: to %s on %s: msg_size=%d checksum=%x", __func__, -			   dst_str, ifname, pim_msg_size, header->checksum); -	} - -	memset(&to, 0, sizeof(to));  	to.sin_family = AF_INET;  	to.sin_addr = dst;  	tolen = sizeof(to); +#else +	struct ip6_hdr *ip = (struct ip6_hdr *)buffer; +	struct sockaddr_in6 to = {}; +	int sendlen = sizeof(*ip) + pim_msg_size; + +	ip->ip6_flow = 0; +	ip->ip6_vfc = (6 << 4) | (IPTOS_PREC_INTERNETCONTROL >> 4); +	ip->ip6_plen = htons(pim_msg_size); +	ip->ip6_nxt = PIM_IP_PROTO_PIM; +	ip->ip6_hlim = ttl; +	ip->ip6_src = src; +	ip->ip6_dst = dst; + +	to.sin6_family = AF_INET6; +	to.sin6_addr = dst; +	tolen = sizeof(to); +#endif + +	msg_start = buffer + sizeof(*ip); +	memcpy(msg_start, pim_msg, pim_msg_size); + +	if (PIM_DEBUG_PIM_PACKETS) +		zlog_debug("%s: to %pPA on %s: msg_size=%d checksum=%x", +			   __func__, &dst, ifname, pim_msg_size, +			   header->checksum);  	if (PIM_DEBUG_PIM_PACKETDUMP_SEND) {  		pim_pkt_dump(__func__, pim_msg, pim_msg_size); @@ -661,20 +693,15 @@ static int hello_send(struct interface *ifp, uint16_t holdtime)  	pim_ifp = ifp->info; -	if (PIM_DEBUG_PIM_HELLO) { -		char dst_str[INET_ADDRSTRLEN]; -		pim_inet4_dump("<dst?>", qpim_all_pim_routers_addr, dst_str, -			       sizeof(dst_str)); +	if (PIM_DEBUG_PIM_HELLO)  		zlog_debug( -			"%s: to %s on %s: holdt=%u prop_d=%u overr_i=%u dis_join_supp=%d dr_prio=%u gen_id=%08x addrs=%d", -			__func__, dst_str, ifp->name, holdtime, -			pim_ifp->pim_propagation_delay_msec, +			"%s: to %pPA on %s: holdt=%u prop_d=%u overr_i=%u dis_join_supp=%d dr_prio=%u gen_id=%08x addrs=%d", +			__func__, &qpim_all_pim_routers_addr, ifp->name, +			holdtime, pim_ifp->pim_propagation_delay_msec,  			pim_ifp->pim_override_interval_msec, -			PIM_IF_TEST_PIM_CAN_DISABLE_JOIN_SUPPRESSION( -				pim_ifp->options), +			pim_ifp->pim_can_disable_join_suppression,  			pim_ifp->pim_dr_priority, pim_ifp->pim_generation_id,  			listcount(ifp->connected)); -	}  	pim_tlv_size = pim_hello_build_tlv(  		ifp, pim_msg + PIM_PIM_MIN_LEN, @@ -682,7 +709,7 @@ static int hello_send(struct interface *ifp, uint16_t holdtime)  		pim_ifp->pim_dr_priority, pim_ifp->pim_generation_id,  		pim_ifp->pim_propagation_delay_msec,  		pim_ifp->pim_override_interval_msec, -		PIM_IF_TEST_PIM_CAN_DISABLE_JOIN_SUPPRESSION(pim_ifp->options)); +		pim_ifp->pim_can_disable_join_suppression);  	if (pim_tlv_size < 0) {  		return -1;  	} @@ -692,7 +719,9 @@ static int hello_send(struct interface *ifp, uint16_t holdtime)  	assert(pim_msg_size >= PIM_PIM_MIN_LEN);  	assert(pim_msg_size <= PIM_PIM_BUFSIZE_WRITE); -	
pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_HELLO, false); +	pim_msg_build_header(pim_ifp->primary_address, +			     qpim_all_pim_routers_addr, pim_msg, pim_msg_size, +			     PIM_MSG_TYPE_HELLO, false);  	if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,  			 qpim_all_pim_routers_addr, pim_msg, pim_msg_size, diff --git a/pimd/pim_register.c b/pimd/pim_register.c index 7fa36e5a44..fef5339749 100644 --- a/pimd/pim_register.c +++ b/pimd/pim_register.c @@ -44,6 +44,7 @@  #include "pim_util.h"  #include "pim_ssm.h"  #include "pim_vxlan.h" +#include "pim_addr.h"  struct thread *send_test_packet_timer = NULL; @@ -64,8 +65,8 @@ void pim_register_join(struct pim_upstream *up)  	pim_vxlan_update_sg_reg_state(pim, up, true);  } -void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg, -			    struct in_addr src, struct in_addr originator) +void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg, pim_addr src, +			    pim_addr originator)  {  	struct pim_interface *pinfo;  	unsigned char buffer[10000]; @@ -74,7 +75,7 @@ void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg,  	uint8_t *b1;  	if (PIM_DEBUG_PIM_REG) { -		zlog_debug("Sending Register stop for %pSG to %pI4 on %s", sg, +		zlog_debug("Sending Register stop for %pSG to %pPA on %s", sg,  			   &originator, ifp->name);  	} @@ -88,7 +89,8 @@ void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg,  	length = pim_encode_addr_ucast(b1, sg->src);  	b1length += length; -	pim_msg_build_header(buffer, b1length + PIM_MSG_REGISTER_STOP_LEN, +	pim_msg_build_header(src, originator, buffer, +			     b1length + PIM_MSG_REGISTER_STOP_LEN,  			     PIM_MSG_TYPE_REG_STOP, false);  	pinfo = (struct pim_interface *)ifp->info; @@ -217,7 +219,7 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)  	return 0;  } -void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src, +void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,  		       struct pim_rpf *rpg, int null_register,  		       struct pim_upstream *up)  { @@ -225,11 +227,11 @@ void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src,  	unsigned char *b1;  	struct pim_interface *pinfo;  	struct interface *ifp; +	pim_addr dst = pim_addr_from_prefix(&rpg->rpf_addr);  	if (PIM_DEBUG_PIM_REG) { -		zlog_debug("Sending %s %sRegister Packet to %pI4", up->sg_str, -			   null_register ? "NULL " : "", -			   &rpg->rpf_addr.u.prefix4); +		zlog_debug("Sending %s %sRegister Packet to %pPA", up->sg_str, +			   null_register ? "NULL " : "", &dst);  	}  	ifp = rpg->source_nexthop.interface; @@ -249,9 +251,9 @@ void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src,  	}  	if (PIM_DEBUG_PIM_REG) { -		zlog_debug("%s: Sending %s %sRegister Packet to %pI4 on %s", +		zlog_debug("%s: Sending %s %sRegister Packet to %pPA on %s",  			   __func__, up->sg_str, null_register ? 
"NULL " : "", -			   &rpg->rpf_addr.u.prefix4, ifp->name); +			   &dst, ifp->name);  	}  	memset(buffer, 0, 10000); @@ -261,13 +263,13 @@ void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src,  	memcpy(b1, (const unsigned char *)buf, buf_size); -	pim_msg_build_header(buffer, buf_size + PIM_MSG_REGISTER_LEN, +	pim_msg_build_header(src, dst, buffer, buf_size + PIM_MSG_REGISTER_LEN,  			     PIM_MSG_TYPE_REGISTER, false);  	++pinfo->pim_ifstat_reg_send; -	if (pim_msg_send(pinfo->pim_sock_fd, src, rpg->rpf_addr.u.prefix4, -			 buffer, buf_size + PIM_MSG_REGISTER_LEN, ifp->name)) { +	if (pim_msg_send(pinfo->pim_sock_fd, src, dst, buffer, +			 buf_size + PIM_MSG_REGISTER_LEN, ifp->name)) {  		if (PIM_DEBUG_PIM_TRACE) {  			zlog_debug(  				"%s: could not send PIM register message on interface %s", @@ -277,12 +279,13 @@ void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src,  	}  } +#if PIM_IPV == 4  void pim_null_register_send(struct pim_upstream *up)  {  	struct ip ip_hdr;  	struct pim_interface *pim_ifp;  	struct pim_rpf *rpg; -	struct in_addr src; +	pim_addr src;  	pim_ifp = up->rpf.source_nexthop.interface->info;  	if (!pim_ifp) { @@ -321,9 +324,71 @@ void pim_null_register_send(struct pim_upstream *up)  			return;  		}  	} -	pim_register_send((uint8_t *)&ip_hdr, sizeof(struct ip), -			src, rpg, 1, up); +	pim_register_send((uint8_t *)&ip_hdr, sizeof(struct ip), src, rpg, 1, +			  up);  } +#else +void pim_null_register_send(struct pim_upstream *up) +{ +	struct ip6_hdr ip6_hdr; +	struct pim_msg_header pim_msg_header; +	struct pim_interface *pim_ifp; +	struct pim_rpf *rpg; +	pim_addr src; +	unsigned char buffer[sizeof(ip6_hdr) + sizeof(pim_msg_header)]; +	struct ipv6_ph ph; + +	pim_ifp = up->rpf.source_nexthop.interface->info; +	if (!pim_ifp) { +		if (PIM_DEBUG_PIM_TRACE) +			zlog_debug( +				"Cannot send null-register for %s no valid iif", +				up->sg_str); +		return; +	} + +	rpg = RP(pim_ifp->pim, up->sg.grp); +	if (!rpg) { +		if (PIM_DEBUG_PIM_TRACE) +			zlog_debug( +				"Cannot send null-register for %s no RPF to the RP", +				up->sg_str); +		return; +	} + +	memset(&ip6_hdr, 0, sizeof(ip6_hdr)); +	ip6_hdr.ip6_nxt = PIM_IP_PROTO_PIM; +	ip6_hdr.ip6_plen = PIM_MSG_HEADER_LEN; +	ip6_hdr.ip6_vfc = 6 << 4; +	ip6_hdr.ip6_hlim = MAXTTL; +	ip6_hdr.ip6_src = up->sg.src; +	ip6_hdr.ip6_dst = up->sg.grp; + +	memset(buffer, 0, (sizeof(ip6_hdr) + sizeof(pim_msg_header))); +	memcpy(buffer, &ip6_hdr, sizeof(ip6_hdr)); + +	pim_msg_header.ver = 0; +	pim_msg_header.type = 0; +	pim_msg_header.reserved = 0; + +	pim_msg_header.checksum = 0; + +	ph.src = up->sg.src; +	ph.dst = up->sg.grp; +	ph.ulpl = htonl(PIM_MSG_HEADER_LEN); +	ph.next_hdr = IPPROTO_PIM; +	pim_msg_header.checksum = +		in_cksum_with_ph6(&ph, &pim_msg_header, PIM_MSG_HEADER_LEN); + +	memcpy(buffer + sizeof(ip6_hdr), &pim_msg_header, PIM_MSG_HEADER_LEN); + + +	src = pim_ifp->primary_address; +	pim_register_send((uint8_t *)buffer, +			  sizeof(ip6_hdr) + PIM_MSG_HEADER_LEN, src, rpg, 1, +			  up); +} +#endif  /*   * 4.4.2 Receiving Register Messages at the RP @@ -423,6 +488,46 @@ int pim_register_recv(struct interface *ifp, pim_addr dest_addr,  	memset(&sg, 0, sizeof(sg));  	sg = pim_sgaddr_from_iphdr(ip_hdr); +#if PIM_IPV == 6 +	/* +	 * According to RFC section 4.9.3, If Dummy PIM Header is included +	 * in NULL Register as a payload there would be two PIM headers. +	 * The inner PIM Header's checksum field should also be validated +	 * in addition to the outer PIM Header's checksum. 
Validation of +	 * inner PIM header checksum is done here. +	 */ +	if ((*bits & PIM_REGISTER_NR_BIT) && +	    ((tlv_buf_size - PIM_MSG_REGISTER_BIT_RESERVED_LEN) > +	     (int)sizeof(struct ip6_hdr))) { +		uint16_t computed_checksum; +		uint16_t received_checksum; +		struct ipv6_ph ph; +		struct pim_msg_header *header; + +		header = (struct pim_msg_header +				  *)(tlv_buf + +				     PIM_MSG_REGISTER_BIT_RESERVED_LEN + +				     sizeof(struct ip6_hdr)); +		ph.src = sg.src; +		ph.dst = sg.grp; +		ph.ulpl = htonl(PIM_MSG_HEADER_LEN); +		ph.next_hdr = IPPROTO_PIM; + +		received_checksum = header->checksum; + +		header->checksum = 0; +		computed_checksum = in_cksum_with_ph6( +			&ph, header, htonl(PIM_MSG_HEADER_LEN)); + +		if (computed_checksum != received_checksum) { +			if (PIM_DEBUG_PIM_PACKETS) +				zlog_debug( +					"Ignoring Null Register message%pSG from %pPA due to bad checksum in Encapsulated dummy PIM header", +					&sg, &src_addr); +			return 0; +		} +	} +#endif  	i_am_rp = I_am_RP(pim, sg.grp);  	if (PIM_DEBUG_PIM_REG) diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c index a552e77823..730870fb33 100644 --- a/pimd/pim_rp.c +++ b/pimd/pim_rp.c @@ -1147,20 +1147,17 @@ int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,  }  void pim_rp_show_information(struct pim_instance *pim, struct prefix *range, -			     struct vty *vty, bool uj) +			     struct vty *vty, json_object *json)  {  	struct rp_info *rp_info;  	struct rp_info *prev_rp_info = NULL;  	struct listnode *node;  	char source[7]; -	json_object *json = NULL;  	json_object *json_rp_rows = NULL;  	json_object *json_row = NULL; -	if (uj) -		json = json_object_new_object(); -	else +	if (!json)  		vty_out(vty,  			"RP address       group/prefix-list   OIF               I am RP    Source   Group-Type\n");  	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) { @@ -1184,7 +1181,7 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,  			strlcpy(source, "BSR", sizeof(source));  		else  			strlcpy(source, "None", sizeof(source)); -		if (uj) { +		if (json) {  			/*  			 * If we have moved on to a new RP then add the  			 * entry for the previous RP @@ -1257,12 +1254,10 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,  		prev_rp_info = rp_info;  	} -	if (uj) { +	if (json) {  		if (prev_rp_info && json_rp_rows)  			json_object_object_addf(json, json_rp_rows, "%pFXh",  						&prev_rp_info->rp.rpf_addr); - -		vty_json(vty, json);  	}  } diff --git a/pimd/pim_rp.h b/pimd/pim_rp.h index 04faeb5f26..e1bc71a3dc 100644 --- a/pimd/pim_rp.h +++ b/pimd/pim_rp.h @@ -25,6 +25,7 @@  #include "vty.h"  #include "plist.h"  #include "pim_rpf.h" +#include "lib/json.h"  struct pim_interface; @@ -79,7 +80,7 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group);  #define RP(P, G)       pim_rp_g ((P), (G))  void pim_rp_show_information(struct pim_instance *pim, struct prefix *range, -			     struct vty *vty, bool uj); +			     struct vty *vty, json_object *json);  void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr);  int pim_rp_list_cmp(void *v1, void *v2);  struct rp_info *pim_rp_find_match_group(struct pim_instance *pim, diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c index eebe38bb88..8357536393 100644 --- a/pimd/pim_rpf.c +++ b/pimd/pim_rpf.c @@ -76,7 +76,7 @@ bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,  		if (PIM_DEBUG_PIM_NHT)  			zlog_debug(  				"%s: Using last lookup for %pPAs at %lld, %" PRId64 -				" addr 
%pFX", +				" addr %pPAs",  				__func__, &addr, nexthop->last_lookup_time,  				pim->last_route_change_time,  				&nexthop->mrib_nexthop_addr); @@ -122,20 +122,17 @@ bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,  					__func__, ifp->name, first_ifindex,  					&addr);  			i++; - -		} else if (!PIM_IF_TEST_PIM(((struct pim_interface *)ifp->info) -						    ->options)) { +		} else if (!((struct pim_interface *)ifp->info)->pim_enable) {  			if (PIM_DEBUG_ZEBRA)  				zlog_debug( -					"%s: pim not enabled on input interface %s (ifindex=%d, RPF for source %pI4)", +					"%s: pim not enabled on input interface %s (ifindex=%d, RPF for source %pPA)",  					__func__, ifp->name, first_ifindex,  					&addr);  			i++; - -		} else if (neighbor_needed && -			   !pim_if_connected_to_source(ifp, addr)) { -			nbr = pim_neighbor_find_prefix( -				ifp, &nexthop_tab[i].nexthop_addr); +		} else if (neighbor_needed +			   && !pim_if_connected_to_source(ifp, addr)) { +			nbr = pim_neighbor_find(ifp, +						nexthop_tab[i].nexthop_addr);  			if (PIM_DEBUG_PIM_TRACE_DETAIL)  				zlog_debug("ifp name: %s, pim nbr: %p",  					   ifp->name, nbr); @@ -150,11 +147,12 @@ bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,  	if (found) {  		if (PIM_DEBUG_ZEBRA)  			zlog_debug( -				"%s %s: found nexthop %pFX for address %pPAs: interface %s ifindex=%d metric=%d pref=%d", +				"%s %s: found nexthop %pPAs for address %pPAs: interface %s ifindex=%d metric=%d pref=%d",  				__FILE__, __func__,  				&nexthop_tab[i].nexthop_addr, &addr, ifp->name,  				first_ifindex, nexthop_tab[i].route_metric,  				nexthop_tab[i].protocol_distance); +  		/* update nexthop data */  		nexthop->interface = ifp;  		nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr; @@ -172,11 +170,8 @@ bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,  static int nexthop_mismatch(const struct pim_nexthop *nh1,  			    const struct pim_nexthop *nh2)  { -	pim_addr nh_addr1 = pim_addr_from_prefix(&nh1->mrib_nexthop_addr); -	pim_addr nh_addr2 = pim_addr_from_prefix(&nh2->mrib_nexthop_addr); -  	return (nh1->interface != nh2->interface) || -	       (pim_addr_cmp(nh_addr1, nh_addr2)) || +	       (pim_addr_cmp(nh1->mrib_nexthop_addr, nh2->mrib_nexthop_addr)) ||  	       (nh1->mrib_metric_preference != nh2->mrib_metric_preference) ||  	       (nh1->mrib_route_metric != nh2->mrib_route_metric);  } @@ -267,7 +262,7 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,  	if (nexthop_mismatch(&rpf->source_nexthop, &saved.source_nexthop)) {  		if (PIM_DEBUG_ZEBRA) -			zlog_debug("%s(%s): (S,G)=%s source nexthop now is: interface=%s address=%pFX pref=%d metric=%d", +			zlog_debug("%s(%s): (S,G)=%s source nexthop now is: interface=%s address=%pPAs pref=%d metric=%d",  		 __func__, caller,  		 up->sg_str,  		 rpf->source_nexthop.interface ? 
rpf->source_nexthop.interface->name : "<ifname?>", @@ -326,8 +321,7 @@ void pim_upstream_rpf_clear(struct pim_instance *pim,  	if (up->rpf.source_nexthop.interface) {  		pim_upstream_switch(pim, up, PIM_UPSTREAM_NOTJOINED);  		up->rpf.source_nexthop.interface = NULL; -		pim_addr_to_prefix(&up->rpf.source_nexthop.mrib_nexthop_addr, -				   PIMADDR_ANY); +		up->rpf.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY;  		up->rpf.source_nexthop.mrib_metric_preference =  			router->infinite_assert_metric.metric_preference;  		up->rpf.source_nexthop.mrib_route_metric = @@ -374,11 +368,8 @@ static pim_addr pim_rpf_find_rpf_addr(struct pim_upstream *up)  	/* return NBR( RPF_interface(S), MRIB.next_hop( S ) ) */ -	pim_addr nhaddr; - -	nhaddr = -		pim_addr_from_prefix(&up->rpf.source_nexthop.mrib_nexthop_addr); -	neigh = pim_if_find_neighbor(up->rpf.source_nexthop.interface, nhaddr); +	neigh = pim_if_find_neighbor(up->rpf.source_nexthop.interface, +				     up->rpf.source_nexthop.mrib_nexthop_addr);  	if (neigh)  		rpf_addr = neigh->source_addr;  	else diff --git a/pimd/pim_rpf.h b/pimd/pim_rpf.h index 74aca43d54..40cd066bd1 100644 --- a/pimd/pim_rpf.h +++ b/pimd/pim_rpf.h @@ -39,7 +39,7 @@ struct pim_nexthop {  	pim_addr last_lookup;  	long long last_lookup_time;  	struct interface *interface;     /* RPF_interface(S) */ -	struct prefix mrib_nexthop_addr; /* MRIB.next_hop(S) */ +	pim_addr mrib_nexthop_addr;      /* MRIB.next_hop(S) */  	uint32_t mrib_metric_preference; /* MRIB.pref(S) */  	uint32_t mrib_route_metric;      /* MRIB.metric(S) */  	struct pim_neighbor *nbr; diff --git a/pimd/pim_ssm.c b/pimd/pim_ssm.c index 74310474d4..62d6eb8308 100644 --- a/pimd/pim_ssm.c +++ b/pimd/pim_ssm.c @@ -70,19 +70,9 @@ void pim_ssm_prefix_list_update(struct pim_instance *pim,  static int pim_is_grp_standard_ssm(struct prefix *group)  { -	static int first = 1; -	static struct prefix group_ssm; +	pim_addr addr = pim_addr_from_prefix(group); -	if (first) { -		if (!str2prefix(PIM_SSM_STANDARD_RANGE, &group_ssm)) -			flog_err(EC_LIB_DEVELOPMENT, -				 "%s: Failure to Read Group Address: %s", -				 __func__, PIM_SSM_STANDARD_RANGE); - -		first = 0; -	} - -	return prefix_match(&group_ssm, group); +	return pim_addr_ssm(addr);  }  int pim_is_grp_ssm(struct pim_instance *pim, pim_addr group_addr) diff --git a/pimd/pim_ssmpingd.c b/pimd/pim_ssmpingd.c index afa7e37da1..d86be85bd8 100644 --- a/pimd/pim_ssmpingd.c +++ b/pimd/pim_ssmpingd.c @@ -361,10 +361,8 @@ int pim_ssmpingd_start(struct pim_instance *pim, pim_addr source_addr)  		return 0;  	} -	{ -		zlog_info("%s: starting ssmpingd for source %pPAs", __func__, -			  &source_addr); -	} +	zlog_info("%s: starting ssmpingd for source %pPAs", __func__, +		  &source_addr);  	ss = ssmpingd_new(pim, source_addr);  	if (!ss) { diff --git a/pimd/pim_tib.c b/pimd/pim_tib.c index 838f11211e..80e75f1b09 100644 --- a/pimd/pim_tib.c +++ b/pimd/pim_tib.c @@ -110,8 +110,8 @@ bool tib_sg_gm_join(struct pim_instance *pim, pim_sgaddr sg,  	if (PIM_I_am_DR(pim_oif) || PIM_I_am_DualActive(pim_oif)) {  		int result; -		result = pim_channel_add_oif(*oilp, oif, -					     PIM_OIF_FLAG_PROTO_IGMP, __func__); +		result = pim_channel_add_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_GM, +					     __func__);  		if (result) {  			if (PIM_DEBUG_MROUTE)  				zlog_warn("%s: add_oif() failed with return=%d", @@ -136,7 +136,7 @@ bool tib_sg_gm_join(struct pim_instance *pim, pim_sgaddr sg,  				"%s: Failure to add local membership for %pSG",  				__func__, &sg); -		pim_channel_del_oif(*oilp, oif, 
PIM_OIF_FLAG_PROTO_IGMP, +		pim_channel_del_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_GM,  				    __func__);  		return false;  	} @@ -160,7 +160,7 @@ void tib_sg_gm_prune(struct pim_instance *pim, pim_sgaddr sg,  	 fixes the issue without ill effect, similar to  	 pim_forward_stop below.  	*/ -	result = pim_channel_del_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_IGMP, +	result = pim_channel_del_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_GM,  				     __func__);  	if (result) {  		if (PIM_DEBUG_IGMP_TRACE) diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c index 571117ac0a..3817d5d9e1 100644 --- a/pimd/pim_upstream.c +++ b/pimd/pim_upstream.c @@ -647,7 +647,7 @@ void pim_upstream_register_reevaluate(struct pim_instance *pim)   * 2. and along the RPT if SPTbit is not set   * If forwarding is hw accelerated i.e. control and dataplane components   * are separate you may not be able to reliably set SPT bit on intermediate - * routers while still fowarding on the (S,G,rpt). + * routers while still forwarding on the (S,G,rpt).   *   * This macro is a slight deviation on the RFC and uses "traffic-agnostic"   * criteria to decide between using the RPT vs. SPT for forwarding. @@ -838,8 +838,7 @@ void pim_upstream_fill_static_iif(struct pim_upstream *up,  	up->rpf.source_nexthop.interface = incoming;  	/* reset other parameters to matched a connected incoming interface */ -	pim_addr_to_prefix(&up->rpf.source_nexthop.mrib_nexthop_addr, -			   PIMADDR_ANY); +	up->rpf.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY;  	up->rpf.source_nexthop.mrib_metric_preference =  		ZEBRA_CONNECT_DISTANCE_DEFAULT;  	up->rpf.source_nexthop.mrib_route_metric = 0; @@ -899,8 +898,7 @@ static struct pim_upstream *pim_upstream_new(struct pim_instance *pim,  	up->sptbit = PIM_UPSTREAM_SPTBIT_FALSE;  	up->rpf.source_nexthop.interface = NULL; -	pim_addr_to_prefix(&up->rpf.source_nexthop.mrib_nexthop_addr, -			   PIMADDR_ANY); +	up->rpf.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY;  	up->rpf.source_nexthop.mrib_metric_preference =  		router->infinite_assert_metric.metric_preference;  	up->rpf.source_nexthop.mrib_route_metric = @@ -1838,7 +1836,7 @@ int pim_upstream_inherited_olist_decide(struct pim_instance *pim,  				flag = PIM_OIF_FLAG_PROTO_STAR;  			else {  				if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags)) -					flag = PIM_OIF_FLAG_PROTO_IGMP; +					flag = PIM_OIF_FLAG_PROTO_GM;  				if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags))  					flag |= PIM_OIF_FLAG_PROTO_PIM;  				if (starch) @@ -2062,6 +2060,7 @@ static bool pim_upstream_sg_running_proc(struct pim_upstream *up)  	if ((up->sptbit != PIM_UPSTREAM_SPTBIT_TRUE) &&  	    (up->rpf.source_nexthop.interface)) {  		pim_upstream_set_sptbit(up, up->rpf.source_nexthop.interface); +		pim_upstream_update_could_assert(up);  	}  	return rv; @@ -2116,7 +2115,7 @@ void pim_upstream_add_lhr_star_pimreg(struct pim_instance *pim)  			continue;  		pim_channel_add_oif(up->channel_oil, pim->regiface, -				    PIM_OIF_FLAG_PROTO_IGMP, __func__); +				    PIM_OIF_FLAG_PROTO_GM, __func__);  	}  } @@ -2161,18 +2160,17 @@ void pim_upstream_remove_lhr_star_pimreg(struct pim_instance *pim,  		if (!nlist) {  			pim_channel_del_oif(up->channel_oil, pim->regiface, -					PIM_OIF_FLAG_PROTO_IGMP, __func__); +					    PIM_OIF_FLAG_PROTO_GM, __func__);  			continue;  		}  		pim_addr_to_prefix(&g, up->sg.grp);  		apply_new = prefix_list_apply(np, &g);  		if (apply_new == PREFIX_DENY)  			pim_channel_add_oif(up->channel_oil, pim->regiface, -					    PIM_OIF_FLAG_PROTO_IGMP, -						__func__); +					    PIM_OIF_FLAG_PROTO_GM, 
__func__);  		else  			pim_channel_del_oif(up->channel_oil, pim->regiface, -					PIM_OIF_FLAG_PROTO_IGMP, __func__); +					    PIM_OIF_FLAG_PROTO_GM, __func__);  	}  } diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c index a0dea63b79..6df36d5fc1 100644 --- a/pimd/pim_vty.c +++ b/pimd/pim_vty.c @@ -40,6 +40,7 @@  #include "pim_bfd.h"  #include "pim_bsm.h"  #include "pim_vxlan.h" +#include "pim6_mld.h"  int pim_debug_config_write(struct vty *vty)  { @@ -65,23 +66,28 @@ int pim_debug_config_write(struct vty *vty)  		vty_out(vty, "debug igmp packets\n");  		++writes;  	} -	if (PIM_DEBUG_IGMP_TRACE) { +	/* PIM_DEBUG_IGMP_TRACE catches _DETAIL too */ +	if (router->debugs & PIM_MASK_IGMP_TRACE) {  		vty_out(vty, "debug igmp trace\n");  		++writes;  	} +	if (PIM_DEBUG_IGMP_TRACE_DETAIL) { +		vty_out(vty, "debug igmp trace detail\n"); +		++writes; +	} -	if (PIM_DEBUG_MROUTE) { +	/* PIM_DEBUG_MROUTE catches _DETAIL too */ +	if (router->debugs & PIM_MASK_MROUTE) {  		vty_out(vty, "debug mroute\n");  		++writes;  	} - -	if (PIM_DEBUG_MTRACE) { -		vty_out(vty, "debug mtrace\n"); +	if (PIM_DEBUG_MROUTE_DETAIL) { +		vty_out(vty, "debug mroute detail\n");  		++writes;  	} -	if (PIM_DEBUG_MROUTE_DETAIL_ONLY) { -		vty_out(vty, "debug mroute detail\n"); +	if (PIM_DEBUG_MTRACE) { +		vty_out(vty, "debug mtrace\n");  		++writes;  	} @@ -102,11 +108,12 @@ int pim_debug_config_write(struct vty *vty)  		++writes;  	} -	if (PIM_DEBUG_PIM_TRACE) { +	/* PIM_DEBUG_PIM_TRACE catches _DETAIL too */ +	if (router->debugs & PIM_MASK_PIM_TRACE) {  		vty_out(vty, "debug pim trace\n");  		++writes;  	} -	if (PIM_DEBUG_PIM_TRACE_DETAIL_ONLY) { +	if (PIM_DEBUG_PIM_TRACE_DETAIL) {  		vty_out(vty, "debug pim trace detail\n");  		++writes;  	} @@ -264,8 +271,8 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)  		struct ssmpingd_sock *ss;  		++writes;  		for (ALL_LIST_ELEMENTS_RO(pim->ssmpingd_list, node, ss)) { -			vty_out(vty, "%sip ssmpingd %pPA\n", spaces, -				&ss->source_addr); +			vty_out(vty, "%s" PIM_AF_NAME " ssmpingd %pPA\n", +				spaces, &ss->source_addr);  			++writes;  		}  	} @@ -285,11 +292,11 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)  }  #if PIM_IPV == 4 -static int pim_igmp_config_write(struct vty *vty, int writes, -				 struct pim_interface *pim_ifp) +static int gm_config_write(struct vty *vty, int writes, +			   struct pim_interface *pim_ifp)  {  	/* IF ip igmp */ -	if (PIM_IF_TEST_IGMP(pim_ifp->options)) { +	if (pim_ifp->igmp_enable) {  		vty_out(vty, " ip igmp\n");  		++writes;  	} @@ -354,6 +361,17 @@ static int pim_igmp_config_write(struct vty *vty, int writes,  	return writes;  } +#else +static int gm_config_write(struct vty *vty, int writes, +			   struct pim_interface *pim_ifp) +{ +	if (pim_ifp->mld_version != MLD_DEFAULT_VERSION) +		vty_out(vty, " ipv6 mld version %d\n", pim_ifp->mld_version); +	if (pim_ifp->gm_default_query_interval != IGMP_GENERAL_QUERY_INTERVAL) +		vty_out(vty, " ipv6 mld query-interval %d\n", +			pim_ifp->gm_default_query_interval); +	return 0; +}  #endif  int pim_config_write(struct vty *vty, int writes, struct interface *ifp, @@ -361,7 +379,7 @@ int pim_config_write(struct vty *vty, int writes, struct interface *ifp,  {  	struct pim_interface *pim_ifp = ifp->info; -	if (PIM_IF_TEST_PIM(pim_ifp->options)) { +	if (pim_ifp->pim_enable) {  		vty_out(vty, " " PIM_AF_NAME " pim\n");  		++writes;  	} @@ -382,9 +400,7 @@ int pim_config_write(struct vty *vty, int writes, struct interface *ifp,  		++writes;  	} -#if PIM_IPV == 
4 -	writes += pim_igmp_config_write(vty, writes, pim_ifp); -#endif +	writes += gm_config_write(vty, writes, pim_ifp);  	/* update source */  	if (!pim_addr_is_any(pim_ifp->update_source)) { diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c index 5e55b9f9c8..120293dbfd 100644 --- a/pimd/pim_vxlan.c +++ b/pimd/pim_vxlan.c @@ -880,6 +880,12 @@ void pim_vxlan_mlag_update(bool enable, bool peer_state, uint32_t role,  	 */  	pim = pim_get_pim_instance(VRF_DEFAULT); +	if (!pim) { +		if (PIM_DEBUG_VXLAN) +			zlog_debug("%s: Unable to find pim instance", __func__); +		return; +	} +  	if (enable)  		vxlan_mlag.flags |= PIM_VXLAN_MLAGF_ENABLED;  	else @@ -1137,7 +1143,7 @@ void pim_vxlan_add_term_dev(struct pim_instance *pim,  	/* enable pim on the term ifp */  	pim_ifp = (struct pim_interface *)ifp->info;  	if (pim_ifp) { -		PIM_IF_DO_PIM(pim_ifp->options); +		pim_ifp->pim_enable = true;  		/* ifp is already oper up; activate it as a term dev */  		if (pim_ifp->mroute_vif_index >= 0)  			pim_vxlan_term_oif_update(pim, ifp); @@ -1165,8 +1171,8 @@ void pim_vxlan_del_term_dev(struct pim_instance *pim)  	pim_ifp = (struct pim_interface *)ifp->info;  	if (pim_ifp) { -		PIM_IF_DONT_PIM(pim_ifp->options); -		if (!PIM_IF_TEST_IGMP(pim_ifp->options)) +		pim_ifp->pim_enable = false; +		if (!pim_ifp->igmp_enable)  			pim_if_delete(ifp);  	}  } diff --git a/pimd/pim_zebra.c b/pimd/pim_zebra.c index 7f463715a5..4135ae0408 100644 --- a/pimd/pim_zebra.c +++ b/pimd/pim_zebra.c @@ -166,6 +166,13 @@ static int pim_zebra_if_address_add(ZAPI_CALLBACK_ARGS)  		struct pim_instance *pim;  		pim = pim_get_pim_instance(vrf_id); +		if (!pim) { +			if (PIM_DEBUG_ZEBRA) +				zlog_debug("%s: Unable to find pim instance", +					   __func__); +			return 0; +		} +  		pim_ifp->pim = pim;  		pim_rp_check_on_if_add(pim_ifp); @@ -492,7 +499,7 @@ void pim_forward_start(struct pim_ifchannel *ch)  			   ch->interface->name, &up->upstream_addr);  	if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags)) -		mask = PIM_OIF_FLAG_PROTO_IGMP; +		mask = PIM_OIF_FLAG_PROTO_GM;  	if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags))  		mask |= PIM_OIF_FLAG_PROTO_PIM; diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c index c487f995e7..a9553089c9 100644 --- a/pimd/pim_zlookup.c +++ b/pimd/pim_zlookup.c @@ -156,7 +156,7 @@ void zclient_lookup_new(void)  static int zclient_read_nexthop(struct pim_instance *pim,  				struct zclient *zlookup,  				struct pim_zlookup_nexthop nexthop_tab[], -				const int tab_size, struct in_addr addr) +				const int tab_size, pim_addr addr)  {  	int num_ifindex = 0;  	struct stream *s; @@ -165,23 +165,19 @@ static int zclient_read_nexthop(struct pim_instance *pim,  	uint8_t version;  	vrf_id_t vrf_id;  	uint16_t command = 0; -	struct in_addr raddr; +	struct ipaddr raddr;  	uint8_t distance;  	uint32_t metric;  	int nexthop_num;  	int i, err; -	if (PIM_DEBUG_PIM_NHT_DETAIL) { -		char addr_str[INET_ADDRSTRLEN]; - -		pim_inet4_dump("<addr?>", addr, addr_str, sizeof(addr_str)); -		zlog_debug("%s: addr=%s(%s)", __func__, addr_str, +	if (PIM_DEBUG_PIM_NHT_DETAIL) +		zlog_debug("%s: addr=%pPAs(%s)", __func__, &addr,  			   pim->vrf->name); -	}  	s = zlookup->ibuf; -	while (command != ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB) { +	while (command != ZEBRA_NEXTHOP_LOOKUP_MRIB) {  		stream_reset(s);  		err = zclient_read_header(s, zlookup->sock, &length, &marker,  					  &version, &vrf_id, &command); @@ -201,15 +197,12 @@ static int zclient_read_nexthop(struct pim_instance *pim,  		}  	} -	raddr.s_addr = stream_get_ipv4(s); +	stream_get_ipaddr(s, &raddr); -	if 
(raddr.s_addr != addr.s_addr) { -		char addr_str[INET_ADDRSTRLEN]; -		char raddr_str[INET_ADDRSTRLEN]; -		pim_inet4_dump("<addr?>", addr, addr_str, sizeof(addr_str)); -		pim_inet4_dump("<raddr?>", raddr, raddr_str, sizeof(raddr_str)); -		zlog_warn("%s: address mismatch: addr=%s(%s) raddr=%s", -			  __func__, addr_str, pim->vrf->name, raddr_str); +	if (raddr.ipa_type != PIM_IPADDR || +	    pim_addr_cmp(raddr.ipaddr_pim, addr)) { +		zlog_warn("%s: address mismatch: addr=%pPA(%s) raddr=%pIA", +			  __func__, &addr, pim->vrf->name, &raddr);  		/* warning only */  	} @@ -227,18 +220,16 @@ static int zclient_read_nexthop(struct pim_instance *pim,  	for (i = 0; i < nexthop_num; ++i) {  		vrf_id_t nexthop_vrf_id;  		enum nexthop_types_t nexthop_type; -		struct pim_neighbor *nbr; -		struct prefix p; +		struct in_addr nh_ip4; +		struct in6_addr nh_ip6; +		ifindex_t nh_ifi;  		nexthop_vrf_id = stream_getl(s);  		nexthop_type = stream_getc(s);  		if (num_ifindex >= tab_size) { -			char addr_str[INET_ADDRSTRLEN]; -			pim_inet4_dump("<addr?>", addr, addr_str, -				       sizeof(addr_str));  			zlog_warn( -				"%s: found too many nexthop ifindexes (%d > %d) for address %s(%s)", -				__func__, (num_ifindex + 1), tab_size, addr_str, +				"%s: found too many nexthop ifindexes (%d > %d) for address %pPAs(%s)", +				__func__, (num_ifindex + 1), tab_size, &addr,  				pim->vrf->name);  			return num_ifindex;  		} @@ -254,33 +245,32 @@ static int zclient_read_nexthop(struct pim_instance *pim,  			 * allow us to work in cases where we are  			 * trying to find a route for this box.  			 */ -			nexthop_tab[num_ifindex].nexthop_addr.family = AF_INET; -			nexthop_tab[num_ifindex].nexthop_addr.prefixlen = -				IPV4_MAX_BITLEN; -			nexthop_tab[num_ifindex].nexthop_addr.u.prefix4 = -				addr; +			nexthop_tab[num_ifindex].nexthop_addr = addr;  			++num_ifindex;  			break;  		case NEXTHOP_TYPE_IPV4_IFINDEX:  		case NEXTHOP_TYPE_IPV4: -			nexthop_tab[num_ifindex].nexthop_addr.family = AF_INET; -			nexthop_tab[num_ifindex].nexthop_addr.u.prefix4.s_addr = -				stream_get_ipv4(s); -			nexthop_tab[num_ifindex].ifindex = stream_getl(s); +			nh_ip4.s_addr = stream_get_ipv4(s); +			nh_ifi = stream_getl(s); +#if PIM_IPV == 4 +			nexthop_tab[num_ifindex].nexthop_addr = nh_ip4; +			nexthop_tab[num_ifindex].ifindex = nh_ifi;  			++num_ifindex; +#else +			zlog_warn("cannot use IPv4 nexthop %pI4 for IPv6 %pPA", +				  &nh_ip4, &addr); +#endif  			break;  		case NEXTHOP_TYPE_IPV6_IFINDEX: -			nexthop_tab[num_ifindex].nexthop_addr.family = AF_INET6; -			stream_get(&nexthop_tab[num_ifindex] -					    .nexthop_addr.u.prefix6, -				   s, sizeof(struct in6_addr)); -			nexthop_tab[num_ifindex].ifindex = stream_getl(s); +			stream_get(&nh_ip6, s, sizeof(nh_ip6)); +			nh_ifi = stream_getl(s); -			p.family = AF_INET6; -			p.prefixlen = IPV6_MAX_BITLEN; -			memcpy(&p.u.prefix6, -			       &nexthop_tab[num_ifindex].nexthop_addr.u.prefix6, -			       sizeof(struct in6_addr)); +#if PIM_IPV == 6 +			nexthop_tab[num_ifindex].nexthop_addr = nh_ip6; +			nexthop_tab[num_ifindex].ifindex = nh_ifi; +			++num_ifindex; +#else +			/* RFC 5549 v4-over-v6 nexthop handling */  			/*  			 * If we are sending v6 secondary assume we receive v6 @@ -291,32 +281,35 @@ static int zclient_read_nexthop(struct pim_instance *pim,  				nexthop_vrf_id);  			if (!ifp) -				nbr = NULL; -			else if (pim->send_v6_secondary) +				break; + +			struct pim_neighbor *nbr; + +			if (pim->send_v6_secondary) { +				struct prefix p; + +				p.family = AF_INET6; +				p.prefixlen = 
IPV6_MAX_BITLEN; +				p.u.prefix6 = nh_ip6; +  				nbr = pim_neighbor_find_by_secondary(ifp, &p); -			else +			} else  				nbr = pim_neighbor_find_if(ifp); -			if (nbr) { -				nexthop_tab[num_ifindex].nexthop_addr.family = -					AF_INET; -				pim_addr_to_prefix( -					&nexthop_tab[num_ifindex].nexthop_addr, -					nbr->source_addr); -			} +			if (!nbr) +				break; + +			nexthop_tab[num_ifindex].nexthop_addr = +				nbr->source_addr; +			nexthop_tab[num_ifindex].ifindex = nh_ifi;  			++num_ifindex; +#endif  			break;  		default:  			/* do nothing */ -			{ -				char addr_str[INET_ADDRSTRLEN]; -				pim_inet4_dump("<addr?>", addr, addr_str, -					       sizeof(addr_str)); -				zlog_warn( -					"%s: found non-ifindex nexthop type=%d for address %s(%s)", -					__func__, nexthop_type, addr_str, -					pim->vrf->name); -			} +			zlog_warn( +				"%s: found non-ifindex nexthop type=%d for address %pPAs(%s)", +				__func__, nexthop_type, &addr, pim->vrf->name);  			break;  		}  	} @@ -326,17 +319,15 @@ static int zclient_read_nexthop(struct pim_instance *pim,  static int zclient_lookup_nexthop_once(struct pim_instance *pim,  				       struct pim_zlookup_nexthop nexthop_tab[], -				       const int tab_size, struct in_addr addr) +				       const int tab_size, pim_addr addr)  {  	struct stream *s;  	int ret; +	struct ipaddr ipaddr; -	if (PIM_DEBUG_PIM_NHT_DETAIL) { -		char addr_str[INET_ADDRSTRLEN]; -		pim_inet4_dump("<addr?>", addr, addr_str, sizeof(addr_str)); -		zlog_debug("%s: addr=%s(%s)", __func__, addr_str, +	if (PIM_DEBUG_PIM_NHT_DETAIL) +		zlog_debug("%s: addr=%pPAs(%s)", __func__, &addr,  			   pim->vrf->name); -	}  	/* Check socket. */  	if (zlookup->sock < 0) { @@ -354,11 +345,13 @@ static int zclient_lookup_nexthop_once(struct pim_instance *pim,  		return -1;  	} +	ipaddr.ipa_type = PIM_IPADDR; +	ipaddr.ipaddr_pim = addr; +  	s = zlookup->obuf;  	stream_reset(s); -	zclient_create_header(s, ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB, -			      pim->vrf->vrf_id); -	stream_put_in_addr(s, &addr); +	zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP_MRIB, pim->vrf->vrf_id); +	stream_put_ipaddr(s, &ipaddr);  	stream_putw_at(s, 0, stream_get_endp(s));  	ret = writen(zlookup->sock, s->data, stream_get_endp(s)); @@ -386,7 +379,13 @@ void zclient_lookup_read_pipe(struct thread *thread)  	struct zclient *zlookup = THREAD_ARG(thread);  	struct pim_instance *pim = pim_get_pim_instance(VRF_DEFAULT);  	struct pim_zlookup_nexthop nexthop_tab[10]; -	struct in_addr l = {.s_addr = INADDR_ANY}; +	pim_addr l = PIMADDR_ANY; + +	if (!pim) { +		if (PIM_DEBUG_PIM_NHT_DETAIL) +			zlog_debug("%s: Unable to find pim instance", __func__); +		return; +	}  	zclient_lookup_nexthop_once(pim, nexthop_tab, 10, l);  	thread_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60, @@ -407,7 +406,7 @@ int zclient_lookup_nexthop(struct pim_instance *pim,  	for (lookup = 0; lookup < max_lookup; ++lookup) {  		int num_ifindex;  		int first_ifindex; -		struct prefix nexthop_addr; +		pim_addr nexthop_addr;  		num_ifindex = zclient_lookup_nexthop_once(pim, nexthop_tab,  							  tab_size, addr); @@ -456,8 +455,7 @@ int zclient_lookup_nexthop(struct pim_instance *pim,  						nexthop_tab[0].route_metric);  				/* use last address as nexthop address */ -				pim_addr_to_prefix( -					&(nexthop_tab[0].nexthop_addr), addr); +				nexthop_tab[0].nexthop_addr = addr;  				/* report original route metric/distance */  				nexthop_tab[0].route_metric = route_metric; @@ -468,23 +466,16 @@ int zclient_lookup_nexthop(struct pim_instance *pim,  			return 
num_ifindex;  		} -		if (PIM_DEBUG_PIM_NHT) { -			char addr_str[INET_ADDRSTRLEN]; -			char nexthop_str[PREFIX_STRLEN]; -			pim_inet4_dump("<addr?>", addr, addr_str, -				       sizeof(addr_str)); -			pim_addr_dump("<nexthop?>", &nexthop_addr, nexthop_str, -				      sizeof(nexthop_str)); +		if (PIM_DEBUG_PIM_NHT)  			zlog_debug( -				"%s: lookup=%d/%d: zebra returned recursive nexthop %s for address %pPA(%s) dist=%d met=%d", -				__func__, lookup, max_lookup, nexthop_str, +				"%s: lookup=%d/%d: zebra returned recursive nexthop %pPAs for address %pPA(%s) dist=%d met=%d", +				__func__, lookup, max_lookup, &nexthop_addr,  				&addr, pim->vrf->name,  				nexthop_tab[0].protocol_distance,  				nexthop_tab[0].route_metric); -		} -		addr = pim_addr_from_prefix(&(nexthop_addr)); /* use nexthop -						  addr for recursive lookup */ +		addr = nexthop_addr; /* use nexthop +					addr for recursive lookup */  	} /* for (max_lookup) */ @@ -514,17 +505,16 @@ int pim_zlookup_sg_statistics(struct channel_oil *c_oil)  	pim_sgaddr sg;  	int count = 0;  	int ret; +	pim_sgaddr more = {};  	struct interface *ifp = -		pim_if_find_by_vif_index(c_oil->pim, c_oil->oil.mfcc_parent); +		pim_if_find_by_vif_index(c_oil->pim, *oil_parent(c_oil));  	if (PIM_DEBUG_ZEBRA) { -		pim_sgaddr more; - -		more.src = c_oil->oil.mfcc_origin; -		more.grp = c_oil->oil.mfcc_mcastgrp; -		zlog_debug("Sending Request for New Channel Oil Information%pSG VIIF %d(%s)", -			   &more, c_oil->oil.mfcc_parent, -			   c_oil->pim->vrf->name); +		more.src = *oil_origin(c_oil); +		more.grp = *oil_mcastgrp(c_oil); +		zlog_debug( +			"Sending Request for New Channel Oil Information%pSG VIIF %d(%s)", +			&more, *oil_parent(c_oil), c_oil->pim->vrf->name);  	}  	if (!ifp) @@ -533,8 +523,9 @@ int pim_zlookup_sg_statistics(struct channel_oil *c_oil)  	stream_reset(s);  	zclient_create_header(s, ZEBRA_IPMR_ROUTE_STATS,  			      c_oil->pim->vrf->vrf_id); -	stream_put_in_addr(s, &c_oil->oil.mfcc_origin); -	stream_put_in_addr(s, &c_oil->oil.mfcc_mcastgrp); +	stream_putl(s, PIM_AF); +	stream_write(s, oil_origin(c_oil), sizeof(pim_addr)); +	stream_write(s, oil_mcastgrp(c_oil), sizeof(pim_addr));  	stream_putl(s, ifp->ifindex);  	stream_putw_at(s, 0, stream_get_endp(s)); @@ -568,20 +559,17 @@ int pim_zlookup_sg_statistics(struct channel_oil *c_oil)  		}  	} -	sg.src.s_addr = stream_get_ipv4(s); -	sg.grp.s_addr = stream_get_ipv4(s); -	if (sg.src.s_addr != c_oil->oil.mfcc_origin.s_addr -	    || sg.grp.s_addr != c_oil->oil.mfcc_mcastgrp.s_addr) { -		if (PIM_DEBUG_ZEBRA) { -			pim_sgaddr more; +	stream_get(&sg.src, s, sizeof(pim_addr)); +	stream_get(&sg.grp, s, sizeof(pim_addr)); -			more.src = c_oil->oil.mfcc_origin; -			more.grp = c_oil->oil.mfcc_mcastgrp; +	more.src = *oil_origin(c_oil); +	more.grp = *oil_mcastgrp(c_oil); +	if (pim_sgaddr_cmp(sg, more)) { +		if (PIM_DEBUG_ZEBRA)  			flog_err(  				EC_LIB_ZAPI_MISSMATCH,  				"%s: Received wrong %pSG(%s) information requested",  				__func__, &more, c_oil->pim->vrf->name); -		}  		zclient_lookup_failed(zlookup);  		return -3;  	} diff --git a/pimd/pim_zlookup.h b/pimd/pim_zlookup.h index c3818dbdbc..4ea865a7e7 100644 --- a/pimd/pim_zlookup.h +++ b/pimd/pim_zlookup.h @@ -28,7 +28,7 @@  struct pim_zlookup_nexthop {  	vrf_id_t vrf_id; -	struct prefix nexthop_addr; +	pim_addr nexthop_addr;  	ifindex_t ifindex;  	uint32_t route_metric;  	uint8_t protocol_distance; diff --git a/pimd/pimd.h b/pimd/pimd.h index 1f7919ac6c..aeb4859952 100644 --- a/pimd/pimd.h +++ b/pimd/pimd.h @@ -157,23 +157,22 @@ extern uint8_t 
qpim_ecmp_rebalance_enable;  	(router->debugs & PIM_MASK_PIM_PACKETDUMP_SEND)  #define PIM_DEBUG_PIM_PACKETDUMP_RECV                                          \  	(router->debugs & PIM_MASK_PIM_PACKETDUMP_RECV) -#define PIM_DEBUG_PIM_TRACE (router->debugs & PIM_MASK_PIM_TRACE) +#define PIM_DEBUG_PIM_TRACE                                                    \ +	(router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_PIM_TRACE_DETAIL))  #define PIM_DEBUG_PIM_TRACE_DETAIL                                             \ -	(router->debugs & (PIM_MASK_PIM_TRACE_DETAIL | PIM_MASK_PIM_TRACE)) -#define PIM_DEBUG_PIM_TRACE_DETAIL_ONLY                                        \  	(router->debugs & PIM_MASK_PIM_TRACE_DETAIL)  #define PIM_DEBUG_IGMP_EVENTS (router->debugs & PIM_MASK_IGMP_EVENTS)  #define PIM_DEBUG_IGMP_PACKETS (router->debugs & PIM_MASK_IGMP_PACKETS) -#define PIM_DEBUG_IGMP_TRACE (router->debugs & PIM_MASK_IGMP_TRACE) +#define PIM_DEBUG_IGMP_TRACE                                                   \ +	(router->debugs & (PIM_MASK_IGMP_TRACE | PIM_MASK_IGMP_TRACE_DETAIL))  #define PIM_DEBUG_IGMP_TRACE_DETAIL                                            \ -	(router->debugs & (PIM_MASK_IGMP_TRACE_DETAIL | PIM_MASK_IGMP_TRACE)) +	(router->debugs & PIM_MASK_IGMP_TRACE_DETAIL)  #define PIM_DEBUG_ZEBRA (router->debugs & PIM_MASK_ZEBRA)  #define PIM_DEBUG_MLAG (router->debugs & PIM_MASK_MLAG)  #define PIM_DEBUG_SSMPINGD (router->debugs & PIM_MASK_SSMPINGD) -#define PIM_DEBUG_MROUTE (router->debugs & PIM_MASK_MROUTE) -#define PIM_DEBUG_MROUTE_DETAIL                                                \ -	(router->debugs & (PIM_MASK_MROUTE_DETAIL | PIM_MASK_MROUTE)) -#define PIM_DEBUG_MROUTE_DETAIL_ONLY (router->debugs & PIM_MASK_MROUTE_DETAIL) +#define PIM_DEBUG_MROUTE                                                       \ +	(router->debugs & (PIM_MASK_MROUTE | PIM_MASK_MROUTE_DETAIL)) +#define PIM_DEBUG_MROUTE_DETAIL (router->debugs & PIM_MASK_MROUTE_DETAIL)  #define PIM_DEBUG_PIM_HELLO (router->debugs & PIM_MASK_PIM_HELLO)  #define PIM_DEBUG_PIM_J_P (router->debugs & PIM_MASK_PIM_J_P)  #define PIM_DEBUG_PIM_REG (router->debugs & PIM_MASK_PIM_REG) diff --git a/pimd/subdir.am b/pimd/subdir.am index 9e61b03c94..5219794083 100644 --- a/pimd/subdir.am +++ b/pimd/subdir.am @@ -9,6 +9,7 @@ noinst_PROGRAMS += pimd/test_igmpv3_join  vtysh_scan += \  	pimd/pim_cmd.c \  	pimd/pim6_cmd.c \ +	pimd/pim6_mld.c \  	#end  vtysh_daemons += pimd  vtysh_daemons += pim6d @@ -21,6 +22,7 @@ pim_common = \  	pimd/pim_assert.c \  	pimd/pim_bfd.c \  	pimd/pim_br.c \ +	pimd/pim_bsm.c \  	pimd/pim_cmd_common.c \  	pimd/pim_errors.c \  	pimd/pim_hello.c \ @@ -39,6 +41,7 @@ pim_common = \  	pimd/pim_neighbor.c \  	pimd/pim_nht.c \  	pimd/pim_oil.c \ +	pimd/pim_pim.c \  	pimd/pim_routemap.c \  	pimd/pim_rp.c \  	pimd/pim_rpf.c \ @@ -54,13 +57,14 @@ pim_common = \  	pimd/pim_util.c \  	pimd/pim_vty.c \  	pimd/pim_zebra.c \ +	pimd/pim_zlookup.c \  	pimd/pim_vxlan.c \ +	pimd/pim_register.c \  	pimd/pimd.c \  	# end  pimd_pimd_SOURCES = \  	$(pim_common) \ -	pimd/pim_bsm.c \  	pimd/pim_cmd.c \  	pimd/pim_igmp.c \  	pimd/pim_igmp_mtrace.c \ @@ -72,10 +76,7 @@ pimd_pimd_SOURCES = \  	pimd/pim_msdp.c \  	pimd/pim_msdp_packet.c \  	pimd/pim_msdp_socket.c \ -	pimd/pim_pim.c \ -	pimd/pim_register.c \  	pimd/pim_signals.c \ -	pimd/pim_zlookup.c \  	pimd/pim_zpthread.c \  	pimd/pim_mroute_msg.c \  	# end @@ -89,6 +90,7 @@ nodist_pimd_pimd_SOURCES = \  pimd_pim6d_SOURCES = \  	$(pim_common) \  	pimd/pim6_main.c \ +	pimd/pim6_mld.c \  	pimd/pim6_stubs.c \  	
pimd/pim6_cmd.c \  	pimd/pim6_mroute_msg.c \ @@ -155,6 +157,8 @@ noinst_HEADERS += \  	pimd/pim_vxlan.h \  	pimd/pim_vxlan_instance.h \  	pimd/pimd.h \ +	pimd/pim6_mld.h \ +	pimd/pim6_mld_protocol.h \  	pimd/mtracebis_netlink.h \  	pimd/mtracebis_routeget.h \  	pimd/pim6_cmd.h \ @@ -163,6 +167,7 @@ noinst_HEADERS += \  clippy_scan += \  	pimd/pim_cmd.c \  	pimd/pim6_cmd.c \ +	pimd/pim6_mld.c \  	# end  pimd_pimd_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4 @@ -170,11 +175,7 @@ pimd_pimd_LDADD = lib/libfrr.la $(LIBCAP)  if PIMD  if DEV_BUILD -# -# pim6d is only enabled for --enable-dev-build, and NOT installed currently -# (change noinst_ to sbin_ below to install it.) -# -noinst_PROGRAMS += pimd/pim6d +sbin_PROGRAMS += pimd/pim6d  pimd_pim6d_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=6  pimd_pim6d_LDADD = lib/libfrr.la $(LIBCAP)  endif diff --git a/redhat/frr.logrotate b/redhat/frr.logrotate index 22b2332b7c..04ae96b654 100644 --- a/redhat/frr.logrotate +++ b/redhat/frr.logrotate @@ -110,6 +110,14 @@      endscript  } +/var/log/frr/pathd.log { +    notifempty +    missingok +    postrotate +        /bin/kill -USR1 `cat /var/run/frr/pathd.pid 2> /dev/null` 2> /dev/null || true +    endscript +} +  /var/log/frr/pbrd.log {      notifempty      missingok diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in index 740cfe498a..73c214ea2e 100644 --- a/redhat/frr.spec.in +++ b/redhat/frr.spec.in @@ -260,7 +260,7 @@ Group: System Environment/Daemons  Requires: %{name} = %{version}-%{release}  %description devel -The frr-devel package contains the header and object files neccessary for +The frr-devel package contains the header and object files necessary for  developing OSPF-API and frr applications. @@ -432,7 +432,8 @@ popd  %install  mkdir -p %{buildroot}%{_sysconfdir}/{frr,sysconfig,logrotate.d,pam.d,default} \ -         %{buildroot}%{_localstatedir}/log/frr %{buildroot}%{_infodir} +         %{buildroot}%{_infodir} +mkdir -m 0755 -p %{buildroot}%{_localstatedir}/log/frr  make DESTDIR=%{buildroot} INSTALL="install -p" CP="cp -p" install  # Remove this file, as it is uninstalled and causes errors when building on RH9 @@ -639,11 +640,11 @@ fi  /usr/share/yang/*.yang  %if 0%{?frr_user:1}      %dir %attr(751,%{frr_user},%{frr_user}) %{configdir} -    %dir %attr(750,%{frr_user},%{frr_user}) %{_localstatedir}/log/frr +    %dir %attr(755,%{frr_user},%{frr_user}) %{_localstatedir}/log/frr      %dir %attr(751,%{frr_user},%{frr_user}) %{rundir}  %else      %dir %attr(750,root,root) %{configdir} -    %dir %attr(750,root,root) %{_localstatedir}/log/frr +    %dir %attr(755,root,root) %{_localstatedir}/log/frr      %dir %attr(750,root,root) %{rundir}  %endif  %{_infodir}/frr.info.gz @@ -918,7 +919,7 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons  -    Add ability to show BGP routes from a particular table version  -    Add support for for RFC 8050 (MRT add-path)  -    Add SNMP support for MPLS VPN --    Add `show bgp summary wide` command to show more detailed output  +-    Add `show bgp summary wide` command to show more detailed output       on wide terminals  -    Add ability for peer-groups to have `ttl-security hops` configured  -    Add support for conditional Advertisement diff --git a/ripd/rip_interface.c b/ripd/rip_interface.c index 1269f25b35..042c9713b2 100644 --- a/ripd/rip_interface.c +++ b/ripd/rip_interface.c @@ -311,7 +311,7 @@ int if_check_address(struct rip *rip, struct in_addr addr)  	return 0;  } -/* Inteface link down message processing. */ +/* Interface link down message processing. 
*/  static int rip_ifp_down(struct interface *ifp)  {  	rip_interface_sync(ifp); @@ -327,7 +327,7 @@ static int rip_ifp_down(struct interface *ifp)  	return 0;  } -/* Inteface link up message processing */ +/* Interface link up message processing */  static int rip_ifp_up(struct interface *ifp)  {  	if (IS_RIP_DEBUG_ZEBRA) @@ -351,7 +351,7 @@ static int rip_ifp_up(struct interface *ifp)  	return 0;  } -/* Inteface addition message from zebra. */ +/* Interface addition message from zebra. */  static int rip_ifp_create(struct interface *ifp)  {  	rip_interface_sync(ifp); @@ -617,7 +617,7 @@ int rip_interface_address_delete(ZAPI_CALLBACK_ARGS)  			hook_call(rip_ifaddr_del, ifc); -			/* Chech wether this prefix needs to be removed */ +			/* Chech whether this prefix needs to be removed */  			rip_apply_address_del(ifc);  		} @@ -628,7 +628,7 @@ int rip_interface_address_delete(ZAPI_CALLBACK_ARGS)  }  /* Check interface is enabled by network statement. */ -/* Check wether the interface has at least a connected prefix that +/* Check whether the interface has at least a connected prefix that   * is within the ripng_enable_network table. */  static int rip_enable_network_lookup_if(struct interface *ifp)  { @@ -663,7 +663,7 @@ static int rip_enable_network_lookup_if(struct interface *ifp)  	return -1;  } -/* Check wether connected is within the ripng_enable_network table. */ +/* Check whether connected is within the ripng_enable_network table. */  static int rip_enable_network_lookup2(struct connected *connected)  {  	struct rip_interface *ri = connected->ifp->info; @@ -840,7 +840,7 @@ static void rip_connect_set(struct interface *ifp, int set)  		nh.ifindex = connected->ifp->ifindex;  		nh.type = NEXTHOP_TYPE_IFINDEX;  		if (set) { -			/* Check once more wether this prefix is within a +			/* Check once more whether this prefix is within a  			 * "network IF_OR_PREF" one */  			if ((rip_enable_if_lookup(rip, connected->ifp->name)  			     >= 0) diff --git a/ripd/rip_offset.c b/ripd/rip_offset.c index 4034fe8424..d499229340 100644 --- a/ripd/rip_offset.c +++ b/ripd/rip_offset.c @@ -78,7 +78,7 @@ struct rip_offset_list *rip_offset_list_lookup(struct rip *rip,  	return NULL;  } -/* If metric is modifed return 1. */ +/* If metric is modified return 1. */  int rip_offset_list_apply_in(struct prefix_ipv4 *p, struct interface *ifp,  			     uint32_t *metric)  { @@ -115,7 +115,7 @@ int rip_offset_list_apply_in(struct prefix_ipv4 *p, struct interface *ifp,  	return 0;  } -/* If metric is modifed return 1. */ +/* If metric is modified return 1. */  int rip_offset_list_apply_out(struct prefix_ipv4 *p, struct interface *ifp,  			      uint32_t *metric)  { diff --git a/ripd/rip_routemap.c b/ripd/rip_routemap.c index 061cefec30..4da5f945fe 100644 --- a/ripd/rip_routemap.c +++ b/ripd/rip_routemap.c @@ -461,7 +461,7 @@ static const struct route_map_rule_cmd route_set_metric_cmd = {  /* `set ip next-hop IP_ADDRESS' */ -/* Set nexthop to object.  ojbect must be pointer to struct attr. */ +/* Set nexthop to object.  object must be pointer to struct attr. */  static enum route_map_cmd_result_t  route_set_ip_nexthop(void *rule, const struct prefix *prefix, @@ -515,7 +515,7 @@ static const struct route_map_rule_cmd route_set_ip_nexthop_cmd = {  /* `set tag TAG' */ -/* Set tag to object.  ojbect must be pointer to struct attr. */ +/* Set tag to object.  object must be pointer to struct attr. 
*/  static enum route_map_cmd_result_t  route_set_tag(void *rule, const struct prefix *prefix, void *object)  { diff --git a/ripd/ripd.h b/ripd/ripd.h index 85aac985f5..f26dcd8775 100644 --- a/ripd/ripd.h +++ b/ripd/ripd.h @@ -526,7 +526,7 @@ extern void rip_cli_init(void);  extern struct zebra_privs_t ripd_privs;  extern struct rip_instance_head rip_instances; -/* Master thread strucutre. */ +/* Master thread structure. */  extern struct thread_master *master;  DECLARE_HOOK(rip_ifaddr_add, (struct connected * ifc), (ifc)); diff --git a/ripngd/ripng_interface.c b/ripngd/ripng_interface.c index 57bc40f005..5159a9825b 100644 --- a/ripngd/ripng_interface.c +++ b/ripngd/ripng_interface.c @@ -198,7 +198,7 @@ static int ripng_if_down(struct interface *ifp)  	return 0;  } -/* Inteface link up message processing. */ +/* Interface link up message processing. */  static int ripng_ifp_up(struct interface *ifp)  {  	if (IS_RIPNG_DEBUG_ZEBRA) @@ -222,7 +222,7 @@ static int ripng_ifp_up(struct interface *ifp)  	return 0;  } -/* Inteface link down message processing. */ +/* Interface link down message processing. */  static int ripng_ifp_down(struct interface *ifp)  {  	ripng_interface_sync(ifp); @@ -238,7 +238,7 @@ static int ripng_ifp_down(struct interface *ifp)  	return 0;  } -/* Inteface addition message from zebra. */ +/* Interface addition message from zebra. */  static int ripng_ifp_create(struct interface *ifp)  {  	ripng_interface_sync(ifp); @@ -432,7 +432,7 @@ int ripng_interface_address_delete(ZAPI_CALLBACK_ARGS)  					"RIPng connected address %pFX delete",  					p); -			/* Check wether this prefix needs to be removed. */ +			/* Check whether this prefix needs to be removed. */  			ripng_apply_address_del(ifc);  		}  		connected_free(&ifc); @@ -442,7 +442,7 @@ int ripng_interface_address_delete(ZAPI_CALLBACK_ARGS)  }  /* Lookup RIPng enable network. */ -/* Check wether the interface has at least a connected prefix that +/* Check whether the interface has at least a connected prefix that   * is within the ripng->enable_network table. */  static int ripng_enable_network_lookup_if(struct interface *ifp)  { @@ -477,7 +477,7 @@ static int ripng_enable_network_lookup_if(struct interface *ifp)  	return -1;  } -/* Check wether connected is within the ripng->enable_network table. */ +/* Check whether connected is within the ripng->enable_network table. */  static int ripng_enable_network_lookup2(struct connected *connected)  {  	struct ripng_interface *ri = connected->ifp->info; @@ -647,7 +647,7 @@ static void ripng_connect_set(struct interface *ifp, int set)  		apply_mask_ipv6(&address);  		if (set) { -			/* Check once more wether this prefix is within a +			/* Check once more whether this prefix is within a  			 * "network IF_OR_PREF" one */  			if ((ripng_enable_if_lookup(ripng, connected->ifp->name)  			     >= 0) diff --git a/ripngd/ripng_offset.c b/ripngd/ripng_offset.c index efce8a0926..38c69f1b75 100644 --- a/ripngd/ripng_offset.c +++ b/ripngd/ripng_offset.c @@ -85,7 +85,7 @@ struct ripng_offset_list *ripng_offset_list_lookup(struct ripng *ripng,  	return NULL;  } -/* If metric is modifed return 1. */ +/* If metric is modified return 1. */  int ripng_offset_list_apply_in(struct ripng *ripng, struct prefix_ipv6 *p,  			       struct interface *ifp, uint8_t *metric)  { @@ -123,7 +123,7 @@ int ripng_offset_list_apply_in(struct ripng *ripng, struct prefix_ipv6 *p,  	return 0;  } -/* If metric is modifed return 1. */ +/* If metric is modified return 1. 
*/  int ripng_offset_list_apply_out(struct ripng *ripng, struct prefix_ipv6 *p,  				struct interface *ifp, uint8_t *metric)  { diff --git a/ripngd/ripng_routemap.c b/ripngd/ripng_routemap.c index 9aed8d6963..4f2f9e2101 100644 --- a/ripngd/ripng_routemap.c +++ b/ripngd/ripng_routemap.c @@ -254,7 +254,7 @@ static const struct route_map_rule_cmd route_set_metric_cmd = {  /* `set ipv6 next-hop local IP_ADDRESS' */ -/* Set nexthop to object.  ojbect must be pointer to struct attr. */ +/* Set nexthop to object.  object must be pointer to struct attr. */  static enum route_map_cmd_result_t  route_set_ipv6_nexthop_local(void *rule, const struct prefix *p, void *object)  { @@ -307,7 +307,7 @@ static const struct route_map_rule_cmd  /* `set tag TAG' */ -/* Set tag to object.  ojbect must be pointer to struct attr. */ +/* Set tag to object.  object must be pointer to struct attr. */  static enum route_map_cmd_result_t  route_set_tag(void *rule, const struct prefix *prefix, void *object)  { diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c index 889643f65e..2281b3ce26 100644 --- a/sharpd/sharp_vty.c +++ b/sharpd/sharp_vty.c @@ -104,7 +104,7 @@ DEFPY(watch_nexthop_v6, watch_nexthop_v6_cmd,  		p.family = AF_INET6;  	} else {  		type_import = true; -		p = *(const struct prefix *)inhop; +		prefix_copy(&p, inhop);  	}  	sharp_nh_tracker_get(&p); @@ -149,7 +149,7 @@ DEFPY(watch_nexthop_v4, watch_nexthop_v4_cmd,  	}  	else {  		type_import = true; -		p = *(const struct prefix *)inhop; +		prefix_copy(&p, inhop);  	}  	sharp_nh_tracker_get(&p); diff --git a/staticd/static_routes.c b/staticd/static_routes.c index 589d509a59..ed4cdc51ce 100644 --- a/staticd/static_routes.c +++ b/staticd/static_routes.c @@ -369,13 +369,11 @@ void static_install_nexthop(struct static_nexthop *nh)  	switch (nh->type) {  	case STATIC_IPV4_GATEWAY:  	case STATIC_IPV6_GATEWAY: -		if (!static_zebra_nh_update(nh)) -			static_zebra_nht_register(nh, true); +		static_zebra_nht_register(nh, true);  		break;  	case STATIC_IPV4_GATEWAY_IFNAME:  	case STATIC_IPV6_GATEWAY_IFNAME: -		if (!static_zebra_nh_update(nh)) -			static_zebra_nht_register(nh, true); +		static_zebra_nht_register(nh, true);  		break;  	case STATIC_BLACKHOLE:  		static_install_path(pn); diff --git a/staticd/static_zebra.c b/staticd/static_zebra.c index af153b4bc3..de07ad8ef3 100644 --- a/staticd/static_zebra.c +++ b/staticd/static_zebra.c @@ -57,6 +57,7 @@ struct static_nht_data {  	uint32_t refcount;  	uint8_t nh_num; +	bool registered;  };  static int static_nht_data_cmp(const struct static_nht_data *nhtd1, @@ -87,7 +88,7 @@ static struct static_nht_hash_head static_nht_hash[1];  struct zclient *zclient;  uint32_t zebra_ecmp_count = MULTIPATH_NUM; -/* Inteface addition message from zebra. */ +/* Interface addition message from zebra. 
*/  static int static_ifp_create(struct interface *ifp)  {  	static_ifindex_update(ifp, true); @@ -175,9 +176,12 @@ static int route_notify_owner(ZAPI_CALLBACK_ARGS)  	return 0;  } +  static void zebra_connected(struct zclient *zclient)  {  	zclient_send_reg_requests(zclient, VRF_DEFAULT); + +	static_fixup_vrf_ids(vrf_info_lookup(VRF_DEFAULT));  }  /* API to check whether the configured nexthop address is @@ -262,8 +266,12 @@ static_nht_hash_getref(const struct static_nht_data *ref)  	return nhtd;  } -static bool static_nht_hash_decref(struct static_nht_data *nhtd) +static bool static_nht_hash_decref(struct static_nht_data **nhtd_p)  { +	struct static_nht_data *nhtd = *nhtd_p; + +	*nhtd_p = NULL; +  	if (--nhtd->refcount > 0)  		return true; @@ -280,133 +288,110 @@ static void static_nht_hash_clear(void)  		XFREE(MTYPE_STATIC_NHT_DATA, nhtd);  } -void static_zebra_nht_register(struct static_nexthop *nh, bool reg) +static bool static_zebra_nht_get_prefix(const struct static_nexthop *nh, +					struct prefix *p)  { -	struct static_path *pn = nh->pn; -	struct route_node *rn = pn->rn; -	struct static_route_info *si = static_route_info_from_rnode(rn); -	struct static_nht_data lookup; -	uint32_t cmd; -	struct prefix p; -	afi_t afi = AFI_IP; - -	cmd = (reg) ? -		ZEBRA_NEXTHOP_REGISTER : ZEBRA_NEXTHOP_UNREGISTER; - -	if (nh->nh_registered && reg) -		return; - -	if (!nh->nh_registered && !reg) -		return; - -	memset(&p, 0, sizeof(p));  	switch (nh->type) {  	case STATIC_IFNAME:  	case STATIC_BLACKHOLE: -		return; +		p->family = AF_UNSPEC; +		return false; +  	case STATIC_IPV4_GATEWAY:  	case STATIC_IPV4_GATEWAY_IFNAME: -		p.family = AF_INET; -		p.prefixlen = IPV4_MAX_BITLEN; -		p.u.prefix4 = nh->addr.ipv4; -		afi = AFI_IP; -		break; +		p->family = AF_INET; +		p->prefixlen = IPV4_MAX_BITLEN; +		p->u.prefix4 = nh->addr.ipv4; +		return true; +  	case STATIC_IPV6_GATEWAY:  	case STATIC_IPV6_GATEWAY_IFNAME: -		p.family = AF_INET6; -		p.prefixlen = IPV6_MAX_BITLEN; -		p.u.prefix6 = nh->addr.ipv6; -		afi = AFI_IP6; -		break; +		p->family = AF_INET6; +		p->prefixlen = IPV6_MAX_BITLEN; +		p->u.prefix6 = nh->addr.ipv6; +		return true;  	} -	memset(&lookup, 0, sizeof(lookup)); -	lookup.nh = p; +	assertf(0, "BUG: someone forgot to add nexthop type %u", nh->type); +} + +void static_zebra_nht_register(struct static_nexthop *nh, bool reg) +{ +	struct static_path *pn = nh->pn; +	struct route_node *rn = pn->rn; +	struct static_route_info *si = static_route_info_from_rnode(rn); +	struct static_nht_data *nhtd, lookup = {}; +	uint32_t cmd; + +	if (!static_zebra_nht_get_prefix(nh, &lookup.nh)) +		return;  	lookup.nh_vrf_id = nh->nh_vrf_id;  	lookup.safi = si->safi; -	nh->nh_registered = reg; - -	if (reg) { -		struct static_nht_data *nhtd; +	if (nh->nh_registered) { +		/* nh->nh_registered means we own a reference on the nhtd */ +		nhtd = static_nht_hash_find(static_nht_hash, &lookup); +		assertf(nhtd, "BUG: NH %pFX registered but not in hashtable", +			&lookup.nh); +	} else if (reg) {  		nhtd = static_nht_hash_getref(&lookup); -		if (nhtd->refcount > 1) { +		if (nhtd->refcount > 1)  			DEBUGD(&static_dbg_route, -			       "Already registered nexthop(%pFX) for %pRN %d", -			       &p, rn, nhtd->nh_num); -			if (nhtd->nh_num) -				static_nht_update(&rn->p, &nhtd->nh, -						  nhtd->nh_num, afi, si->safi, -						  nh->nh_vrf_id); -			return; -		} +			       "Reusing registered nexthop(%pFX) for %pRN %d", +			       &lookup.nh, rn, nhtd->nh_num);  	} else { -		struct static_nht_data *nhtd; +		/* !reg && !nh->nh_registered */ 
+		zlog_warn("trying to unregister nexthop %pFX twice", +			  &lookup.nh); +		return; +	} -		nhtd = static_nht_hash_find(static_nht_hash, &lookup); -		if (!nhtd) +	nh->nh_registered = reg; + +	if (reg) { +		if (nhtd->nh_num) { +			/* refresh with existing data */ +			afi_t afi = prefix_afi(&lookup.nh); + +			if (nh->state == STATIC_NOT_INSTALLED) +				nh->state = STATIC_START; +			static_nht_update(&rn->p, &nhtd->nh, nhtd->nh_num, afi, +					  si->safi, nh->nh_vrf_id);  			return; -		if (static_nht_hash_decref(nhtd)) +		} + +		if (nhtd->registered) +			/* have no data, but did send register */  			return; -	} -	DEBUGD(&static_dbg_route, "%s nexthop(%pFX) for %pRN", -	       reg ? "Registering" : "Unregistering", &p, rn); +		cmd = ZEBRA_NEXTHOP_REGISTER; +		DEBUGD(&static_dbg_route, "Registering nexthop(%pFX) for %pRN", +		       &lookup.nh, rn); +	} else { +		bool was_zebra_registered; -	if (zclient_send_rnh(zclient, cmd, &p, si->safi, false, false, -			     nh->nh_vrf_id) == ZCLIENT_SEND_FAILURE) -		zlog_warn("%s: Failure to send nexthop to zebra", __func__); -} -/* - * When nexthop gets updated via configuration then use the - * already registered NH and resend the route to zebra - */ -int static_zebra_nh_update(struct static_nexthop *nh) -{ -	struct static_path *pn = nh->pn; -	struct route_node *rn = pn->rn; -	struct static_route_info *si = static_route_info_from_rnode(rn); -	struct static_nht_data *nhtd, lookup = {}; -	struct prefix p = {}; -	afi_t afi = AFI_IP; +		was_zebra_registered = nhtd->registered; +		if (static_nht_hash_decref(&nhtd)) +			/* still got references alive */ +			return; -	if (!nh->nh_registered) -		return 0; +		/* NB: nhtd is now NULL. */ +		if (!was_zebra_registered) +			return; -	switch (nh->type) { -	case STATIC_IFNAME: -	case STATIC_BLACKHOLE: -		return 0; -	case STATIC_IPV4_GATEWAY: -	case STATIC_IPV4_GATEWAY_IFNAME: -		p.family = AF_INET; -		p.prefixlen = IPV4_MAX_BITLEN; -		p.u.prefix4 = nh->addr.ipv4; -		afi = AFI_IP; -		break; -	case STATIC_IPV6_GATEWAY: -	case STATIC_IPV6_GATEWAY_IFNAME: -		p.family = AF_INET6; -		p.prefixlen = IPV6_MAX_BITLEN; -		p.u.prefix6 = nh->addr.ipv6; -		afi = AFI_IP6; -		break; +		cmd = ZEBRA_NEXTHOP_UNREGISTER; +		DEBUGD(&static_dbg_route, +		       "Unregistering nexthop(%pFX) for %pRN", &lookup.nh, rn);  	} -	lookup.nh = p; -	lookup.nh_vrf_id = nh->nh_vrf_id; -	lookup.safi = si->safi; - -	nhtd = static_nht_hash_find(static_nht_hash, &lookup); -	if (nhtd && nhtd->nh_num) { -		nh->state = STATIC_START; -		static_nht_update(&rn->p, &nhtd->nh, nhtd->nh_num, afi, -				  si->safi, nh->nh_vrf_id); -		return 1; -	} -	return 0; +	if (zclient_send_rnh(zclient, cmd, &lookup.nh, si->safi, false, false, +			     nh->nh_vrf_id) == ZCLIENT_SEND_FAILURE) +		zlog_warn("%s: Failure to send nexthop %pFX for %pRN to zebra", +			  __func__, &lookup.nh, rn); +	else if (reg) +		nhtd->registered = true;  }  extern void static_zebra_route_add(struct static_path *pn, bool install) diff --git a/staticd/static_zebra.h b/staticd/static_zebra.h index e30fa3fd57..1cf13dcbbb 100644 --- a/staticd/static_zebra.h +++ b/staticd/static_zebra.h @@ -33,7 +33,6 @@ extern void static_zebra_init(void);  extern void static_zebra_stop(void);  extern void static_zebra_vrf_register(struct vrf *vrf);  extern void static_zebra_vrf_unregister(struct vrf *vrf); -extern int static_zebra_nh_update(struct static_nexthop *nh);  #ifdef __cplusplus  } diff --git a/tests/bgpd/test_aspath.c b/tests/bgpd/test_aspath.c index 7288dc0869..3cc9a63072 100644 --- a/tests/bgpd/test_aspath.c 
+++ b/tests/bgpd/test_aspath.c @@ -40,7 +40,7 @@  #define FAILED VT100_RED "failed" VT100_RESET  /* need these to link in libbgp */ -struct zebra_privs_t *bgpd_privs = NULL; +struct zebra_privs_t bgpd_privs = {};  struct thread_master *master = NULL;  static int failed = 0; diff --git a/tests/bgpd/test_capability.c b/tests/bgpd/test_capability.c index 3568411387..44d15d6014 100644 --- a/tests/bgpd/test_capability.c +++ b/tests/bgpd/test_capability.c @@ -44,7 +44,7 @@  #define OPT_PARAM  2  /* need these to link in libbgp */ -struct zebra_privs_t *bgpd_privs = NULL; +struct zebra_privs_t bgpd_privs = {};  struct thread_master *master = NULL;  static int failed = 0; diff --git a/tests/bgpd/test_ecommunity.c b/tests/bgpd/test_ecommunity.c index 317bfff8ab..7147e3faf3 100644 --- a/tests/bgpd/test_ecommunity.c +++ b/tests/bgpd/test_ecommunity.c @@ -30,7 +30,7 @@  #include "bgpd/bgp_ecommunity.h"  /* need these to link in libbgp */ -struct zebra_privs_t *bgpd_privs = NULL; +struct zebra_privs_t bgpd_privs = {};  struct thread_master *master = NULL;  static int failed = 0; diff --git a/tests/bgpd/test_mp_attr.c b/tests/bgpd/test_mp_attr.c index 909930d6bc..c5ce5d3cd2 100644 --- a/tests/bgpd/test_mp_attr.c +++ b/tests/bgpd/test_mp_attr.c @@ -49,7 +49,7 @@  #define OPT_PARAM  2  /* need these to link in libbgp */ -struct zebra_privs_t *bgpd_privs = NULL; +struct zebra_privs_t bgpd_privs = {};  struct thread_master *master = NULL;  static int failed = 0; diff --git a/tests/bgpd/test_packet.c b/tests/bgpd/test_packet.c index 27afa6a121..2ce8b561bb 100644 --- a/tests/bgpd/test_packet.c +++ b/tests/bgpd/test_packet.c @@ -37,7 +37,7 @@  #include "bgpd/bgp_network.h"  /* need these to link in libbgp */ -struct zebra_privs_t *bgpd_privs = NULL; +struct zebra_privs_t bgpd_privs = {};  struct thread_master *master = NULL;  static struct bgp *bgp; diff --git a/tests/isisd/test_topologies.c b/tests/isisd/test_topologies.c index ca103948f3..b3e500a33c 100644 --- a/tests/isisd/test_topologies.c +++ b/tests/isisd/test_topologies.c @@ -31,7 +31,7 @@   * - The Router-ID is 10.0.255.X, where X is the node number;   * - The default link metric is 10;   * - When SR is enabled, Adj-SIDs and Prefix-SIDs are generated automatically; - * - When SR is enabled, the default SRGB is [16000-23999] (can be overriden). + * - When SR is enabled, the default SRGB is [16000-23999] (can be overridden).   
*   * Test topology 1:   * ================ diff --git a/tests/lib/test_frrlua.c b/tests/lib/test_frrlua.c index a81446f9ca..fb6b77c0f8 100644 --- a/tests/lib/test_frrlua.c +++ b/tests/lib/test_frrlua.c @@ -61,7 +61,7 @@ static void test_encode_decode(void)  	assert(strncmp(p_a_str, p_b_str, sizeof(p_b_str)) == 0);  	assert(lua_gettop(L) == 0); -	struct interface ifp_a; +	struct interface ifp_a = {};  	struct interface ifp_b = ifp_a;  	lua_pushinterface(L, &ifp_a); @@ -79,7 +79,7 @@ static void test_encode_decode(void)  	assert(ifp_a.ll_type == ifp_b.ll_type);  	assert(lua_gettop(L) == 0); -	struct in_addr addr_a; +	struct in_addr addr_a = {};  	struct in_addr addr_b = addr_a;  	lua_pushinaddr(L, &addr_a); @@ -87,7 +87,7 @@ static void test_encode_decode(void)  	assert(addr_a.s_addr == addr_b.s_addr);  	assert(lua_gettop(L) == 0); -	struct in6_addr in6addr_a; +	struct in6_addr in6addr_a = {};  	struct in6_addr in6addr_b = in6addr_a;  	lua_pushin6addr(L, &in6addr_a); diff --git a/tests/lib/test_grpc.py b/tests/lib/test_grpc.py index 2e292fadc9..7f722de422 100644 --- a/tests/lib/test_grpc.py +++ b/tests/lib/test_grpc.py @@ -13,6 +13,10 @@ class TestGRPC(object):          'S["GRPC_TRUE"]=""\n' not in open("../config.status").readlines(),          reason="GRPC not enabled",      ) +    @pytest.mark.skipif( +        not os.path.isdir("/usr/share/yang"), +        reason="YANG models aren't installed in /usr/share/yang", +    )      def test_exits_cleanly(self):          basedir = os.path.dirname(inspect.getsourcefile(type(self)))          program = os.path.join(basedir, self.program) diff --git a/tests/lib/test_table.c b/tests/lib/test_table.c index 9b6539e3bc..cef93ad0c5 100644 --- a/tests/lib/test_table.c +++ b/tests/lib/test_table.c @@ -382,7 +382,7 @@ static void verify_prefix_iter_cmp(const char *p1, const char *p2,  	assert(exp_result == result);  	/* -	 * Also check the reverse comparision. +	 * Also check the reverse comparison.  	 */  	result = route_table_prefix_iter_cmp((struct prefix *)&p2_pfx,  					     (struct prefix *)&p1_pfx); @@ -398,7 +398,7 @@ static void verify_prefix_iter_cmp(const char *p1, const char *p2,  /*   * test_prefix_iter_cmp   * - * Tests comparision of prefixes according to order of iteration. + * Tests comparison of prefixes according to order of iteration.   */  static void test_prefix_iter_cmp(void)  { diff --git a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py index 961d72bd15..ec66c8caef 100644 --- a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py +++ b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py @@ -122,7 +122,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_auth/test_bgp_auth.py b/tests/topotests/bgp_auth/test_bgp_auth.py index f01c7f206a..9e8136c17b 100644 --- a/tests/topotests/bgp_auth/test_bgp_auth.py +++ b/tests/topotests/bgp_auth/test_bgp_auth.py @@ -158,7 +158,7 @@ def setup_module(mod):      # This is a sample of configuration loading.      
router_list = tgen.routers() -    # For all registred routers, load the zebra configuration file +    # For all registered routers, load the zebra configuration file      for rname, router in router_list.items():          router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")          router.load_config(TopoRouter.RD_OSPF) diff --git a/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py index f416f3d2a4..b18e32f6bd 100644 --- a/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py +++ b/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py @@ -137,7 +137,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -371,7 +371,7 @@ def test_bgp_timers_functionality(request):      # Creating configuration from JSON      reset_config_on_routers(tgen) -    # Api call to modfiy BGP timerse +    # Api call to modify BGP timerse      input_dict = {          "r1": {              "bgp": { diff --git a/tests/topotests/bgp_communities_topo1/test_bgp_communities.py b/tests/topotests/bgp_communities_topo1/test_bgp_communities.py index 123461caa9..e4c25ff5cb 100644 --- a/tests/topotests/bgp_communities_topo1/test_bgp_communities.py +++ b/tests/topotests/bgp_communities_topo1/test_bgp_communities.py @@ -99,7 +99,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_communities_topo1/test_bgp_communities_topo2.py b/tests/topotests/bgp_communities_topo1/test_bgp_communities_topo2.py index 947efa8f8a..f6ee9ea795 100644 --- a/tests/topotests/bgp_communities_topo1/test_bgp_communities_topo2.py +++ b/tests/topotests/bgp_communities_topo1/test_bgp_communities_topo2.py @@ -103,7 +103,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      
# Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py b/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py index 8a7192be29..31f033ae12 100644 --- a/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py +++ b/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py @@ -169,7 +169,7 @@ def test_bgp_community_update_path_change():              if (                  len(                      tgen.gears["c1"].run( -                        'grep "10.0.1.2 rcvd 192.168.255.254/32 IPv4 unicast...duplicate ignored" bgpd.log' +                        'grep "10.0.1.2(x1) rcvd 192.168.255.254/32 IPv4 unicast...duplicate ignored" bgpd.log'                      )                  )                  > 0 diff --git a/tests/topotests/bgp_ecmp_topo2/test_ebgp_ecmp_topo2.py b/tests/topotests/bgp_ecmp_topo2/test_ebgp_ecmp_topo2.py index ad999a1aff..2784e956fa 100644 --- a/tests/topotests/bgp_ecmp_topo2/test_ebgp_ecmp_topo2.py +++ b/tests/topotests/bgp_ecmp_topo2/test_ebgp_ecmp_topo2.py @@ -29,7 +29,7 @@ Following tests are covered to test ecmp functionality on EBGP.  3. Verify BGP table and RIB in DUT after clear BGP routes and neighbors.  4. Verify routes are cleared from BGP and RIB table of DUT when     redistribute static configuration is removed. -5. Shut BGP neigbors one by one and verify BGP and routing table updated +5. Shut BGP neighbors one by one and verify BGP and routing table updated     accordingly in DUT  6. Delete static routes and verify routers are cleared from BGP table and RIB     of DUT. @@ -105,7 +105,7 @@ def setup_module(mod):      topo = tgen.json_topo      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -475,7 +475,7 @@ def test_ecmp_remove_redistribute_static(request):  @pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])  def test_ecmp_shut_bgp_neighbor(request, test_type): -    """Shut BGP neigbors one by one and verify BGP and routing table updated +    """Shut BGP neighbors one by one and verify BGP and routing table updated      accordingly in DUT"""      tc_name = request.node.name diff --git a/tests/topotests/bgp_ecmp_topo2/test_ibgp_ecmp_topo2.py b/tests/topotests/bgp_ecmp_topo2/test_ibgp_ecmp_topo2.py index 28047424b4..704e8fdf04 100644 --- a/tests/topotests/bgp_ecmp_topo2/test_ibgp_ecmp_topo2.py +++ b/tests/topotests/bgp_ecmp_topo2/test_ibgp_ecmp_topo2.py @@ -29,7 +29,7 @@ Following tests are covered to test ecmp functionality on EBGP.  3. Verify BGP table and RIB in DUT after clear BGP routes and neighbors.  4. Verify routes are cleared from BGP and RIB table of DUT when     redistribute static configuration is removed. -5. Shut BGP neigbors one by one and verify BGP and routing table updated +5. Shut BGP neighbors one by one and verify BGP and routing table updated     accordingly in DUT  6. Delete static routes and verify routers are cleared from BGP table and RIB     of DUT. 
@@ -105,7 +105,7 @@ def setup_module(mod):      topo = tgen.json_topo      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -476,7 +476,7 @@ def test_ecmp_remove_redistribute_static(request):  @pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])  def test_ecmp_shut_bgp_neighbor(request, test_type): -    """Shut BGP neigbors one by one and verify BGP and routing table updated +    """Shut BGP neighbors one by one and verify BGP and routing table updated      accordingly in DUT"""      tc_name = request.node.name diff --git a/tests/topotests/bgp_evpn_overlay_index_gateway/test_bgp_evpn_overlay_index_gateway.py b/tests/topotests/bgp_evpn_overlay_index_gateway/test_bgp_evpn_overlay_index_gateway.py index 17f5fb08b9..86a8751621 100755 --- a/tests/topotests/bgp_evpn_overlay_index_gateway/test_bgp_evpn_overlay_index_gateway.py +++ b/tests/topotests/bgp_evpn_overlay_index_gateway/test_bgp_evpn_overlay_index_gateway.py @@ -135,7 +135,7 @@ def setup_module(mod):          l3mdev_accept = 0      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      tgen.start_topology()      # Configure MAC address for hosts as these MACs are advertised with EVPN type-2 routes @@ -192,7 +192,7 @@ def setup_module(mod):          pe.cmd_raises("sysctl -w net.ipv4.udp_l3mdev_accept={}".format(l3mdev_accept))          pe.cmd_raises("sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept)) -    # For all registred routers, load the zebra configuration file +    # For all registered routers, load the zebra configuration file      for (name, router) in tgen.routers().items():          router.load_config(              TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(name)) diff --git a/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py b/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py index 40972d4a6a..5d0a326afb 100755 --- a/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py +++ b/tests/topotests/bgp_evpn_vxlan_topo1/test_bgp_evpn_vxlan.py @@ -118,7 +118,7 @@ def setup_module(mod):      # This is a sample of configuration loading.      router_list = tgen.routers() -    # For all registred routers, load the zebra configuration file +    # For all registered routers, load the zebra configuration file      for rname, router in router_list.items():          router.load_config(              TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) diff --git a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-1.py b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-1.py index 290bf16fea..f155325502 100644 --- a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-1.py +++ b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-1.py @@ -20,7 +20,7 @@  #  """ -Following tests are covered to test BGP Gracefull Restart functionality. +Following tests are covered to test BGP Graceful Restart functionality.  
Basic Common Test steps for all the test case below :  - Create topology (setup module)    Creating 2 routers topology, r1, r2 in IBGP @@ -81,7 +81,7 @@ Basic Common Test steps for all the test case below :      Global Mode : GR Restart      PerPeer Mode :  None      GR Mode effective : GR Restart -23. Transition from Peer-level disbale to Global inherit helper +23. Transition from Peer-level disable to Global inherit helper      Global Mode : None      PerPeer Mode :  GR Disable      GR Mode effective : GR Disable @@ -176,7 +176,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-2.py b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-2.py index 0647ad5d06..dda3bd4b30 100644 --- a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-2.py +++ b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-2.py @@ -20,7 +20,7 @@  #  """ -Following tests are covered to test BGP Gracefull Restart functionality. +Following tests are covered to test BGP Graceful Restart functionality.  Basic Common Test steps for all the test case below :  - Create topology (setup module)    Creating 2 routers topology, r1, r2 in IBGP @@ -81,7 +81,7 @@ Basic Common Test steps for all the test case below :      Global Mode : GR Restart      PerPeer Mode :  None      GR Mode effective : GR Restart -23. Transition from Peer-level disbale to Global inherit helper +23. Transition from Peer-level disable to Global inherit helper      Global Mode : None      PerPeer Mode :  GR Disable      GR Mode effective : GR Disable @@ -176,7 +176,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-3.py b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-3.py index 0c3ff6451e..e4b533b295 100644 --- a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-3.py +++ b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-3.py @@ -20,7 +20,7 @@  #  """ -Following tests are covered to test BGP Gracefull Restart functionality. +Following tests are covered to test BGP Graceful Restart functionality.  Basic Common Test steps for all the test case below :  - Create topology (setup module)    Creating 2 routers topology, r1, r2 in IBGP @@ -81,7 +81,7 @@ Basic Common Test steps for all the test case below :      Global Mode : GR Restart      PerPeer Mode :  None      GR Mode effective : GR Restart -23. Transition from Peer-level disbale to Global inherit helper +23. Transition from Peer-level disable to Global inherit helper      Global Mode : None      PerPeer Mode :  GR Disable      GR Mode effective : GR Disable @@ -176,7 +176,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      
# Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-4.py b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-4.py index 791ca37eae..835ef41de1 100644 --- a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-4.py +++ b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1-4.py @@ -20,7 +20,7 @@  #  """ -Following tests are covered to test BGP Gracefull Restart functionality. +Following tests are covered to test BGP Graceful Restart functionality.  Basic Common Test steps for all the test case below :  - Create topology (setup module)    Creating 2 routers topology, r1, r2 in IBGP @@ -81,7 +81,7 @@ Basic Common Test steps for all the test case below :      Global Mode : GR Restart      PerPeer Mode :  None      GR Mode effective : GR Restart -23. Transition from Peer-level disbale to Global inherit helper +23. Transition from Peer-level disable to Global inherit helper      Global Mode : None      PerPeer Mode :  GR Disable      GR Mode effective : GR Disable @@ -176,7 +176,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -1418,7 +1418,7 @@ def test_BGP_GR_TC_49_p1(request):  def BGP_GR_TC_52_p1(request):      """ -    Test Objective : Transition from Peer-level disbale to Global inherit helper +    Test Objective : Transition from Peer-level disable to Global inherit helper      Global Mode : None      PerPeer Mode :  GR Disable      GR Mode effective : GR Disable diff --git a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-1.py b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-1.py index 064fde1633..3afe38857b 100644 --- a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-1.py +++ b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-1.py @@ -173,7 +173,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-2.py b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-2.py index 4356c4d591..535f272ef4 100644 --- a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-2.py +++ b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-2.py @@ -173,7 +173,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      
# Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-3.py b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-3.py index 86d676dd8b..e60552ed10 100644 --- a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-3.py +++ b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-3.py @@ -173,7 +173,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -1041,7 +1041,7 @@ def test_BGP_GR_15_p2(request):      logger.info(          "[Step 2] : Test Setup "          "[Helper Mode]R6-----R1[Restart Mode]" -        "--------R2[Helper Mode] Initilized" +        "--------R2[Helper Mode] Initialized"      )      # Configure graceful-restart diff --git a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-4.py b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-4.py index 889f47f377..1df77ebeb2 100644 --- a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-4.py +++ b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2-4.py @@ -175,7 +175,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -456,7 +456,7 @@ def test_BGP_GR_20_p1(request):      reset_config_on_routers(tgen)      logger.info( -        "[Step 1] : Test Setup " "[Restart Mode]R3-----R1[Restart Mode] Initilized" +        "[Step 1] : Test Setup " "[Restart Mode]R3-----R1[Restart Mode] Initialized"      )      # Configure graceful-restart @@ -602,7 +602,7 @@ def test_BGP_GR_21_p2(request):      reset_config_on_routers(tgen)      logger.info( -        "[Step 1] : Test Setup " "[Helper Mode]R6-----R1[Restart Mode] Initilized" +        "[Step 1] : Test Setup " "[Helper Mode]R6-----R1[Restart Mode] Initialized"      )      # Configure graceful-restart @@ -676,7 +676,7 @@ def test_BGP_GR_21_p2(request):      logger.info(          "[Step 2] : Test Setup "          "[Restart Mode]R2-----[Helper Mode]R1[Disable Mode]" -        "--------R6[Helper Mode] Initilized" +        "--------R6[Helper Mode] Initialized"      )      # Configure graceful-restart @@ -821,7 +821,7 @@ def test_BGP_GR_22_p2(request):      reset_config_on_routers(tgen)      logger.info( -        "[Step 1] : Test Setup " "[Helper Mode]R3-----R1[Restart Mode] Initilized" +        "[Step 1] : Test Setup " "[Helper Mode]R3-----R1[Restart Mode] Initialized"      )      # Configure graceful-restart @@ -901,7 +901,7 @@ def test_BGP_GR_22_p2(request):      logger.info(          "[Step 2] : Test Setup "          "[Restart Mode]R2-----[Helper Mode]R1[Disable Mode]" -        "--------R3[Helper Mode] Initilized" +        "--------R3[Helper Mode] Initialized"      )      # Configure graceful-restart diff --git 
a/tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py b/tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py index c19ee06bab..6bf8b96309 100644 --- a/tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py +++ b/tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py @@ -266,7 +266,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py b/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py index 14b8055d97..58133c44a9 100644 --- a/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py +++ b/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py @@ -108,7 +108,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py b/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py index e842e64ada..a79ce0e3a0 100644 --- a/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py +++ b/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py @@ -103,7 +103,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_ibgp_nbr.py b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_ibgp_nbr.py index e9de3a5e15..68436177d8 100644 --- a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_ibgp_nbr.py +++ b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_ibgp_nbr.py @@ -123,7 +123,7 @@ def setup_module(mod):      topo = tgen.json_topo      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -155,7 +155,7 @@ def teardown_module():  def get_llip(onrouter, intf):      """ -    API to get the link local ipv6 address of a perticular interface +    API to get the link local ipv6 address of a particular interface      Parameters      ---------- @@ -182,7 +182,7 @@ def get_llip(onrouter, intf):  def get_glipv6(onrouter, intf):      """ -    API to get the global ipv6 address of a perticular interface +    API to get the global ipv6 address of a particular interface      Parameters      ---------- diff --git a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_nbr.py b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_nbr.py index b31c8499e8..1d424caa30 100644 --- a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_nbr.py +++ b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_nbr.py @@ -133,7 +133,7 @@ def setup_module(mod):      topo = tgen.json_topo      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and 
then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -161,7 +161,7 @@ def teardown_module():  def get_llip(onrouter, intf):      """ -    API to get the link local ipv6 address of a perticular interface +    API to get the link local ipv6 address of a particular interface      Parameters      ---------- @@ -188,7 +188,7 @@ def get_llip(onrouter, intf):  def get_glipv6(onrouter, intf):      """ -    API to get the global ipv6 address of a perticular interface +    API to get the global ipv6 address of a particular interface      Parameters      ---------- diff --git a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py index bc5c4ddcd7..16d6b1993d 100644 --- a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py +++ b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py @@ -126,7 +126,7 @@ def setup_module(mod):      topo = tgen.json_topo      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -154,7 +154,7 @@ def teardown_module():  def get_llip(onrouter, intf):      """ -    API to get the link local ipv6 address of a perticular interface +    API to get the link local ipv6 address of a particular interface      Parameters      ---------- @@ -181,7 +181,7 @@ def get_llip(onrouter, intf):  def get_glipv6(onrouter, intf):      """ -    API to get the global ipv6 address of a perticular interface +    API to get the global ipv6 address of a particular interface      Parameters      ---------- diff --git a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_nbr.py b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_nbr.py index 3ce0293ffe..862cae42e9 100644 --- a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_nbr.py +++ b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_nbr.py @@ -124,7 +124,7 @@ def setup_module(mod):      topo = tgen.json_topo      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -152,7 +152,7 @@ def teardown_module():  def get_llip(onrouter, intf):      """ -    API to get the link local ipv6 address of a perticular interface +    API to get the link local ipv6 address of a particular interface      Parameters      ---------- @@ -180,7 +180,7 @@ def get_llip(onrouter, intf):  def get_glipv6(onrouter, intf):      """ -    API to get the global ipv6 address of a perticular interface +    API to get the global ipv6 address of a particular interface      Parameters      ---------- diff --git a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_unnumbered_nbr.py b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_unnumbered_nbr.py index a5a8b5fe68..1a91257f06 100644 --- a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_unnumbered_nbr.py +++ b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_unnumbered_nbr.py @@ -112,7 +112,7 @@ def setup_module(mod):      topo = tgen.json_topo      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating 
configuration from JSON @@ -141,7 +141,7 @@ def teardown_module():  def get_llip(onrouter, intf):      """ -    API to get the link local ipv6 address of a perticular interface +    API to get the link local ipv6 address of a particular interface      Parameters      ---------- @@ -169,7 +169,7 @@ def get_llip(onrouter, intf):  def get_glipv6(onrouter, intf):      """ -    API to get the global ipv6 address of a perticular interface +    API to get the global ipv6 address of a particular interface      Parameters      ---------- diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py index fa3598ff8e..03cfded514 100644 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py @@ -141,7 +141,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py index 6b62b2c5ee..b11cda3cd0 100644 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py @@ -130,7 +130,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_lu_topo1/test_bgp_lu.py b/tests/topotests/bgp_lu_topo1/test_bgp_lu.py index 8941854593..8955f2794d 100644 --- a/tests/topotests/bgp_lu_topo1/test_bgp_lu.py +++ b/tests/topotests/bgp_lu_topo1/test_bgp_lu.py @@ -92,7 +92,7 @@ def setup_module(mod):      # This is a sample of configuration loading.      router_list = tgen.routers() -    # For all registred routers, load the zebra configuration file +    # For all registered routers, load the zebra configuration file      for rname, router in router_list.items():          router.load_config(              TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) diff --git a/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py b/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py index fbe1b038e3..7eb5bda2d8 100644 --- a/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py +++ b/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py @@ -204,7 +204,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      
# Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py b/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py index 05961b1104..04ebe61a02 100644 --- a/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py +++ b/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py @@ -157,7 +157,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -1224,7 +1224,7 @@ def test_shut_noshut_p1(request):      result = create_router_bgp(tgen, topo, input_dict_3)      assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) -    step("Api call to modfiy BGP timers") +    step("Api call to modify BGP timers")      input_dict_4 = {          "r1": { @@ -1757,7 +1757,7 @@ def test_vrf_vlan_routing_table_p1(request):              tc_name, result          ) -    step("Api call to modfiy BGP timers") +    step("Api call to modify BGP timers")      input_dict_4 = {          "r3": { @@ -2422,7 +2422,7 @@ def test_delete_and_re_add_vrf_p1(request):          result = verify_rib(tgen, addr_type, dut, input_dict_2)          assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) -    step("Api call to modfiy BGP timers") +    step("Api call to modify BGP timers")      input_dict_4 = {          "r1": { @@ -3034,7 +3034,7 @@ def test_vrf_name_significance_p1(request):          result = verify_rib(tgen, addr_type, dut, input_dict_4)          assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) -    step("Api call to modfiy BGP timers") +    step("Api call to modify BGP timers")      input_dict_4 = {          "r3": { @@ -3437,7 +3437,7 @@ def test_vrf_name_significance_p1(request):      result = create_router_bgp(tgen, topo_modify["routers"])      assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) -    step("Api call to modfiy BGP timers") +    step("Api call to modify BGP timers")      input_dict_4 = {          "r3": { diff --git a/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py b/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py index 1bd4c233d8..870c2f7a10 100644 --- a/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py +++ b/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py @@ -115,7 +115,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py b/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py index 64093497cb..2dc95cee21 100644 --- a/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py +++ b/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py @@ -100,7 +100,7 @@ def setup_module(mod):      # ... 
and here it calls Mininet initialization functions.
 
     # Starting topology, create tmp files which are loaded to routers
-    #  to start deamons and then start routers
+    #  to start daemons and then start routers
     start_topology(tgen)
 
     # Creating configuration from JSON
diff --git a/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py b/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py
index e255b4e88c..04f866f35b 100644
--- a/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py
+++ b/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py
@@ -130,7 +130,7 @@ def setup_module(mod):
     # ... and here it calls Mininet initialization functions.
 
     # Starting topology, create tmp files which are loaded to routers
-    #  to start deamons and then start routers
+    #  to start daemons and then start routers
     start_topology(tgen)
 
     # Creating configuration from JSON
diff --git a/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py b/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py
index 1367d77e55..8c9d2c9dbf 100644
--- a/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py
+++ b/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py
@@ -119,7 +119,7 @@ def setup_module(mod):
     # ... and here it calls Mininet initialization functions.
 
     # Starting topology, create tmp files which are loaded to routers
-    #  to start deamons and then start routers
+    #  to start daemons and then start routers
     start_topology(tgen)
 
     # Creating configuration from JSON
diff --git a/tests/topotests/bgp_route_map/test_route_map_topo1.py b/tests/topotests/bgp_route_map/test_route_map_topo1.py
index 6556c050bb..655a3dc899 100644
--- a/tests/topotests/bgp_route_map/test_route_map_topo1.py
+++ b/tests/topotests/bgp_route_map/test_route_map_topo1.py
@@ -131,7 +131,7 @@ def setup_module(mod):
     # ... and here it calls Mininet initialization functions.
 
     # Starting topology, create tmp files which are loaded to routers
-    #  to start deamons and then start routers
+    #  to start daemons and then start routers
     start_topology(tgen)
 
     # Creating configuration from JSON
diff --git a/tests/topotests/bgp_route_map/test_route_map_topo2.py b/tests/topotests/bgp_route_map/test_route_map_topo2.py
index eccb2c1bf2..4da7eeb2ff 100644
--- a/tests/topotests/bgp_route_map/test_route_map_topo2.py
+++ b/tests/topotests/bgp_route_map/test_route_map_topo2.py
@@ -176,7 +176,7 @@ def setup_module(mod):
     # ... and here it calls Mininet initialization functions.
 
     # Starting topology, create tmp files which are loaded to routers
-    #  to start deamons and then start routers
+    #  to start daemons and then start routers
     start_topology(tgen)
 
     # Creating configuration from JSON
diff --git a/tests/topotests/bgp_set_aspath_replace/__init__.py b/tests/topotests/bgp_set_aspath_replace/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_set_aspath_replace/__init__.py
diff --git a/tests/topotests/bgp_set_aspath_replace/r1/bgpd.conf b/tests/topotests/bgp_set_aspath_replace/r1/bgpd.conf
new file mode 100644
index 0000000000..1e98f4e491
--- /dev/null
+++ b/tests/topotests/bgp_set_aspath_replace/r1/bgpd.conf
@@ -0,0 +1,17 @@
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 3 10
+ address-family ipv4 unicast
+  neighbor 192.168.1.2 route-map r2 in
+ exit-address-family
+!
+ip prefix-list p1 seq 5 permit 172.16.255.31/32
+!
+route-map r2 permit 10
+ match ip address prefix-list p1
+ set as-path replace 65003
+route-map r2 permit 20
+ set as-path replace any
+!
diff --git a/tests/topotests/bgp_set_aspath_replace/r1/zebra.conf b/tests/topotests/bgp_set_aspath_replace/r1/zebra.conf
new file mode 100644
index 0000000000..acf120b200
--- /dev/null
+++ b/tests/topotests/bgp_set_aspath_replace/r1/zebra.conf
@@ -0,0 +1,6 @@
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_set_aspath_replace/r2/bgpd.conf b/tests/topotests/bgp_set_aspath_replace/r2/bgpd.conf
new file mode 100644
index 0000000000..23367f94ff
--- /dev/null
+++ b/tests/topotests/bgp_set_aspath_replace/r2/bgpd.conf
@@ -0,0 +1,8 @@
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 3 10
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 3 10
+!
diff --git a/tests/topotests/bgp_set_aspath_replace/r2/zebra.conf b/tests/topotests/bgp_set_aspath_replace/r2/zebra.conf
new file mode 100644
index 0000000000..f229954341
--- /dev/null
+++ b/tests/topotests/bgp_set_aspath_replace/r2/zebra.conf
@@ -0,0 +1,9 @@
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+!
+interface r2-eth1
+ ip address 192.168.2.2/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_set_aspath_replace/r3/bgpd.conf b/tests/topotests/bgp_set_aspath_replace/r3/bgpd.conf
new file mode 100644
index 0000000000..b7a7ceda13
--- /dev/null
+++ b/tests/topotests/bgp_set_aspath_replace/r3/bgpd.conf
@@ -0,0 +1,9 @@
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 3 10
+ address-family ipv4 unicast
+  redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_set_aspath_replace/r3/zebra.conf b/tests/topotests/bgp_set_aspath_replace/r3/zebra.conf
new file mode 100644
index 0000000000..3fa6c64484
--- /dev/null
+++ b/tests/topotests/bgp_set_aspath_replace/r3/zebra.conf
@@ -0,0 +1,10 @@
+!
+int lo
+ ip address 172.16.255.31/32
+ ip address 172.16.255.32/32
+!
+interface r3-eth0
+ ip address 192.168.2.1/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_set_aspath_replace/test_bgp_set_aspath_replace.py b/tests/topotests/bgp_set_aspath_replace/test_bgp_set_aspath_replace.py
new file mode 100644
index 0000000000..d5549ae899
--- /dev/null
+++ b/tests/topotests/bgp_set_aspath_replace/test_bgp_set_aspath_replace.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+#
+# test_bgp_set_aspath_replace.py
+#
+# Copyright (c) 2022 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test if `set as-path replace` is working correctly for route-maps.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    for routern in range(1, 5):
+        tgen.add_router("r{}".format(routern))
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r2"])
+    switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+    tgen = Topogen(build_topo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    for i, (rname, router) in enumerate(router_list.items(), 1):
+        router.load_config(
+            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+        )
+
+    tgen.start_router()
+
+
+def teardown_module(mod):
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+
+def test_bgp_maximum_prefix_out():
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    def _bgp_converge(router):
+        output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast json"))
+        expected = {
+            "routes": {
+                "172.16.255.31/32": [{"path": "65002 65001"}],
+                "172.16.255.32/32": [{"path": "65001 65001"}],
+            }
+        }
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(_bgp_converge, tgen.gears["r1"])
+    _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+
+    assert result is None, "Failed overriding incoming AS-PATH with route-map"
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/test_bgp_snmp_mplsvpn.py b/tests/topotests/bgp_snmp_mplsl3vpn/test_bgp_snmp_mplsvpn.py
index 0d27474cbd..d612ad2c94 100755
--- a/tests/topotests/bgp_snmp_mplsl3vpn/test_bgp_snmp_mplsvpn.py
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/test_bgp_snmp_mplsvpn.py
@@ -154,7 +154,7 @@ def setup_module(mod):
 
     router_list = tgen.routers()
 
-    # For all registred routers, load the zebra configuration file
+    # For all registered routers, load the zebra configuration file
     for rname, router in router_list.items():
         router.load_config(
             TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
diff --git a/tests/topotests/bgp_tcp_mss/test_bgp_vrf_tcp_mss.py b/tests/topotests/bgp_tcp_mss/test_bgp_vrf_tcp_mss.py
index c48bd8a439..7094c2805d 100644
--- a/tests/topotests/bgp_tcp_mss/test_bgp_vrf_tcp_mss.py
+++ b/tests/topotests/bgp_tcp_mss/test_bgp_vrf_tcp_mss.py
@@ -126,7 +126,7 @@ def setup_module(mod):
     topo = tgen.json_topo
     # ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON      build_config_from_json(tgen, topo) diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py index 07ba0964d4..7d71ef761e 100644 --- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py +++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py @@ -131,7 +131,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Run these tests for kernel version 4.19 or above diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py index 8ba96ef7a0..391c272dbc 100644 --- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py +++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py @@ -118,7 +118,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Run these tests for kernel version 4.19 or above diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak_topo3/test_bgp_vrf_dynamic_route_leak_topo3.py b/tests/topotests/bgp_vrf_dynamic_route_leak_topo3/test_bgp_vrf_dynamic_route_leak_topo3.py index 1cf1b29097..a9aefc5003 100644 --- a/tests/topotests/bgp_vrf_dynamic_route_leak_topo3/test_bgp_vrf_dynamic_route_leak_topo3.py +++ b/tests/topotests/bgp_vrf_dynamic_route_leak_topo3/test_bgp_vrf_dynamic_route_leak_topo3.py @@ -126,7 +126,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Run these tests for kernel version 4.19 or above diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak_topo4/test_bgp_vrf_dynamic_route_leak_topo4.py b/tests/topotests/bgp_vrf_dynamic_route_leak_topo4/test_bgp_vrf_dynamic_route_leak_topo4.py index 82daf08e18..97016caa75 100644 --- a/tests/topotests/bgp_vrf_dynamic_route_leak_topo4/test_bgp_vrf_dynamic_route_leak_topo4.py +++ b/tests/topotests/bgp_vrf_dynamic_route_leak_topo4/test_bgp_vrf_dynamic_route_leak_topo4.py @@ -127,7 +127,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      
# Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Run these tests for kernel version 4.19 or above diff --git a/tests/topotests/bgp_vrf_lite_best_path_test/test_bgp_vrf_lite_best_path_topo1.py b/tests/topotests/bgp_vrf_lite_best_path_test/test_bgp_vrf_lite_best_path_topo1.py index d9d4f4f8b2..b95e71c2eb 100644 --- a/tests/topotests/bgp_vrf_lite_best_path_test/test_bgp_vrf_lite_best_path_topo1.py +++ b/tests/topotests/bgp_vrf_lite_best_path_test/test_bgp_vrf_lite_best_path_topo1.py @@ -129,7 +129,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Run these tests for kernel version 4.19 or above diff --git a/tests/topotests/bgp_vrf_lite_best_path_test/test_bgp_vrf_lite_best_path_topo2.py b/tests/topotests/bgp_vrf_lite_best_path_test/test_bgp_vrf_lite_best_path_topo2.py index e930b62706..9291fbd966 100644 --- a/tests/topotests/bgp_vrf_lite_best_path_test/test_bgp_vrf_lite_best_path_topo2.py +++ b/tests/topotests/bgp_vrf_lite_best_path_test/test_bgp_vrf_lite_best_path_topo2.py @@ -123,7 +123,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Run these tests for kernel version 4.19 or above diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py index 72d1251d25..332ffdf6d5 100644 --- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py +++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py @@ -141,7 +141,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py index b3ff9d79ca..a641fec584 100644 --- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py +++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py @@ -158,7 +158,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      
# Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/example_topojson_test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py b/tests/topotests/example_topojson_test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py index 107b5e9624..fe4a256484 100755 --- a/tests/topotests/example_topojson_test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py +++ b/tests/topotests/example_topojson_test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py @@ -110,7 +110,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # This function only purpose is to create configuration diff --git a/tests/topotests/example_topojson_test/test_topo_json_single_link/test_example_topojson.py b/tests/topotests/example_topojson_test/test_topo_json_single_link/test_example_topojson.py index b03215d21c..8bc28528dc 100755 --- a/tests/topotests/example_topojson_test/test_topo_json_single_link/test_example_topojson.py +++ b/tests/topotests/example_topojson_test/test_topo_json_single_link/test_example_topojson.py @@ -111,7 +111,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # This function only purpose is to create configuration diff --git a/tests/topotests/example_topojson_test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example_topojson_test/test_topo_json_single_link_loopback/test_example_topojson.py index 594b156f8b..caf0a7c138 100755 --- a/tests/topotests/example_topojson_test/test_topo_json_single_link_loopback/test_example_topojson.py +++ b/tests/topotests/example_topojson_test/test_topo_json_single_link_loopback/test_example_topojson.py @@ -112,7 +112,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      
# Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # This function only purpose is to create configuration diff --git a/tests/topotests/grpc_basic/test_basic_grpc.py b/tests/topotests/grpc_basic/test_basic_grpc.py index b6812a5afc..6bd0be9fd9 100644 --- a/tests/topotests/grpc_basic/test_basic_grpc.py +++ b/tests/topotests/grpc_basic/test_basic_grpc.py @@ -122,7 +122,7 @@ def test_get_config(tgen):      nrepeat = 5      r1 = tgen.gears["r1"] -    step("'GET' inteface config 10 times, once per invocation") +    step("'GET' interface config 10 times, once per invocation")      for i in range(0, nrepeat):          output = run_grpc_client(r1, GRPCP_ZEBRA, "GET,/frr-interface:lib") diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index 0b97637c1e..5a5c7e3df4 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -1186,12 +1186,14 @@ def add_interfaces_to_vlan(tgen, input_dict):                              interface, vlan_intf, vlan                          )                          logger.info("[DUT: %s]: Running command: %s", dut, cmd) -                        rnode.run(cmd) +                        result = rnode.run(cmd) +                        logger.info("result %s", result)                          # Bringing interface up                          cmd = "ip link set {} up".format(vlan_intf)                          logger.info("[DUT: %s]: Running command: %s", dut, cmd) -                        rnode.run(cmd) +                        result = rnode.run(cmd) +                        logger.info("result %s", result)                          # Assigning IP address                          ifaddr = ipaddress.ip_interface( @@ -1204,7 +1206,8 @@ def add_interfaces_to_vlan(tgen, input_dict):                              ifaddr.version, vlan_intf, ifaddr                          )                          logger.info("[DUT: %s]: Running command: %s", dut, cmd) -                        rnode.run(cmd) +                        result = rnode.run(cmd) +                        logger.info("result %s", result)  def tcpdump_capture_start( @@ -2928,7 +2931,7 @@ def addKernelRoute(      Parameters:      -----------      * `tgen`  : Topogen object -    * `router`: router for which kernal routes needs to be added +    * `router`: router for which kernel routes needs to be added      * `intf`: interface name, for which kernel routes needs to be added      * `bindToAddress`: bind to <host>, an interface or multicast                         address @@ -2969,7 +2972,7 @@ def addKernelRoute(          output = rnode.run(cmd)          def check_in_kernel(rnode, verify_cmd, grp_addr, router): -            # Verifying if ip route added to kernal +            # Verifying if ip route added to kernel              errormsg = None              result = rnode.run(verify_cmd)              logger.debug("{}\n{}".format(verify_cmd, result)) @@ -4207,7 +4210,7 @@ def verify_bgp_community(tgen, addr_type, router, network, input_dict=None):  def get_ipv6_linklocal_address(topo, node, intf):      """ -    API to get the link local ipv6 address of a perticular interface +    API to get the link local ipv6 address of a particular interface      Parameters      ---------- diff --git a/tests/topotests/lib/ltemplate.py b/tests/topotests/lib/ltemplate.py index 18882285ed..2544023c43 100644 --- 
a/tests/topotests/lib/ltemplate.py +++ b/tests/topotests/lib/ltemplate.py @@ -93,7 +93,7 @@ class LTemplate:          # This is a sample of configuration loading.          router_list = tgen.routers() -        # For all registred routers, load the zebra configuration file +        # For all registered routers, load the zebra configuration file          for rname, router in router_list.items():              logger.info("Setting up %s" % rname)              for rd_val in TopoRouter.RD: diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py index 1f723eab93..b4a6358389 100644 --- a/tests/topotests/lib/pim.py +++ b/tests/topotests/lib/pim.py @@ -602,7 +602,7 @@ def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None, expected      return True -@retry(retry_timeout=40) +@retry(retry_timeout=40, diag_pct=0)  def verify_igmp_groups(tgen, dut, interface, group_addresses, expected=True):      """      Verify IGMP groups are received from an intended interface @@ -676,7 +676,7 @@ def verify_igmp_groups(tgen, dut, interface, group_addresses, expected=True):      return True -@retry(retry_timeout=60) +@retry(retry_timeout=60, diag_pct=0)  def verify_upstream_iif(      tgen,      dut, @@ -959,7 +959,7 @@ def verify_join_state_and_timer(      return True -@retry(retry_timeout=120) +@retry(retry_timeout=120, diag_pct=0)  def verify_ip_mroutes(      tgen,      dut, @@ -1164,7 +1164,7 @@ def verify_ip_mroutes(      return True if return_uptime == False else uptime_dict -@retry(retry_timeout=60) +@retry(retry_timeout=60, diag_pct=0)  def verify_pim_rp_info(      tgen,      topo, @@ -1329,7 +1329,7 @@ def verify_pim_rp_info(      return True -@retry(retry_timeout=60) +@retry(retry_timeout=60, diag_pct=0)  def verify_pim_state(      tgen,      dut, @@ -1516,7 +1516,7 @@ def get_pim_interface_traffic(tgen, input_dict):      return output_dict -@retry(retry_timeout=40) +@retry(retry_timeout=40, diag_pct=0)  def verify_pim_interface(      tgen, topo, dut, interface=None, interface_ip=None, expected=True  ): @@ -2295,7 +2295,7 @@ def verify_pim_grp_rp_source(      return errormsg -@retry(retry_timeout=60) +@retry(retry_timeout=60, diag_pct=0)  def verify_pim_bsr(tgen, topo, dut, bsr_ip, expected=True):      """      Verify all PIM interface are up and running, config is verified @@ -2351,7 +2351,7 @@ def verify_pim_bsr(tgen, topo, dut, bsr_ip, expected=True):      return True -@retry(retry_timeout=60) +@retry(retry_timeout=60, diag_pct=0)  def verify_ip_pim_upstream_rpf(      tgen, topo, dut, interface, group_addresses, rp=None, expected=True  ): @@ -2551,7 +2551,7 @@ def enable_disable_pim_bsm(tgen, router, intf, enable=True):      return result -@retry(retry_timeout=60) +@retry(retry_timeout=60, diag_pct=0)  def verify_ip_pim_join(      tgen, topo, dut, interface, group_addresses, src_address=None, expected=True  ): @@ -2644,7 +2644,7 @@ def verify_ip_pim_join(      return True -@retry(retry_timeout=60) +@retry(retry_timeout=60, diag_pct=0)  def verify_igmp_config(tgen, input_dict, stats_return=False, expected=True):      """      Verify igmp interface details, verifying following configs: @@ -2934,7 +2934,7 @@ def verify_igmp_config(tgen, input_dict, stats_return=False, expected=True):      return True if stats_return == False else igmp_stats -@retry(retry_timeout=60) +@retry(retry_timeout=60, diag_pct=0)  def verify_pim_config(tgen, input_dict, expected=True):      """      Verify pim interface details, verifying following configs: @@ -3060,7 +3060,7 @@ def verify_pim_config(tgen, 
input_dict, expected=True):      return True -@retry(retry_timeout=40) +@retry(retry_timeout=20, diag_pct=0)  def verify_multicast_traffic(tgen, input_dict, return_traffic=False, expected=True):      """      Verify multicast traffic by running @@ -3303,7 +3303,7 @@ def get_refCount_for_mroute(tgen, dut, iif, src_address, group_addresses):      return refCount -@retry(retry_timeout=40) +@retry(retry_timeout=40, diag_pct=0)  def verify_multicast_flag_state(      tgen, dut, src_address, group_addresses, flag, expected=True  ): @@ -3400,7 +3400,7 @@ def verify_multicast_flag_state(      return True -@retry(retry_timeout=40) +@retry(retry_timeout=40, diag_pct=0)  def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip, expected=True):      """      Verify all IGMP interface are up and running, config is verified diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index 4ed5b2f825..c04506f47e 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -443,7 +443,7 @@ class Topogen(object):      def start_router(self, router=None):          """          Call the router startRouter method. -        If no router is specified it is called for all registred routers. +        If no router is specified it is called for all registered routers.          """          if router is None:              # pylint: disable=r1704 diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index e786ae02cd..27b566a8f5 100644 --- a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -746,7 +746,7 @@ def proto_name_to_number(protocol):  def ip4_route(node):      """      Gets a structured return of the command 'ip route'. It can be used in -    conjuction with json_cmp() to provide accurate assert explanations. +    conjunction with json_cmp() to provide accurate assert explanations.      Return example:      { @@ -787,7 +787,7 @@ def ip4_route(node):  def ip4_vrf_route(node):      """      Gets a structured return of the command 'ip route show vrf {0}-cust1'. -    It can be used in conjuction with json_cmp() to provide accurate assert explanations. +    It can be used in conjunction with json_cmp() to provide accurate assert explanations.      Return example:      { @@ -831,7 +831,7 @@ def ip4_vrf_route(node):  def ip6_route(node):      """      Gets a structured return of the command 'ip -6 route'. It can be used in -    conjuction with json_cmp() to provide accurate assert explanations. +    conjunction with json_cmp() to provide accurate assert explanations.      Return example:      { @@ -871,7 +871,7 @@ def ip6_route(node):  def ip6_vrf_route(node):      """      Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'. -    It can be used in conjuction with json_cmp() to provide accurate assert explanations. +    It can be used in conjunction with json_cmp() to provide accurate assert explanations.      Return example:      { @@ -913,7 +913,7 @@ def ip6_vrf_route(node):  def ip_rules(node):      """      Gets a structured return of the command 'ip rule'. It can be used in -    conjuction with json_cmp() to provide accurate assert explanations. +    conjunction with json_cmp() to provide accurate assert explanations.      
Return example:      [ diff --git a/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py b/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py index 4ab160b52e..1762535cee 100644 --- a/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py +++ b/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py @@ -34,7 +34,7 @@ Tests covered in this suite  3.2 Verify if  no forwarding bit is set , FRR is not forwarding the      BSM to other PIM nbrs  3.3 Verify multicast BSM is sent to new router when unicast BSM is disabled -4.1 Verfiy BSM arrived on non bsm capable interface is dropped and +4.1 Verify BSM arrived on non bsm capable interface is dropped and      not processed  4.2 Verify group to RP info updated correctly in FRR node, after shut and      no-shut of BSM enable interfaces @@ -170,7 +170,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Don"t run this test if we have any failure. @@ -313,7 +313,7 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,      result = add_rp_interfaces_and_pim_config(tgen, topo, "lo", rp, rp_mapping)      assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) -    # Add kernal routes to sender and receiver +    # Add kernel routes to sender and receiver      for group, rp_list in rp_mapping.items():          mask = group.split("/")[1]          if int(mask) == 32: @@ -865,7 +865,7 @@ def test_new_router_fwd_p0(request):  def test_int_bsm_config_p1(request):      """ -    1. Verfiy BSM arrived on non bsm capable interface is dropped and +    1. Verify BSM arrived on non bsm capable interface is dropped and         not processed      2. Verify group to RP info updated correctly in FRR node, after shut and         no-shut of BSM enable interfaces diff --git a/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py b/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py index 5f641b5286..31cd8f9858 100644 --- a/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py +++ b/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py @@ -150,7 +150,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Don"t run this test if we have any failure. 
@@ -247,7 +247,7 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,          result = create_static_routes(tgen, input_dict)          assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) -    # Add kernal route for source +    # Add kernel route for source      group = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["pkt_dst"]      bsr_interface = topo["routers"][bsr]["links"][fhr]["interface"]      result = addKernelRoute(tgen, bsr, bsr_interface, group) @@ -260,18 +260,18 @@ def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr,      result = add_rp_interfaces_and_pim_config(tgen, topo, "lo", rp, rp_mapping)      assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) -    # Add kernal routes to sender and receiver +    # Add kernel routes to sender and receiver      for group, rp_list in rp_mapping.items():          mask = group.split("/")[1]          if int(mask) == 32:              group = group.split("/")[0] -        # Add kernal routes for sender +        # Add kernel routes for sender          s_interface = topo["routers"][sender]["links"][fhr]["interface"]          result = addKernelRoute(tgen, sender, s_interface, group)          assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) -        # Add kernal routes for receiver +        # Add kernel routes for receiver          r_interface = topo["routers"][receiver]["links"][lhr]["interface"]          result = addKernelRoute(tgen, receiver, r_interface, group)          assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) diff --git a/tests/topotests/multicast_pim_dr_nondr_test/__init__.py b/tests/topotests/multicast_pim_dr_nondr_test/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/multicast_pim_dr_nondr_test/__init__.py diff --git a/tests/topotests/multicast_pim_dr_nondr_test/pim_dr_nondr_with_ospf_topo2.json b/tests/topotests/multicast_pim_dr_nondr_test/pim_dr_nondr_with_ospf_topo2.json new file mode 100644 index 0000000000..4a4eb9a0f0 --- /dev/null +++ b/tests/topotests/multicast_pim_dr_nondr_test/pim_dr_nondr_with_ospf_topo2.json @@ -0,0 +1,195 @@ +{ +    "ipv4base": "10.0.0.0", +    "ipv4mask": 24, +    "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"}, +    "lo_prefix": {"ipv4": "1.0.", "v4mask": 32}, +    "switches": { +        "s1": { +            "links": { +                "i1": {"ipv4": "auto"}, +                "r1": {"ipv4": "auto", "pim": "enable"}, +                "r2": {"ipv4": "auto", "pim": "enable"} +            } +        } +    }, +    "routers": { +        "r1": { +            "links": { +                "lo": { +                    "ipv4": "auto", +                    "type": "loopback", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r4": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "neighbors": { +                    "r4": {} +                }, +                
"redistribute": [ +                    { +                        "redist_type": "static" +                    }, +                    { +                        "redist_type": "connected" +                    } +                ] +            } +        }, +        "r2": { +            "links": { +                "lo": { +                    "ipv4": "auto", +                    "type": "loopback", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r4": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "neighbors": { +                    "r4": {} +                }, +                "redistribute": [ +                    { +                        "redist_type": "static" +                    }, +                    { +                        "redist_type": "connected" +                    } +                ] +            } +        }, +        "r4": { +            "links": { +                "lo": { +                    "ipv4": "auto", +                    "type": "loopback", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r1": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r2": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r5": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "neighbors": { +                    "r1": {}, +                    "r2": {}, +                    "r5": {} +                }, +                "redistribute": [ +                    { +                        "redist_type": "static" +                    }, +                    { +                        "redist_type": "connected" +                    } +                ] +            } +        }, +        "r5": { +            "links": { +                "lo": { +                    "ipv4": "auto", +                    "type": "loopback", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r4": { +                 
   "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "i2": { +                    "ipv4": "auto", +                    "pim": "enable" +                } +            }, +            "ospf": { +                "neighbors": { +                    "r4": {} +                }, +                "redistribute": [ +                    { +                        "redist_type": "static" +                    }, +                    { +                        "redist_type": "connected" +                    } +                ] +            } +        }, +        "i1": { +            "links": { +                "lo": {"ipv4": "auto", "type": "loopback"} +            } +        }, +        "i2": { +            "links": { +                "r5": {"ipv4": "auto"} +            } +        } +    } +} diff --git a/tests/topotests/multicast_pim_dr_nondr_test/pim_dr_nondr_with_static_routes_topo1.json b/tests/topotests/multicast_pim_dr_nondr_test/pim_dr_nondr_with_static_routes_topo1.json new file mode 100644 index 0000000000..9d7d766536 --- /dev/null +++ b/tests/topotests/multicast_pim_dr_nondr_test/pim_dr_nondr_with_static_routes_topo1.json @@ -0,0 +1,85 @@ +{ +    "ipv4base": "10.0.0.0", +    "ipv4mask": 24, +    "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"}, +    "lo_prefix": {"ipv4": "1.0.", "v4mask": 32}, +    "switches": { +        "s1": { +            "links": { +                "i1": {"ipv4": "auto"}, +                "r1": {"ipv4": "auto", "pim": "enable"}, +                "r2": {"ipv4": "auto", "pim": "enable"}, +                "r3": {"ipv4": "auto", "pim": "enable"} +            } +        } +    }, +    "routers": { +        "r1": { +            "links": { +                "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, +                "r4": {"ipv4": "auto", "pim": "enable"} +            }, +            "static_routes": [{ +                "network": ["10.0.0.1/24", "1.0.5.17/32"], +                "next_hop": "10.0.1.2" +            }] +        }, +        "r2": { +            "links": { +                "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, +                "r4": {"ipv4": "auto", "pim": "enable"} +            }, +            "static_routes": [{ +                "network": ["10.0.0.1/24", "1.0.5.17/32"], +                "next_hop": "10.0.2.2" +            }] +        }, +        "r3": { +            "links": { +                "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"} +            }, +            "static_routes": [{ +                "network": ["10.0.0.1/24", "1.0.5.17/32"], +                "next_hop": "10.1.1.1" +            }] +        }, +        "r4": { +            "links": { +                "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, +                "r1": {"ipv4": "auto", "pim": "enable"}, +                "r2": {"ipv4": "auto", "pim": "enable"}, +                "r5": {"ipv4": "auto", "pim": "enable"} +            }, +            "static_routes": [ +            { +                "network": ["10.1.1.4/24"], +                "next_hop": "10.0.2.1" +            }, +            { +                "network": ["10.0.0.1/24"], +                "next_hop": "10.0.3.2" +            }] +        }, +        "r5": { +            "links": { +                "lo": 
{"ipv4": "auto", "type": "loopback", "pim": "enable"}, +                "r4": {"ipv4": "auto", "pim": "enable"}, +                "i2": {"ipv4": "auto", "pim": "enable"} +            }, +            "static_routes": [{ +                "network": ["10.1.1.4/24", "1.0.5.17/32"], +                "next_hop": "10.0.3.1" +            }] +        }, +        "i1": { +            "links": { +                "lo": {"ipv4": "auto", "type": "loopback"} +            } +        }, +        "i2": { +            "links": { +                "r5": {"ipv4": "auto"} +            } +        } +    } +} diff --git a/tests/topotests/multicast_pim_dr_nondr_test/pim_dr_nondr_with_transit_router_topo3.json b/tests/topotests/multicast_pim_dr_nondr_test/pim_dr_nondr_with_transit_router_topo3.json new file mode 100644 index 0000000000..f078657862 --- /dev/null +++ b/tests/topotests/multicast_pim_dr_nondr_test/pim_dr_nondr_with_transit_router_topo3.json @@ -0,0 +1,241 @@ +{ +    "ipv4base": "10.0.0.0", +    "ipv4mask": 24, +    "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"}, +    "lo_prefix": {"ipv4": "1.0.", "v4mask": 32}, +    "switches": { +        "s1": { +            "links": { +                "i1": {"ipv4": "auto"}, +                "r1": {"ipv4": "auto", "pim": "enable"}, +                "r2": {"ipv4": "auto", "pim": "enable"} +            } +        } +    }, +    "routers": { +        "r1": { +            "links": { +                "lo": { +                    "ipv4": "auto", +                    "type": "loopback", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r6": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "neighbors": { +                    "r6": {} +                }, +                "redistribute": [ +                    { +                        "redist_type": "static" +                    }, +                    { +                        "redist_type": "connected" +                    } +                ] +            } +        }, +        "r2": { +            "links": { +                "lo": { +                    "ipv4": "auto", +                    "type": "loopback", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r6": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "neighbors": { +                    "r6": {} +                }, +                "redistribute": [ +                    { +                        "redist_type": "static" +                    }, +                    { +                        "redist_type": "connected" +                    } + 
               ] +            } +        }, +        "r4": { +            "links": { +                "lo": { +                    "ipv4": "auto", +                    "type": "loopback", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r6": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r5": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "neighbors": { +                    "r6": {}, +                    "r5": {} +                }, +                "redistribute": [ +                    { +                        "redist_type": "static" +                    }, +                    { +                        "redist_type": "connected" +                    } +                ] +            } +        }, +        "r5": { +            "links": { +                "lo": { +                    "ipv4": "auto", +                    "type": "loopback", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r4": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "i2": { +                    "ipv4": "auto", +                    "pim": "enable" +                } +            }, +            "ospf": { +                "neighbors": { +                    "r4": {} +                }, +                "redistribute": [ +                    { +                        "redist_type": "static" +                    }, +                    { +                        "redist_type": "connected" +                    } +                ] +            } +        }, +        "r6": { +            "links": { +                "lo": { +                    "ipv4": "auto", +                    "type": "loopback", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r1": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r2": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": 
"0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                }, +                "r4": { +                    "ipv4": "auto", +                    "pim": "enable", +                    "ospf": { +                        "area": "0.0.0.0", +                        "hello_interval": 1, +                        "dead_interval": 4 +                    } +                } +            }, +            "ospf": { +                "neighbors": { +                    "r1": {}, +                    "r2": {}, +                    "r4": {} +                }, +                "redistribute": [ +                    { +                        "redist_type": "static" +                    }, +                    { +                        "redist_type": "connected" +                    } +                ] +            } +        }, +        "i1": { +            "links": { +                "lo": {"ipv4": "auto", "type": "loopback"} +            } +        }, +        "i2": { +            "links": { +                "r5": {"ipv4": "auto"} +            } +        } +    } +} diff --git a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_ospf_topo2.py b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_ospf_topo2.py new file mode 100755 index 0000000000..1adc034206 --- /dev/null +++ b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_ospf_topo2.py @@ -0,0 +1,1135 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test multicast pim sm: + +Test steps +- Create topology (setup module) +- Bring up topology + +Following tests are covered: +1. Configure IGMP local join on DR and non DR +""" + +import os +import sys +import json +import time +import datetime +from time import sleep +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# Required to instantiate the topology builder class. 
+ +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen + +from lib.common_config import ( +    start_topology, +    write_test_header, +    write_test_footer, +    step, +    reset_config_on_routers, +    shutdown_bringup_interface, +    apply_raw_config, +    add_interfaces_to_vlan, +    kill_router_daemons, +    start_router_daemons, +    create_static_routes, +    check_router_status, +    topo_daemons, +    required_linux_kernel_version, +) +from lib.pim import ( +    create_pim_config, +    create_igmp_config, +    verify_ip_mroutes, +    clear_ip_mroute, +    clear_ip_pim_interface_traffic, +    verify_pim_config, +    verify_upstream_iif, +    verify_multicast_traffic, +    verify_multicast_flag_state, +    verify_igmp_groups, +    McastTesterHelper +) +from lib.topolog import logger +from lib.topojson import build_config_from_json + +HELLO_TIMER = 1 +HOLD_TIMER = 3 + +pytestmark = [pytest.mark.pimd] + +TOPOLOGY = """ + +Descripton: Configuring OSPF on r1/r2/r4/r5 for RP reachablility. +IPs are assigned automatically to routers, start IP and subnet is defined in respective JSON file +JSON snippet: +    "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24}, + +                               r5 ------- i2 +                    10.0.3.2/24| 10.0.0.2/24 +                               | +                    10.0.3.1/24| +                  ------------ r4 ---------- +                |   10.0.1.2/24  10.0.2.2/24 | +    10.0.1.1/24 |                            | 10.0.2.1/24 +                r1 ----------- s1 ---------- r2 +                  10.0.4.2/24  | 10.0.4.3/24 +                               | +                               |10.0.4.1/24 +                               i1 + +    Description: +    i1, i2  - FRR running iperf to send IGMP +                                     join and traffic +    r1, r2, r4, r5 - FRR router +    s1 - OVS switch +""" + +# Global variables +VLAN_1 = 2501 +GROUP_RANGE = "225.0.0.0/8" +IGMP_JOIN = "225.1.1.1" +VLAN_INTF_ADRESS_1 = "10.0.8.3/24" +SAME_VLAN_IP_1 = {"ip": "10.1.1.1", "subnet": "255.255.255.0", "cidr": "24"} +SAME_VLAN_IP_2 = {"ip": "10.1.1.2", "subnet": "255.255.255.0", "cidr": "24"} +SAME_VLAN_IP_3 = {"ip": "10.1.1.3", "subnet": "255.255.255.0", "cidr": "24"} +SAME_VLAN_IP_4 = {"ip": "10.1.1.4", "subnet": "255.255.255.0", "cidr": "24"} +GROUP_RANGE_1 = ["225.1.1.1/32", "225.1.1.2/32"] +IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2"] +GROUP_RANGE_2 = ["226.1.1.1/32", "226.1.1.2/32"] +IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2"] +GROUP_RANGE_3 = ["227.1.1.1/32", "227.1.1.2/32"] +IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2"] + +intf_r1_s1 = None +intf_r1_s1_addr = None +intf_r2_s1 = None +intf_r2_s1_addr = None +intf_r3_s1 = None +intf_r3_s1_addr = None +intf_i1_s1 = None +intf_i1_s1_addr = None + + +def setup_module(mod): +    """ +    Sets up the pytest environment + +    * `mod`: module name +    """ + +    # Required linux kernel version for this suite to run. 
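+    # (the check just below skips this whole suite when the running kernel is older than 4.19)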
+    result = required_linux_kernel_version("4.19") +    if result is not True: +        pytest.skip("Kernel requirements are not met") + +    testsuite_run_time = time.asctime(time.localtime(time.time())) +    logger.info("Testsuite start time: {}".format(testsuite_run_time)) +    logger.info("=" * 40) +    logger.info("Master Topology: \n {}".format(TOPOLOGY)) + +    logger.info("Running setup_module to create topology") + +    testdir = os.path.dirname(os.path.realpath(__file__)) +    json_file = "{}/pim_dr_nondr_with_ospf_topo2.json".format(testdir) +    tgen = Topogen(json_file, mod.__name__) +    global topo +    topo = tgen.json_topo +    # ... and here it calls Mininet initialization functions. + +    # get list of daemons needs to be started for this suite. +    daemons = topo_daemons(tgen, tgen.json_topo) + +    # Starting topology, create tmp files which are loaded to routers +    #  to start deamons and then start routers +    start_topology(tgen, daemons) + +    # Don"t run this test if we have any failure. +    if tgen.routers_have_failure(): +        pytest.skip(tgen.errors) + +    # Creating configuration from JSON +    build_config_from_json(tgen, tgen.json_topo) + +    # XXX Replace this using "with McastTesterHelper()... " in each test if possible. +    global app_helper +    app_helper = McastTesterHelper(tgen) + +    logger.info("Running setup_module() done") + + +def teardown_module(): +    """Teardown the pytest environment""" + +    logger.info("Running teardown_module to delete topology") + +    tgen = get_topogen() + +    app_helper.cleanup() + +    # Stop toplogy and Remove tmp files +    tgen.stop_topology() + +    logger.info( +        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) +    ) +    logger.info("=" * 40) + + +##################################################### +# +#   Local APIs +# +##################################################### + + +def pre_config_for_receiver_dr_tests( +    tgen, topo, tc_name, highest_priority, lowest_priority +): +    """ +    API to do common pre-configuration for receiver test cases + +    parameters: +    ----------- +    * `tgen`: topogen object +    * `topo`: input json data +    * `tc_name`: caller test case name +    * `highest_priority`: router which will be having highest DR priority +    * `lowest_priority`: router which will be having lowest DR priority +    """ + +    global intf_r1_s1, intf_r1_s1_addr, intf_r2_s1, intf_r2_s1_addr, intf_i1_s1, intf_i1_s1_addr + +    step("Configure IGMP and PIM on switch connected receiver nodes") +    step("Configure PIM on all upstream interfaces") + +    step("Configure link between R1, R2 ,R3 and receiver on" " same vlan") +    step( +        "Make sure {0} is DR initially configuring highest IP on {0} and R2 " +        "second highest, {1} is lower".format(highest_priority, lowest_priority) +    ) + +    intf_r1_s1 = topo["routers"]["r1"]["links"]["s1"]["interface"] +    intf_r1_s1_addr = topo["routers"]["r1"]["links"]["s1"]["ipv4"] + +    intf_r2_s1 = topo["routers"]["r2"]["links"]["s1"]["interface"] +    intf_r2_s1_addr = topo["routers"]["r2"]["links"]["s1"]["ipv4"] + +    intf_i1_s1 = topo["routers"]["i1"]["links"]["s1"]["interface"] +    intf_i1_s1_addr = topo["routers"]["i1"]["links"]["s1"]["ipv4"] + +    if lowest_priority == "r1": +        lowest_pr_intf = intf_r1_s1 +    else: +        lowest_pr_intf = intf_r2_s1 + +    if highest_priority == "r1": +        highest_pr_intf = intf_r1_s1 +    else: +        highest_pr_intf = intf_r2_s1 + 
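+    # Place the lowest/highest DR-priority candidate interfaces and the receiver
+    # i1 interface into the same VLAN (VLAN_1) so they share one LAN segment.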
+    vlan_input = { +        lowest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        lowest_pr_intf: { +                            "ip": SAME_VLAN_IP_1["ip"], +                            "subnet": SAME_VLAN_IP_1["subnet"], +                        } +                    } +                ] +            } +        }, +        highest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        highest_pr_intf: { +                            "ip": SAME_VLAN_IP_2["ip"], +                            "subnet": SAME_VLAN_IP_2["subnet"], +                        } +                    } +                ] +            } +        }, +        "i1": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_i1_s1: { +                            "ip": SAME_VLAN_IP_4["ip"], +                            "subnet": SAME_VLAN_IP_4["subnet"], +                        } +                    } +                ] +            } +        }, +    } + +    add_interfaces_to_vlan(tgen, vlan_input) + +    raw_config = { +        "r1": { +            "raw_config": [ +                "interface {}".format(intf_r1_s1), +                "no ip address {}".format(intf_r1_s1_addr), +                "no ip pim", +            ] +        }, +        "r2": { +            "raw_config": [ +                "interface {}".format(intf_r2_s1), +                "no ip address {}".format(intf_r2_s1_addr), +                "no ip pim", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}".format(intf_i1_s1), +                "no ip address {}".format(intf_i1_s1_addr), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    raw_config = { +        lowest_priority: { +            "raw_config": [ +                "interface {}.{}".format(lowest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_1["ip"], SAME_VLAN_IP_1["cidr"]), +                "ip pim", +                "ip igmp", +                "ip igmp version 2", +            ] +        }, +        highest_priority: { +            "raw_config": [ +                "interface {}.{}".format(highest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_2["ip"], SAME_VLAN_IP_2["cidr"]), +                "ip pim", +                "ip igmp", +                "ip igmp version 2", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}.{}".format(intf_i1_s1, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_4["ip"], SAME_VLAN_IP_4["cidr"]), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip pim hello {} {}".format(HELLO_TIMER, HOLD_TIMER), +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure R4 as RP on all the nodes for group range 224.0.0.0/24") + +    
input_dict = { +        "r4": { +            "pim": { +                "rp": [ +                    { +                        "rp_addr": topo["routers"]["r4"]["links"]["lo"]["ipv4"].split( +                            "/" +                        )[0], +                        "group_addr_range": GROUP_RANGE_1, +                    } +                ] +            } +        } +    } + +    result = create_pim_config(tgen, topo, input_dict) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Send IGMP join for groups 226.1.1.1 to 226.1.1.5") + +    vlan_intf_i1_s1 = "{}.{}".format(intf_i1_s1, VLAN_1) +    result = app_helper.run_join("i1", IGMP_JOIN_RANGE_1, join_intf=vlan_intf_i1_s1) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step("Enable OSPF between r1 and r2") + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip ospf area 0.0.0.0", +                    "ip ospf dead-interval 4", +                    "ip ospf hello-interval 1", +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Start traffic from R4 connected source") + +    result = app_helper.run_traffic("i2", IGMP_JOIN_RANGE_1, "r5") +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    return True + + +def pre_config_for_source_dr_tests( +    tgen, topo, tc_name, highest_priority, lowest_priority +): +    """ +    API to do common pre-configuration for source test cases + +    parameters: +    ----------- +    * `tgen`: topogen object +    * `topo`: input json data +    * `tc_name`: caller test case name +    * `highest_priority`: router which will be having highest DR priority +    * `lowest_priority`: router which will be having lowest DR priority +    """ + +    global intf_r1_s1, intf_r1_s1_addr, intf_r2_s1, intf_r2_s1_addr, intf_i1_s1, intf_i1_s1_addr + +    step("Configure IGMP and PIM on switch connected receiver nodes") +    step("Configure PIM on all upstream interfaces") + +    step("Configure link between R1, R2 ,R3 and receiver on" " same vlan") +    step( +        "Make sure {0} is DR initially configuring highest IP on {0} and R2 " +        "second highest, {1} is lower".format(highest_priority, lowest_priority) +    ) + +    intf_r1_s1 = topo["routers"]["r1"]["links"]["s1"]["interface"] +    intf_r1_s1_addr = topo["routers"]["r1"]["links"]["s1"]["ipv4"] + +    intf_r2_s1 = topo["routers"]["r2"]["links"]["s1"]["interface"] +    intf_r2_s1_addr = topo["routers"]["r2"]["links"]["s1"]["ipv4"] + +    intf_i1_s1 = topo["routers"]["i1"]["links"]["s1"]["interface"] +    intf_i1_s1_addr = topo["routers"]["i1"]["links"]["s1"]["ipv4"] + +    if lowest_priority == "r1": +        lowest_pr_intf = intf_r1_s1 +    else: +        lowest_pr_intf = intf_r2_s1 + +    if highest_priority == "r1": +        highest_pr_intf = intf_r1_s1 +    else: +        highest_pr_intf = intf_r2_s1 + +    vlan_input = { +        lowest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        lowest_pr_intf: { +                            "ip": SAME_VLAN_IP_1["ip"], +                            "subnet": SAME_VLAN_IP_1["subnet"], +                    
    } +                    } +                ] +            } +        }, +        highest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        highest_pr_intf: { +                            "ip": SAME_VLAN_IP_2["ip"], +                            "subnet": SAME_VLAN_IP_2["subnet"], +                        } +                    } +                ] +            } +        }, +        "i1": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_i1_s1: { +                            "ip": SAME_VLAN_IP_4["ip"], +                            "subnet": SAME_VLAN_IP_4["subnet"], +                        } +                    } +                ] +            } +        }, +    } + +    add_interfaces_to_vlan(tgen, vlan_input) + +    raw_config = { +        "r1": { +            "raw_config": [ +                "interface {}".format(intf_r1_s1), +                "no ip address {}".format(intf_r1_s1_addr), +                "no ip pim", +            ] +        }, +        "r2": { +            "raw_config": [ +                "interface {}".format(intf_r2_s1), +                "no ip address {}".format(intf_r2_s1_addr), +                "no ip pim", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}".format(intf_i1_s1), +                "no ip address {}".format(intf_i1_s1_addr), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step( +        "Configure IGMP and PIM on switch connected receiver nodes , " +        "configure PIM nbr with hello timer 1" +    ) + +    raw_config = { +        lowest_priority: { +            "raw_config": [ +                "interface {}.{}".format(lowest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_1["ip"], SAME_VLAN_IP_1["cidr"]), +                "ip pim", +            ] +        }, +        highest_priority: { +            "raw_config": [ +                "interface {}.{}".format(highest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_2["ip"], SAME_VLAN_IP_2["cidr"]), +                "ip pim", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}.{}".format(intf_i1_s1, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_4["ip"], SAME_VLAN_IP_4["cidr"]), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip pim hello {} {}".format(HELLO_TIMER, HOLD_TIMER), +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure R4 as RP on all the nodes for group range 224.0.0.0/24") + +    input_dict = { +        "r4": { +            "pim": { +                "rp": [ +                    { +                        "rp_addr": topo["routers"]["r4"]["links"]["lo"]["ipv4"].split( +                            "/" +                        )[0], +                        "group_addr_range": 
GROUP_RANGE_1, +                    } +                ] +            } +        } +    } + +    result = create_pim_config(tgen, topo, input_dict) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure IGMP on R5 port and send IGMP join for groups " "(226.1.1.1-5)") + +    intf_r5_i2 = topo["routers"]["r5"]["links"]["i2"]["interface"] +    input_dict = { +        "r5": {"igmp": {"interfaces": {intf_r5_i2: {"igmp": {"version": "2"}}}}} +    } +    result = create_igmp_config(tgen, topo, input_dict) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    result = app_helper.run_join("i2", IGMP_JOIN_RANGE_1, "r5") +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step("Enable OSPF between r1 and r2") + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip ospf area 0.0.0.0", +                    "ip ospf dead-interval 4", +                    "ip ospf hello-interval 1", +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Start traffic from Source node") + +    vlan_intf_i1_s1 = "{}.{}".format(intf_i1_s1, VLAN_1) +    result = app_helper.run_traffic("i1", IGMP_JOIN_RANGE_1, bind_intf=vlan_intf_i1_s1) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    return True + + +##################################################### +# +#   Testcases +# +##################################################### + + +def test_configuring_igmp_local_join_on_reciever_dr_non_dr_nodes_p1(request): +    """ +    Configure IGMP local join on DR and non DR +    """ + +    tgen = get_topogen() +    tc_name = request.node.name +    write_test_header(tc_name) + +    # Creating configuration from JSON +    app_helper.stop_all_hosts() +    clear_ip_mroute(tgen) +    check_router_status(tgen) +    reset_config_on_routers(tgen) +    clear_ip_pim_interface_traffic(tgen, topo) + +    # Don"t run this test if we have any failure. 
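+    # (the calls above clear mroutes and PIM interface counters and reset the
+    # router configuration so this test case starts from a clean state)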
+    if tgen.routers_have_failure(): +        pytest.skip(tgen.errors) + +    step("Configure IGMP and PIM on switch connected receiver nodes") +    step("Configure PIM on all upstream interfaces") + +    step("Configure link between R1, R2 ,R3 and receiver on" " same vlan") +    step( +        "Make sure R1 is DR initially configuring highest IP on R1 and R2 " +        "second highest, R1 is lower" +    ) + +    intf_r1_s1 = topo["routers"]["r1"]["links"]["s1"]["interface"] +    intf_r1_s1_addr = topo["routers"]["r1"]["links"]["s1"]["ipv4"] + +    intf_r2_s1 = topo["routers"]["r2"]["links"]["s1"]["interface"] +    intf_r2_s1_addr = topo["routers"]["r2"]["links"]["s1"]["ipv4"] + +    intf_i1_s1 = topo["routers"]["i1"]["links"]["s1"]["interface"] +    intf_i1_s1_addr = topo["routers"]["i1"]["links"]["s1"]["ipv4"] + +    vlan_input = { +        "r1": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_r1_s1: { +                            "ip": SAME_VLAN_IP_1["ip"], +                            "subnet": SAME_VLAN_IP_1["subnet"], +                        } +                    } +                ] +            } +        }, +        "r2": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_r2_s1: { +                            "ip": SAME_VLAN_IP_2["ip"], +                            "subnet": SAME_VLAN_IP_2["subnet"], +                        } +                    } +                ] +            } +        }, +        "i1": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_i1_s1: { +                            "ip": SAME_VLAN_IP_4["ip"], +                            "subnet": SAME_VLAN_IP_4["subnet"], +                        } +                    } +                ] +            } +        }, +    } + +    add_interfaces_to_vlan(tgen, vlan_input) + +    raw_config = { +        "r1": { +            "raw_config": [ +                "interface {}".format(intf_r1_s1), +                "no ip address {}".format(intf_r1_s1_addr), +                "no ip pim", +            ] +        }, +        "r2": { +            "raw_config": [ +                "interface {}".format(intf_r2_s1), +                "no ip address {}".format(intf_r2_s1_addr), +                "no ip pim", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}".format(intf_i1_s1), +                "no ip address {}".format(intf_i1_s1_addr), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    raw_config = { +        "r1": { +            "raw_config": [ +                "interface {}.{}".format(intf_r1_s1, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_1["ip"], SAME_VLAN_IP_1["cidr"]), +                "ip pim", +                "ip igmp", +                "ip igmp version 2", +            ] +        }, +        "r2": { +            "raw_config": [ +                "interface {}.{}".format(intf_r2_s1, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_2["ip"], SAME_VLAN_IP_2["cidr"]), +                "ip pim", +                "ip igmp", +                "ip igmp version 2", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}.{}".format(intf_i1_s1, VLAN_1), +                "ip address 
{}/{}".format(SAME_VLAN_IP_4["ip"], SAME_VLAN_IP_4["cidr"]), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip pim hello {} {}".format(HELLO_TIMER, HOLD_TIMER), +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure R4 as RP on all the nodes for group range 224.0.0.0/24") + +    input_dict = { +        "r4": { +            "pim": { +                "rp": [ +                    { +                        "rp_addr": topo["routers"]["r4"]["links"]["lo"]["ipv4"].split( +                            "/" +                        )[0], +                        "group_addr_range": GROUP_RANGE_1 + GROUP_RANGE_3, +                    } +                ] +            } +        } +    } + +    result = create_pim_config(tgen, topo, input_dict) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Send IGMP local join for groups 226.1.1.1 to 226.1.1.5") + +    vlan_intf_r1_s1 = "{}.{}".format(intf_r1_s1, VLAN_1) +    input_dict = { +        "r1": { +            "igmp": { +                "interfaces": { +                    vlan_intf_r1_s1: { +                        "igmp": {"version": "2", "join": IGMP_JOIN_RANGE_1} +                    } +                } +            } +        } +    } + +    result = create_igmp_config(tgen, topo, input_dict) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step("Enable OSPF between all the nodes") + +    step("Configure local join on R1 for group range (227.1.1.1)") + +    vlan_intf_r1_s1 = "{}.{}".format(intf_r1_s1, VLAN_1) +    input_dict = { +        "r1": { +            "igmp": { +                "interfaces": { +                    vlan_intf_r1_s1: { +                        "igmp": {"version": "2", "join": IGMP_JOIN_RANGE_3} +                    } +                } +            } +        } +    } + +    result = create_igmp_config(tgen, topo, input_dict) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step("Start traffic from R4 connected source") + +    input_src = {"i2": topo["routers"]["i2"]["links"]["r5"]["interface"]} + +    result = app_helper.run_traffic("i2", IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_3, "r5") +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("R1, R2 and R2 has IGMP groups for 226.x.x.x and 227.1.1.1 groups") + +    intf_r1_s1 = "{}.{}".format( +        topo["routers"]["r1"]["links"]["s1"]["interface"], VLAN_1 +    ) +    intf_r2_s1 = "{}.{}".format( +        topo["routers"]["r2"]["links"]["s1"]["interface"], VLAN_1 +    ) + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        result = verify_igmp_groups( +            tgen, dut, intf, IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_3 +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("R1 is DR, R2 has 226.x.x.x and 227.1.1.1 (*,G) mroute with SC flag") +    step("(S,G) mroute for 226.1.1.1 group present on R2") + +    source_i2 = 
topo["routers"]["i2"]["links"]["r5"]["ipv4"].split("/")[0] +    input_dict_r2 = [ +        { +            "dut": "r2", +            "src_address": "*", +            "iif": topo["routers"]["r2"]["links"]["r4"]["interface"], +            "oil": "{}.{}".format( +                topo["routers"]["r2"]["links"]["s1"]["interface"], VLAN_1 +            ), +        } +    ] + +    for data in input_dict_r2: +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_3, +            data["iif"], +            data["oil"], +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    for dut, flag in zip(["r2"], ["SC"]): +        step("{} has (*,G) flag as {}".format(dut, flag)) +        result = verify_multicast_flag_state(tgen, dut, "*", IGMP_JOIN_RANGE_1, flag) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Delete local join from DR node") +    input_dict = { +        "r1": { +            "igmp": { +                "interfaces": { +                    vlan_intf_r1_s1: { +                        "igmp": { +                            "version": "2", +                            "join": IGMP_JOIN_RANGE_3, +                            "delete_attr": True, +                        } +                    } +                } +            } +        } +    } + +    result = create_igmp_config(tgen, topo, input_dict) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step( +        "After removing local join 227.1.1.1 group removed from IGMP join " +        "of R1, R2 node , using 'show ip igmp groups json'" +    ) + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        result = verify_igmp_groups(tgen, dut, intf, IGMP_JOIN_RANGE_3, expected=False) +        assert result is not True, ( +            "Testcase {} : Failed \n " +            "IGMP groups are still present \n Error: {}".format(tc_name, result) +        ) + +    step("(*,G) mroute for 227.1.1.1 group removed from R1 node") +    step( +        "After remove of local join from R1 and R2 node verify (*,G) and (S,G) " +        "mroutes should not present on R1, R2 and R3 nodes" +    ) + +    for data in input_dict_r2: +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1, +            data["iif"], +            data["oil"], +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_3, +            data["iif"], +            data["oil"], +            expected=False, +        ) +        assert result is not True, ( +            "Testcase {} : Failed \n " +            "mroutes are still present \n Error: {}".format(tc_name, result) +        ) + +    step("Configure local join on R2 for group range (227.1.1.1)") + +    input_dict = { +        "r2": { +            "igmp": { +                "interfaces": { +                    intf_r2_s1: {"igmp": {"version": "2", "join": IGMP_JOIN_RANGE_3}} +                } +            } +        } +    } + +    result = create_igmp_config(tgen, topo, input_dict) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step( +        "After 
configuring local join on R2 non DR node, IGMP groups for 26.x.x.x and " +        "227.1.1.1 present on all the nodes" +    ) + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        result = verify_igmp_groups( +            tgen, dut, intf, IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_3 +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("R2 has 227.1.1.1 (*,G) mroute with SC flag") + +    for dut, flag in zip(["r2"], ["SC"]): +        step("{} has (*,G) flag as {}".format(dut, flag)) +        result = verify_multicast_flag_state(tgen, dut, "*", IGMP_JOIN_RANGE_3, flag) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure local join on R1 for group range (227.1.1.1)") + +    input_dict = { +        "r1": { +            "igmp": { +                "interfaces": { +                    vlan_intf_r1_s1: { +                        "igmp": {"version": "2", "join": IGMP_JOIN_RANGE_3} +                    } +                } +            } +        } +    } + +    result = create_igmp_config(tgen, topo, input_dict) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step( +        "After configuring 227.1.1.1 on R1 node, verify no change on IGMP groups on all the nodes" +    ) + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        result = verify_igmp_groups( +            tgen, dut, intf, IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_3 +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("R2 has 227.1.1.1 (*,G) mroute with SC flag") + +    step("r2 has (*,G) flag as SC") +    result = verify_multicast_flag_state(tgen, "r2", "*", IGMP_JOIN_RANGE_3, "SC") +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("R1 should not have (*,G) join and (S,G) join present") + +    input_dict_r1 = [ +        { +            "dut": "r1", +            "src_address": "*", +            "iif": topo["routers"]["r1"]["links"]["r4"]["interface"], +            "oil": "{}.{}".format( +                topo["routers"]["r1"]["links"]["s1"]["interface"], VLAN_1 +            ), +        }, +        { +            "dut": "r1", +            "src_address": source_i2, +            "iif": topo["routers"]["r1"]["links"]["r4"]["interface"], +            "oil": "{}.{}".format( +                topo["routers"]["r1"]["links"]["s1"]["interface"], VLAN_1 +            ), +        }, +    ] + +    for data in input_dict_r1: +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_3, +            data["iif"], +            data["oil"], +            expected=False, +        ) +        assert result is not True, ( +            "Testcase {} : Failed \n " +            "Mroutes are still present \n Error: {}".format(tc_name, result) +        ) + +    step("Remove local join from DR and Non DR node") + +    input_dict = { +        "r1": { +            "igmp": { +                "interfaces": { +                    vlan_intf_r1_s1: { +                        "igmp": { +                            "version": "2", +                            "join": IGMP_JOIN_RANGE_3, +                            "delete_attr": True, +                        } +                    } +                } +            } +        }, +        "r2": { +            "igmp": { +             
   "interfaces": { +                    intf_r2_s1: { +                        "igmp": { +                            "version": "2", +                            "join": IGMP_JOIN_RANGE_3, +                            "delete_attr": True, +                        } +                    } +                } +            } +        }, +    } + +    result = create_igmp_config(tgen, topo, input_dict) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step( +        "After remove of local join from R1 and R2 node verify (*,G) and (S,G) mroutes " +        "should not present on R1, R2 nodes" +    ) + +    for data in input_dict_r1: +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_3, +            data["iif"], +            data["oil"], +            expected=False, +        ) +        assert result is not True, ( +            "Testcase {} : Failed \n " +            "Mroutes are still present \n Error: {}".format(tc_name, result) +        ) + +    for data in input_dict_r2: +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1 + IGMP_JOIN_RANGE_3, +            data["iif"], +            data["oil"], +            expected=False, +        ) +        assert result is not True, ( +            "Testcase {} : Failed \n " +            "Mroutes are still present \n Error: {}".format(tc_name, result) +        ) + +    write_test_footer(tc_name) + + +if __name__ == "__main__": +    args = ["-s"] + sys.argv[1:] +    sys.exit(pytest.main(args)) diff --git a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_static_routes_topo1.py b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_static_routes_topo1.py new file mode 100755 index 0000000000..aa7448f354 --- /dev/null +++ b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_static_routes_topo1.py @@ -0,0 +1,934 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test multicast pim sm: + +Test steps +- Create topology (setup module) +- Bring up topology + +Following tests are covered: +1. Verify mroute while rebooting DR /Non DR nodes( r1, r2 , r3 on all the nodes) +""" + +import os +import sys +import json +import time +import datetime +from time import sleep +import pytest + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# Required to instantiate the topology builder class. + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen + +from lib.common_config import ( +    start_topology, +    write_test_header, +    write_test_footer, +    step, +    reset_config_on_routers, +    apply_raw_config, +    add_interfaces_to_vlan, +    stop_router, +    start_router, +    check_router_status, +    topo_daemons, +    required_linux_kernel_version, +) +from lib.pim import ( +    create_pim_config, +    create_igmp_config, +    verify_ip_mroutes, +    clear_ip_mroute, +    clear_ip_pim_interface_traffic, +    verify_pim_config, +    verify_upstream_iif, +    verify_multicast_flag_state, +    McastTesterHelper, +) +from lib.topolog import logger +from lib.topojson import build_config_from_json + +HELLO_TIMER = 1 +HOLD_TIMER = 3 + +pytestmark = [pytest.mark.pimd] + +TOPOLOGY = """ + +Descripton: Configuring static routes on r1/r2/r3/r4/r5 for RP reachablility. +IPs are assigned automatically to routers, start IP and subnet is defined in respective JSON file +JSON snippet: +    "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24}, + +                               r5 ------- i2 +                    10.0.3.2/24| 10.0.0.2/24 +                               | +                    10.0.3.1/24| +                  ------------ r4 ---------- +                |   10.0.1.2/24  10.0.2.2/24 | +    10.0.1.1/24 |                            | 10.0.2.1/24 +                r1 ----------- s1 ---------- r2 +                  10.0.4.2/24  | 10.0.4.3/24 +                               | +                               |10.0.4.4/24 +                   i1 -------- r3 +                    10.0.4.1/24 +    Description: +    i1, i2  - FRR running iperf to send IGMP +                                     join and traffic +    r1, r2, r3, r4, r5 - FRR ruter +    s1 - OVS switch +""" + +# Global variables +VLAN_1 = 2501 +GROUP_RANGE = "225.0.0.0/8" +IGMP_JOIN = "225.1.1.1" +VLAN_INTF_ADRESS_1 = "10.0.8.3/24" +SAME_VLAN_IP_1 = {"ip": "10.1.1.1", "subnet": "255.255.255.0", "cidr": "24"} +SAME_VLAN_IP_2 = {"ip": "10.1.1.2", "subnet": "255.255.255.0", "cidr": "24"} +SAME_VLAN_IP_3 = {"ip": "10.1.1.3", "subnet": "255.255.255.0", "cidr": "24"} +SAME_VLAN_IP_4 = {"ip": "10.1.1.4", "subnet": "255.255.255.0", "cidr": "24"} +GROUP_RANGE_1 = [ +    "225.1.1.1/32", +    "225.1.1.2/32", +    "225.1.1.3/32", +    "225.1.1.4/32", +    "225.1.1.5/32", +] +IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"] +GROUP_RANGE_2 = [ +    "226.1.1.1/32", +    "226.1.1.2/32", +    "226.1.1.3/32", +    "226.1.1.4/32", +    "226.1.1.5/32", +] +IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"] +GROUP_RANGE_3 = [ +    "227.1.1.1/32", +    "227.1.1.2/32", +    "227.1.1.3/32", +    "227.1.1.4/32", +    "227.1.1.5/32", +] +IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"] + +intf_r1_s1 = None +intf_r1_s1_addr = None +intf_r2_s1 = None +intf_r2_s1_addr = None +intf_r3_s1 = None +intf_r3_s1_addr = None +intf_i1_s1 = None +intf_i1_s1_addr = None + + +def setup_module(mod): +    """ +    Sets up the pytest environment + +    * `mod`: module name +    """ + +    # Required linux kernel version for this suite to run. 
+    result = required_linux_kernel_version("4.19") +    if result is not True: +        pytest.skip("Kernel requirements are not met") + +    testsuite_run_time = time.asctime(time.localtime(time.time())) +    logger.info("Testsuite start time: {}".format(testsuite_run_time)) +    logger.info("=" * 40) +    logger.info("Master Topology: \n {}".format(TOPOLOGY)) + +    logger.info("Running setup_module to create topology") + +    testdir = os.path.dirname(os.path.realpath(__file__)) +    json_file = "{}/pim_dr_nondr_with_static_routes_topo1.json".format(testdir) +    tgen = Topogen(json_file, mod.__name__) +    global topo +    topo = tgen.json_topo +    # ... and here it calls Mininet initialization functions. + +    # get list of daemons needs to be started for this suite. +    daemons = topo_daemons(tgen, tgen.json_topo) + +    # Starting topology, create tmp files which are loaded to routers +    #  to start deamons and then start routers +    start_topology(tgen, daemons) + +    # Don"t run this test if we have any failure. +    if tgen.routers_have_failure(): +        pytest.skip(tgen.errors) + +    # Creating configuration from JSON +    build_config_from_json(tgen, tgen.json_topo) + +    # XXX Replace this using "with McastTesterHelper()... " in each test if possible. +    global app_helper +    app_helper = McastTesterHelper(tgen) + +    logger.info("Running setup_module() done") + + +def teardown_module(): +    """Teardown the pytest environment""" + +    logger.info("Running teardown_module to delete topology") + +    tgen = get_topogen() + +    app_helper.cleanup() + +    # Stop toplogy and Remove tmp files +    tgen.stop_topology() + +    logger.info( +        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) +    ) +    logger.info("=" * 40) + +##################################################### +# +#   Local APIs +# +##################################################### + + +def pre_config_for_receiver_dr_tests( +    tgen, topo, tc_name, highest_priority, lowest_priority +): +    """ +    API to do common pre-configuration for receiver test cases + +    parameters: +    ----------- +    * `tgen`: topogen object +    * `topo`: input json data +    * `tc_name`: caller test case name +    * `highest_priority`: router which will be having highest DR priority +    * `lowest_priority`: router which will be having lowest DR priority +    """ + +    global intf_r1_s1, intf_r1_s1_addr, intf_r2_s1, intf_r2_s1_addr, intf_r3_s1, intf_r3_s1_addr, intf_i1_s1, intf_i1_s1_addr + +    step("Configure IGMP and PIM on switch connected receiver nodes") +    step("Configure PIM on all upstream interfaces") + +    step("Configure link between R1, R2 ,R3 and receiver on" " same vlan") +    step( +        "Make sure {0} is DR initially configuring highest IP on {0} and R2 " +        "second highest, {1} is lower".format(highest_priority, lowest_priority) +    ) + +    intf_r1_s1 = topo["routers"]["r1"]["links"]["s1"]["interface"] +    intf_r1_s1_addr = topo["routers"]["r1"]["links"]["s1"]["ipv4"] + +    intf_r2_s1 = topo["routers"]["r2"]["links"]["s1"]["interface"] +    intf_r2_s1_addr = topo["routers"]["r2"]["links"]["s1"]["ipv4"] + +    intf_r3_s1 = topo["routers"]["r3"]["links"]["s1"]["interface"] +    intf_r3_s1_addr = topo["routers"]["r3"]["links"]["s1"]["ipv4"] + +    intf_i1_s1 = topo["routers"]["i1"]["links"]["s1"]["interface"] +    intf_i1_s1_addr = topo["routers"]["i1"]["links"]["s1"]["ipv4"] + +    if lowest_priority == "r1": +        lowest_pr_intf = 
intf_r1_s1 +    else: +        lowest_pr_intf = intf_r3_s1 + +    if highest_priority == "r1": +        highest_pr_intf = intf_r1_s1 +    else: +        highest_pr_intf = intf_r3_s1 + +    vlan_input = { +        lowest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        lowest_pr_intf: { +                            "ip": SAME_VLAN_IP_1["ip"], +                            "subnet": SAME_VLAN_IP_1["subnet"], +                        } +                    } +                ] +            } +        }, +        "r2": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_r2_s1: { +                            "ip": SAME_VLAN_IP_2["ip"], +                            "subnet": SAME_VLAN_IP_2["subnet"], +                        } +                    } +                ] +            } +        }, +        highest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        highest_pr_intf: { +                            "ip": SAME_VLAN_IP_3["ip"], +                            "subnet": SAME_VLAN_IP_3["subnet"], +                        } +                    } +                ] +            } +        }, +        "i1": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_i1_s1: { +                            "ip": SAME_VLAN_IP_4["ip"], +                            "subnet": SAME_VLAN_IP_4["subnet"], +                        } +                    } +                ] +            } +        }, +    } + +    add_interfaces_to_vlan(tgen, vlan_input) + +    raw_config = { +        "r1": { +            "raw_config": [ +                "interface {}".format(intf_r1_s1), +                "no ip address {}".format(intf_r1_s1_addr), +                "no ip pim", +            ] +        }, +        "r2": { +            "raw_config": [ +                "interface {}".format(intf_r2_s1), +                "no ip address {}".format(intf_r2_s1_addr), +                "no ip pim", +            ] +        }, +        "r3": { +            "raw_config": [ +                "interface {}".format(intf_r3_s1), +                "no ip address {}".format(intf_r3_s1_addr), +                "no ip pim", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}".format(intf_i1_s1), +                "no ip address {}".format(intf_i1_s1_addr), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    raw_config = { +        lowest_priority: { +            "raw_config": [ +                "interface {}.{}".format(lowest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_1["ip"], SAME_VLAN_IP_1["cidr"]), +                "ip pim", +                "ip igmp", +                "ip igmp version 2", +            ] +        }, +        "r2": { +            "raw_config": [ +                "interface {}.{}".format(intf_r2_s1, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_2["ip"], SAME_VLAN_IP_2["cidr"]), +                "ip pim", +                "ip igmp", +                "ip igmp version 2", +            ] +        }, +        highest_priority: { +            "raw_config": [ +                "interface {}.{}".format(highest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_3["ip"], 
SAME_VLAN_IP_3["cidr"]), +                "ip pim", +                "ip igmp", +                "ip igmp version 2", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}.{}".format(intf_i1_s1, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_4["ip"], SAME_VLAN_IP_4["cidr"]), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    for dut, intf in zip(["r1", "r2", "r3"], [intf_r1_s1, intf_r2_s1, intf_r3_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip pim hello {} {}".format(HELLO_TIMER, HOLD_TIMER), +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure R4 as RP on all the nodes for group range 224.0.0.0/24") + +    input_dict = { +        "r4": { +            "pim": { +                "rp": [ +                    { +                        "rp_addr": topo["routers"]["r4"]["links"]["lo"]["ipv4"].split( +                            "/" +                        )[0], +                        "group_addr_range": GROUP_RANGE_1, +                    } +                ] +            } +        } +    } + +    result = create_pim_config(tgen, topo, input_dict) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Send IGMP join for groups 226.1.1.1 to 226.1.1.5") + +    vlan_intf_i1_s1 = "{}.{}".format(intf_i1_s1, VLAN_1) +    result = app_helper.run_join("i1", IGMP_JOIN_RANGE_1, join_intf=vlan_intf_i1_s1) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step("Using static routes instead OSPF: Enable OSPF between all the nodes") + +    step("Start traffic from R4 connected source") + +    result = app_helper.run_traffic("i2", IGMP_JOIN_RANGE_1, "r5") +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    return True + + +def pre_config_for_source_dr_tests( +    tgen, topo, tc_name, highest_priority, lowest_priority +): +    """ +    API to do common pre-configuration for source test cases + +    parameters: +    ----------- +    * `tgen`: topogen object +    * `topo`: input json data +    * `tc_name`: caller test case name +    * `highest_priority`: router which will be having highest DR priority +    * `lowest_priority`: router which will be having lowest DR priority +    """ + +    global intf_r1_s1, intf_r1_s1_addr, intf_r2_s1, intf_r2_s1_addr, intf_r3_s1, intf_r3_s1_addr, intf_i1_s1, intf_i1_s1_addr + +    step("Configure IGMP and PIM on switch connected receiver nodes") +    step("Configure PIM on all upstream interfaces") + +    step("Configure link between R1, R2 ,R3 and receiver on" " same vlan") +    step( +        "Make sure {0} is DR initially configuring highest IP on {0} and R2 " +        "second highest, {1} is lower".format(highest_priority, lowest_priority) +    ) + +    intf_r1_s1 = topo["routers"]["r1"]["links"]["s1"]["interface"] +    intf_r1_s1_addr = topo["routers"]["r1"]["links"]["s1"]["ipv4"] + +    intf_r2_s1 = topo["routers"]["r2"]["links"]["s1"]["interface"] +    intf_r2_s1_addr = topo["routers"]["r2"]["links"]["s1"]["ipv4"] + +    intf_r3_s1 = topo["routers"]["r3"]["links"]["s1"]["interface"] 
+    intf_r3_s1_addr = topo["routers"]["r3"]["links"]["s1"]["ipv4"] + +    intf_i1_s1 = topo["routers"]["i1"]["links"]["s1"]["interface"] +    intf_i1_s1_addr = topo["routers"]["i1"]["links"]["s1"]["ipv4"] + +    if lowest_priority == "r1": +        lowest_pr_intf = intf_r1_s1 +    else: +        lowest_pr_intf = intf_r3_s1 + +    if highest_priority == "r1": +        highest_pr_intf = intf_r1_s1 +    else: +        highest_pr_intf = intf_r3_s1 + +    vlan_input = { +        lowest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        lowest_pr_intf: { +                            "ip": SAME_VLAN_IP_1["ip"], +                            "subnet": SAME_VLAN_IP_1["subnet"], +                        } +                    } +                ] +            } +        }, +        "r2": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_r2_s1: { +                            "ip": SAME_VLAN_IP_2["ip"], +                            "subnet": SAME_VLAN_IP_2["subnet"], +                        } +                    } +                ] +            } +        }, +        highest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        highest_pr_intf: { +                            "ip": SAME_VLAN_IP_3["ip"], +                            "subnet": SAME_VLAN_IP_3["subnet"], +                        } +                    } +                ] +            } +        }, +        "i1": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_i1_s1: { +                            "ip": SAME_VLAN_IP_4["ip"], +                            "subnet": SAME_VLAN_IP_4["subnet"], +                        } +                    } +                ] +            } +        }, +    } + +    add_interfaces_to_vlan(tgen, vlan_input) + +    raw_config = { +        "r1": { +            "raw_config": [ +                "interface {}".format(intf_r1_s1), +                "no ip address {}".format(intf_r1_s1_addr), +                "no ip pim", +            ] +        }, +        "r2": { +            "raw_config": [ +                "interface {}".format(intf_r2_s1), +                "no ip address {}".format(intf_r2_s1_addr), +                "no ip pim", +            ] +        }, +        "r3": { +            "raw_config": [ +                "interface {}".format(intf_r3_s1), +                "no ip address {}".format(intf_r3_s1_addr), +                "no ip pim", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}".format(intf_i1_s1), +                "no ip address {}".format(intf_i1_s1_addr), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step( +        "Configure IGMP and PIM on switch connected receiver nodes , " +        "configure PIM nbr with hello timer 1" +    ) + +    raw_config = { +        lowest_priority: { +            "raw_config": [ +                "interface {}.{}".format(lowest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_1["ip"], SAME_VLAN_IP_1["cidr"]), +                "ip pim", +            ] +        }, +        "r2": { +            "raw_config": [ +                "interface {}.{}".format(intf_r2_s1, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_2["ip"], 
SAME_VLAN_IP_2["cidr"]), +                "ip pim", +            ] +        }, +        highest_priority: { +            "raw_config": [ +                "interface {}.{}".format(highest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_3["ip"], SAME_VLAN_IP_3["cidr"]), +                "ip pim", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}.{}".format(intf_i1_s1, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_4["ip"], SAME_VLAN_IP_4["cidr"]), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    for dut, intf in zip(["r1", "r2", "r3"], [intf_r1_s1, intf_r2_s1, intf_r3_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip pim hello {} {}".format(HELLO_TIMER, HOLD_TIMER), +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure R4 as RP on all the nodes for group range 224.0.0.0/24") + +    input_dict = { +        "r4": { +            "pim": { +                "rp": [ +                    { +                        "rp_addr": topo["routers"]["r4"]["links"]["lo"]["ipv4"].split( +                            "/" +                        )[0], +                        "group_addr_range": GROUP_RANGE_1, +                    } +                ] +            } +        } +    } + +    result = create_pim_config(tgen, topo, input_dict) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure IGMP on R5 port and send IGMP join for groups " "(226.1.1.1-5)") + +    intf_r5_i2 = topo["routers"]["r5"]["links"]["i2"]["interface"] +    input_dict = { +        "r5": {"igmp": {"interfaces": {intf_r5_i2: {"igmp": {"version": "2"}}}}} +    } +    result = create_igmp_config(tgen, topo, input_dict) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    input_src = {"i2": topo["routers"]["i2"]["links"]["r5"]["interface"]} + +    result = app_helper.run_join("i2", IGMP_JOIN_RANGE_1, "r5") +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step("Using static routes instead OSPF: Enable OSPF between all the nodes") + +    step("Start traffic from Source node") + +    vlan_intf_i1_s1 = "{}.{}".format(intf_i1_s1, VLAN_1) +    result = app_helper.run_traffic("i1", IGMP_JOIN_RANGE_1, bind_intf=vlan_intf_i1_s1) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    return True + + +##################################################### +# +#   Testcases +# +##################################################### + + +def test_pim_source_dr_functionality_while_rebooting_dr_non_dr_nodes_p1(request): +    """ +    Verify mroute while rebooting DR /Non DR nodes( r1, r2 , r3 on all the nodes) +    """ + +    tgen = get_topogen() +    tc_name = request.node.name +    write_test_header(tc_name) + +    # Creating configuration from JSON +    app_helper.stop_all_hosts() +    clear_ip_mroute(tgen) +    check_router_status(tgen) +    reset_config_on_routers(tgen) +    clear_ip_pim_interface_traffic(tgen, topo) + +    # Don"t run this test if we have any failure. 
+    if tgen.routers_have_failure(): +        pytest.skip(tgen.errors) + +    result = pre_config_for_source_dr_tests(tgen, topo, tc_name, "r1", "r3") +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("R1 is the DR , verify using 'show ip pim interface json'") + +    vlan_intf_r1_s1 = "{}.{}".format(intf_r1_s1, VLAN_1) +    input_dict_dr = { +        "r1": { +            "pim": { +                "interfaces": {vlan_intf_r1_s1: {"drAddress": SAME_VLAN_IP_3["ip"]}} +            } +        } +    } +    result = verify_pim_config(tgen, input_dict_dr) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step( +        "R2 is transit router for R3 to reach R4, mroute should have (s, g) mroute with " +        "OIL towards R4, using 'show ip mroute json'" +    ) +    step( +        "R2 (s, g) upstream should be in join state verify using " +        "'show ip pim upstream json'" +    ) +    step( +        "R1 has (S, G) mroute with NONE OIL and upstream as not joined, verify using " +        "'show ip mroute json' 'show ip pim upstream json'" +    ) + +    source_i1 = SAME_VLAN_IP_4["ip"] +    input_dict_r1_r2 = [ +        { +            "dut": "r1", +            "src_address": source_i1, +            "oil": "none", +            "iif": "{}.{}".format( +                topo["routers"]["r1"]["links"]["s1"]["interface"], VLAN_1 +            ), +        }, +        { +            "dut": "r2", +            "src_address": source_i1, +            "oil": topo["routers"]["r2"]["links"]["r4"]["interface"], +            "iif": "{}.{}".format( +                topo["routers"]["r2"]["links"]["s1"]["interface"], VLAN_1 +            ), +        }, +    ] + +    for data in input_dict_r1_r2: +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1, +            data["iif"], +            data["oil"], +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +        if data["dut"] == "r2": +            result = verify_upstream_iif( +                tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 +            ) +            assert result is True, "Testcase {} : Failed Error: {}".format( +                tc_name, result +            ) +        else: +            result = verify_upstream_iif( +                tgen, +                data["dut"], +                data["iif"], +                data["src_address"], +                IGMP_JOIN_RANGE_1, +                expected=False, +            ) +            assert result is not True, ( +                "Testcase {} : Failed \n " +                "Upstream is still joined state \n Error: {}".format(tc_name, result) +            ) + +    step("Reboot R3 node") +    stop_router(tgen, "r3") + +    step("After reboot of R3 verify R1 became DR, using 'show ip pim interface json'") + +    result = verify_pim_config(tgen, input_dict_dr) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("R3 should not have any mroute and upstream") +    step("R2 has mroute with OIL towards R4 /R1 , verify using 'show ip mroute'") +    step( +        "R2 has upstream with Join RejP state verify using 'show ip pim upstream json'" +    ) +    step("R1 has mroute with none OIL and upstream with Not Join") + +    for data in input_dict_r1_r2: +        result = verify_ip_mroutes( +            tgen, +            
data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1, +            data["iif"], +            data["oil"], +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +        if data["dut"] == "r2": +            result = verify_upstream_iif( +                tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 +            ) +            assert result is True, "Testcase {} : Failed Error: {}".format( +                tc_name, result +            ) +        else: +            result = verify_upstream_iif( +                tgen, +                data["dut"], +                data["iif"], +                data["src_address"], +                IGMP_JOIN_RANGE_1, +                expected=False, +            ) +            assert result is not True, ( +                "Testcase {} : Failed \n " +                "Upstream is still joined state \n Error: {}".format(tc_name, result) +            ) + +    step("Reboot R2 node") +    stop_router(tgen, "r2") + +    step("After reboot of R2, R1 became DR verify using 'show ip pim interface json'") + +    result = verify_pim_config(tgen, input_dict_dr) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step( +        "R3 and R2 should not have any mroute and upstream , verify using " +        "'show ip mroute json' 'show ip pim upstream json'" +    ) +    step("R1 has mroute created with OIL towards R4 , using 'show ip mroute json'") +    step( +        "R1 has upstream with Join Rej Prune , verify using 'show ip pim upstream json'" +    ) + +    for data in input_dict_r1_r2: +        if data["dut"] == "r1": +            result = verify_ip_mroutes( +                tgen, +                data["dut"], +                data["src_address"], +                IGMP_JOIN_RANGE_1, +                data["iif"], +                data["oil"], +            ) +            assert result is True, "Testcase {} : Failed Error: {}".format( +                tc_name, result +            ) + +            result = verify_upstream_iif( +                tgen, +                data["dut"], +                data["iif"], +                data["src_address"], +                IGMP_JOIN_RANGE_1, +                expected=False, +            ) +            assert result is not True, ( +                "Testcase {} : Failed \n " +                "Upstream is still joined state \n Error: {}".format(tc_name, result) +            ) + +    step("Reboot R1 node using FRR stop") +    stop_router(tgen, "r1") + +    step( +        "After stop of all the routers, verify upstream and mroutes should " +        "not present in any of them" +    ) + +    for data in input_dict_r1_r2: +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1, +            data["iif"], +            data["oil"], +            expected=False, +        ) +        assert result is not True, ( +            "Testcase {} : Failed \n " +            "mroutes are still present \n Error: {}".format(tc_name, result) +        ) + +    step("start FRR for all the nodes") +    start_router(tgen, "r1") +    start_router(tgen, "r2") +    start_router(tgen, "r3") + +    step("After start of all the routers, R1 became DR") + +    result = verify_pim_config(tgen, input_dict_dr) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    for data in input_dict_r1_r2: +        result = 
verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1, +            data["iif"], +            data["oil"], +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +        if data["dut"] == "r2": +            result = verify_upstream_iif( +                tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 +            ) +            assert result is True, "Testcase {} : Failed Error: {}".format( +                tc_name, result +            ) + +    write_test_footer(tc_name) + + +if __name__ == "__main__": +    args = ["-s"] + sys.argv[1:] +    sys.exit(pytest.main(args)) diff --git a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py new file mode 100755 index 0000000000..1e290a0348 --- /dev/null +++ b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py @@ -0,0 +1,829 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test multicast pim sm: + +Test steps +- Create topology (setup module) +- Bring up topology + +Following tests are covered: +1. Verify mroutes when transit router present between RP and Source DR +""" + +import os +import sys +import json +import time +import datetime +from time import sleep +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# Required to instantiate the topology builder class. 
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+    start_topology,
+    write_test_header,
+    write_test_footer,
+    step,
+    reset_config_on_routers,
+    apply_raw_config,
+    add_interfaces_to_vlan,
+    check_router_status,
+    topo_daemons,
+    required_linux_kernel_version,
+)
+from lib.pim import (
+    create_pim_config,
+    create_igmp_config,
+    verify_ip_mroutes,
+    clear_ip_mroute,
+    clear_ip_pim_interface_traffic,
+    verify_pim_config,
+    verify_upstream_iif,
+    verify_multicast_traffic,
+    McastTesterHelper,
+)
+from lib.topolog import logger
+from lib.topojson import build_config_from_json
+
+HELLO_TIMER = 1
+HOLD_TIMER = 3
+
+pytestmark = [pytest.mark.pimd]
+
+TOPOLOGY = """
+
+Description: OSPF is configured on r1/r2/r4/r5/r6 for RP reachability; r6 acts as a transit router between
+            r1/r2 and r4.
+IPs are assigned automatically to routers; the start IP and subnet are defined in the respective JSON file
+JSON snippet:
+    "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24},
+
+                               r5 ------- i2
+                    10.0.3.2/24| 10.0.0.2/24
+                               |
+                    10.0.3.1/24|
+                               r4
+                               |10.0.4.1/24
+                               |
+                    10.0.4.2/24|
+                  ------------ r6 ----------
+                |   10.0.1.2/24  10.0.2.2/24 |
+    10.0.1.1/24 |                            | 10.0.2.1/24
+                r1 ----------- s1 ---------- r2
+                  10.0.5.2/24  | 10.0.5.3/24
+                               |
+                               |10.0.5.1/24
+                               i1
+
+
+
+    Description:
+    i1, i2  - FRR running iperf to send IGMP join and traffic
+    r1, r2, r4, r5, r6 - FRR router
+    s1 - OVS switch
+"""
+
+# Global variables
+VLAN_1 = 2501
+GROUP_RANGE = "225.0.0.0/8"
+IGMP_JOIN = "225.1.1.1"
+VLAN_INTF_ADRESS_1 = "10.0.8.3/24"
+SAME_VLAN_IP_1 = {"ip": "10.1.1.1", "subnet": "255.255.255.0", "cidr": "24"}
+SAME_VLAN_IP_2 = {"ip": "10.1.1.2", "subnet": "255.255.255.0", "cidr": "24"}
+SAME_VLAN_IP_3 = {"ip": "10.1.1.3", "subnet": "255.255.255.0", "cidr": "24"}
+SAME_VLAN_IP_4 = {"ip": "10.1.1.4", "subnet": "255.255.255.0", "cidr": "24"}
+GROUP_RANGE_1 = [
+    "225.1.1.1/32",
+    "225.1.1.2/32",
+    "225.1.1.3/32",
+    "225.1.1.4/32",
+    "225.1.1.5/32",
+]
+IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"]
+GROUP_RANGE_2 = [
+    "226.1.1.1/32",
+    "226.1.1.2/32",
+    "226.1.1.3/32",
+    "226.1.1.4/32",
+    "226.1.1.5/32",
+]
+IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"]
+GROUP_RANGE_3 = [
+    "227.1.1.1/32",
+    "227.1.1.2/32",
+    "227.1.1.3/32",
+    "227.1.1.4/32",
+    "227.1.1.5/32",
+]
+IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"]
+
+intf_r1_s1 = None
+intf_r1_s1_addr = None
+intf_r2_s1 = None
+intf_r2_s1_addr = None
+intf_r3_s1 = None
+intf_r3_s1_addr = None
+intf_i1_s1 = None
+intf_i1_s1_addr = None
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment
+
+    * `mod`: module name
+    """
+
+    # Required linux kernel version for this suite to run.
+    result = required_linux_kernel_version("4.19")
+    if result is not True:
+        pytest.skip("Kernel requirements are not met")
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+    logger.info("Master Topology: \n {}".format(TOPOLOGY))
+
+    logger.info("Running setup_module to create topology")
+
+    testdir = os.path.dirname(os.path.realpath(__file__))
+    json_file = "{}/pim_dr_nondr_with_transit_router_topo3.json".format(testdir)
+    tgen = Topogen(json_file, mod.__name__)
+    global topo
+    topo = tgen.json_topo
+    # ... and here it calls Mininet initialization functions.
+
+    # get list of daemons needed to be started for this suite.
+    daemons = topo_daemons(tgen, tgen.json_topo)
+
+    # Starting topology, create tmp files which are loaded to routers
+    #  to start daemons and then start routers
+    start_topology(tgen, daemons)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, tgen.json_topo)
+
+    # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+    global app_helper
+    app_helper = McastTesterHelper(tgen)
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """Teardown the pytest environment"""
+
+    logger.info("Running teardown_module to delete topology")
+
+    tgen = get_topogen()
+
+    app_helper.cleanup()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info(
+        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+    )
+    logger.info("=" * 40)
+
+
+#####################################################
+#
+#   Local APIs
+#
+#####################################################
+
+
+def pre_config_for_receiver_dr_tests(
+    tgen, topo, tc_name, highest_priority, lowest_priority
+):
+    """
+    API to do common pre-configuration for receiver test cases
+
+    parameters:
+    -----------
+    * `tgen`: topogen object
+    * `topo`: input json data
+    * `tc_name`: caller test case name
+    * `highest_priority`: router which will have the highest DR priority
+    * `lowest_priority`: router which will have the lowest DR priority
+    """
+
+    global intf_r1_s1, intf_r1_s1_addr, intf_r2_s1, intf_r2_s1_addr, intf_i1_s1, intf_i1_s1_addr
+
+    step("Configure IGMP and PIM on switch connected receiver nodes")
+    step("Configure PIM on all upstream interfaces")
+
+    step("Configure link between R1, R2 and receiver on same vlan")
+    step(
+        "Make sure {0} is DR initially by configuring the highest IP on {0}, R2 "
+        "the second highest, and {1} the lowest".format(highest_priority, lowest_priority)
+    )
+
+    intf_r1_s1 = topo["routers"]["r1"]["links"]["s1"]["interface"]
+    intf_r1_s1_addr = topo["routers"]["r1"]["links"]["s1"]["ipv4"]
+
+    intf_r2_s1 = topo["routers"]["r2"]["links"]["s1"]["interface"]
+    intf_r2_s1_addr = topo["routers"]["r2"]["links"]["s1"]["ipv4"]
+
+    intf_i1_s1 = topo["routers"]["i1"]["links"]["s1"]["interface"]
+    intf_i1_s1_addr = topo["routers"]["i1"]["links"]["s1"]["ipv4"]
+
+    if lowest_priority == "r1":
+        lowest_pr_intf = intf_r1_s1
+    else:
+        lowest_pr_intf = intf_r2_s1
+
+    if highest_priority == "r1":
+        highest_pr_intf = intf_r1_s1
+    else:
+        highest_pr_intf = 
intf_r2_s1 + +    vlan_input = { +        lowest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        lowest_pr_intf: { +                            "ip": SAME_VLAN_IP_1["ip"], +                            "subnet": SAME_VLAN_IP_1["subnet"], +                        } +                    } +                ] +            } +        }, +        highest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        highest_pr_intf: { +                            "ip": SAME_VLAN_IP_2["ip"], +                            "subnet": SAME_VLAN_IP_2["subnet"], +                        } +                    } +                ] +            } +        }, +        "i1": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_i1_s1: { +                            "ip": SAME_VLAN_IP_4["ip"], +                            "subnet": SAME_VLAN_IP_4["subnet"], +                        } +                    } +                ] +            } +        }, +    } + +    add_interfaces_to_vlan(tgen, vlan_input) + +    raw_config = { +        "r1": { +            "raw_config": [ +                "interface {}".format(intf_r1_s1), +                "no ip address {}".format(intf_r1_s1_addr), +                "no ip pim", +            ] +        }, +        "r2": { +            "raw_config": [ +                "interface {}".format(intf_r2_s1), +                "no ip address {}".format(intf_r2_s1_addr), +                "no ip pim", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}".format(intf_i1_s1), +                "no ip address {}".format(intf_i1_s1_addr), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    raw_config = { +        lowest_priority: { +            "raw_config": [ +                "interface {}.{}".format(lowest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_1["ip"], SAME_VLAN_IP_1["cidr"]), +                "ip pim", +                "ip igmp", +                "ip igmp version 2", +            ] +        }, +        highest_priority: { +            "raw_config": [ +                "interface {}.{}".format(highest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_2["ip"], SAME_VLAN_IP_2["cidr"]), +                "ip pim", +                "ip igmp", +                "ip igmp version 2", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}.{}".format(intf_i1_s1, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_4["ip"], SAME_VLAN_IP_4["cidr"]), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip pim hello {} {}".format(HELLO_TIMER, HOLD_TIMER), +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure R4 as RP on all the nodes for group range 224.0.0.0/24") 
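+    # Descriptive note (editorial, not in the original commit): the dict below
+    # is the input for create_pim_config(); it makes r4's loopback address the
+    # static RP for the GROUP_RANGE_1 groups on all routers in the topology.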
+ +    input_dict = { +        "r4": { +            "pim": { +                "rp": [ +                    { +                        "rp_addr": topo["routers"]["r4"]["links"]["lo"]["ipv4"].split( +                            "/" +                        )[0], +                        "group_addr_range": GROUP_RANGE_1, +                    } +                ] +            } +        } +    } + +    result = create_pim_config(tgen, topo, input_dict) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Send IGMP join for groups 226.1.1.1 to 226.1.1.5") + +    vlan_intf_i1_s1 = "{}.{}".format(intf_i1_s1, VLAN_1) +    result = app_helper.run_join("i1", IGMP_JOIN_RANGE_1, join_intf=vlan_intf_i1_s1) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step("Enable OSPF between r1 and r2") + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip ospf area 0.0.0.0", +                    "ip ospf dead-interval 4", +                    "ip ospf hello-interval 1", +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Start traffic from R4 connected source") + +    result = app_helper.run_traffic("i2", IGMP_JOIN_RANGE_1, "r5") +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    return True + + +def pre_config_for_source_dr_tests( +    tgen, topo, tc_name, highest_priority, lowest_priority +): +    """ +    API to do common pre-configuration for source test cases + +    parameters: +    ----------- +    * `tgen`: topogen object +    * `topo`: input json data +    * `tc_name`: caller test case name +    * `highest_priority`: router which will be having highest DR priority +    * `lowest_priority`: router which will be having lowest DR priority +    """ + +    global intf_r1_s1, intf_r1_s1_addr, intf_r2_s1, intf_r2_s1_addr, intf_i1_s1, intf_i1_s1_addr + +    step("Configure IGMP and PIM on switch connected receiver nodes") +    step("Configure PIM on all upstream interfaces") + +    step("Configure link between R1, R2 ,R3 and receiver on" " same vlan") +    step( +        "Make sure {0} is DR initially configuring highest IP on {0} and R2 " +        "second highest, {1} is lower".format(highest_priority, lowest_priority) +    ) + +    intf_r1_s1 = topo["routers"]["r1"]["links"]["s1"]["interface"] +    intf_r1_s1_addr = topo["routers"]["r1"]["links"]["s1"]["ipv4"] + +    intf_r2_s1 = topo["routers"]["r2"]["links"]["s1"]["interface"] +    intf_r2_s1_addr = topo["routers"]["r2"]["links"]["s1"]["ipv4"] + +    intf_i1_s1 = topo["routers"]["i1"]["links"]["s1"]["interface"] +    intf_i1_s1_addr = topo["routers"]["i1"]["links"]["s1"]["ipv4"] + +    if lowest_priority == "r1": +        lowest_pr_intf = intf_r1_s1 +    else: +        lowest_pr_intf = intf_r2_s1 + +    if highest_priority == "r1": +        highest_pr_intf = intf_r1_s1 +    else: +        highest_pr_intf = intf_r2_s1 + +    vlan_input = { +        lowest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        lowest_pr_intf: { +                            "ip": SAME_VLAN_IP_1["ip"], +                            "subnet": SAME_VLAN_IP_1["subnet"], +             
           } +                    } +                ] +            } +        }, +        highest_priority: { +            "vlan": { +                VLAN_1: [ +                    { +                        highest_pr_intf: { +                            "ip": SAME_VLAN_IP_2["ip"], +                            "subnet": SAME_VLAN_IP_2["subnet"], +                        } +                    } +                ] +            } +        }, +        "i1": { +            "vlan": { +                VLAN_1: [ +                    { +                        intf_i1_s1: { +                            "ip": SAME_VLAN_IP_4["ip"], +                            "subnet": SAME_VLAN_IP_4["subnet"], +                        } +                    } +                ] +            } +        }, +    } + +    add_interfaces_to_vlan(tgen, vlan_input) + +    raw_config = { +        "r1": { +            "raw_config": [ +                "interface {}".format(intf_r1_s1), +                "no ip address {}".format(intf_r1_s1_addr), +                "no ip pim", +            ] +        }, +        "r2": { +            "raw_config": [ +                "interface {}".format(intf_r2_s1), +                "no ip address {}".format(intf_r2_s1_addr), +                "no ip pim", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}".format(intf_i1_s1), +                "no ip address {}".format(intf_i1_s1_addr), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step( +        "Configure IGMP and PIM on switch connected receiver nodes , " +        "configure PIM nbr with hello timer 1" +    ) + +    raw_config = { +        lowest_priority: { +            "raw_config": [ +                "interface {}.{}".format(lowest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_1["ip"], SAME_VLAN_IP_1["cidr"]), +                "ip pim", +            ] +        }, +        highest_priority: { +            "raw_config": [ +                "interface {}.{}".format(highest_pr_intf, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_2["ip"], SAME_VLAN_IP_2["cidr"]), +                "ip pim", +            ] +        }, +        "i1": { +            "raw_config": [ +                "interface {}.{}".format(intf_i1_s1, VLAN_1), +                "ip address {}/{}".format(SAME_VLAN_IP_4["ip"], SAME_VLAN_IP_4["cidr"]), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip pim hello {} {}".format(HELLO_TIMER, HOLD_TIMER), +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure R4 as RP on all the nodes for group range 224.0.0.0/24") + +    input_dict = { +        "r4": { +            "pim": { +                "rp": [ +                    { +                        "rp_addr": topo["routers"]["r4"]["links"]["lo"]["ipv4"].split( +                            "/" +                        )[0], +                        "group_addr_range": 
GROUP_RANGE_1, +                    } +                ] +            } +        } +    } + +    result = create_pim_config(tgen, topo, input_dict) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Configure IGMP on R5 port and send IGMP join for groups " "(226.1.1.1-5)") + +    intf_r5_i2 = topo["routers"]["r5"]["links"]["i2"]["interface"] +    input_dict = { +        "r5": {"igmp": {"interfaces": {intf_r5_i2: {"igmp": {"version": "2"}}}}} +    } +    result = create_igmp_config(tgen, topo, input_dict) +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    result = app_helper.run_join("i2", IGMP_JOIN_RANGE_1, "r5") +    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + +    step("Enable OSPF between r1 and r2") + +    for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]): +        raw_config = { +            dut: { +                "raw_config": [ +                    "interface {}.{}".format(intf, VLAN_1), +                    "ip ospf area 0.0.0.0", +                    "ip ospf dead-interval 4", +                    "ip ospf hello-interval 1", +                ] +            } +        } +        result = apply_raw_config(tgen, raw_config) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Start traffic from Source node") + +    vlan_intf_i1_s1 = "{}.{}".format(intf_i1_s1, VLAN_1) +    result = app_helper.run_traffic("i1", IGMP_JOIN_RANGE_1, bind_intf=vlan_intf_i1_s1) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    return True + + +##################################################### +# +#   Testcases +# +##################################################### + + +def test_mroute_when_transit_router_present_between_rp_and_source_dr_p1(request): +    """ +    Verify mroutes when transit router present between RP and Source DR +    """ + +    tgen = get_topogen() +    tc_name = request.node.name +    write_test_header(tc_name) + +    # Creating configuration from JSON +    app_helper.stop_all_hosts() +    clear_ip_mroute(tgen) +    check_router_status(tgen) +    reset_config_on_routers(tgen) +    clear_ip_pim_interface_traffic(tgen, topo) + +    # Don"t run this test if we have any failure. 
+    if tgen.routers_have_failure(): +        pytest.skip(tgen.errors) + +    result = pre_config_for_source_dr_tests(tgen, topo, tc_name, "r1", "r2") +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("Taken care in base config: Add router R6 between RP and R1 , R2") + +    step("R1 is the DR, mroute and upstream created on R1") + +    vlan_intf_r1_s1 = "{}.{}".format(intf_r1_s1, VLAN_1) +    input_dict_dr = { +        "r1": { +            "pim": { +                "interfaces": {vlan_intf_r1_s1: {"drAddress": SAME_VLAN_IP_2["ip"]}} +            } +        } +    } +    result = verify_pim_config(tgen, input_dict_dr) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("On R1 Mroute OIL towards R6, upstream in join Rej Prune state") + +    source_i1 = SAME_VLAN_IP_4["ip"] +    input_dict_r1 = [ +        { +            "dut": "r1", +            "src_address": source_i1, +            "oil": topo["routers"]["r1"]["links"]["r6"]["interface"], +            "iif": "{}.{}".format( +                topo["routers"]["r1"]["links"]["s1"]["interface"], VLAN_1 +            ), +        } +    ] + +    for data in input_dict_r1: +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1, +            data["iif"], +            data["oil"], +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +        result = verify_upstream_iif( +            tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step( +        "R5 have mroute created and traffic is received, verify using " +        "'show ip mroute json' 'show ip multicast json'" +    ) + +    input_dict_r5 = [ +        { +            "dut": "r5", +            "src_address": "*", +            "iif": topo["routers"]["r5"]["links"]["r4"]["interface"], +            "oil": topo["routers"]["r5"]["links"]["i2"]["interface"], +        }, +        { +            "dut": "r5", +            "src_address": source_i1, +            "iif": topo["routers"]["r5"]["links"]["r4"]["interface"], +            "oil": topo["routers"]["r5"]["links"]["i2"]["interface"], +        }, +    ] + +    for data in input_dict_r5: +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1, +            data["iif"], +            data["oil"], +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    input_dict_traffic_r5 = { +        "r5": { +            "traffic_received": [topo["routers"]["r5"]["links"]["r4"]["interface"]], +            "traffic_sent": [topo["routers"]["r5"]["links"]["i2"]["interface"]], +        } +    } + +    result = verify_multicast_traffic(tgen, input_dict_traffic_r5) +    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + +    step("Make R2 as DR configuring higher priority value") + +    raw_config = { +        "r1": { +            "raw_config": [ +                "interface {}.{}".format(intf_r1_s1, VLAN_1), +                "no ip address {}/{}".format( +                    SAME_VLAN_IP_2["ip"], SAME_VLAN_IP_1["cidr"] +                ), +                "ip address {}/{}".format(SAME_VLAN_IP_1["ip"], SAME_VLAN_IP_1["cidr"]), +            ] +        }, 
+        "r2": { +            "raw_config": [ +                "interface {}.{}".format(intf_r2_s1, VLAN_1), +                "no ip address {}/{}".format( +                    SAME_VLAN_IP_1["ip"], SAME_VLAN_IP_1["cidr"] +                ), +                "ip address {}/{}".format(SAME_VLAN_IP_2["ip"], SAME_VLAN_IP_2["cidr"]), +            ] +        }, +    } + +    result = apply_raw_config(tgen, raw_config) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step("R2 is the DR, mroute and upstream created on R2") + +    vlan_intf_r2_s1 = "{}.{}".format(intf_r2_s1, VLAN_1) +    input_dict_dr = { +        "r2": { +            "pim": { +                "interfaces": {vlan_intf_r2_s1: {"drAddress": SAME_VLAN_IP_2["ip"]}} +            } +        } +    } +    result = verify_pim_config(tgen, input_dict_dr) +    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    step( +        "R5 have mroute created and traffic is received, verify using " +        "'show ip mroute json' 'show ip multicast json'" +    ) + +    for data in input_dict_r5: +        result = verify_ip_mroutes( +            tgen, +            data["dut"], +            data["src_address"], +            IGMP_JOIN_RANGE_1, +            data["iif"], +            data["oil"], +        ) +        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + +    result = verify_multicast_traffic(tgen, input_dict_traffic_r5) +    assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + +    write_test_footer(tc_name) + + +if __name__ == "__main__": +    args = ["-s"] + sys.argv[1:] +    sys.exit(pytest.main(args)) diff --git a/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py b/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py index 9929f4b3c7..75fae6bb59 100755 --- a/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py +++ b/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py @@ -179,7 +179,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, tgen.json_topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Don"t run this test if we have any failure. diff --git a/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py b/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py index 57561c78eb..c2128cbad6 100755 --- a/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py +++ b/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py @@ -176,7 +176,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Don"t run this test if we have any failure. 
diff --git a/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py b/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py index 907c75e9ee..6f3867cc2c 100755 --- a/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py +++ b/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py @@ -188,7 +188,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Don"t run this test if we have any failure. @@ -2834,9 +2834,7 @@ def test_mroute_after_removing_RP_sending_IGMP_prune_p2(request):      intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]      input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}} -    traffic_before = verify_multicast_traffic( -        tgen, input_traffic, return_traffic=True, expected=False -    ) +    traffic_before = verify_multicast_traffic(tgen, input_traffic, return_traffic=True)      assert isinstance(traffic_before, dict), (          "Testcase {} : Failed \n traffic_before is not dictionary \n "          "Error: {}".format(tc_name, result) @@ -2861,9 +2859,7 @@ def test_mroute_after_removing_RP_sending_IGMP_prune_p2(request):      intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]      input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}} -    traffic_after = verify_multicast_traffic( -        tgen, input_traffic, return_traffic=True, expected=False -    ) +    traffic_after = verify_multicast_traffic(tgen, input_traffic, return_traffic=True)      assert isinstance(traffic_after, dict), (          "Testcase {} : Failed \n traffic_after is not dictionary \n "          "Error: {}".format(tc_name, result) @@ -3274,8 +3270,6 @@ def test_prune_sent_to_LHR_and_FHR_when_PIMnbr_down_p2(request):      intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"]      shutdown_bringup_interface(tgen, "r2", intf_r2_l1, False) -    app_helper.stop_host("i2") -      step("Verify RP info after Shut the link from FHR to RP from RP node")      dut = "l1"      rp_address = "1.0.5.17" @@ -3426,8 +3420,6 @@ def test_prune_sent_to_LHR_and_FHR_when_PIMnbr_down_p2(request):      step("Verify PIM Nbrs after Shut the link from FHR to RP from FHR node") -    app_helper.stop_host("i6") -      step("Verify RP info after Shut the link from FHR to RP from FHR node")      dut = "l1"      rp_address = "1.0.5.17" diff --git a/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo4.py b/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo4.py index 1b7158d597..b255bddcef 100755 --- a/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo4.py +++ b/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo4.py @@ -155,7 +155,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Don"t run this test if we have any failure. 
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py index a5cec93813..b9a7cc7cd7 100755 --- a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py +++ b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py @@ -69,7 +69,7 @@ TC_18 : Verify RPF interface updated in mroute when higher preferred RP gets          deleted  TC_19 : Verify IIF and OIL in "show ip pim state" updated when higher          preferred overlapping RP is deleted -TC_20 : Verfiy PIM upstream IIF updated when higher preferred overlapping RP +TC_20 : Verify PIM upstream IIF updated when higher preferred overlapping RP          deleted  TC_21_1 : Verify OIF and RFP for (*,G) and (S,G) when static RP configure in            LHR router @@ -94,7 +94,7 @@ TC_30 : Verify IIF and OIL change to other path after shut the primary path  TC_31 : Verify RP info and (*,G) mroute after deleting the RP and shut / no          shut the RPF interface.  TC_32 : Verify RP info and (*,G) mroute after deleting the RP and shut / no -        shut the RPF inteface +        shut the RPF interface  """  import os @@ -227,7 +227,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, TOPO)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Don"t run this test if we have any failure. @@ -1157,7 +1157,7 @@ def test_send_join_on_higher_preffered_rp_p1(request):                  gets deleted       TC_19_P1 : Verify IIF and OIL in "show ip pim state" updated when higher                  preferred overlapping RP is deleted -     TC_20_P1 : Verfiy PIM upstream IIF updated when higher preferred +     TC_20_P1 : Verify PIM upstream IIF updated when higher preferred                  overlapping RP deleted      Topology used: @@ -1354,7 +1354,7 @@ def test_send_join_on_higher_preffered_rp_p1(request):      assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)      step( -        "r1 : Verfiy upstream IIF updated when higher preferred overlapping" +        "r1 : Verify upstream IIF updated when higher preferred overlapping"          "RP deleted"      )      result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) @@ -3820,7 +3820,7 @@ def test_delete_RP_shut_noshut_upstream_interface_p1(request):  def test_delete_RP_shut_noshut_RP_interface_p1(request):      """      TC_32_P1: Verify RP info and (*,G) mroute after deleting the RP and shut/ -           no shut the RPF inteface +           no shut the RPF interface      Topology used:                  ________r2_____ diff --git a/tests/topotests/ospf6_topo2/test_ospf6_topo2.py b/tests/topotests/ospf6_topo2/test_ospf6_topo2.py index eb8561c404..d17aeda3ea 100644 --- a/tests/topotests/ospf6_topo2/test_ospf6_topo2.py +++ b/tests/topotests/ospf6_topo2/test_ospf6_topo2.py @@ -134,6 +134,7 @@ def build_topo(tgen):      switch = tgen.add_switch("s4")      switch.add_link(tgen.gears["r4"], nodeif="r4-stubnet") +  def setup_module(mod):      "Sets up the pytest environment"      tgen = Topogen(build_topo, mod.__name__) @@ -585,10 +586,11 @@ def test_nssa_range():      logger.info("Expecting NSSA range to be added on r3")      routes = {          "2001:db8:1000::/64": { -            "metricType":2, -            "metricCost":20, -            "metricCostE2":10, -    
    }} +            "metricType": 2, +            "metricCost": 20, +            "metricCostE2": 10, +        } +    }      expect_ospfv3_routes("r3", routes, wait=30, type="external-2", detail=True)      # Change the NSSA range cost. @@ -601,10 +603,11 @@ def test_nssa_range():      logger.info("Expecting NSSA range to be updated with new cost")      routes = {          "2001:db8:1000::/64": { -            "metricType":2, -            "metricCost":20, -            "metricCostE2":1000, -        }} +            "metricType": 2, +            "metricCost": 20, +            "metricCostE2": 1000, +        } +    }      expect_ospfv3_routes("r3", routes, wait=30, type="external-2", detail=True)      # Configure the NSSA range to not be advertised. @@ -631,12 +634,12 @@ def test_nssa_range():      logger.info("Expecting previously summarized routes to be re-added")      routes = {          "2001:db8:1000::1/128": { -            "metricType":2, -            "metricCost":20, +            "metricType": 2, +            "metricCostE2": 20,          },          "2001:db8:1000::2/128": { -            "metricType":2, -            "metricCost":20, +            "metricType": 2, +            "metricCostE2": 20,          },      }      expect_ospfv3_routes("r3", routes, wait=30, type="external-2", detail=True) diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py index f9fa55e275..f42bc47d46 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py @@ -146,7 +146,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py index e63f59e846..2c9959c499 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py @@ -138,7 +138,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py index 030b77c609..252481799c 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py @@ -106,7 +106,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py index 86f3213fce..a0ab828717 100644 --- 
a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py @@ -117,7 +117,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py index a578272e21..2b479db3c2 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py @@ -120,7 +120,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py index 4a5660f42f..00feefc4d0 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py @@ -121,7 +121,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py index b32483f7ad..497a8b900b 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py @@ -120,7 +120,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py index aa34208acb..1917bd42f5 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py @@ -118,7 +118,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py index 7c09e71ef8..3b84a99cdf 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py @@ -107,7 +107,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # 
Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py index adc1b2cf3a..22d768d9f6 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py @@ -133,7 +133,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py index 88c87dcecd..8bd81a3854 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py @@ -129,7 +129,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py index 73193582a6..c7ee723b3e 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py @@ -114,7 +114,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper.py b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper.py index 2c7c6df37e..ff182be66f 100644 --- a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper.py +++ b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper.py @@ -123,7 +123,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py b/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py index 01ddbc1521..5903649d67 100644 --- a/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py +++ b/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py @@ -79,7 +79,7 @@ def setup_module(mod):      # This is a sample of configuration loading.      
router_list = tgen.routers() -    # For all registred routers, load the zebra and ospf configuration file +    # For all registered routers, load the zebra and ospf configuration file      for rname, router in router_list.items():          router.load_config(              TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py index 36cde06c3e..3967f5f42a 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py @@ -161,7 +161,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_authentication.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_authentication.py index baa0071f9c..d32a05a88e 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_authentication.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_authentication.py @@ -116,7 +116,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py index 9353cd923b..75be0928ab 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py @@ -121,7 +121,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON @@ -195,7 +195,7 @@ def red_connected(dut, config=True):  def get_llip(onrouter, intf):      """ -    API to get the link local ipv6 address of a perticular interface +    API to get the link local ipv6 address of a particular interface      Parameters      ---------- @@ -222,7 +222,7 @@ def get_llip(onrouter, intf):  def get_glipv6(onrouter, intf):      """ -    API to get the global ipv6 address of a perticular interface +    API to get the global ipv6 address of a particular interface      Parameters      ---------- diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py index fe8be0a4b3..ce880b413b 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py @@ -134,7 +134,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON @@ -218,7 
+218,7 @@ def red_connected(dut, config=True):  def get_llip(onrouter, intf):      """ -    API to get the link local ipv6 address of a perticular interface +    API to get the link local ipv6 address of a particular interface      Parameters      ---------- @@ -245,7 +245,7 @@ def get_llip(onrouter, intf):  def get_glipv6(onrouter, intf):      """ -    API to get the global ipv6 address of a perticular interface +    API to get the global ipv6 address of a particular interface      Parameters      ---------- diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa.py index 64a067cd1a..bdc4c139f7 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa.py @@ -66,7 +66,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py index dc3b915d49..7b41c80ce3 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py @@ -143,7 +143,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py index d7cf951c5f..0c9457b39e 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py @@ -135,7 +135,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py index 21d03fadfb..df3a0249ea 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py @@ -126,7 +126,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON @@ -166,7 +166,7 @@ def teardown_module(mod):  def get_llip(onrouter, intf):      """ -    API to get the link local ipv6 address of a perticular interface +    API to get the link local ipv6 address of a particular interface      Parameters      ---------- @@ -193,7 +193,7 @@ def get_llip(onrouter, intf):  def get_glipv6(onrouter, intf):      """ -    API to get the global ipv6 address of a perticular interface +    API to get the 
global ipv6 address of a particular interface      Parameters      ---------- diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py index 9ec06ec36b..c9824e79c5 100644 --- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py +++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py @@ -120,7 +120,7 @@ def setup_module(mod):      daemons = topo_daemons(tgen, topo)      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen, daemons)      # Creating configuration from JSON diff --git a/tests/topotests/simple_snmp_test/test_simple_snmp.py b/tests/topotests/simple_snmp_test/test_simple_snmp.py index 35f0210134..1ca06c6571 100755 --- a/tests/topotests/simple_snmp_test/test_simple_snmp.py +++ b/tests/topotests/simple_snmp_test/test_simple_snmp.py @@ -69,7 +69,7 @@ def setup_module(mod):      router_list = tgen.routers() -    # For all registred routers, load the zebra configuration file +    # For all registered routers, load the zebra configuration file      for rname, router in router_list.items():          router.load_config(              TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py index 809a0a3240..abfba029f4 100644 --- a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py +++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py @@ -94,7 +94,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py index b85aa43ca4..f04279a081 100644 --- a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py +++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py @@ -135,7 +135,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo3_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo3_ebgp.py index 0e6ab6183c..6c76c928ec 100644 --- a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo3_ebgp.py +++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo3_ebgp.py @@ -118,7 +118,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      
# Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py index 7a7c5d63a7..3f49ced7ca 100644 --- a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py +++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py @@ -101,7 +101,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py index e06d0fca3c..6b7b2adfd9 100644 --- a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py +++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py @@ -94,7 +94,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py index cb6c879459..350a117b94 100644 --- a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py +++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py @@ -135,7 +135,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py index 1ac91e1f5f..1861d9ad49 100644 --- a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py +++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py @@ -117,7 +117,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      # Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py index 42d86f22da..d5e5148eae 100644 --- a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py +++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py @@ -96,7 +96,7 @@ def setup_module(mod):      # ... and here it calls Mininet initialization functions.      
# Starting topology, create tmp files which are loaded to routers -    #  to start deamons and then start routers +    #  to start daemons and then start routers      start_topology(tgen)      # Creating configuration from JSON @@ -175,7 +175,7 @@ def test_static_routes_rmap_pfxlist_p0_tc7_ibgp(request):      )      for addr_type in ADDR_TYPES: -        # Api call to modfiy BGP timerse +        # Api call to modify BGP timerse          input_dict = {              "r2": {                  "bgp": { diff --git a/tests/topotests/zebra_multiple_connected/r1/ip_route.json b/tests/topotests/zebra_multiple_connected/r1/ip_route.json new file mode 100644 index 0000000000..c29f2f9786 --- /dev/null +++ b/tests/topotests/zebra_multiple_connected/r1/ip_route.json @@ -0,0 +1,62 @@ +{ +  "10.0.1.0/24":[ +    { +      "prefix":"10.0.1.0/24", +      "prefixLen":24, +      "protocol":"connected", +      "vrfName":"default", +      "distance":0, +      "metric":0, +      "installed":true, +      "table":254, +      "nexthops":[ +        { +          "fib":true, +          "directlyConnected":true, +          "interfaceName":"r1-eth1", +          "active":true +        } +      ] +    }, +    { +      "prefix":"10.0.1.0/24", +      "prefixLen":24, +      "protocol":"connected", +      "vrfName":"default", +      "distance":0, +      "metric":0, +      "installed":true, +      "table":254, +      "nexthops":[ +        { +          "fib":true, +          "directlyConnected":true, +          "interfaceName":"r1-eth0", +          "active":true +        } +      ] +    } +  ], +  "192.168.1.1/32":[ +    { +      "prefix":"192.168.1.1/32", +      "prefixLen":32, +      "protocol":"kernel", +      "vrfName":"default", +      "selected":true, +      "destSelected":true, +      "distance":0, +      "metric":0, +      "installed":true, +      "table":254, +      "nexthops":[ +        { +          "fib":true, +          "ip":"10.0.1.99", +          "interfaceName":"r1-eth1", +          "active":true +        } +      ] +    } +  ] +} diff --git a/tests/topotests/zebra_multiple_connected/r1/ip_route2.json b/tests/topotests/zebra_multiple_connected/r1/ip_route2.json new file mode 100644 index 0000000000..26995654f7 --- /dev/null +++ b/tests/topotests/zebra_multiple_connected/r1/ip_route2.json @@ -0,0 +1,102 @@ +{ +  "10.0.1.0/24":[ +    { +      "prefix":"10.0.1.0/24", +      "prefixLen":24, +      "protocol":"connected", +      "vrfName":"default", +      "distance":0, +      "metric":0, +      "installed":true, +      "table":254, +      "nexthops":[ +        { +          "fib":true, +          "directlyConnected":true, +          "interfaceName":"r1-eth1", +          "active":true +        } +      ] +    }, +    { +      "prefix":"10.0.1.0/24", +      "prefixLen":24, +      "protocol":"connected", +      "vrfName":"default", +      "distance":0, +      "metric":0, +      "installed":true, +      "table":254, +      "nexthops":[ +        { +          "fib":true, +          "directlyConnected":true, +          "interfaceName":"r1-eth0", +          "active":true +        } +      ] +    } +  ], +  "10.0.1.30/32":[ +    { +      "prefix":"10.0.1.30/32", +      "prefixLen":32, +      "protocol":"kernel", +      "vrfName":"default", +      "distance":0, +      "metric":0, +      "installed":true, +      "table":254, +      "nexthops":[ +        { +          "fib":true, +          "directlyConnected":true, +          "interfaceName":"r1-eth1", +          "active":true +        } +      ] +    } +  ], +  "10.9.9.0/24":[ +    
{ +      "prefix":"10.9.9.0/24", +      "prefixLen":24, +      "protocol":"kernel", +      "vrfName":"default", +      "distance":0, +      "metric":0, +      "installed":true, +      "table":254, +      "nexthops":[ +        { +          "fib":true, +          "ip":"10.0.1.30", +          "afi":"ipv4", +          "interfaceName":"r1-eth1", +          "active":true +        } +      ] +    } +  ], +  "192.168.1.1/32":[ +    { +      "prefix":"192.168.1.1/32", +      "prefixLen":32, +      "protocol":"kernel", +      "vrfName":"default", +      "distance":0, +      "metric":0, +      "installed":true, +      "table":254, +      "nexthops":[ +        { +          "fib":true, +          "ip":"10.0.1.99", +          "afi":"ipv4", +          "interfaceName":"r1-eth1", +          "active":true +        } +      ] +    } +  ] +} diff --git a/tests/topotests/zebra_multiple_connected/r1/zebra.conf b/tests/topotests/zebra_multiple_connected/r1/zebra.conf new file mode 100644 index 0000000000..81adcadea8 --- /dev/null +++ b/tests/topotests/zebra_multiple_connected/r1/zebra.conf @@ -0,0 +1,9 @@ +interface r1-eth0 +  ip address 10.0.1.1/24 +! +interface r1-eth1 +  ip address 10.0.1.2/24 +! +interface r1-eth2 +  ip address 10.0.1.3/24 +!
\ No newline at end of file diff --git a/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py b/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py new file mode 100644 index 0000000000..8882cf5bda --- /dev/null +++ b/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python + +# +# test_zebra_multiple_connected.py +# +# Copyright (c) 2022 by +# Nvidia Corporation +# Donald Sharp +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_zebra_multiple_connected.py: Testing multiple connected + +""" + +import os +import re +import sys +import pytest +import json +from functools import partial + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. + +##################################################### +## +##   Network Topology Definition +## +##################################################### + + +def build_topo(tgen): +    for routern in range(1, 4): +        tgen.add_router("r{}".format(routern)) + +    # On main router +    # First switch is for a dummy interface (for local network) +    switch = tgen.add_switch("sw1") +    switch.add_link(tgen.gears["r1"]) + +    # Switches for zebra +    # switch 2 switch is for connection to zebra router +    switch = tgen.add_switch("sw2") +    switch.add_link(tgen.gears["r1"]) +    switch.add_link(tgen.gears["r2"]) + +    # switch 4 is stub on remote zebra router +    switch = tgen.add_switch("sw4") +    switch.add_link(tgen.gears["r3"]) + +    # switch 3 is between zebra routers +    switch = tgen.add_switch("sw3") +    switch.add_link(tgen.gears["r2"]) +    switch.add_link(tgen.gears["r3"]) + + +##################################################### +## +##   Tests starting +## +##################################################### + + +def setup_module(module): +    "Setup topology" +    tgen = Topogen(build_topo, module.__name__) +    tgen.start_topology() + +    # This is a sample of configuration loading. +    router_list = tgen.routers() +    for rname, router in router_list.items(): +        router.load_config( +            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) +        ) + +    tgen.start_router() + + +def teardown_module(_mod): +    "Teardown the pytest environment" +    tgen = get_topogen() + +    # This function tears down the whole topology. 
+    tgen.stop_topology() + + +def test_zebra_connected_multiple(): +    "Test multiple connected routes that have a kernel route pointing at one" + +    tgen = get_topogen() +    # Don't run this test if we have any failure. +    if tgen.routers_have_failure(): +        pytest.skip(tgen.errors) + +    router = tgen.gears["r1"] +    router.run("ip route add 192.168.1.1/32 via 10.0.1.99 dev r1-eth1") +    router.run("ip link add dummy1 type dummy") +    router.run("ip link set dummy1 up") +    router.run("ip link set dummy1 down") + +    routes = "{}/{}/ip_route.json".format(CWD, router.name) +    expected = json.loads(open(routes).read()) + +    test_func = partial( +        topotest.router_json_cmp, router, "show ip route json", expected +    ) + +    _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) +    assert result is None, "Kernel route is missing from zebra" + + +def test_zebra_system_recursion(): +    "Test a system route recursing through another system route" + +    tgen = get_topogen() +    if tgen.routers_have_failure(): +        pytest.skip(tgen.errors) + +    router = tgen.gears["r1"] +    router.run("ip route add 10.0.1.30/32 dev r1-eth1") +    router.run("ip route add 10.9.9.0/24 via 10.0.1.30 dev r1-eth1") +    router.run("ip link add dummy2 type dummy") +    router.run("ip link set dummy2 up") +    router.run("ip link set dummy2 down") + +    routes = "{}/{}/ip_route2.json".format(CWD, router.name) +    expected = json.loads(open(routes).read()) +    test_func = partial( +        topotest.router_json_cmp, router, "show ip route json", expected +    ) + +    _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) +    assert result is None, "Kernel route is missing from zebra" + + +if __name__ == "__main__": +    args = ["-s"] + sys.argv[1:] +    sys.exit(pytest.main(args)) diff --git a/tools/etc/frr/daemons b/tools/etc/frr/daemons index b1526888ed..7f8e1e29fc 100644 --- a/tools/etc/frr/daemons +++ b/tools/etc/frr/daemons @@ -21,6 +21,7 @@ ripd=no  ripngd=no  isisd=no  pimd=no +pim6d=no  ldpd=no  nhrpd=no  eigrpd=no @@ -46,6 +47,7 @@ ripd_options="   -A 127.0.0.1"  ripngd_options=" -A ::1"  isisd_options="  -A 127.0.0.1"  pimd_options="   -A 127.0.0.1" +pim6d_options="" # no telnet port by default  ldpd_options="   -A 127.0.0.1"  nhrpd_options="  -A 127.0.0.1"  eigrpd_options=" -A 127.0.0.1" diff --git a/tools/etc/rsyslog.d/45-frr.conf b/tools/etc/rsyslog.d/45-frr.conf index feeeb13f13..2a1992eabf 100644 --- a/tools/etc/rsyslog.d/45-frr.conf +++ b/tools/etc/rsyslog.d/45-frr.conf @@ -14,6 +14,8 @@ if  $programname == 'babeld' or      $programname == 'ospf6d' or      $programname == 'ospfd' or      $programname == 'pimd' or +    $programname == 'pim6d' or +    $programname == 'pathd' or      $programname == 'ripd' or      $programname == 'ripngd' or      $programname == 'vrrpd' or @@ -32,6 +34,8 @@ if  $programname == 'babeld' or      $programname == 'ospf6d' or      $programname == 'ospfd' or      $programname == 'pimd' or +    $programname == 'pim6d' or +    $programname == 'pathd' or      $programname == 'ripd' or      $programname == 'ripngd' or      $programname == 'vrrpd' or diff --git a/tools/frr-reload.py b/tools/frr-reload.py index 2b76c43f7b..8a2b689ac0 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -763,6 +763,38 @@ def check_for_exit_vrf(lines_to_add, lines_to_del):      return (lines_to_add, lines_to_del) +def bgp_delete_inst_move_line(lines_to_del): +    # Deletion of bgp default inst followed by +    # bgp vrf 
inst leads to issue of default +    # instance can not be removed. +    # Move the bgp default instance line to end. +    bgp_defult_inst = False +    bgp_vrf_inst = False + +    for (ctx_keys, line) in lines_to_del: +        # Find bgp default inst +        if ( +            ctx_keys[0].startswith("router bgp") +            and not line +            and "vrf" not in ctx_keys[0] +        ): +            bgp_defult_inst = True +        # Find bgp vrf inst +        if ctx_keys[0].startswith("router bgp") and not line and "vrf" in ctx_keys[0]: +            bgp_vrf_inst = True + +    if bgp_defult_inst and bgp_vrf_inst: +        for (ctx_keys, line) in lines_to_del: +            # move bgp default inst to end +            if ( +                ctx_keys[0].startswith("router bgp") +                and not line +                and "vrf" not in ctx_keys[0] +            ): +                lines_to_del.remove((ctx_keys, line)) +                lines_to_del.append((ctx_keys, line)) + +  def bgp_delete_nbr_remote_as_line(lines_to_add):      # Handle deletion of neighbor <nbr> remote-as line from      # lines_to_add if the nbr is configured with peer-group and @@ -841,19 +873,44 @@ def bgp_delete_nbr_remote_as_line(lines_to_add):          lines_to_add.remove((ctx_keys, line)) -""" -This method handles deletion of bgp peer group config. -The objective is to delete config lines related to peers -associated with the peer-group and move the peer-group -config line to the end of the lines_to_del list. -""" +def bgp_remove_neighbor_cfg(lines_to_del, del_nbr_dict): + +    # This method handles deletion of bgp neighbor configs, +    # if there is neighbor to peer-group cmd is in delete list. +    # As 'no neighbor .* peer-group' deletes the neighbor, +    # subsequent neighbor speciic config line deletion results +    # in error. +    lines_to_del_to_del = [] + +    for (ctx_keys, line) in lines_to_del: +        if ( +            ctx_keys[0].startswith("router bgp") +            and line +            and line.startswith("neighbor ") +        ): +            if ctx_keys[0] in del_nbr_dict: +                for nbr in del_nbr_dict[ctx_keys[0]]: +                    re_nbr_pg = re.search('neighbor (\S+) .*peer-group (\S+)', line) +                    nb_exp = "neighbor %s .*" % nbr +                    if not re_nbr_pg: +                        re_nb = re.search(nb_exp, line) +                        if re_nb: +                            lines_to_del_to_del.append((ctx_keys, line)) + +    for (ctx_keys, line) in lines_to_del_to_del: +        lines_to_del.remove((ctx_keys, line))  def delete_move_lines(lines_to_add, lines_to_del): +    # This method handles deletion of bgp peer group config. +    # The objective is to delete config lines related to peers +    # associated with the peer-group and move the peer-group +    # config line to the end of the lines_to_del list.      bgp_delete_nbr_remote_as_line(lines_to_add)      del_dict = dict() +    del_nbr_dict = dict()      # Stores the lines to move to the end of the pending list.      lines_to_del_to_del = []      # Stores the lines to move to end of the pending list. 
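The bgp_delete_inst_move_line() helper added in the hunk above enforces one ordering constraint on the pending deletions: when both the default BGP instance ("router bgp <asn>") and a VRF instance ("router bgp <asn> vrf <name>") are queued for removal, the default-instance line is moved to the end, since (per the comment in the patch) deleting the default instance before the VRF instances leaves it un-removable. Below is a minimal standalone sketch of that reordering idea, assuming the same (ctx_keys, line) tuple shape frr-reload.py uses for pending deletions; the function name move_default_bgp_instance_last and the sample data are illustrative only and are not part of the patch.

def move_default_bgp_instance_last(lines_to_del):
    # A context-only deletion has an empty/None line and a ctx_keys tuple
    # whose first element is "router bgp <asn>" or "router bgp <asn> vrf <name>".
    def is_bgp_instance(ctx_keys, line):
        return ctx_keys[0].startswith("router bgp") and not line

    default_inst = [e for e in lines_to_del
                    if is_bgp_instance(*e) and "vrf" not in e[0][0]]
    vrf_inst = [e for e in lines_to_del
                if is_bgp_instance(*e) and "vrf" in e[0][0]]

    # Only reorder when both kinds of instance deletions are pending.
    if not (default_inst and vrf_inst):
        return list(lines_to_del)

    # Keep everything else in its original order, then replay the
    # default-instance deletions last.
    others = [e for e in lines_to_del if e not in default_inst]
    return others + default_inst

pending = [
    (("router bgp 65001",), None),
    (("router bgp 65001 vrf blue",), None),
]
print(move_default_bgp_instance_last(pending))
# [(('router bgp 65001 vrf blue',), None), (('router bgp 65001',), None)]

The patch itself edits lines_to_del in place (remove() followed by append()) rather than returning a new list, which keeps the list object shared with delete_move_lines() current; the sketch returns a copy purely for readability.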
@@ -937,6 +994,16 @@ def delete_move_lines(lines_to_add, lines_to_del):              if re_nb_remoteas:                  lines_to_del_to_app.append((ctx_keys, line)) +            # 'no neighbor peer [interface] peer-group <>' is in lines_to_del +            # copy the neighbor and look for all config removal lines associated +            # to neighbor and delete them from the lines_to_del +            re_nbr_pg = re.search('neighbor (\S+) .*peer-group (\S+)', line) +            if re_nbr_pg: +                if ctx_keys[0] not in del_nbr_dict: +                    del_nbr_dict[ctx_keys[0]] = list() +                if re_nbr_pg.group(1) not in del_nbr_dict[ctx_keys[0]]: +                    del_nbr_dict[ctx_keys[0]].append(re_nbr_pg.group(1)) +              # {'router bgp 65001': {'PG': [], 'PG1': []},              # 'router bgp 65001 vrf vrf1': {'PG': [], 'PG1': []}}              if ctx_keys[0] not in del_dict: @@ -948,6 +1015,9 @@ def delete_move_lines(lines_to_add, lines_to_del):                  found_pg_del_cmd = True      if found_pg_del_cmd == False: +        bgp_delete_inst_move_line(lines_to_del) +        if del_nbr_dict: +            bgp_remove_neighbor_cfg(lines_to_del, del_nbr_dict)          return (lines_to_add, lines_to_del)      for (ctx_keys, line) in lines_to_del_to_app: @@ -1001,6 +1071,8 @@ def delete_move_lines(lines_to_add, lines_to_del):          lines_to_del.remove((ctx_keys, line))          lines_to_del.append((ctx_keys, line)) +    bgp_delete_inst_move_line(lines_to_del) +      return (lines_to_add, lines_to_del) @@ -1752,7 +1824,7 @@ if __name__ == "__main__":      elif args.reload:          if not os.path.isdir("/var/log/frr/"): -            os.makedirs("/var/log/frr/") +            os.makedirs("/var/log/frr/", mode=0o0755)          logging.basicConfig(              filename="/var/log/frr/frr-reload.log", @@ -1814,6 +1886,7 @@ if __name__ == "__main__":          "ospfd",          "pbrd",          "pimd", +        "pim6d",          "ripd",          "ripngd",          "sharpd", diff --git a/tools/frr.in b/tools/frr.in index 889c075f81..27b2c0ab84 100755 --- a/tools/frr.in +++ b/tools/frr.in @@ -27,7 +27,7 @@ FRR_DEFAULT_PROFILE="@DFLT_NAME@" # traditional / datacenter  # Local Daemon selection may be done by using /etc/frr/daemons.  # See /usr/share/doc/frr/README.Debian.gz for further information.  # Keep zebra first and do not list watchfrr! 
-DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd" +DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"  MAX_INSTANCES=5  RELOAD_SCRIPT="$D_PATH/frr-reload.py" diff --git a/tools/frrcommon.sh.in b/tools/frrcommon.sh.in index d95f2d4be7..14b397555b 100644 --- a/tools/frrcommon.sh.in +++ b/tools/frrcommon.sh.in @@ -35,13 +35,20 @@ FRR_DEFAULT_PROFILE="@DFLT_NAME@" # traditional / datacenter  # - keep zebra first  # - watchfrr does NOT belong in this list -DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd" +DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"  RELOAD_SCRIPT="$D_PATH/frr-reload.py"  #  # general helpers  # +is_user_root () { +	[ "${EUID:-$(id -u)}" -eq 0 ] || { +		log_failure_msg "Only users having EUID=0 can start/stop daemons" +		return 1 +	} +} +  debug() {  	[ -n "$watchfrr_debug" ] || return 0 @@ -150,6 +157,8 @@ daemon_prep() {  daemon_start() {  	local dmninst daemon inst args instopt wrap bin +	is_user_root || exit 1 +  	all=false  	[ "$1" = "--all" ] && { all=true; shift; } @@ -183,6 +192,8 @@ daemon_stop() {  	local dmninst daemon inst pidfile vtyfile pid cnt fail  	daemon_inst "$1" +	is_user_root || exit 1 +  	pidfile="$V_PATH/$daemon${inst:+-$inst}.pid"  	vtyfile="$V_PATH/$daemon${inst:+-$inst}.vty" @@ -253,11 +264,11 @@ all_start() {  all_stop() {  	local pids reversed -	daemon_list daemons disabled -	[ "$1" = "--reallyall" ] && daemons="$daemons $disabled" +	daemon_list enabled_daemons disabled_daemons +	[ "$1" = "--reallyall" ] && enabled_daemons="$enabled_daemons $disabled_daemons"  	reversed="" -	for dmninst in $daemons; do +	for dmninst in $enabled_daemons; do  		reversed="$dmninst $reversed"  	done diff --git a/tools/frrinit.sh.in b/tools/frrinit.sh.in index df5f0853da..ee10b89e52 100644 --- a/tools/frrinit.sh.in +++ b/tools/frrinit.sh.in @@ -127,6 +127,9 @@ reload)  	;;  *) -	log_failure_msg "Unknown command: $1" >&2 +	echo "Usage:" +	echo "    ${0} <start|stop|restart|force-reload|reload|status> [namespace]" +	echo "    ${0} stop namespace1"  	exit 1 +	;;  esac diff --git a/vrrpd/vrrp_vty.c b/vrrpd/vrrp_vty.c index c11254c71a..634a55dbc3 100644 --- a/vrrpd/vrrp_vty.c +++ b/vrrpd/vrrp_vty.c @@ -503,11 +503,11 @@ static void vrrp_show(struct vty *vty, struct vrrp_vrouter *vr)  		       vr->accept_mode ? 
"Yes" : "No");  	ttable_add_row(tt, "%s|%d ms", "Advertisement Interval",  		       vr->advertisement_interval * CS2MS); -	ttable_add_row(tt, "%s|%d ms", -		       "Master Advertisement Interval (v4)", +	ttable_add_row(tt, "%s|%d ms (stale)", +		       "Master Advertisement Interval (v4) Rx",  		       vr->v4->master_adver_interval * CS2MS); -	ttable_add_row(tt, "%s|%d ms", -		       "Master Advertisement Interval (v6)", +	ttable_add_row(tt, "%s|%d ms (stale)", +		       "Master Advertisement Interval (v6) Rx",  		       vr->v6->master_adver_interval * CS2MS);  	ttable_add_row(tt, "%s|%u", "Advertisements Tx (v4)",  		       vr->v4->stats.adver_tx_cnt); diff --git a/vtysh/extract.pl.in b/vtysh/extract.pl.in index 07819ea76d..228a136b71 100755 --- a/vtysh/extract.pl.in +++ b/vtysh/extract.pl.in @@ -116,9 +116,9 @@ sub scan_file {  	}          elsif ($file =~ /lib\/plist\.c$/) {              if ($defun_array[1] =~ m/ipv6/) { -                $protocol = "VTYSH_RIPNGD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ZEBRA|VTYSH_BABELD|VTYSH_ISISD|VTYSH_FABRICD"; +                $protocol = "VTYSH_RIPNGD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ZEBRA|VTYSH_PIM6D|VTYSH_BABELD|VTYSH_ISISD|VTYSH_FABRICD";              } else { -                $protocol = "VTYSH_RIPD|VTYSH_OSPFD|VTYSH_BGPD|VTYSH_ZEBRA|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_ISISD|VTYSH_FABRICD"; +                $protocol = "VTYSH_RIPD|VTYSH_OSPFD|VTYSH_BGPD|VTYSH_ZEBRA|VTYSH_PIMD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_ISISD|VTYSH_FABRICD";              }          }          elsif ($file =~ /lib\/if_rmap\.c$/) { diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c index ed1f1fb5bb..74886254bd 100644 --- a/vtysh/vtysh.c +++ b/vtysh/vtysh.c @@ -2955,6 +2955,7 @@ DEFUN (show_yang_operational_data,           [{\  	   format <json|xml>\  	   |translate WORD\ +	   |with-config\  	 }]" DAEMONS_LIST,         SHOW_STR         "YANG information\n" @@ -2965,6 +2966,7 @@ DEFUN (show_yang_operational_data,         "Extensible Markup Language\n"         "Translate operational data\n"         "YANG module translator\n" +       "Merge configuration data\n"         DAEMONS_STR)  {  	return show_one_daemon(vty, argv, argc - 1, argv[argc - 1]->text); @@ -3202,7 +3204,7 @@ DEFUN (vtysh_write_terminal,         "Skip \"Building configuration...\" header\n")  {  	unsigned int i; -	char line[] = "do write terminal\n"; +	char line[] = "do write terminal";  	if (!strcmp(argv[argc - 1]->arg, "no-header"))  		argc--; @@ -3281,7 +3283,7 @@ static void backup_config_file(const char *fbackup)  int vtysh_write_config_integrated(void)  {  	unsigned int i; -	char line[] = "do write terminal\n"; +	char line[] = "do write terminal";  	FILE *fp;  	int fd;  #ifdef FRR_USER @@ -3359,6 +3361,18 @@ int vtysh_write_config_integrated(void)  		err++;  	} +	if (fflush(fp) != 0) { +		printf("%% Warning: fflush() failed on %s: %s\n", frr_config, +		       safe_strerror(errno)); +		err++; +	} + +	if (fsync(fd) < 0) { +		printf("%% Warning: fsync() failed on %s: %s\n", frr_config, +		       safe_strerror(errno)); +		err++; +	} +  	fclose(fp);  	printf("Integrated configuration saved to %s\n", frr_config); @@ -3394,12 +3408,12 @@ DEFUN (vtysh_write_memory,         "Write configuration to the file (same as write memory)\n")  {  	int ret = CMD_SUCCESS; -	char line[] = "do write memory\n"; +	char line[] = "do write memory";  	unsigned int i;  	vty_out(vty, "Note: this version of vtysh never writes vtysh.conf\n"); -	/* If integrated frr.conf explicitely set. */ +	/* If integrated frr.conf explicitly set. 
*/  	if (want_config_integrated()) {  		ret = CMD_WARNING_CONFIG_FAILED; @@ -3721,7 +3735,7 @@ DEFPY (vtysh_terminal_monitor,         "Receive log messages to active VTY session\n"         DAEMONS_STR)  { -	static const char line[] = "terminal monitor\n"; +	static const char line[] = "terminal monitor";  	int ret_all = CMD_SUCCESS, ret, fd;  	size_t i, ok = 0; @@ -3773,7 +3787,7 @@ DEFPY (no_vtysh_terminal_monitor,         "Receive log messages to active VTY session\n"         DAEMONS_STR)  { -	static const char line[] = "no terminal monitor\n"; +	static const char line[] = "no terminal monitor";  	int ret_all = CMD_SUCCESS, ret;  	size_t i, ok = 0; diff --git a/watchfrr/watchfrr.c b/watchfrr/watchfrr.c index 4f50da380c..51e4f802c9 100644 --- a/watchfrr/watchfrr.c +++ b/watchfrr/watchfrr.c @@ -54,6 +54,7 @@  #define DEFAULT_LOGLEVEL	LOG_INFO  #define DEFAULT_MIN_RESTART	60  #define DEFAULT_MAX_RESTART	600 +#define DEFAULT_OPERATIONAL_TIMEOUT 60  #define DEFAULT_RESTART_CMD	WATCHFRR_SH_PATH " restart %s"  #define DEFAULT_START_CMD	WATCHFRR_SH_PATH " start %s" @@ -70,14 +71,14 @@ struct thread_master *master;  static bool watch_only = false;  const char *pathspace; -typedef enum { +enum restart_phase {  	PHASE_NONE = 0,  	PHASE_INIT,  	PHASE_STOPS_PENDING,  	PHASE_WAITING_DOWN,  	PHASE_ZEBRA_RESTART_PENDING,  	PHASE_WAITING_ZEBRA_UP -} restart_phase_t; +};  static const char *const phase_str[] = {  	"Idle", @@ -103,15 +104,17 @@ struct restart_info {  };  static struct global_state { -	restart_phase_t phase; +	enum restart_phase phase;  	struct thread *t_phase_hanging;  	struct thread *t_startup_timeout; +	struct thread *t_operational;  	const char *vtydir;  	long period;  	long timeout;  	long restart_timeout;  	long min_restart_interval;  	long max_restart_interval; +	long operational_timeout;  	struct daemon *daemons;  	const char *restart_command;  	const char *start_command; @@ -131,18 +134,19 @@ static struct global_state {  	.loglevel = DEFAULT_LOGLEVEL,  	.min_restart_interval = DEFAULT_MIN_RESTART,  	.max_restart_interval = DEFAULT_MAX_RESTART, +	.operational_timeout = DEFAULT_OPERATIONAL_TIMEOUT,  	.restart_command = DEFAULT_RESTART_CMD,  	.start_command = DEFAULT_START_CMD,  	.stop_command = DEFAULT_STOP_CMD,  }; -typedef enum { +enum daemon_state {  	DAEMON_INIT,  	DAEMON_DOWN,  	DAEMON_CONNECTING,  	DAEMON_UP,  	DAEMON_UNRESPONSIVE -} daemon_state_t; +};  #define IS_UP(DMN)                                                             \  	(((DMN)->state == DAEMON_UP) || ((DMN)->state == DAEMON_UNRESPONSIVE)) @@ -153,7 +157,7 @@ static const char *const state_str[] = {  struct daemon {  	const char *name; -	daemon_state_t state; +	enum daemon_state state;  	int fd;  	struct timeval echo_sent;  	unsigned int connect_tries; @@ -177,6 +181,7 @@ struct daemon {  #define OPTION_MAXRESTART 2001  #define OPTION_DRY        2002  #define OPTION_NETNS      2003 +#define OPTION_MAXOPERATIONAL 2004  static const struct option longopts[] = {  	{"daemon", no_argument, NULL, 'd'}, @@ -191,6 +196,7 @@ static const struct option longopts[] = {  	{"dry", no_argument, NULL, OPTION_DRY},  	{"min-restart-interval", required_argument, NULL, OPTION_MINRESTART},  	{"max-restart-interval", required_argument, NULL, OPTION_MAXRESTART}, +	{"operational-timeout", required_argument, NULL, OPTION_MAXOPERATIONAL},  	{"pid-file", required_argument, NULL, 'p'},  	{"blank-string", required_argument, NULL, 'b'},  #ifdef GNU_LINUX @@ -265,6 +271,9 @@ Otherwise, the interval is doubled (but capped at the -M 
value).\n\n",      --max-restart-interval\n\  		Set the maximum seconds to wait between invocations of daemon\n\  		restart commands (default is %d).\n\ +    --operational-timeout\n\ +                Set the time before systemd is notified that we are considered\n\ +                operational again after a daemon restart (default is %d).\n\  -i, --interval	Set the status polling interval in seconds (default is %d)\n\  -t, --timeout	Set the unresponsiveness timeout in seconds (default is %d)\n\  -T, --restart-timeout\n\ @@ -296,10 +305,10 @@ Otherwise, the interval is doubled (but capped at the -M value).\n\n",  -v, --version	Print program version\n\  -h, --help	Display this help and exit\n",  		frr_vtydir, DEFAULT_LOGLEVEL, LOG_EMERG, LOG_DEBUG, LOG_DEBUG, -		DEFAULT_MIN_RESTART, DEFAULT_MAX_RESTART, DEFAULT_PERIOD, -		DEFAULT_TIMEOUT, DEFAULT_RESTART_TIMEOUT, -		DEFAULT_RESTART_CMD, DEFAULT_START_CMD, DEFAULT_STOP_CMD, -		frr_vtydir); +		DEFAULT_MIN_RESTART, DEFAULT_MAX_RESTART, +		DEFAULT_OPERATIONAL_TIMEOUT, DEFAULT_PERIOD, DEFAULT_TIMEOUT, +		DEFAULT_RESTART_TIMEOUT, DEFAULT_RESTART_CMD, DEFAULT_START_CMD, +		DEFAULT_STOP_CMD, frr_vtydir);  }  static pid_t run_background(char *shell_cmd) @@ -502,8 +511,6 @@ static int run_job(struct restart_info *restart, const char *cmdtype,  			restart->pid = 0;  	} -	systemd_send_status("FRR Operational"); -  	/* Calculate the new restart interval. */  	if (update_interval) {  		if (delay.tv_sec > 2 * gs.max_restart_interval) @@ -584,6 +591,11 @@ static void restart_done(struct daemon *dmn)  		SET_WAKEUP_DOWN(dmn);  } +static void daemon_restarting_operational(struct thread *thread) +{ +	systemd_send_status("FRR Operational"); +} +  static void daemon_down(struct daemon *dmn, const char *why)  {  	if (IS_UP(dmn) || (dmn->state == DAEMON_INIT)) @@ -603,6 +615,8 @@ static void daemon_down(struct daemon *dmn, const char *why)  	THREAD_OFF(dmn->t_wakeup);  	if (try_connect(dmn) < 0)  		SET_WAKEUP_DOWN(dmn); + +	systemd_send_status("FRR partially operational");  	phase_check();  } @@ -721,8 +735,15 @@ static void daemon_up(struct daemon *dmn, const char *why)  	gs.numdown--;  	dmn->connect_tries = 0;  	zlog_notice("%s state -> up : %s", dmn->name, why); -	if (gs.numdown == 0) +	if (gs.numdown == 0) {  		daemon_send_ready(0); + +		THREAD_OFF(gs.t_operational); + +		thread_add_timer(master, daemon_restarting_operational, NULL, +				 gs.operational_timeout, &gs.t_operational); +	} +  	SET_WAKEUP_ECHO(dmn);  	phase_check();  } @@ -848,7 +869,7 @@ static void phase_hanging(struct thread *t_hanging)  	gs.phase = PHASE_NONE;  } -static void set_phase(restart_phase_t new_phase) +static void set_phase(enum restart_phase new_phase)  {  	gs.phase = new_phase;  	thread_cancel(&gs.t_phase_hanging); @@ -889,6 +910,7 @@ static void phase_check(void)  	case PHASE_WAITING_DOWN:  		if (gs.numdown + IS_UP(gs.special) < gs.numdaemons)  			break; +		systemd_send_status("Phased Restart");  		zlog_info("Phased restart: all routing daemons now down.");  		run_job(&gs.special->restart, "restart", gs.restart_command, 1,  			1); @@ -898,6 +920,7 @@ static void phase_check(void)  	case PHASE_ZEBRA_RESTART_PENDING:  		if (gs.special->restart.pid)  			break; +		systemd_send_status("Zebra Restarting");  		zlog_info("Phased restart: %s restart job completed.",  			  gs.special->name);  		set_phase(PHASE_WAITING_ZEBRA_UP); @@ -1030,6 +1053,12 @@ void watchfrr_status(struct vty *vty)  	struct timeval delay;  	vty_out(vty, "watchfrr global phase: %s\n", phase_str[gs.phase]); +	
vty_out(vty, " Restart Command: %pSQq\n", gs.restart_command); +	vty_out(vty, " Start Command: %pSQq\n", gs.start_command); +	vty_out(vty, " Stop Command: %pSQq\n", gs.stop_command); +	vty_out(vty, " Min Restart Interval: %ld\n", gs.min_restart_interval); +	vty_out(vty, " Max Restart Interval: %ld\n", gs.max_restart_interval); +	vty_out(vty, " Restart Timeout: %ld\n", gs.restart_timeout);  	if (gs.restart.pid)  		vty_out(vty, "    global restart running, pid %ld\n",  			(long)gs.restart.pid); @@ -1395,6 +1424,18 @@ int main(int argc, char **argv)  				frr_help_exit(1);  			}  		} break; +		case OPTION_MAXOPERATIONAL: { +			char garbage[3]; + +			if ((sscanf(optarg, "%ld%1s", &gs.operational_timeout, +				    garbage) != 1) || +			    (gs.max_restart_interval < 0)) { +				fprintf(stderr, +					"Invalid Operational_timeout argument: %s\n", +					optarg); +				frr_help_exit(1); +			} +		} break;  		case OPTION_NETNS:  			netns_en = true;  			if (optarg && strchr(optarg, '/')) { diff --git a/yang/frr-bgp-bmp.yang b/yang/frr-bgp-bmp.yang index cf945cabef..8c3de839b6 100644 --- a/yang/frr-bgp-bmp.yang +++ b/yang/frr-bgp-bmp.yang @@ -86,7 +86,7 @@ submodule frr-bgp-bmp {            type uint32 {              range "100..86400000";            } -          units "miliseconds"; +          units "milliseconds";            default "30000";            description              "Minimum connection retry interval."; @@ -96,7 +96,7 @@ submodule frr-bgp-bmp {            type uint32 {              range "100..86400000";            } -          units "miliseconds"; +          units "milliseconds";            default "720000";            description              "Maximum connection retry interval."; @@ -170,7 +170,7 @@ submodule frr-bgp-bmp {            type uint32 {              range "100..86400000";            } -          units "miliseconds"; +          units "milliseconds";            description              "Interval to send BMP Stats.";          } diff --git a/yang/frr-bgp-common-structure.yang b/yang/frr-bgp-common-structure.yang index 3378c10c03..4c12b956c4 100644 --- a/yang/frr-bgp-common-structure.yang +++ b/yang/frr-bgp-common-structure.yang @@ -159,7 +159,7 @@ submodule frr-bgp-common-structure {          type uint16 {            range "50..60000";          } -        units "miliseconds"; +        units "milliseconds";          default "300";          description            "Required min receive interval."; @@ -170,7 +170,7 @@ submodule frr-bgp-common-structure {          type uint16 {            range "50..60000";          } -        units "miliseconds"; +        units "milliseconds";          default "300";          description            "Desired min transmit interval."; diff --git a/yang/frr-bgp-common.yang b/yang/frr-bgp-common.yang index 157e4cd614..2b1babdd28 100644 --- a/yang/frr-bgp-common.yang +++ b/yang/frr-bgp-common.yang @@ -444,7 +444,7 @@ submodule frr-bgp-common {          type uint32 {            range "0..4294967295";          } -        units "miliseconds"; +        units "milliseconds";          default "1000";          description            "Configures the Subgroup coalesce timer."; diff --git a/yang/frr-bgp-route-map.yang b/yang/frr-bgp-route-map.yang index 74008bc078..eaa7891f0c 100644 --- a/yang/frr-bgp-route-map.yang +++ b/yang/frr-bgp-route-map.yang @@ -282,6 +282,12 @@ module frr-bgp-route-map {        "Set the BGP AS-path attribute";    } +  identity as-path-replace { +    base frr-route-map:rmap-set-type; +    description +      "Replace ASNs to local AS number"; +  
} +    identity set-community {      base frr-route-map:rmap-set-type;      description @@ -793,6 +799,15 @@ module frr-bgp-route-map {        }      } +    case as-path-replace { +      when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:as-path-replace')"; +      leaf replace-as-path { +        type string; +        description +          "Replace ASNs to local AS number"; +      } +    } +      case community {        when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:set-community')";        choice community { diff --git a/yang/frr-gmp.yang b/yang/frr-gmp.yang index f48fcd45f1..298b2aafda 100644 --- a/yang/frr-gmp.yang +++ b/yang/frr-gmp.yang @@ -96,6 +96,7 @@ module frr-gmp {        type uint8 {          range "1..2";        } +      default "2";        description          "MLD version.";      } diff --git a/zebra/connected.c b/zebra/connected.c index 4f4e8be34b..eb2720335e 100644 --- a/zebra/connected.c +++ b/zebra/connected.c @@ -327,6 +327,8 @@ void connected_add_ipv4(struct interface *ifp, int flags,  	/* If we get a notification from the kernel,  	 * we can safely assume the address is known to the kernel */  	SET_FLAG(ifc->conf, ZEBRA_IFC_QUEUED); +	if (!if_is_operative(ifp)) +		SET_FLAG(ifc->conf, ZEBRA_IFC_DOWN);  	/* Allocate new connected address. */  	p = prefix_ipv4_new(); @@ -548,6 +550,8 @@ void connected_add_ipv6(struct interface *ifp, int flags,  	/* If we get a notification from the kernel,  	 * we can safely assume the address is known to the kernel */  	SET_FLAG(ifc->conf, ZEBRA_IFC_QUEUED); +	if (!if_is_operative(ifp)) +		SET_FLAG(ifc->conf, ZEBRA_IFC_DOWN);  	/* Allocate new connected address. */  	p = prefix_ipv6_new(); diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c index 873aea236d..ffd52da8d8 100644 --- a/zebra/if_netlink.c +++ b/zebra/if_netlink.c @@ -75,6 +75,7 @@  #include "zebra/zebra_evpn_mh.h"  #include "zebra/zebra_l2.h"  #include "zebra/netconf_netlink.h" +#include "zebra/zebra_trace.h"  extern struct zebra_privs_t zserv_privs;  uint8_t frr_protodown_r_bit = FRR_PROTODOWN_REASON_DEFAULT_BIT; @@ -965,6 +966,8 @@ static int netlink_interface(struct nlmsghdr *h, ns_id_t ns_id, int startup)  	ns_id_t link_nsid = ns_id;  	uint8_t bypass = 0; +	frrtrace(3, frr_zebra, netlink_interface, h, ns_id, startup); +  	zns = zebra_ns_lookup(ns_id);  	ifi = NLMSG_DATA(h); @@ -1119,6 +1122,9 @@ static int netlink_request_intf_addr(struct nlsock *netlink_cmd, int family,  		char buf[256];  	} req; +	frrtrace(4, frr_zebra, netlink_request_intf_addr, netlink_cmd, family, +		 type, filter_mask); +  	/* Form the request, specifying filter (rtattr) if needed. */  	memset(&req, 0, sizeof(req));  	req.n.nlmsg_type = type; @@ -1373,6 +1379,8 @@ int netlink_interface_addr(struct nlmsghdr *h, ns_id_t ns_id, int startup)  	uint32_t metric = METRIC_MAX;  	uint32_t kernel_flags = 0; +	frrtrace(3, frr_zebra, netlink_interface_addr, h, ns_id, startup); +  	zns = zebra_ns_lookup(ns_id);  	ifa = NLMSG_DATA(h); diff --git a/zebra/interface.c b/zebra/interface.c index a70326ebb3..677ec4650f 100644 --- a/zebra/interface.c +++ b/zebra/interface.c @@ -958,7 +958,7 @@ void if_nbr_mac_to_ipv4ll_neigh_update(struct interface *ifp,  	/*  	 * We need to note whether or not we originated a v6  	 * neighbor entry for this interface.  
So that when -	 * someone unwisely accidently deletes this entry +	 * someone unwisely accidentally deletes this entry  	 * we can shove it back in.  	 */  	zif->v6_2_v4_ll_neigh_entry = !!add; diff --git a/zebra/rib.h b/zebra/rib.h index c8abfaf023..281791d1f8 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -424,6 +424,9 @@ extern struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id,  extern struct route_entry *rib_match_ipv4_multicast(vrf_id_t vrf_id,  						    struct in_addr addr,  						    struct route_node **rn_out); +extern struct route_entry *rib_match_ipv6_multicast(vrf_id_t vrf_id, +						    struct in6_addr addr, +						    struct route_node **rn_out);  extern struct route_entry *rib_lookup_ipv4(struct prefix_ipv4 *p,  					   vrf_id_t vrf_id); diff --git a/zebra/router-id.c b/zebra/router-id.c index ea438b4367..9f56cf0e6a 100644 --- a/zebra/router-id.c +++ b/zebra/router-id.c @@ -342,7 +342,7 @@ DEFUN (ip_router_id_in_vrf,         "Manually set the router-id\n"         "IP address to use for router-id\n")  { -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	int idx = 0;  	struct prefix rid; @@ -372,7 +372,7 @@ DEFUN (ipv6_router_id_in_vrf,         "Manually set the IPv6 router-id\n"         "IPV6 address to use for router-id\n")  { -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	int idx = 0;  	struct prefix rid; @@ -458,7 +458,7 @@ DEFUN (no_ip_router_id_in_vrf,         "Remove the manually configured router-id\n"         "IP address to use for router-id\n")  { -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	struct prefix rid; @@ -486,7 +486,7 @@ DEFUN (no_ipv6_router_id_in_vrf,         "Remove the manually configured IPv6 router-id\n"         "IPv6 address to use for router-id\n")  { -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	struct prefix rid; diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index f2cf9122fa..4e97323cf6 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -79,6 +79,7 @@  #include "zebra/zebra_vxlan.h"  #include "zebra/zebra_errors.h"  #include "zebra/zebra_evpn_mh.h" +#include "zebra/zebra_trace.h"  #ifndef AF_MPLS  #define AF_MPLS 28 @@ -700,6 +701,9 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,  	void *src = NULL;     /* IPv6 srcdest   source prefix */  	enum blackhole_type bh_type = BLACKHOLE_UNSPEC; +	frrtrace(3, frr_zebra, netlink_route_change_read_unicast, h, ns_id, +		 startup); +  	rtm = NLMSG_DATA(h);  	if (startup && h->nlmsg_type != RTM_NEWROUTE) @@ -1041,11 +1045,23 @@ static int netlink_route_change_read_multicast(struct nlmsghdr *h,  	if (tb[RTA_IIF])  		iif = *(int *)RTA_DATA(tb[RTA_IIF]); -	if (tb[RTA_SRC]) -		m->sg.src = *(struct in_addr *)RTA_DATA(tb[RTA_SRC]); +	if (tb[RTA_SRC]) { +		if (rtm->rtm_family == RTNL_FAMILY_IPMR) +			m->src.ipaddr_v4 = +				*(struct in_addr *)RTA_DATA(tb[RTA_SRC]); +		else +			m->src.ipaddr_v6 = +				*(struct in6_addr *)RTA_DATA(tb[RTA_SRC]); +	} -	if (tb[RTA_DST]) -		m->sg.grp = *(struct in_addr *)RTA_DATA(tb[RTA_DST]); +	if (tb[RTA_DST]) { +		if (rtm->rtm_family == RTNL_FAMILY_IPMR) +			m->grp.ipaddr_v4 = +				*(struct in_addr *)RTA_DATA(tb[RTA_DST]); +		else +			m->grp.ipaddr_v6 = +				*(struct in6_addr *)RTA_DATA(tb[RTA_DST]); +	}  	if (tb[RTA_EXPIRES])  		m->lastused = *(unsigned long long *)RTA_DATA(tb[RTA_EXPIRES]); @@ -1070,6 +1086,17 @@ static int netlink_route_change_read_multicast(struct nlmsghdr *h,  		}  	} +	if 
(rtm->rtm_family == RTNL_FAMILY_IPMR) { +		SET_IPADDR_V4(&m->src); +		SET_IPADDR_V4(&m->grp); +	} else if (rtm->rtm_family == RTNL_FAMILY_IP6MR) { +		SET_IPADDR_V6(&m->src); +		SET_IPADDR_V6(&m->grp); +	} else { +		zlog_warn("%s: Invalid rtm_family received", __func__); +		return 0; +	} +  	if (IS_ZEBRA_DEBUG_KERNEL) {  		struct interface *ifp = NULL;  		struct zebra_vrf *zvrf = NULL; @@ -1085,11 +1112,10 @@ static int netlink_route_change_read_multicast(struct nlmsghdr *h,  		zvrf = zebra_vrf_lookup_by_id(vrf);  		ifp = if_lookup_by_index(iif, vrf);  		zlog_debug( -			"MCAST VRF: %s(%d) %s (%pI4,%pI4) IIF: %s(%d) OIF: %s jiffies: %lld", +			"MCAST VRF: %s(%d) %s (%pIA,%pIA) IIF: %s(%d) OIF: %s jiffies: %lld",  			zvrf_name(zvrf), vrf, nl_msg_type_to_str(h->nlmsg_type), -			&m->sg.src, &m->sg.grp, ifp ? ifp->name : "Unknown", -			iif, oif_list, -			m->lastused); +			&m->src, &m->grp, ifp ? ifp->name : "Unknown", iif, +			oif_list, m->lastused);  	}  	return 0;  } @@ -2259,13 +2285,29 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in)  	req.n.nlmsg_flags = NLM_F_REQUEST;  	req.n.nlmsg_pid = zns->netlink_cmd.snl.nl_pid; -	req.ndm.ndm_family = RTNL_FAMILY_IPMR;  	req.n.nlmsg_type = RTM_GETROUTE;  	nl_attr_put32(&req.n, sizeof(req), RTA_IIF, mroute->ifindex);  	nl_attr_put32(&req.n, sizeof(req), RTA_OIF, mroute->ifindex); -	nl_attr_put32(&req.n, sizeof(req), RTA_SRC, mroute->sg.src.s_addr); -	nl_attr_put32(&req.n, sizeof(req), RTA_DST, mroute->sg.grp.s_addr); + +	if (mroute->family == AF_INET) { +		req.ndm.ndm_family = RTNL_FAMILY_IPMR; +		nl_attr_put(&req.n, sizeof(req), RTA_SRC, +			    &mroute->src.ipaddr_v4, +			    sizeof(mroute->src.ipaddr_v4)); +		nl_attr_put(&req.n, sizeof(req), RTA_DST, +			    &mroute->grp.ipaddr_v4, +			    sizeof(mroute->grp.ipaddr_v4)); +	} else { +		req.ndm.ndm_family = RTNL_FAMILY_IP6MR; +		nl_attr_put(&req.n, sizeof(req), RTA_SRC, +			    &mroute->src.ipaddr_v6, +			    sizeof(mroute->src.ipaddr_v6)); +		nl_attr_put(&req.n, sizeof(req), RTA_DST, +			    &mroute->grp.ipaddr_v6, +			    sizeof(mroute->grp.ipaddr_v6)); +	} +  	/*  	 * What?  	 
* @@ -2906,6 +2948,8 @@ int netlink_nexthop_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)  	uint8_t grp_count = 0;  	struct rtattr *tb[NHA_MAX + 1] = {}; +	frrtrace(3, frr_zebra, netlink_nexthop_change, h, ns_id, startup); +  	nhm = NLMSG_DATA(h);  	if (ns_id) @@ -3273,7 +3317,7 @@ static int netlink_macfdb_change(struct nlmsghdr *h, int len, ns_id_t ns_id)  	memcpy(&mac, RTA_DATA(tb[NDA_LLADDR]), ETH_ALEN); -	if ((NDA_VLAN <= NDA_MAX) && tb[NDA_VLAN]) { +	if (tb[NDA_VLAN]) {  		vid_present = 1;  		vid = *(uint16_t *)RTA_DATA(tb[NDA_VLAN]);  		snprintf(vid_buf, sizeof(vid_buf), " VLAN %u", vid); @@ -3802,7 +3846,7 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id)  	 * interface  	 * and is linked to the bridge  	 * (b) In the case of a VLAN-unaware bridge, the SVI is the bridge -	 * inteface +	 * interface  	 * itself  	 */  	if (IS_ZEBRA_IF_VLAN(ifp)) { diff --git a/zebra/rtadv.c b/zebra/rtadv.c index 2ce5072945..ca833999cb 100644 --- a/zebra/rtadv.c +++ b/zebra/rtadv.c @@ -490,9 +490,11 @@ static void rtadv_timer(struct thread *thread)  	RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id)  		FOR_ALL_INTERFACES (vrf, ifp) { -			if (if_is_loopback(ifp) || !if_is_operative(ifp) -			    || (vrf_is_backend_netns() -				&& ifp->vrf->vrf_id != zvrf->vrf->vrf_id)) +			if (if_is_loopback(ifp) || !if_is_operative(ifp) || +			    IS_ZEBRA_IF_BRIDGE_SLAVE(ifp) || +			    !connected_get_linklocal(ifp) || +			    (vrf_is_backend_netns() && +			     ifp->vrf->vrf_id != zvrf->vrf->vrf_id))  				continue;  			zif = ifp->info; diff --git a/zebra/rule_netlink.c b/zebra/rule_netlink.c index fbf2620375..135f065428 100644 --- a/zebra/rule_netlink.c +++ b/zebra/rule_netlink.c @@ -42,6 +42,7 @@  #include "zebra/zebra_pbr.h"  #include "zebra/zebra_errors.h"  #include "zebra/zebra_dplane.h" +#include "zebra/zebra_trace.h"  /* definitions */ @@ -243,6 +244,8 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)  	uint8_t proto = 0;  	uint8_t ip_proto = 0; +	frrtrace(3, frr_zebra, netlink_rule_change, h, ns_id, startup); +  	/* Basic validation followed by extracting attributes. */  	if (h->nlmsg_type != RTM_NEWRULE && h->nlmsg_type != RTM_DELRULE)  		return 0; diff --git a/zebra/subdir.am b/zebra/subdir.am index 8cb1237c22..a09b895cee 100644 --- a/zebra/subdir.am +++ b/zebra/subdir.am @@ -50,7 +50,7 @@ man8 += $(MANBUILD)/frr-zebra.8  ## endif ZEBRA  endif -zebra_zebra_LDADD = lib/libfrr.la $(LIBCAP) +zebra_zebra_LDADD = lib/libfrr.la $(LIBCAP) $(UST_LIBS)  if HAVE_PROTOBUF3  zebra_zebra_LDADD += mlag/libmlag_pb.la $(PROTOBUF_C_LIBS)  zebra/zebra_mlag.$(OBJEXT): mlag/mlag.pb-c.h @@ -120,6 +120,7 @@ zebra_zebra_SOURCES = \  	zebra/zebra_routemap_nb_config.c \  	zebra/zebra_script.c \  	zebra/zebra_srte.c \ +	zebra/zebra_trace.c \  	zebra/zebra_vrf.c \  	zebra/zebra_vty.c \  	zebra/zebra_vxlan.c \ @@ -191,6 +192,7 @@ noinst_HEADERS += \  	zebra/zebra_router.h \  	zebra/zebra_script.h \  	zebra/zebra_srte.h \ +	zebra/zebra_trace.h \  	zebra/zebra_vrf.h \  	zebra/zebra_vxlan.h \  	zebra/zebra_vxlan_private.h \ diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index fd475e4cee..e996f6c956 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -659,11 +659,18 @@ int zsend_redistribute_route(int cmd, struct zserv *client,   * Modified version of zsend_ipv4_nexthop_lookup(): Query unicast rib if   * nexthop is not found on mrib. Returns both route metric and protocol   * distance. 
+ * + * *XXX* this ZAPI call is slated to be removed at some point in the future + * since MRIB support in PIM is hopelessly broken in its interactions with NHT. + * The plan is to make pimd use NHT to receive URIB and MRIB in parallel and + * make the decision there, which will obsolete this ZAPI op. + * (Otherwise we would need to implement sending NHT updates for the result of + * this "URIB-MRIB-combined" table, but we only decide that here on the fly, + * so it'd be rather complex to do NHT for.)   */ -static int zsend_ipv4_nexthop_lookup_mrib(struct zserv *client, -					  struct in_addr addr, -					  struct route_entry *re, -					  struct zebra_vrf *zvrf) +static int zsend_nexthop_lookup_mrib(struct zserv *client, struct ipaddr *addr, +				     struct route_entry *re, +				     struct zebra_vrf *zvrf)  {  	struct stream *s;  	unsigned long nump; @@ -675,8 +682,8 @@ static int zsend_ipv4_nexthop_lookup_mrib(struct zserv *client,  	stream_reset(s);  	/* Fill in result. */ -	zclient_create_header(s, ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB, zvrf_id(zvrf)); -	stream_put_in_addr(s, &addr); +	zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP_MRIB, zvrf_id(zvrf)); +	stream_put_ipaddr(s, addr);  	if (re) {  		struct nexthop_group *nhg; @@ -2229,14 +2236,28 @@ static void zread_route_del(ZAPI_HANDLER_ARGS)  }  /* MRIB Nexthop lookup for IPv4. */ -static void zread_ipv4_nexthop_lookup_mrib(ZAPI_HANDLER_ARGS) +static void zread_nexthop_lookup_mrib(ZAPI_HANDLER_ARGS)  { -	struct in_addr addr; -	struct route_entry *re; +	struct ipaddr addr; +	struct route_entry *re = NULL; + +	STREAM_GET_IPADDR(msg, &addr); + +	switch (addr.ipa_type) { +	case IPADDR_V4: +		re = rib_match_ipv4_multicast(zvrf_id(zvrf), addr.ipaddr_v4, +					      NULL); +		break; +	case IPADDR_V6: +		re = rib_match_ipv6_multicast(zvrf_id(zvrf), addr.ipaddr_v6, +					      NULL); +		break; +	case IPADDR_NONE: +		/* ??? 
*/ +		goto stream_failure; +	} -	STREAM_GET(&addr.s_addr, msg, IPV4_MAX_BYTELEN); -	re = rib_match_ipv4_multicast(zvrf_id(zvrf), addr, NULL); -	zsend_ipv4_nexthop_lookup_mrib(client, addr, re, zvrf); +	zsend_nexthop_lookup_mrib(client, &addr, re, zvrf);  stream_failure:  	return; @@ -3685,7 +3706,7 @@ void (*const zserv_handlers[])(ZAPI_HANDLER_ARGS) = {  	[ZEBRA_REDISTRIBUTE_DELETE] = zebra_redistribute_delete,  	[ZEBRA_REDISTRIBUTE_DEFAULT_ADD] = zebra_redistribute_default_add,  	[ZEBRA_REDISTRIBUTE_DEFAULT_DELETE] = zebra_redistribute_default_delete, -	[ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB] = zread_ipv4_nexthop_lookup_mrib, +	[ZEBRA_NEXTHOP_LOOKUP_MRIB] = zread_nexthop_lookup_mrib,  	[ZEBRA_HELLO] = zread_hello,  	[ZEBRA_NEXTHOP_REGISTER] = zread_rnh_register,  	[ZEBRA_NEXTHOP_UNREGISTER] = zread_rnh_unregister, diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c index d034c8f306..4e753c9d1a 100644 --- a/zebra/zebra_dplane.c +++ b/zebra/zebra_dplane.c @@ -2679,7 +2679,7 @@ done:  }  /** - * dplane_ctx_intf_init() - Initialize a context block for a inteface update + * dplane_ctx_intf_init() - Initialize a context block for a interface update   *   * @ctx:	Dataplane context to init   * @op:		Operation being performed diff --git a/zebra/zebra_evpn.c b/zebra/zebra_evpn.c index d9d21462fb..d223a21eda 100644 --- a/zebra/zebra_evpn.c +++ b/zebra/zebra_evpn.c @@ -649,10 +649,9 @@ static int zebra_evpn_map_vlan_ns(struct ns *ns,  	struct zebra_l2info_vxlan *vxl = NULL;  	struct zebra_from_svi_param *in_param =  		(struct zebra_from_svi_param *)_in_param; -	int found = 0; -	if (!in_param) -		return NS_WALK_STOP; +	assert(p_zevpn && in_param); +  	br_if = in_param->br_if;  	zif = in_param->zif;  	assert(zif); @@ -676,17 +675,13 @@ static int zebra_evpn_map_vlan_ns(struct ns *ns,  		if (!in_param->bridge_vlan_aware  		    || vxl->access_vlan == in_param->vid) { -			found = 1; -			break; +			zevpn = zebra_evpn_lookup(vxl->vni); +			*p_zevpn = zevpn; +			return NS_WALK_STOP;  		}  	} -	if (!found) -		return NS_WALK_CONTINUE; -	zevpn = zebra_evpn_lookup(vxl->vni); -	if (p_zevpn) -		*p_zevpn = zevpn; -	return NS_WALK_STOP; +	return NS_WALK_CONTINUE;  }  /* @@ -831,8 +826,7 @@ static int zvni_map_to_macvlan_ns(struct ns *ns,  	struct interface *tmp_if = NULL;  	struct zebra_if *zif; -	if (!in_param) -		return NS_WALK_STOP; +	assert(in_param && p_ifp);  	/* Identify corresponding VLAN interface. */  	for (rn = route_top(zns->if_table); rn; rn = route_next(rn)) { @@ -846,8 +840,7 @@ static int zvni_map_to_macvlan_ns(struct ns *ns,  			continue;  		if (zif->link == in_param->svi_if) { -			if (p_ifp) -				*p_ifp = tmp_if; +			*p_ifp = tmp_if;  			return NS_WALK_STOP;  		}  	} diff --git a/zebra/zebra_evpn_mac.c b/zebra/zebra_evpn_mac.c index 46d789bc34..128d233969 100644 --- a/zebra/zebra_evpn_mac.c +++ b/zebra/zebra_evpn_mac.c @@ -1872,7 +1872,7 @@ struct zebra_mac *zebra_evpn_proc_sync_mac_update(  	return mac;  } -/* update local fowarding info. return true if a dest-ES change +/* update local forwarding info. 
return true if a dest-ES change   * is detected   */  static bool zebra_evpn_local_mac_update_fwd_info(struct zebra_mac *mac, diff --git a/zebra/zebra_evpn_mh.c b/zebra/zebra_evpn_mh.c index 02eda4a438..b1e48374c4 100644 --- a/zebra/zebra_evpn_mh.c +++ b/zebra/zebra_evpn_mh.c @@ -3286,7 +3286,7 @@ DEFPY_HIDDEN(zebra_evpn_es_bypass, zebra_evpn_es_bypass_cmd,  	} else {  		if (!zebra_evpn_is_if_es_capable(zif)) {  			vty_out(vty, -				"%%DF bypass cannot be associated with this interface type\n"); +				"%% DF bypass cannot be associated with this interface type\n");  			return CMD_WARNING;  		}  		zebra_evpn_es_bypass_cfg_update(zif, true); @@ -3311,7 +3311,7 @@ DEFPY(zebra_evpn_es_pref, zebra_evpn_es_pref_cmd,  	} else {  		if (!zebra_evpn_is_if_es_capable(zif)) {  			vty_out(vty, -				"%%DF preference cannot be associated with this interface type\n"); +				"%% DF preference cannot be associated with this interface type\n");  			return CMD_WARNING;  		}  		zebra_evpn_es_df_pref_update(zif, df_pref); @@ -3341,25 +3341,26 @@ DEFPY(zebra_evpn_es_sys_mac,  		ret = zebra_evpn_es_sys_mac_update(zif, &zero_mac);  		if (ret == -1) { -			vty_out(vty, "%%Failed to clear ES sysmac\n"); +			vty_out(vty, "%% Failed to clear ES sysmac\n");  			return CMD_WARNING;  		}  	} else {  		if (!zebra_evpn_is_if_es_capable(zif)) {  			vty_out(vty, -				"%%ESI cannot be associated with this interface type\n"); +				"%% ESI cannot be associated with this interface type\n");  			return CMD_WARNING;  		}  		if  (!mac || is_zero_mac(&mac->eth_addr)) { -			vty_out(vty, "%%ES sysmac value is invalid\n"); +			vty_out(vty, "%% ES sysmac value is invalid\n");  			return CMD_WARNING;  		}  		ret = zebra_evpn_es_sys_mac_update(zif, &mac->eth_addr);  		if (ret == -1) { -			vty_out(vty, "%%ESI already exists on a different interface\n"); +			vty_out(vty, +				"%% ESI already exists on a different interface\n");  			return CMD_WARNING;  		}  	} @@ -3392,25 +3393,27 @@ DEFPY(zebra_evpn_es_id,  			ret = zebra_evpn_es_type0_esi_update(zif, zero_esi);  		if (ret == -1) { -			vty_out(vty, "%%Failed to clear ES local id\n"); +			vty_out(vty, +				"%% Failed to clear ES local id or ESI name\n");  			return CMD_WARNING;  		}  	} else {  		if (!zebra_evpn_is_if_es_capable(zif)) {  			vty_out(vty, -				"%%ESI cannot be associated with this interface type\n"); +				"%% ESI cannot be associated with this interface type\n");  			return CMD_WARNING;  		}  		if (esi_str) {  			if (!str_to_esi(esi_str, &esi)) { -				vty_out(vty, "%% Malformed ESI\n"); +				vty_out(vty, "%% Malformed ESI name\n");  				return CMD_WARNING;  			}  			ret = zebra_evpn_es_type0_esi_update(zif, &esi);  		} else {  			if (!es_lid) { -				vty_out(vty, "%%Specify local ES ID\n"); +				vty_out(vty, +					"%% Specify ES local id or ESI name\n");  				return CMD_WARNING;  			}  			ret = zebra_evpn_es_lid_update(zif, es_lid); @@ -3418,7 +3421,7 @@ DEFPY(zebra_evpn_es_id,  		if (ret == -1) {  			vty_out(vty, -					"%%ESI already exists on a different interface\n"); +				"%% ESI already exists on a different interface\n");  			return CMD_WARNING;  		}  	} @@ -3758,18 +3761,10 @@ static inline bool zebra_evpn_mh_is_all_uplinks_down(void)  static void zebra_evpn_mh_uplink_oper_flags_update(struct zebra_if *zif,  						   bool set)  { -	if (set) { -		if (if_is_operative(zif->ifp)) { -			if (!(zif->flags & ZIF_FLAG_EVPN_MH_UPLINK_OPER_UP)) { -				zif->flags |= ZIF_FLAG_EVPN_MH_UPLINK_OPER_UP; -				++zmh_info->uplink_oper_up_cnt; -			} -		} else { -			if (zif->flags & 
ZIF_FLAG_EVPN_MH_UPLINK_OPER_UP) { -				zif->flags &= ~ZIF_FLAG_EVPN_MH_UPLINK_OPER_UP; -				if (zmh_info->uplink_oper_up_cnt) -					--zmh_info->uplink_oper_up_cnt; -			} +	if (set && if_is_operative(zif->ifp)) { +		if (!(zif->flags & ZIF_FLAG_EVPN_MH_UPLINK_OPER_UP)) { +			zif->flags |= ZIF_FLAG_EVPN_MH_UPLINK_OPER_UP; +			++zmh_info->uplink_oper_up_cnt;  		}  	} else {  		if (zif->flags & ZIF_FLAG_EVPN_MH_UPLINK_OPER_UP) { diff --git a/zebra/zebra_evpn_mh.h b/zebra/zebra_evpn_mh.h index ce7b920de1..7e288853bb 100644 --- a/zebra/zebra_evpn_mh.h +++ b/zebra/zebra_evpn_mh.h @@ -369,17 +369,6 @@ extern void zebra_evpn_mh_json(json_object *json);  extern bool zebra_evpn_nhg_is_local_es(uint32_t nhg_id,  				       struct zebra_evpn_es **local_es);  extern int zebra_evpn_mh_redirect_off(struct vty *vty, bool redirect_off); -extern int zebra_evpn_mh_startup_delay_update(struct vty *vty, -					      uint32_t duration, -					      bool set_default); -extern void zebra_evpn_mh_uplink_oper_update(struct zebra_if *zif); -extern void zebra_evpn_mh_update_protodown_bond_mbr(struct zebra_if *zif, -						    bool clear, -						    const char *caller); -extern bool zebra_evpn_is_es_bond(struct interface *ifp); -extern bool zebra_evpn_is_es_bond_member(struct interface *ifp); -extern void zebra_evpn_mh_print(struct vty *vty); -extern void zebra_evpn_mh_json(json_object *json);  extern void zebra_evpn_l2_nh_show(struct vty *vty, bool uj);  extern void zebra_evpn_acc_bd_svi_set(struct zebra_if *vlan_zif,  				      struct zebra_if *br_zif, bool is_up); diff --git a/zebra/zebra_fpm_netlink.c b/zebra/zebra_fpm_netlink.c index ec22c5dd48..d4aced47f9 100644 --- a/zebra/zebra_fpm_netlink.c +++ b/zebra/zebra_fpm_netlink.c @@ -398,7 +398,7 @@ static int netlink_route_info_encode(struct netlink_route_info *ri,  	req->r.rtm_family = ri->af;  	/* -	 * rtm_table field is a uchar field which can accomodate table_id less +	 * rtm_table field is a uchar field which can accommodate table_id less  	 * than 256.  	 
* To support table id greater than 255, if the table_id is greater than  	 * 255, set rtm_table to RT_TABLE_UNSPEC and add RTA_TABLE attribute diff --git a/zebra/zebra_mroute.c b/zebra/zebra_mroute.c index ef0f2d8924..5d38c37901 100644 --- a/zebra/zebra_mroute.c +++ b/zebra/zebra_mroute.c @@ -39,20 +39,37 @@ void zebra_ipmr_route_stats(ZAPI_HANDLER_ARGS)  	int suc = -1;  	memset(&mroute, 0, sizeof(mroute)); -	STREAM_GET(&mroute.sg.src, msg, 4); -	STREAM_GET(&mroute.sg.grp, msg, 4); -	STREAM_GETL(msg, mroute.ifindex); +	STREAM_GETL(msg, mroute.family); -	if (IS_ZEBRA_DEBUG_KERNEL) { -		char sbuf[40]; -		char gbuf[40]; +	switch (mroute.family) { +	case AF_INET: +		SET_IPADDR_V4(&mroute.src); +		SET_IPADDR_V4(&mroute.grp); +		STREAM_GET(&mroute.src.ipaddr_v4, msg, +			   sizeof(mroute.src.ipaddr_v4)); +		STREAM_GET(&mroute.grp.ipaddr_v4, msg, +			   sizeof(mroute.grp.ipaddr_v4)); +		break; +	case AF_INET6: +		SET_IPADDR_V6(&mroute.src); +		SET_IPADDR_V6(&mroute.grp); +		STREAM_GET(&mroute.src.ipaddr_v6, msg, +			   sizeof(mroute.src.ipaddr_v6)); +		STREAM_GET(&mroute.grp.ipaddr_v6, msg, +			   sizeof(mroute.grp.ipaddr_v6)); +		break; +	default: +		zlog_warn("%s: Invalid address family received while parsing", +			  __func__); +		return; +	} -		inet_ntop(AF_INET, &mroute.sg.src, sbuf, sizeof(sbuf)); -		inet_ntop(AF_INET, &mroute.sg.grp, gbuf, sizeof(gbuf)); +	STREAM_GETL(msg, mroute.ifindex); -		zlog_debug("Asking for (%s,%s)[%s(%u)] mroute information", -			   sbuf, gbuf, zvrf->vrf->name, zvrf->vrf->vrf_id); -	} +	if (IS_ZEBRA_DEBUG_KERNEL) +		zlog_debug("Asking for (%pIA,%pIA)[%s(%u)] mroute information", +			   &mroute.src, &mroute.grp, zvrf->vrf->name, +			   zvrf->vrf->vrf_id);  	suc = kernel_get_ipmr_sg_stats(zvrf, &mroute); @@ -62,8 +79,19 @@ stream_failure:  	stream_reset(s);  	zclient_create_header(s, ZEBRA_IPMR_ROUTE_STATS, zvrf_id(zvrf)); -	stream_put_in_addr(s, &mroute.sg.src); -	stream_put_in_addr(s, &mroute.sg.grp); + +	if (mroute.family == AF_INET) { +		stream_write(s, &mroute.src.ipaddr_v4, +			     sizeof(mroute.src.ipaddr_v4)); +		stream_write(s, &mroute.grp.ipaddr_v4, +			     sizeof(mroute.grp.ipaddr_v4)); +	} else { +		stream_write(s, &mroute.src.ipaddr_v6, +			     sizeof(mroute.src.ipaddr_v6)); +		stream_write(s, &mroute.grp.ipaddr_v6, +			     sizeof(mroute.grp.ipaddr_v6)); +	} +  	stream_put(s, &mroute.lastused, sizeof(mroute.lastused));  	stream_putl(s, (uint32_t)suc); diff --git a/zebra/zebra_mroute.h b/zebra/zebra_mroute.h index 3c12b82da3..6c56c2e41d 100644 --- a/zebra/zebra_mroute.h +++ b/zebra/zebra_mroute.h @@ -29,7 +29,9 @@ extern "C" {  #endif  struct mcast_route_data { -	struct prefix_sg sg; +	int family; +	struct ipaddr src; +	struct ipaddr grp;  	unsigned int ifindex;  	unsigned long long lastused;  }; diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index 1b926dba5f..069d35c6a3 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -43,6 +43,7 @@  #include "zebra_dplane.h"  #include "zebra/interface.h"  #include "zebra/zapi_msg.h" +#include "zebra/rib.h"  DEFINE_MTYPE_STATIC(ZEBRA, NHG, "Nexthop Group Entry");  DEFINE_MTYPE_STATIC(ZEBRA, NHG_CONNECTED, "Nexthop Group Connected"); @@ -1960,6 +1961,61 @@ static int resolve_backup_nexthops(const struct nexthop *nexthop,  }  /* + * So this nexthop resolution has decided that a connected route + * is the correct choice.  
At this point in time if FRR has multiple + * connected routes that all point to the same prefix one will be + * selected, *but* the particular interface may not be the one + * that the nexthop points at.  Let's look at all the available + * connected routes on this node and see if any of them match + * the route's nexthop ifindex; that is good enough for a match. + * + * This code depends on the fact that a nexthop->ifindex is 0 + * if it is not known; if this assumption changes, yummy! + * Additionally an ifindex of 0 means figure it out for us. + */ +static struct route_entry * +zebra_nhg_connected_ifindex(struct route_node *rn, struct route_entry *match, +			    int32_t curr_ifindex) +{ +	struct nexthop *newhop = match->nhe->nhg.nexthop; +	struct route_entry *re; + +	assert(newhop); /* What a kick in the patooey */ + +	if (curr_ifindex == 0) +		return match; + +	if (curr_ifindex == newhop->ifindex) +		return match; + +	/* +	 * At this point we know that this route is matching a connected +	 * but there are possibly a bunch of connected routes that are +	 * alive that should be considered as well.  So let's iterate over +	 * all the re's and see if they are connected as well and maybe one +	 * of those ifindexes match as well. +	 */ +	RNODE_FOREACH_RE (rn, re) { +		if (re->type != ZEBRA_ROUTE_CONNECT) +			continue; + +		if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) +			continue; + +		/* +		 * zebra has a connected route that is not removed +		 * let's test if it is good +		 */ +		newhop = re->nhe->nhg.nexthop; +		assert(newhop); +		if (curr_ifindex == newhop->ifindex) +			return re; +	} + +	return match; +} + +/*   * Given a nexthop we need to properly recursively resolve,   * do a table lookup to find and match if at all possible.   * Set the nexthop->ifindex and resolution info as appropriate. @@ -2209,25 +2265,25 @@ static int nexthop_active(struct nexthop *nexthop, struct nhg_hash_entry *nhe,  			continue;  		} -		if (match->type == ZEBRA_ROUTE_CONNECT) { -			/* Directly point connected route. */ +		if ((match->type == ZEBRA_ROUTE_CONNECT) || +		    (RIB_SYSTEM_ROUTE(match) && RSYSTEM_ROUTE(type))) { +			match = zebra_nhg_connected_ifindex(rn, match, +							    nexthop->ifindex); +  			newhop = match->nhe->nhg.nexthop; -			if (newhop) { -				if (nexthop->type == NEXTHOP_TYPE_IPV4 -				    || nexthop->type == NEXTHOP_TYPE_IPV6) -					nexthop->ifindex = newhop->ifindex; -				else if (nexthop->ifindex != newhop->ifindex) { -					if (IS_ZEBRA_DEBUG_RIB_DETAILED) -						zlog_debug( -							"%s: %pNHv given ifindex does not match nexthops ifindex found found: %pNHv", -							__func__, nexthop, -							newhop); -					/* -					 * NEXTHOP_TYPE_*_IFINDEX but ifindex -					 * doesn't match what we found. -					 */ -					return 0; -				} +			if (nexthop->type == NEXTHOP_TYPE_IPV4 || +			    nexthop->type == NEXTHOP_TYPE_IPV6) +				nexthop->ifindex = newhop->ifindex; +			else if (nexthop->ifindex != newhop->ifindex) { +				if (IS_ZEBRA_DEBUG_RIB_DETAILED) +					zlog_debug( +						"%s: %pNHv given ifindex does not match nexthops ifindex found: %pNHv", +						__func__, nexthop, newhop); +				/* +				 * NEXTHOP_TYPE_*_IFINDEX but ifindex +				 * doesn't match what we found. 
+				 */ +				return 0;  			}  			if (IS_ZEBRA_DEBUG_NHG_DETAIL) @@ -2946,10 +3002,12 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx)  						 nhe->zapi_session, nhe->id,  						 ZAPI_NHG_FAIL_INSTALL); -			flog_err( -				EC_ZEBRA_DP_INSTALL_FAIL, -				"Failed to install Nexthop ID (%u) into the kernel", -				nhe->id); +			if (!(zebra_nhg_proto_nexthops_only() && +			      !PROTO_OWNED(nhe))) +				flog_err( +					EC_ZEBRA_DP_INSTALL_FAIL, +					"Failed to install Nexthop ID (%u) into the kernel", +					nhe->id);  		}  		break; diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index c6840a503c..2732967ee6 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -487,6 +487,62 @@ struct route_entry *rib_match_ipv4_multicast(vrf_id_t vrf_id,  	return re;  } +struct route_entry *rib_match_ipv6_multicast(vrf_id_t vrf_id, +					     struct in6_addr addr, +					     struct route_node **rn_out) +{ +	struct route_entry *re = NULL, *mre = NULL, *ure = NULL; +	struct route_node *m_rn = NULL, *u_rn = NULL; +	union g_addr gaddr = {.ipv6 = addr}; + +	switch (zrouter.ipv4_multicast_mode) { +	case MCAST_MRIB_ONLY: +		return rib_match(AFI_IP6, SAFI_MULTICAST, vrf_id, &gaddr, +				 rn_out); +	case MCAST_URIB_ONLY: +		return rib_match(AFI_IP6, SAFI_UNICAST, vrf_id, &gaddr, rn_out); +	case MCAST_NO_CONFIG: +	case MCAST_MIX_MRIB_FIRST: +		re = mre = rib_match(AFI_IP6, SAFI_MULTICAST, vrf_id, &gaddr, +				     &m_rn); +		if (!mre) +			re = ure = rib_match(AFI_IP6, SAFI_UNICAST, vrf_id, +					     &gaddr, &u_rn); +		break; +	case MCAST_MIX_DISTANCE: +		mre = rib_match(AFI_IP6, SAFI_MULTICAST, vrf_id, &gaddr, &m_rn); +		ure = rib_match(AFI_IP6, SAFI_UNICAST, vrf_id, &gaddr, &u_rn); +		if (mre && ure) +			re = ure->distance < mre->distance ? ure : mre; +		else if (mre) +			re = mre; +		else if (ure) +			re = ure; +		break; +	case MCAST_MIX_PFXLEN: +		mre = rib_match(AFI_IP6, SAFI_MULTICAST, vrf_id, &gaddr, &m_rn); +		ure = rib_match(AFI_IP6, SAFI_UNICAST, vrf_id, &gaddr, &u_rn); +		if (mre && ure) +			re = u_rn->p.prefixlen > m_rn->p.prefixlen ? ure : mre; +		else if (mre) +			re = mre; +		else if (ure) +			re = ure; +		break; +	} + +	if (rn_out) +		*rn_out = (re == mre) ? m_rn : u_rn; + +	if (IS_ZEBRA_DEBUG_RIB) +		zlog_debug("%s: %pI6: vrf: %s(%u) found %s, using %s", __func__, +			   &addr, vrf_id_to_name(vrf_id), vrf_id, +			   mre ? (ure ? "MRIB+URIB" : "MRIB") +			       : ure ? "URIB" : "nothing", +			   re == ure ? "URIB" : re == mre ? 
"MRIB" : "none"); +	return re; +} +  struct route_entry *rib_lookup_ipv4(struct prefix_ipv4 *p, vrf_id_t vrf_id)  {  	struct route_table *table; diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c index e99cb76119..2cc84a1f7f 100644 --- a/zebra/zebra_routemap.c +++ b/zebra/zebra_routemap.c @@ -638,7 +638,7 @@ DEFPY_YANG (ip_protocol,  	assert(proto);  	assert(rmap); -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -672,7 +672,7 @@ DEFPY_YANG (no_ip_protocol,  	assert(proto); -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -719,7 +719,7 @@ DEFPY_YANG (ipv6_protocol,  	assert(rmap);  	assert(proto); -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -753,7 +753,7 @@ DEFPY_YANG (no_ipv6_protocol,  	assert(proto); -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -801,7 +801,7 @@ DEFPY_YANG (ip_protocol_nht_rmap,  	assert(proto);  	assert(rmap); -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -835,7 +835,7 @@ DEFPY_YANG (no_ip_protocol_nht_rmap,  	assert(proto); -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -883,7 +883,7 @@ DEFPY_YANG (ipv6_protocol_nht_rmap,  	assert(rmap);  	assert(proto); -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -917,7 +917,7 @@ DEFPY_YANG (no_ipv6_protocol_nht_rmap,  	assert(proto); -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; diff --git a/zebra/zebra_script.c b/zebra/zebra_script.c index 9805390a6d..d247f87708 100644 --- a/zebra/zebra_script.c +++ b/zebra/zebra_script.c @@ -17,6 +17,8 @@   * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA   */ +#include "zebra.h" +  #include "zebra_script.h"  #ifdef HAVE_SCRIPTING diff --git a/zebra/zebra_srv6_vty.c b/zebra/zebra_srv6_vty.c index ebe0fffcb2..62ce17326c 100644 --- a/zebra/zebra_srv6_vty.c +++ b/zebra/zebra_srv6_vty.c @@ -354,8 +354,12 @@ static int zebra_sr_config(struct vty *vty)  			inet_ntop(AF_INET6, &locator->prefix.prefix,  				  str, sizeof(str));  			vty_out(vty, "   locator %s\n", locator->name); -			vty_out(vty, "    prefix %s/%u\n", str, +			vty_out(vty, "    prefix %s/%u", str,  				locator->prefix.prefixlen); +			if (locator->function_bits_length) +				vty_out(vty, " func-bits %u", +					locator->function_bits_length); +			vty_out(vty, "\n");  			vty_out(vty, "   exit\n");  			vty_out(vty, "   !\n");  		} diff --git a/zebra/zebra_trace.c b/zebra/zebra_trace.c new file mode 100644 index 0000000000..fef5ad20ac --- /dev/null +++ b/zebra/zebra_trace.c @@ -0,0 +1,6 @@ +#define TRACEPOINT_CREATE_PROBES +#define TRACEPOINT_DEFINE + +#include <zebra.h> + +#include "zebra_trace.h" diff --git a/zebra/zebra_trace.h b/zebra/zebra_trace.h new file mode 100644 index 0000000000..49a0c8e793 --- /dev/null +++ b/zebra/zebra_trace.h @@ -0,0 +1,130 @@ +/* Tracing for zebra + * + * Copyright (C) 2020  NVIDIA Corporation + * Donald Sharp + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) 
+ * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#if !defined(__ZEBRA_TRACE_H__) || defined(TRACEPOINT_HEADER_MULTI_READ) +#define __ZEBRA_TRACE_H__ + +#include "lib/trace.h" + +#ifdef HAVE_LTTNG + +#undef TRACEPOINT_PROVIDER +#define TRACEPOINT_PROVIDER frr_zebra + +#undef TRACEPOINT_INCLUDE +#define TRACEPOINT_INCLUDE "zebra/zebra_trace.h" + +#include <lttng/tracepoint.h> + +#include <lib/ns.h> +#include <lib/table.h> + +#include <zebra/zebra_ns.h> + +TRACEPOINT_EVENT( +	frr_zebra, +	netlink_request_intf_addr, +	TP_ARGS(struct nlsock *, netlink_cmd, +		int, family, +		int, type, +		uint32_t, filter_mask), +	TP_FIELDS( +		ctf_integer_hex(intptr_t, netlink_cmd, netlink_cmd) +		ctf_integer(int, family, family) +		ctf_integer(int, type, type) +		ctf_integer(uint32_t, filter_mask, filter_mask) +		) +	) + +TRACEPOINT_EVENT( +	frr_zebra, +	netlink_interface, +	TP_ARGS( +		struct nlmsghdr *, header, +		ns_id_t, ns_id, +		int, startup), +	TP_FIELDS( +		ctf_integer_hex(intptr_t, header, header) +		ctf_integer(uint32_t, ns_id, ns_id) +		ctf_integer(uint32_t, startup, startup) +		) +	) + +TRACEPOINT_EVENT( +	frr_zebra, +	netlink_nexthop_change, +	TP_ARGS( +		struct nlmsghdr *, header, +		ns_id_t, ns_id, +		int, startup), +	TP_FIELDS( +		ctf_integer_hex(intptr_t, header, header) +		ctf_integer(uint32_t, ns_id, ns_id) +		ctf_integer(uint32_t, startup, startup) +		) +	) + +TRACEPOINT_EVENT( +	frr_zebra, +	netlink_interface_addr, +	TP_ARGS( +		struct nlmsghdr *, header, +		ns_id_t, ns_id, +		int, startup), +	TP_FIELDS( +		ctf_integer_hex(intptr_t, header, header) +		ctf_integer(uint32_t, ns_id, ns_id) +		ctf_integer(uint32_t, startup, startup) +		) +	) + +TRACEPOINT_EVENT( +	frr_zebra, +	netlink_route_change_read_unicast, +	TP_ARGS( +		struct nlmsghdr *, header, +		ns_id_t, ns_id, +		int, startup), +	TP_FIELDS( +		ctf_integer_hex(intptr_t, header, header) +		ctf_integer(uint32_t, ns_id, ns_id) +		ctf_integer(uint32_t, startup, startup) +		) +	) + +TRACEPOINT_EVENT( +	frr_zebra, +	netlink_rule_change, +	TP_ARGS( +		struct nlmsghdr *, header, +		ns_id_t, ns_id, +		int, startup), +	TP_FIELDS( +		ctf_integer_hex(intptr_t, header, header) +		ctf_integer(uint32_t, ns_id, ns_id) +		ctf_integer(uint32_t, startup, startup) +		) +	) + +#include <lttng/tracepoint-event.h> + +#endif /* HAVE_LTTNG */ + +#endif /* __ZEBRA_TRACE_H__ */ diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h index a24a008b76..21e7f286f3 100644 --- a/zebra/zebra_vrf.h +++ b/zebra/zebra_vrf.h @@ -192,14 +192,10 @@ struct zebra_vrf {  /*   * special macro to allow us to get the correct zebra_vrf   */ -#define ZEBRA_DECLVAR_CONTEXT(A, B)                                            \ -	struct vrf *A;                                                         \ -	if (vty->node == CONFIG_NODE)                                          \ -		A = vrf_lookup_by_id(VRF_DEFAULT);                             \ -	else                                                                   \ -		A = VTY_GET_CONTEXT(vrf);                                      \ -	VTY_CHECK_CONTEXT(A);                       
                           \ -	struct zebra_vrf *B = A->info +#define ZEBRA_DECLVAR_CONTEXT_VRF(vrfptr, zvrfptr)                             \ +	VTY_DECLVAR_CONTEXT_VRF(vrfptr);                                       \ +	struct zebra_vrf *zvrfptr = vrfptr->info;                              \ +	MACRO_REQUIRE_SEMICOLON() /* end */  static inline vrf_id_t zvrf_id(struct zebra_vrf *zvrf)  { diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index 22c65e3c0c..b20d8daf38 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -1413,7 +1413,7 @@ DEFUN (ip_nht_default_route,         "Filter Next Hop tracking route resolution\n"         "Resolve via default route\n")  { -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -1752,7 +1752,7 @@ DEFUN (no_ip_nht_default_route,         "Filter Next Hop tracking route resolution\n"         "Resolve via default route\n")  { -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -1772,7 +1772,7 @@ DEFUN (ipv6_nht_default_route,         "Filter Next Hop tracking route resolution\n"         "Resolve via default route\n")  { -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -1793,7 +1793,7 @@ DEFUN (no_ipv6_nht_default_route,         "Filter Next Hop tracking route resolution\n"         "Resolve via default route\n")  { -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -2676,77 +2676,6 @@ static void vty_show_ip_route_summary_prefix(struct vty *vty,  	}  } -/* - * Show IPv6 mroute command.Used to dump - * the Multicast routing table. - */ -DEFUN (show_ipv6_mroute, -       show_ipv6_mroute_cmd, -       "show ipv6 mroute [vrf NAME]", -       SHOW_STR -       IP_STR -       "IPv6 Multicast routing table\n" -       VRF_CMD_HELP_STR) -{ -	struct route_table *table; -	struct route_node *rn; -	struct route_entry *re; -	int first = 1; -	vrf_id_t vrf_id = VRF_DEFAULT; - -	if (argc == 5) -		VRF_GET_ID(vrf_id, argv[4]->arg, false); - -	table = zebra_vrf_table(AFI_IP6, SAFI_MULTICAST, vrf_id); -	if (!table) -		return CMD_SUCCESS; - -	/* Show all IPv6 route. */ -	for (rn = route_top(table); rn; rn = srcdest_route_next(rn)) -		RNODE_FOREACH_RE (rn, re) { -			if (first) { -				vty_out(vty, SHOW_ROUTE_V6_HEADER); -				first = 0; -			} -			vty_show_ip_route(vty, rn, re, NULL, false, false); -		} -	return CMD_SUCCESS; -} - -DEFUN (show_ipv6_mroute_vrf_all, -       show_ipv6_mroute_vrf_all_cmd, -       "show ipv6 mroute vrf all", -       SHOW_STR -       IP_STR -       "IPv6 Multicast routing table\n" -       VRF_ALL_CMD_HELP_STR) -{ -	struct route_table *table; -	struct route_node *rn; -	struct route_entry *re; -	struct vrf *vrf; -	struct zebra_vrf *zvrf; -	int first = 1; - -	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { -		if ((zvrf = vrf->info) == NULL -		    || (table = zvrf->table[AFI_IP6][SAFI_MULTICAST]) == NULL) -			continue; - -		/* Show all IPv6 route. 
*/ -		for (rn = route_top(table); rn; rn = srcdest_route_next(rn)) -			RNODE_FOREACH_RE (rn, re) { -				if (first) { -					vty_out(vty, SHOW_ROUTE_V6_HEADER); -					first = 0; -				} -				vty_show_ip_route(vty, rn, re, NULL, false, -						  false); -			} -	} -	return CMD_SUCCESS; -} -  DEFUN (allow_external_route_update,         allow_external_route_update_cmd,         "allow-external-route-update", @@ -2950,7 +2879,7 @@ DEFUN (vrf_vni_mapping,  {  	int filter = 0; -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	assert(vrf);  	assert(zvrf); @@ -2979,7 +2908,7 @@ DEFUN (no_vrf_vni_mapping,  {  	int filter = 0; -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	vni_t vni = strtoul(argv[2]->arg, NULL, 10);  	assert(vrf); @@ -4395,7 +4324,7 @@ DEFUN(ip_table_range, ip_table_range_cmd,        "Start Routing Table\n"        "End Routing Table\n")  { -	ZEBRA_DECLVAR_CONTEXT(vrf, zvrf); +	ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);  	if (!zvrf)  		return CMD_WARNING; @@ -4529,11 +4458,6 @@ void zebra_vty_init(void)  	install_element(VRF_NODE, &no_ipv6_nht_default_route_cmd);  	install_element(CONFIG_NODE, &rnh_hide_backups_cmd); -	install_element(VIEW_NODE, &show_ipv6_mroute_cmd); - -	/* Commands for VRF */ -	install_element(VIEW_NODE, &show_ipv6_mroute_vrf_all_cmd); -  	install_element(VIEW_NODE, &show_frr_cmd);  	install_element(VIEW_NODE, &show_evpn_global_cmd);  	install_element(VIEW_NODE, &show_evpn_vni_cmd); diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c index 13e1f63457..fc7eb8c87a 100644 --- a/zebra/zebra_vxlan.c +++ b/zebra/zebra_vxlan.c @@ -1754,6 +1754,8 @@ static int zl3vni_map_to_vxlan_if_ns(struct ns *ns,  	if (!zvrf)  		return NS_WALK_STOP; +	assert(_pifp); +  	/* loop through all vxlan-interface */  	for (rn = route_top(zns->if_table); rn; rn = route_next(rn)) { @@ -1784,8 +1786,7 @@ static int zl3vni_map_to_vxlan_if_ns(struct ns *ns,  		zl3vni->local_vtep_ip = vxl->vtep_ip; -		if (_pifp) -			*_pifp = (void *)ifp; +		*_pifp = (void *)ifp;  		return NS_WALK_STOP;  	} @@ -1856,7 +1857,6 @@ struct zebra_l3vni *zl3vni_from_vrf(vrf_id_t vrf_id)  static int zl3vni_from_svi_ns(struct ns *ns, void *_in_param, void **_p_zl3vni)  { -	int found = 0;  	struct zebra_ns *zns = ns->info;  	struct zebra_l3vni **p_zl3vni = (struct zebra_l3vni **)_p_zl3vni;  	struct zebra_from_svi_param *in_param = @@ -1866,8 +1866,7 @@ static int zl3vni_from_svi_ns(struct ns *ns, void *_in_param, void **_p_zl3vni)  	struct zebra_if *zif = NULL;  	struct zebra_l2info_vxlan *vxl = NULL; -	if (!in_param) -		return NS_WALK_STOP; +	assert(in_param && p_zl3vni);  	/* loop through all vxlan-interface */  	for (rn = route_top(zns->if_table); rn; rn = route_next(rn)) { @@ -1886,17 +1885,12 @@ static int zl3vni_from_svi_ns(struct ns *ns, void *_in_param, void **_p_zl3vni)  		if (!in_param->bridge_vlan_aware  		    || vxl->access_vlan == in_param->vid) { -			found = 1; -			break; +			*p_zl3vni = zl3vni_lookup(vxl->vni); +			return NS_WALK_STOP;  		}  	} -	if (!found) -		return NS_WALK_CONTINUE; - -	if (p_zl3vni) -		*p_zl3vni = zl3vni_lookup(vxl->vni); -	return NS_WALK_STOP; +	return NS_WALK_CONTINUE;  }  /* @@ -5130,8 +5124,13 @@ int zebra_vxlan_if_update(struct interface *ifp, uint16_t chgflags)  		zevpn_vxlan_if_set(zevpn, ifp, true /* set */);  		vlan_if = zvni_map_to_svi(vxl->access_vlan,  					  zif->brslave_info.br_if); -		if (vlan_if) +		if (vlan_if) {  			zevpn->svi_if = vlan_if; +			zevpn->vrf_id = vlan_if->vrf->vrf_id; +			zl3vni = 
zl3vni_from_vrf(vlan_if->vrf->vrf_id); +			if (zl3vni) +				listnode_add_sort_nodup(zl3vni->l2vnis, zevpn); +		}  		/* Take further actions needed.  		 * Note that if we are here, there is a change of interest. diff --git a/zebra/zserv.c b/zebra/zserv.c index 630c76c989..403f6c0d99 100644 --- a/zebra/zserv.c +++ b/zebra/zserv.c @@ -172,7 +172,7 @@ void zserv_log_message(const char *errmsg, struct stream *msg,  }  /* - * Gracefuly shut down a client connection. + * Gracefully shut down a client connection.   *   * Cancel any pending tasks for the client's thread. Then schedule a task on   * the main thread to shut down the calling thread. @@ -568,7 +568,7 @@ DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client));   * Deinitialize zebra client.   *   * - Deregister and deinitialize related internal resources - * - Gracefuly close socket + * - Gracefully close socket   * - Free associated resources   * - Free client structure   *
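The zebra_trace.h and zebra_trace.c additions above wire zebra into LTTng-UST: each traced netlink handler gets a TRACEPOINT_EVENT() definition under HAVE_LTTNG and a matching frrtrace() call at the top of the handler, while zebra_trace.c supplies TRACEPOINT_CREATE_PROBES/TRACEPOINT_DEFINE and subdir.am links $(UST_LIBS). As a rough sketch of how one more handler could be hooked into the same frr_zebra provider (the event name netlink_example_change and the handler shown here are hypothetical, not part of this patch), the pattern looks like this:

/* zebra/zebra_trace.h, inside the HAVE_LTTNG block: a hypothetical extra
 * event with the same header/ns_id/startup shape as the events above. */
TRACEPOINT_EVENT(
	frr_zebra,
	netlink_example_change,
	TP_ARGS(
		struct nlmsghdr *, header,
		ns_id_t, ns_id,
		int, startup),
	TP_FIELDS(
		ctf_integer_hex(intptr_t, header, header)
		ctf_integer(uint32_t, ns_id, ns_id)
		ctf_integer(uint32_t, startup, startup)
		)
	)

/* In the (hypothetical) handler itself, the probe is emitted through the
 * frrtrace() wrapper from lib/trace.h; the first argument is the number of
 * tracepoint arguments, matching the calls added by this patch. */
static int netlink_example_change(struct nlmsghdr *h, ns_id_t ns_id,
				  int startup)
{
	frrtrace(3, frr_zebra, netlink_example_change, h, ns_id, startup);

	/* ... parse and handle the netlink message as usual ... */
	return 0;
}

When LTTng is not available, frrtrace() is expected to compile away to nothing, which is why the call sites added by this patch can stay unconditional.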
