summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--babeld/message.c2
-rw-r--r--bfdd/bfd_packet.c6
-rw-r--r--bgpd/bgp_bmp.c19
-rw-r--r--bgpd/bgp_evpn.c19
-rw-r--r--bgpd/bgp_mplsvpn.c376
-rw-r--r--bgpd/bgp_mplsvpn.h20
-rw-r--r--bgpd/bgp_packet.c8
-rw-r--r--bgpd/bgp_updgrp.c37
-rw-r--r--bgpd/bgp_updgrp.h7
-rw-r--r--bgpd/bgp_updgrp_adv.c48
-rw-r--r--bgpd/bgp_updgrp_packet.c14
-rw-r--r--bgpd/bgpd.c24
-rw-r--r--configure.ac7
-rw-r--r--debian/control2
-rwxr-xr-xdebian/rules7
-rw-r--r--doc/developer/packaging-debian.rst4
-rw-r--r--doc/user/pimv6.rst6
-rw-r--r--lib/prefix.h2
-rwxr-xr-x[-rw-r--r--]lib/resolver.c3
-rw-r--r--lib/routemap.c12
-rw-r--r--lib/routemap.h4
-rw-r--r--lib/typesafe.h19
-rw-r--r--pimd/pim6_cmd.c33
-rw-r--r--pimd/pim6_stubs.c68
-rw-r--r--pimd/pim_bsm.c150
-rw-r--r--pimd/pim_bsm.h14
-rw-r--r--pimd/pim_cmd.c153
-rw-r--r--pimd/pim_cmd_common.c167
-rw-r--r--pimd/pim_cmd_common.h7
-rw-r--r--pimd/pim_iface.c4
-rw-r--r--pimd/pim_ifchannel.c5
-rw-r--r--pimd/pim_join.c16
-rw-r--r--pimd/pim_jp_agg.c7
-rw-r--r--pimd/pim_msg.c2
-rw-r--r--pimd/pim_nb_config.c4
-rw-r--r--pimd/pim_neighbor.c2
-rw-r--r--pimd/pim_nht.c159
-rw-r--r--pimd/pim_nht.h16
-rw-r--r--pimd/pim_register.c16
-rw-r--r--pimd/pim_rp.c153
-rw-r--r--pimd/pim_rpf.c40
-rw-r--r--pimd/pim_rpf.h2
-rw-r--r--pimd/pim_str.h17
-rw-r--r--pimd/pim_tib.c7
-rw-r--r--pimd/pim_upstream.c69
-rw-r--r--pimd/pim_upstream.h2
-rw-r--r--pimd/pim_vxlan.c6
-rw-r--r--pimd/pim_zebra.c6
-rw-r--r--pimd/subdir.am13
-rw-r--r--redhat/frr.spec.in18
-rw-r--r--snapcraft/snapcraft.yaml.in2
-rw-r--r--tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py2537
-rw-r--r--tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py2102
-rw-r--r--tools/etc/rsyslog.d/45-frr.conf2
-rwxr-xr-xtools/frrcommon.sh.in4
-rw-r--r--zebra/debug_nl.c17
-rw-r--r--zebra/rt_netlink.c61
-rw-r--r--zebra/zebra_evpn_neigh.c5
58 files changed, 5628 insertions, 904 deletions
diff --git a/babeld/message.c b/babeld/message.c
index c2ea2a2683..7d45d91bf7 100644
--- a/babeld/message.c
+++ b/babeld/message.c
@@ -636,7 +636,7 @@ parse_packet(const unsigned char *from, struct interface *ifp,
len - parsed_len, channels);
}
- if (ignore_update)
+ if (!ignore_update)
update_route(router_id, prefix, plen, seqno, metric,
interval, neigh, nh, channels,
channels_len(channels));
diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c
index 6b0afef65f..d34d642762 100644
--- a/bfdd/bfd_packet.c
+++ b/bfdd/bfd_packet.c
@@ -219,8 +219,8 @@ void ptm_bfd_echo_fp_snd(struct bfd_session *bfd)
/* add eth hdr */
eth = (struct ethhdr *)(sendbuff);
- memcpy(eth->h_source, bfd->ifp->hw_addr, sizeof(bfd->ifp->hw_addr));
- memcpy(eth->h_dest, bfd->peer_hw_addr, sizeof(bfd->peer_hw_addr));
+ memcpy(eth->h_source, bfd->ifp->hw_addr, sizeof(eth->h_source));
+ memcpy(eth->h_dest, bfd->peer_hw_addr, sizeof(eth->h_dest));
total_len += sizeof(struct ethhdr);
@@ -1569,6 +1569,7 @@ int bp_echo_socket(const struct vrf *vrf)
-1) {
zlog_warn("%s: setsockopt(SO_ATTACH_FILTER): %s", __func__,
strerror(errno));
+ close(s);
return -1;
}
@@ -1579,6 +1580,7 @@ int bp_echo_socket(const struct vrf *vrf)
if (bind(s, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
zlog_warn("Failed to bind echo socket: %s",
safe_strerror(errno));
+ close(s);
return -1;
}
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
index b561b50ff5..ef985e08b2 100644
--- a/bgpd/bgp_bmp.c
+++ b/bgpd/bgp_bmp.c
@@ -136,6 +136,12 @@ static int bmp_listener_cmp(const struct bmp_listener *a,
DECLARE_SORTLIST_UNIQ(bmp_listeners, struct bmp_listener, bli,
bmp_listener_cmp);
+static void bmp_listener_put(struct bmp_listener *bl)
+{
+ bmp_listeners_del(&bl->targets->listeners, bl);
+ XFREE(MTYPE_BMP_LISTENER, bl);
+}
+
static int bmp_targets_cmp(const struct bmp_targets *a,
const struct bmp_targets *b)
{
@@ -1541,11 +1547,16 @@ static struct bmp_bgp *bmp_bgp_get(struct bgp *bgp)
static void bmp_bgp_put(struct bmp_bgp *bmpbgp)
{
struct bmp_targets *bt;
+ struct bmp_listener *bl;
bmp_bgph_del(&bmp_bgph, bmpbgp);
- frr_each_safe(bmp_targets, &bmpbgp->targets, bt)
+ frr_each_safe (bmp_targets, &bmpbgp->targets, bt) {
+ frr_each_safe (bmp_listeners, &bt->listeners, bl)
+ bmp_listener_put(bl);
+
bmp_targets_put(bt);
+ }
bmp_mirrorq_fini(&bmpbgp->mirrorq);
XFREE(MTYPE_BMP, bmpbgp);
@@ -1675,12 +1686,6 @@ static struct bmp_listener *bmp_listener_get(struct bmp_targets *bt,
return bl;
}
-static void bmp_listener_put(struct bmp_listener *bl)
-{
- bmp_listeners_del(&bl->targets->listeners, bl);
- XFREE(MTYPE_BMP_LISTENER, bl);
-}
-
static void bmp_listener_start(struct bmp_listener *bl)
{
int sock, ret;
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index 3483ece5b8..bbbe538acc 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -1753,6 +1753,16 @@ static int update_evpn_route(struct bgp *bgp, struct bgpevpn *vpn,
bgp_attr_set_pmsi_tnl_type(&attr, PMSI_TNLTYPE_INGR_REPL);
}
+ /* router mac is only needed for type-2 routes here. */
+ if (p->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE) {
+ uint8_t af_flags = 0;
+
+ if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_SVI_IP))
+ SET_FLAG(af_flags, BGP_EVPN_MACIP_TYPE_SVI_IP);
+
+ bgp_evpn_get_rmac_nexthop(vpn, p, &attr, af_flags);
+ }
+
if (bgp_debug_zebra(NULL)) {
char buf3[ESI_STR_LEN];
@@ -1763,15 +1773,6 @@ static int update_evpn_route(struct bgp *bgp, struct bgpevpn *vpn,
vpn->vni, p, &attr.rmac, &attr.mp_nexthop_global_in,
esi_to_str(esi, buf3, sizeof(buf3)));
}
- /* router mac is only needed for type-2 routes here. */
- if (p->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE) {
- uint8_t af_flags = 0;
-
- if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_SVI_IP))
- SET_FLAG(af_flags, BGP_EVPN_MACIP_TYPE_SVI_IP);
-
- bgp_evpn_get_rmac_nexthop(vpn, p, &attr, af_flags);
- }
vni2label(vpn->vni, &(attr.label));
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index cc4ff57f4e..52180b3e48 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -767,23 +767,74 @@ static void unsetsids(struct bgp_path_info *bpi)
memset(extra->sid, 0, sizeof(extra->sid));
}
+static bool leak_update_nexthop_valid(struct bgp *to_bgp, struct bgp_dest *bn,
+ struct attr *new_attr, afi_t afi,
+ safi_t safi,
+ struct bgp_path_info *source_bpi,
+ struct bgp_path_info *bpi,
+ struct bgp *bgp_orig,
+ const struct prefix *p, int debug)
+{
+ struct bgp_path_info *bpi_ultimate;
+ struct bgp *bgp_nexthop;
+ bool nh_valid;
+
+ bpi_ultimate = bgp_get_imported_bpi_ultimate(source_bpi);
+
+ if (bpi->extra && bpi->extra->bgp_orig)
+ bgp_nexthop = bpi->extra->bgp_orig;
+ else
+ bgp_nexthop = bgp_orig;
+
+ /*
+ * No nexthop tracking for redistributed routes or for
+ * EVPN-imported routes that get leaked.
+ */
+ if (bpi_ultimate->sub_type == BGP_ROUTE_REDISTRIBUTE ||
+ is_pi_family_evpn(bpi_ultimate))
+ nh_valid = 1;
+ else
+ /*
+ * TBD do we need to do anything about the
+ * 'connected' parameter?
+ */
+ nh_valid = bgp_find_or_add_nexthop(to_bgp, bgp_nexthop, afi,
+ safi, bpi, NULL, 0, p);
+
+ /*
+ * If you are using SRv6 VPN instead of MPLS, it need to check
+ * the SID allocation. If the sid is not allocated, the rib
+ * will be invalid.
+ */
+ if (to_bgp->srv6_enabled &&
+ (!new_attr->srv6_l3vpn && !new_attr->srv6_vpn)) {
+ nh_valid = false;
+ }
+
+ if (debug)
+ zlog_debug("%s: %pFX nexthop is %svalid (in vrf %s)", __func__,
+ p, (nh_valid ? "" : "not "),
+ bgp_nexthop->name_pretty);
+
+ return nh_valid;
+}
+
/*
* returns pointer to new bgp_path_info upon success
*/
static struct bgp_path_info *
-leak_update(struct bgp *bgp, /* destination bgp instance */
- struct bgp_dest *bn, struct attr *new_attr, /* already interned */
+leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
+ struct attr *new_attr, /* already interned */
afi_t afi, safi_t safi, struct bgp_path_info *source_bpi,
- mpls_label_t *label, uint32_t num_labels, void *parent,
- struct bgp *bgp_orig, struct prefix *nexthop_orig,
- int nexthop_self_flag, int debug)
+ mpls_label_t *label, uint32_t num_labels, struct bgp *bgp_orig,
+ struct prefix *nexthop_orig, int nexthop_self_flag, int debug)
{
const struct prefix *p = bgp_dest_get_prefix(bn);
struct bgp_path_info *bpi;
- struct bgp_path_info *bpi_ultimate;
struct bgp_path_info *new;
struct bgp_path_info_extra *extra;
uint32_t num_sids = 0;
+ void *parent = source_bpi;
if (new_attr->srv6_l3vpn || new_attr->srv6_vpn)
num_sids = 1;
@@ -791,7 +842,7 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
if (debug)
zlog_debug(
"%s: entry: leak-to=%s, p=%pBD, type=%d, sub_type=%d",
- __func__, bgp->name_pretty, bn, source_bpi->type,
+ __func__, to_bgp->name_pretty, bn, source_bpi->type,
source_bpi->sub_type);
/*
@@ -809,7 +860,6 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
* schemes that could be implemented in the future.
*
*/
- bpi_ultimate = bgp_get_imported_bpi_ultimate(source_bpi);
/*
* match parent
@@ -827,7 +877,7 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
if (debug) {
zlog_debug(
"%s: ->%s(s_flags: 0x%x b_flags: 0x%x): %pFX: Found route, being removed, not leaking",
- __func__, bgp->name_pretty,
+ __func__, to_bgp->name_pretty,
source_bpi->flags, bpi->flags, p);
}
return NULL;
@@ -840,7 +890,7 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
if (debug)
zlog_debug(
"%s: ->%s: %pBD: Found route, no change",
- __func__, bgp->name_pretty, bn);
+ __func__, to_bgp->name_pretty, bn);
return NULL;
}
@@ -858,8 +908,9 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
if (!ecommunity_cmp(
bgp_attr_get_ecommunity(bpi->attr),
bgp_attr_get_ecommunity(new_attr))) {
- vpn_leak_to_vrf_withdraw(bgp, bpi);
- bgp_aggregate_decrement(bgp, p, bpi, afi, safi);
+ vpn_leak_to_vrf_withdraw(to_bgp, bpi);
+ bgp_aggregate_decrement(to_bgp, p, bpi, afi,
+ safi);
bgp_path_info_delete(bn, bpi);
}
}
@@ -871,7 +922,7 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED))
bgp_path_info_restore(bn, bpi);
else
- bgp_aggregate_decrement(bgp, p, bpi, afi, safi);
+ bgp_aggregate_decrement(to_bgp, p, bpi, afi, safi);
bgp_attr_unintern(&bpi->attr);
bpi->attr = new_attr;
bpi->uptime = bgp_clock();
@@ -914,54 +965,21 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
if (nexthop_self_flag)
bgp_path_info_set_flag(bn, bpi, BGP_PATH_ANNC_NH_SELF);
- struct bgp *bgp_nexthop = bgp;
- int nh_valid;
-
- if (bpi->extra && bpi->extra->bgp_orig)
- bgp_nexthop = bpi->extra->bgp_orig;
-
- /*
- * No nexthop tracking for redistributed routes or for
- * EVPN-imported routes that get leaked.
- */
- if (bpi_ultimate->sub_type == BGP_ROUTE_REDISTRIBUTE ||
- is_pi_family_evpn(bpi_ultimate))
- nh_valid = 1;
+ if (leak_update_nexthop_valid(to_bgp, bn, new_attr, afi, safi,
+ source_bpi, bpi, bgp_orig, p,
+ debug))
+ bgp_path_info_set_flag(bn, bpi, BGP_PATH_VALID);
else
- /*
- * TBD do we need to do anything about the
- * 'connected' parameter?
- */
- nh_valid = bgp_find_or_add_nexthop(
- bgp, bgp_nexthop, afi, safi, bpi, NULL, 0, p);
-
- /*
- * If you are using SRv6 VPN instead of MPLS, it need to check
- * the SID allocation. If the sid is not allocated, the rib
- * will be invalid.
- */
- if (bgp->srv6_enabled
- && (!new_attr->srv6_l3vpn && !new_attr->srv6_vpn)) {
bgp_path_info_unset_flag(bn, bpi, BGP_PATH_VALID);
- nh_valid = false;
- }
-
- if (debug)
- zlog_debug("%s: nexthop is %svalid (in vrf %s)",
- __func__, (nh_valid ? "" : "not "),
- bgp_nexthop->name_pretty);
-
- if (nh_valid)
- bgp_path_info_set_flag(bn, bpi, BGP_PATH_VALID);
/* Process change. */
- bgp_aggregate_increment(bgp, p, bpi, afi, safi);
- bgp_process(bgp, bn, afi, safi);
+ bgp_aggregate_increment(to_bgp, p, bpi, afi, safi);
+ bgp_process(to_bgp, bn, afi, safi);
bgp_dest_unlock_node(bn);
if (debug)
zlog_debug("%s: ->%s: %pBD Found route, changed attr",
- __func__, bgp->name_pretty, bn);
+ __func__, to_bgp->name_pretty, bn);
return bpi;
}
@@ -970,14 +988,14 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
if (debug) {
zlog_debug(
"%s: ->%s(s_flags: 0x%x): %pFX: New route, being removed, not leaking",
- __func__, bgp->name_pretty,
+ __func__, to_bgp->name_pretty,
source_bpi->flags, p);
}
return NULL;
}
new = info_make(ZEBRA_ROUTE_BGP, BGP_ROUTE_IMPORTED, 0,
- bgp->peer_self, new_attr, bn);
+ to_bgp->peer_self, new_attr, bn);
if (nexthop_self_flag)
bgp_path_info_set_flag(bn, new, BGP_PATH_ANNC_NH_SELF);
@@ -1019,67 +1037,28 @@ leak_update(struct bgp *bgp, /* destination bgp instance */
if (nexthop_orig)
new->extra->nexthop_orig = *nexthop_orig;
- /*
- * nexthop tracking for unicast routes
- */
- struct bgp *bgp_nexthop = bgp;
- int nh_valid;
-
- if (new->extra->bgp_orig)
- bgp_nexthop = new->extra->bgp_orig;
-
- /*
- * No nexthop tracking for redistributed routes because
- * their originating protocols will do the tracking and
- * withdraw those routes if the nexthops become unreachable
- * This also holds good for EVPN-imported routes that get
- * leaked.
- */
- if (bpi_ultimate->sub_type == BGP_ROUTE_REDISTRIBUTE ||
- is_pi_family_evpn(bpi_ultimate))
- nh_valid = 1;
+ if (leak_update_nexthop_valid(to_bgp, bn, new_attr, afi, safi,
+ source_bpi, new, bgp_orig, p, debug))
+ bgp_path_info_set_flag(bn, new, BGP_PATH_VALID);
else
- /*
- * TBD do we need to do anything about the
- * 'connected' parameter?
- */
- nh_valid = bgp_find_or_add_nexthop(bgp, bgp_nexthop, afi, safi,
- new, NULL, 0, p);
-
- /*
- * If you are using SRv6 VPN instead of MPLS, it need to check
- * the SID allocation. If the sid is not allocated, the rib
- * will be invalid.
- */
- if (bgp->srv6_enabled
- && (!new->attr->srv6_l3vpn && !new->attr->srv6_vpn)) {
bgp_path_info_unset_flag(bn, new, BGP_PATH_VALID);
- nh_valid = false;
- }
-
- if (debug)
- zlog_debug("%s: nexthop is %svalid (in vrf %s)",
- __func__, (nh_valid ? "" : "not "),
- bgp_nexthop->name_pretty);
- if (nh_valid)
- bgp_path_info_set_flag(bn, new, BGP_PATH_VALID);
- bgp_aggregate_increment(bgp, p, new, afi, safi);
+ bgp_aggregate_increment(to_bgp, p, new, afi, safi);
bgp_path_info_add(bn, new);
bgp_dest_unlock_node(bn);
- bgp_process(bgp, bn, afi, safi);
+ bgp_process(to_bgp, bn, afi, safi);
if (debug)
zlog_debug("%s: ->%s: %pBD: Added new route", __func__,
- bgp->name_pretty, bn);
+ to_bgp->name_pretty, bn);
return new;
}
/* cf vnc_import_bgp_add_route_mode_nvegroup() and add_vnc_route() */
-void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
- struct bgp *bgp_vrf, /* from */
+void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */
+ struct bgp *from_bgp, /* from */
struct bgp_path_info *path_vrf) /* route */
{
int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF);
@@ -1095,7 +1074,7 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
int nexthop_self_flag = 0;
if (debug)
- zlog_debug("%s: from vrf %s", __func__, bgp_vrf->name_pretty);
+ zlog_debug("%s: from vrf %s", __func__, from_bgp->name_pretty);
if (debug && bgp_attr_get_ecommunity(path_vrf->attr)) {
char *s = ecommunity_ecom2str(
@@ -1103,11 +1082,11 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
zlog_debug("%s: %s path_vrf->type=%d, EC{%s}", __func__,
- bgp_vrf->name, path_vrf->type, s);
+ from_bgp->name, path_vrf->type, s);
XFREE(MTYPE_ECOMMUNITY_STR, s);
}
- if (!bgp_vpn)
+ if (!to_bgp)
return;
if (!afi) {
@@ -1120,10 +1099,10 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
if (!is_route_injectable_into_vpn(path_vrf))
return;
- if (!vpn_leak_to_vpn_active(bgp_vrf, afi, &debugmsg)) {
+ if (!vpn_leak_to_vpn_active(from_bgp, afi, &debugmsg)) {
if (debug)
zlog_debug("%s: %s skipping: %s", __func__,
- bgp_vrf->name, debugmsg);
+ from_bgp->name, debugmsg);
return;
}
@@ -1133,23 +1112,23 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
/*
* route map handling
*/
- if (bgp_vrf->vpn_policy[afi].rmap[BGP_VPN_POLICY_DIR_TOVPN]) {
+ if (from_bgp->vpn_policy[afi].rmap[BGP_VPN_POLICY_DIR_TOVPN]) {
struct bgp_path_info info;
route_map_result_t ret;
memset(&info, 0, sizeof(info));
- info.peer = bgp_vpn->peer_self;
+ info.peer = to_bgp->peer_self;
info.attr = &static_attr;
- ret = route_map_apply(
- bgp_vrf->vpn_policy[afi].rmap[BGP_VPN_POLICY_DIR_TOVPN],
- p, &info);
+ ret = route_map_apply(from_bgp->vpn_policy[afi]
+ .rmap[BGP_VPN_POLICY_DIR_TOVPN],
+ p, &info);
if (RMAP_DENYMATCH == ret) {
bgp_attr_flush(&static_attr); /* free any added parts */
if (debug)
zlog_debug(
"%s: vrf %s route map \"%s\" says DENY, returning",
- __func__, bgp_vrf->name_pretty,
- bgp_vrf->vpn_policy[afi]
+ __func__, from_bgp->name_pretty,
+ from_bgp->vpn_policy[afi]
.rmap[BGP_VPN_POLICY_DIR_TOVPN]
->name);
return;
@@ -1177,17 +1156,17 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
old_ecom = bgp_attr_get_ecommunity(&static_attr);
if (old_ecom) {
new_ecom = ecommunity_dup(old_ecom);
- if (CHECK_FLAG(bgp_vrf->af_flags[afi][SAFI_UNICAST],
- BGP_CONFIG_VRF_TO_VRF_EXPORT))
+ if (CHECK_FLAG(from_bgp->af_flags[afi][SAFI_UNICAST],
+ BGP_CONFIG_VRF_TO_VRF_EXPORT))
ecommunity_strip_rts(new_ecom);
- new_ecom = ecommunity_merge(new_ecom,
- bgp_vrf->vpn_policy[afi]
- .rtlist[BGP_VPN_POLICY_DIR_TOVPN]);
+ new_ecom = ecommunity_merge(
+ new_ecom, from_bgp->vpn_policy[afi]
+ .rtlist[BGP_VPN_POLICY_DIR_TOVPN]);
if (!old_ecom->refcnt)
ecommunity_free(&old_ecom);
} else {
new_ecom = ecommunity_dup(
- bgp_vrf->vpn_policy[afi]
+ from_bgp->vpn_policy[afi]
.rtlist[BGP_VPN_POLICY_DIR_TOVPN]);
}
bgp_attr_set_ecommunity(&static_attr, new_ecom);
@@ -1204,10 +1183,10 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
/* Nexthop */
/* if policy nexthop not set, use 0 */
- if (CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags,
+ if (CHECK_FLAG(from_bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_NEXTHOP_SET)) {
struct prefix *nexthop =
- &bgp_vrf->vpn_policy[afi].tovpn_nexthop;
+ &from_bgp->vpn_policy[afi].tovpn_nexthop;
switch (nexthop->family) {
case AF_INET:
@@ -1228,7 +1207,7 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
assert(0);
}
} else {
- if (!CHECK_FLAG(bgp_vrf->af_flags[afi][SAFI_UNICAST],
+ if (!CHECK_FLAG(from_bgp->af_flags[afi][SAFI_UNICAST],
BGP_CONFIG_VRF_TO_VRF_EXPORT)) {
if (afi == AFI_IP) {
/*
@@ -1266,7 +1245,7 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
nexthop_self_flag = 1;
}
- label_val = bgp_vrf->vpn_policy[afi].tovpn_label;
+ label_val = from_bgp->vpn_policy[afi].tovpn_label;
if (label_val == MPLS_LABEL_NONE) {
encode_label(MPLS_LABEL_IMPLICIT_NULL, &label);
} else {
@@ -1275,12 +1254,13 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
/* Set originator ID to "me" */
SET_FLAG(static_attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID));
- static_attr.originator_id = bgp_vpn->router_id;
+ static_attr.originator_id = to_bgp->router_id;
/* Set SID for SRv6 VPN */
- if (bgp_vrf->vpn_policy[afi].tovpn_sid_locator) {
- encode_label(bgp_vrf->vpn_policy[afi].tovpn_sid_transpose_label,
- &label);
+ if (from_bgp->vpn_policy[afi].tovpn_sid_locator) {
+ encode_label(
+ from_bgp->vpn_policy[afi].tovpn_sid_transpose_label,
+ &label);
static_attr.srv6_l3vpn = XCALLOC(MTYPE_BGP_SRV6_L3VPN,
sizeof(struct bgp_attr_srv6_l3vpn));
static_attr.srv6_l3vpn->sid_flags = 0x00;
@@ -1298,7 +1278,7 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
static_attr.srv6_l3vpn->transposition_offset =
BGP_PREFIX_SID_SRV6_TRANSPOSITION_OFFSET;
memcpy(&static_attr.srv6_l3vpn->sid,
- bgp_vrf->vpn_policy[afi].tovpn_sid_locator,
+ from_bgp->vpn_policy[afi].tovpn_sid_locator,
sizeof(struct in6_addr));
}
@@ -1317,14 +1297,14 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
/* Now new_attr is an allocated interned attr */
- bn = bgp_afi_node_get(bgp_vpn->rib[afi][safi], afi, safi, p,
- &(bgp_vrf->vpn_policy[afi].tovpn_rd));
+ bn = bgp_afi_node_get(to_bgp->rib[afi][safi], afi, safi, p,
+ &(from_bgp->vpn_policy[afi].tovpn_rd));
struct bgp_path_info *new_info;
- new_info = leak_update(bgp_vpn, bn, new_attr, afi, safi, path_vrf,
- &label, 1, path_vrf, bgp_vrf, NULL,
- nexthop_self_flag, debug);
+ new_info =
+ leak_update(to_bgp, bn, new_attr, afi, safi, path_vrf, &label,
+ 1, from_bgp, NULL, nexthop_self_flag, debug);
/*
* Routes actually installed in the vpn RIB must also be
@@ -1336,11 +1316,11 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
* because of loop checking.
*/
if (new_info)
- vpn_leak_to_vrf_update(bgp_vrf, new_info);
+ vpn_leak_to_vrf_update(from_bgp, new_info);
}
-void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn, /* to */
- struct bgp *bgp_vrf, /* from */
+void vpn_leak_from_vrf_withdraw(struct bgp *to_bgp, /* to */
+ struct bgp *from_bgp, /* from */
struct bgp_path_info *path_vrf) /* route */
{
int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF);
@@ -1354,11 +1334,11 @@ void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn, /* to */
if (debug) {
zlog_debug(
"%s: entry: leak-from=%s, p=%pBD, type=%d, sub_type=%d",
- __func__, bgp_vrf->name_pretty, path_vrf->net,
+ __func__, from_bgp->name_pretty, path_vrf->net,
path_vrf->type, path_vrf->sub_type);
}
- if (!bgp_vpn)
+ if (!to_bgp)
return;
if (!afi) {
@@ -1371,7 +1351,7 @@ void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn, /* to */
if (!is_route_injectable_into_vpn(path_vrf))
return;
- if (!vpn_leak_to_vpn_active(bgp_vrf, afi, &debugmsg)) {
+ if (!vpn_leak_to_vpn_active(from_bgp, afi, &debugmsg)) {
if (debug)
zlog_debug("%s: skipping: %s", __func__, debugmsg);
return;
@@ -1380,8 +1360,8 @@ void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn, /* to */
if (debug)
zlog_debug("%s: withdrawing (path_vrf=%p)", __func__, path_vrf);
- bn = bgp_afi_node_get(bgp_vpn->rib[afi][safi], afi, safi, p,
- &(bgp_vrf->vpn_policy[afi].tovpn_rd));
+ bn = bgp_afi_node_get(to_bgp->rib[afi][safi], afi, safi, p,
+ &(from_bgp->vpn_policy[afi].tovpn_rd));
if (!bn)
return;
@@ -1397,17 +1377,16 @@ void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn, /* to */
if (bpi) {
/* withdraw from looped vrfs as well */
- vpn_leak_to_vrf_withdraw(bgp_vpn, bpi);
+ vpn_leak_to_vrf_withdraw(to_bgp, bpi);
- bgp_aggregate_decrement(bgp_vpn, p, bpi, afi, safi);
+ bgp_aggregate_decrement(to_bgp, p, bpi, afi, safi);
bgp_path_info_delete(bn, bpi);
- bgp_process(bgp_vpn, bn, afi, safi);
+ bgp_process(to_bgp, bn, afi, safi);
}
bgp_dest_unlock_node(bn);
}
-void vpn_leak_from_vrf_withdraw_all(struct bgp *bgp_vpn, /* to */
- struct bgp *bgp_vrf, /* from */
+void vpn_leak_from_vrf_withdraw_all(struct bgp *to_bgp, struct bgp *from_bgp,
afi_t afi)
{
int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF);
@@ -1415,9 +1394,9 @@ void vpn_leak_from_vrf_withdraw_all(struct bgp *bgp_vpn, /* to */
safi_t safi = SAFI_MPLS_VPN;
/*
- * Walk vpn table, delete bpi with bgp_orig == bgp_vrf
+ * Walk vpn table, delete bpi with bgp_orig == from_bgp
*/
- for (pdest = bgp_table_top(bgp_vpn->rib[afi][safi]); pdest;
+ for (pdest = bgp_table_top(to_bgp->rib[afi][safi]); pdest;
pdest = bgp_route_next(pdest)) {
struct bgp_table *table;
@@ -1446,28 +1425,26 @@ void vpn_leak_from_vrf_withdraw_all(struct bgp *bgp_vpn, /* to */
continue;
if (!bpi->extra)
continue;
- if ((struct bgp *)bpi->extra->bgp_orig
- == bgp_vrf) {
+ if ((struct bgp *)bpi->extra->bgp_orig ==
+ from_bgp) {
/* delete route */
if (debug)
zlog_debug("%s: deleting it",
__func__);
/* withdraw from leak-to vrfs as well */
- vpn_leak_to_vrf_withdraw(bgp_vpn, bpi);
+ vpn_leak_to_vrf_withdraw(to_bgp, bpi);
bgp_aggregate_decrement(
- bgp_vpn,
- bgp_dest_get_prefix(bn), bpi,
- afi, safi);
+ to_bgp, bgp_dest_get_prefix(bn),
+ bpi, afi, safi);
bgp_path_info_delete(bn, bpi);
- bgp_process(bgp_vpn, bn, afi, safi);
+ bgp_process(to_bgp, bn, afi, safi);
}
}
}
}
}
-void vpn_leak_from_vrf_update_all(struct bgp *bgp_vpn, /* to */
- struct bgp *bgp_vrf, /* from */
+void vpn_leak_from_vrf_update_all(struct bgp *to_bgp, struct bgp *from_bgp,
afi_t afi)
{
struct bgp_dest *bn;
@@ -1476,9 +1453,9 @@ void vpn_leak_from_vrf_update_all(struct bgp *bgp_vpn, /* to */
if (debug)
zlog_debug("%s: entry, afi=%d, vrf=%s", __func__, afi,
- bgp_vrf->name_pretty);
+ from_bgp->name_pretty);
- for (bn = bgp_table_top(bgp_vrf->rib[afi][SAFI_UNICAST]); bn;
+ for (bn = bgp_table_top(from_bgp->rib[afi][SAFI_UNICAST]); bn;
bn = bgp_route_next(bn)) {
if (debug)
@@ -1490,14 +1467,14 @@ void vpn_leak_from_vrf_update_all(struct bgp *bgp_vpn, /* to */
zlog_debug(
"%s: calling vpn_leak_from_vrf_update",
__func__);
- vpn_leak_from_vrf_update(bgp_vpn, bgp_vrf, bpi);
+ vpn_leak_from_vrf_update(to_bgp, from_bgp, bpi);
}
}
}
static void
-vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
- struct bgp *bgp_vpn, /* from */
+vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
+ struct bgp *from_bgp, /* from */
struct bgp_path_info *path_vpn) /* route */
{
const struct prefix *p = bgp_dest_get_prefix(path_vpn->net);
@@ -1518,7 +1495,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
- if (!vpn_leak_from_vpn_active(bgp_vrf, afi, &debugmsg)) {
+ if (!vpn_leak_from_vpn_active(to_bgp, afi, &debugmsg)) {
if (debug)
zlog_debug("%s: skipping: %s", __func__, debugmsg);
return;
@@ -1526,18 +1503,18 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
/* Check for intersection of route targets */
if (!ecom_intersect(
- bgp_vrf->vpn_policy[afi].rtlist[BGP_VPN_POLICY_DIR_FROMVPN],
+ to_bgp->vpn_policy[afi].rtlist[BGP_VPN_POLICY_DIR_FROMVPN],
bgp_attr_get_ecommunity(path_vpn->attr))) {
if (debug)
zlog_debug(
"from vpn (%s) to vrf (%s), skipping after no intersection of route targets",
- bgp_vpn->name_pretty, bgp_vrf->name_pretty);
+ from_bgp->name_pretty, to_bgp->name_pretty);
return;
}
if (debug)
zlog_debug("%s: updating %pFX to vrf %s", __func__, p,
- bgp_vrf->name_pretty);
+ to_bgp->name_pretty);
/* shallow copy */
static_attr = *path_vpn->attr;
@@ -1547,8 +1524,8 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
/* If doing VRF-to-VRF leaking, strip RTs. */
old_ecom = bgp_attr_get_ecommunity(&static_attr);
- if (old_ecom && CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
- BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
+ if (old_ecom && CHECK_FLAG(to_bgp->af_flags[afi][safi],
+ BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
new_ecom = ecommunity_dup(old_ecom);
ecommunity_strip_rts(new_ecom);
bgp_attr_set_ecommunity(&static_attr, new_ecom);
@@ -1580,7 +1557,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
nexthop_orig.u.prefix4 = path_vpn->attr->mp_nexthop_global_in;
nexthop_orig.prefixlen = IPV4_MAX_BITLEN;
- if (CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
+ if (CHECK_FLAG(to_bgp->af_flags[afi][safi],
BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
static_attr.nexthop.s_addr =
nexthop_orig.u.prefix4.s_addr;
@@ -1597,7 +1574,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
nexthop_orig.u.prefix6 = path_vpn->attr->mp_nexthop_global;
nexthop_orig.prefixlen = IPV6_MAX_BITLEN;
- if (CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
+ if (CHECK_FLAG(to_bgp->af_flags[afi][safi],
BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
static_attr.mp_nexthop_global = nexthop_orig.u.prefix6;
}
@@ -1607,15 +1584,15 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
/*
* route map handling
*/
- if (bgp_vrf->vpn_policy[afi].rmap[BGP_VPN_POLICY_DIR_FROMVPN]) {
+ if (to_bgp->vpn_policy[afi].rmap[BGP_VPN_POLICY_DIR_FROMVPN]) {
struct bgp_path_info info;
route_map_result_t ret;
memset(&info, 0, sizeof(info));
- info.peer = bgp_vrf->peer_self;
+ info.peer = to_bgp->peer_self;
info.attr = &static_attr;
info.extra = path_vpn->extra; /* Used for source-vrf filter */
- ret = route_map_apply(bgp_vrf->vpn_policy[afi]
+ ret = route_map_apply(to_bgp->vpn_policy[afi]
.rmap[BGP_VPN_POLICY_DIR_FROMVPN],
p, &info);
if (RMAP_DENYMATCH == ret) {
@@ -1623,8 +1600,8 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
if (debug)
zlog_debug(
"%s: vrf %s vpn-policy route map \"%s\" says DENY, returning",
- __func__, bgp_vrf->name_pretty,
- bgp_vrf->vpn_policy[afi]
+ __func__, to_bgp->name_pretty,
+ to_bgp->vpn_policy[afi]
.rmap[BGP_VPN_POLICY_DIR_FROMVPN]
->name);
return;
@@ -1640,7 +1617,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
new_attr = bgp_attr_intern(&static_attr);
bgp_attr_flush(&static_attr);
- bn = bgp_afi_node_get(bgp_vrf->rib[afi][safi], afi, safi, p, NULL);
+ bn = bgp_afi_node_get(to_bgp->rib[afi][safi], afi, safi, p, NULL);
/*
* ensure labels are copied
@@ -1654,7 +1631,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
* labels for these routes enables the non-labeled nexthops
* from the originating VRF to be considered valid for this route.
*/
- if (!CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
+ if (!CHECK_FLAG(to_bgp->af_flags[afi][safi],
BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
/* work back to original route */
bpi_ultimate = bgp_get_imported_bpi_ultimate(path_vpn);
@@ -1692,14 +1669,14 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *bgp_vrf, /* to */
if (path_vpn->extra && path_vpn->extra->bgp_orig)
src_vrf = path_vpn->extra->bgp_orig;
else
- src_vrf = bgp_vpn;
+ src_vrf = from_bgp;
- leak_update(bgp_vrf, bn, new_attr, afi, safi, path_vpn, pLabels,
- num_labels, path_vpn, /* parent */
- src_vrf, &nexthop_orig, nexthop_self_flag, debug);
+ leak_update(to_bgp, bn, new_attr, afi, safi, path_vpn, pLabels,
+ num_labels, src_vrf, &nexthop_orig, nexthop_self_flag,
+ debug);
}
-void vpn_leak_to_vrf_update(struct bgp *bgp_vpn, /* from */
+void vpn_leak_to_vrf_update(struct bgp *from_bgp, /* from */
struct bgp_path_info *path_vpn) /* route */
{
struct listnode *mnode, *mnnode;
@@ -1715,12 +1692,12 @@ void vpn_leak_to_vrf_update(struct bgp *bgp_vpn, /* from */
if (!path_vpn->extra
|| path_vpn->extra->bgp_orig != bgp) { /* no loop */
- vpn_leak_to_vrf_update_onevrf(bgp, bgp_vpn, path_vpn);
+ vpn_leak_to_vrf_update_onevrf(bgp, from_bgp, path_vpn);
}
}
}
-void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn, /* from */
+void vpn_leak_to_vrf_withdraw(struct bgp *from_bgp, /* from */
struct bgp_path_info *path_vpn) /* route */
{
const struct prefix *p;
@@ -1804,8 +1781,7 @@ void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn, /* from */
}
}
-void vpn_leak_to_vrf_withdraw_all(struct bgp *bgp_vrf, /* to */
- afi_t afi)
+void vpn_leak_to_vrf_withdraw_all(struct bgp *to_bgp, afi_t afi)
{
struct bgp_dest *bn;
struct bgp_path_info *bpi;
@@ -1817,40 +1793,38 @@ void vpn_leak_to_vrf_withdraw_all(struct bgp *bgp_vrf, /* to */
/*
* Walk vrf table, delete bpi with bgp_orig in a different vrf
*/
- for (bn = bgp_table_top(bgp_vrf->rib[afi][safi]); bn;
+ for (bn = bgp_table_top(to_bgp->rib[afi][safi]); bn;
bn = bgp_route_next(bn)) {
for (bpi = bgp_dest_get_bgp_path_info(bn); bpi;
bpi = bpi->next) {
- if (bpi->extra
- && bpi->extra->bgp_orig != bgp_vrf
- && bpi->extra->parent
- && is_pi_family_vpn(bpi->extra->parent)) {
+ if (bpi->extra && bpi->extra->bgp_orig != to_bgp &&
+ bpi->extra->parent &&
+ is_pi_family_vpn(bpi->extra->parent)) {
/* delete route */
- bgp_aggregate_decrement(bgp_vrf,
+ bgp_aggregate_decrement(to_bgp,
bgp_dest_get_prefix(bn),
bpi, afi, safi);
bgp_path_info_delete(bn, bpi);
- bgp_process(bgp_vrf, bn, afi, safi);
+ bgp_process(to_bgp, bn, afi, safi);
}
}
}
}
-void vpn_leak_to_vrf_update_all(struct bgp *bgp_vrf, /* to */
- struct bgp *bgp_vpn, /* from */
+void vpn_leak_to_vrf_update_all(struct bgp *to_bgp, struct bgp *vpn_from,
afi_t afi)
{
struct bgp_dest *pdest;
safi_t safi = SAFI_MPLS_VPN;
- assert(bgp_vpn);
+ assert(vpn_from);
/*
* Walk vpn table
*/
- for (pdest = bgp_table_top(bgp_vpn->rib[afi][safi]); pdest;
+ for (pdest = bgp_table_top(vpn_from->rib[afi][safi]); pdest;
pdest = bgp_route_next(pdest)) {
struct bgp_table *table;
struct bgp_dest *bn;
@@ -1867,11 +1841,11 @@ void vpn_leak_to_vrf_update_all(struct bgp *bgp_vrf, /* to */
for (bpi = bgp_dest_get_bgp_path_info(bn); bpi;
bpi = bpi->next) {
- if (bpi->extra
- && bpi->extra->bgp_orig == bgp_vrf)
+ if (bpi->extra &&
+ bpi->extra->bgp_orig == to_bgp)
continue;
- vpn_leak_to_vrf_update_onevrf(bgp_vrf, bgp_vpn,
+ vpn_leak_to_vrf_update_onevrf(to_bgp, vpn_from,
bpi);
}
}
diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index 8c2eae279c..fcabb16435 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -53,27 +53,27 @@ extern int bgp_show_mpls_vpn(struct vty *vty, afi_t afi, struct prefix_rd *prd,
enum bgp_show_type type, void *output_arg,
int tags, bool use_json);
-extern void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, struct bgp *bgp_vrf,
+extern void vpn_leak_from_vrf_update(struct bgp *to_bgp, struct bgp *from_bgp,
struct bgp_path_info *path_vrf);
-extern void vpn_leak_from_vrf_withdraw(struct bgp *bgp_vpn, struct bgp *bgp_vrf,
+extern void vpn_leak_from_vrf_withdraw(struct bgp *to_bgp, struct bgp *from_bgp,
struct bgp_path_info *path_vrf);
-extern void vpn_leak_from_vrf_withdraw_all(struct bgp *bgp_vpn,
- struct bgp *bgp_vrf, afi_t afi);
+extern void vpn_leak_from_vrf_withdraw_all(struct bgp *to_bgp,
+ struct bgp *from_bgp, afi_t afi);
-extern void vpn_leak_from_vrf_update_all(struct bgp *bgp_vpn,
- struct bgp *bgp_vrf, afi_t afi);
+extern void vpn_leak_from_vrf_update_all(struct bgp *to_bgp,
+ struct bgp *from_bgp, afi_t afi);
-extern void vpn_leak_to_vrf_withdraw_all(struct bgp *bgp_vrf, afi_t afi);
+extern void vpn_leak_to_vrf_withdraw_all(struct bgp *to_bgp, afi_t afi);
-extern void vpn_leak_to_vrf_update_all(struct bgp *bgp_vrf, struct bgp *bgp_vpn,
+extern void vpn_leak_to_vrf_update_all(struct bgp *to_bgp, struct bgp *from_bgp,
afi_t afi);
-extern void vpn_leak_to_vrf_update(struct bgp *bgp_vpn,
+extern void vpn_leak_to_vrf_update(struct bgp *from_bgp,
struct bgp_path_info *path_vpn);
-extern void vpn_leak_to_vrf_withdraw(struct bgp *bgp_vpn,
+extern void vpn_leak_to_vrf_withdraw(struct bgp *from_bgp,
struct bgp_path_info *path_vpn);
extern void vpn_leak_zebra_vrf_label_update(struct bgp *bgp, afi_t afi);
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index 9def9622d9..7613ccc7df 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -780,7 +780,7 @@ struct bgp_notify bgp_notify_decapsulate_hard_reset(struct bgp_notify *notify)
bn.subcode = notify->raw_data[1];
bn.length = notify->length - 2;
- bn.raw_data = XCALLOC(MTYPE_BGP_NOTIFICATION, bn.length);
+ bn.raw_data = XMALLOC(MTYPE_BGP_NOTIFICATION, bn.length);
memcpy(bn.raw_data, notify->raw_data + 2, bn.length);
return bn;
@@ -2121,6 +2121,12 @@ static int bgp_notify_receive(struct peer *peer, bgp_size_t size)
if (outer.length) {
XFREE(MTYPE_BGP_NOTIFICATION, outer.data);
XFREE(MTYPE_BGP_NOTIFICATION, outer.raw_data);
+
+ /* If this is a Hard Reset notification, we MUST free
+ * the inner (encapsulated) notification too.
+ */
+ if (hard_reset)
+ XFREE(MTYPE_BGP_NOTIFICATION, inner.raw_data);
outer.length = 0;
}
}
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index c2b6632643..ea8d2330c5 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -421,10 +421,9 @@ static unsigned int updgrp_hash_key_make(const void *p)
if (bgp_debug_neighbor_events(peer)) {
zlog_debug(
- "%pBP Update Group Hash: sort: %d UpdGrpFlags: %" PRIu64
- " UpdGrpAFFlags: %u",
+ "%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %u",
peer, peer->sort,
- (uint64_t)(peer->flags & PEER_UPDGRP_FLAGS),
+ (intmax_t)(peer->flags & PEER_UPDGRP_FLAGS),
flags & PEER_UPDGRP_AF_FLAGS);
zlog_debug(
"%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u",
@@ -464,10 +463,8 @@ static unsigned int updgrp_hash_key_make(const void *p)
peer->shared_network &&
peer_afi_active_nego(peer, AFI_IP6));
zlog_debug(
- "%pBP Update Group Hash: Lonesoul: %" PRIu64
- " ORF prefix: %u ORF old: %u max prefix out: %u",
- peer,
- (uint64_t)CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
+ "%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %u",
+ peer, !!CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
CHECK_FLAG(peer->af_cap[afi][safi],
PEER_CAP_ORF_PREFIX_SM_RCV),
CHECK_FLAG(peer->af_cap[afi][safi],
@@ -1482,7 +1479,24 @@ static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
"u%" PRIu64 ":s%" PRIu64" announcing default upon default routemap %s change",
updgrp->id, subgrp->id,
ctx->policy_name);
- subgroup_default_originate(subgrp, 0);
+ if (route_map_lookup_by_name(ctx->policy_name)) {
+ /*
+				 * When there is a change in the routemap, this
+				 * flow is triggered. The routemap is still
+				 * present in lib, hence it is an update flow.
+				 * The flag needs to be unset.
+ */
+ UNSET_FLAG(subgrp->sflags,
+ SUBGRP_STATUS_DEFAULT_ORIGINATE);
+ subgroup_default_originate(subgrp, 0);
+ } else {
+ /*
+				 * This is an explicit withdraw, since the
+				 * routemap is not present in the routemap lib.
+				 * Need to pass 1 for the withdraw arg.
+ */
+ subgroup_default_originate(subgrp, 1);
+ }
}
update_subgroup_set_needs_refresh(subgrp, 0);
}
@@ -1860,6 +1874,13 @@ update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
safi = SUBGRP_SAFI(subgrp);
if (peer->default_rmap[afi][safi].name) {
+ /*
+		 * When there is a change in the routemap, this flow
+		 * will be triggered. We need to unset the flag to
+		 * ensure the update flow gets triggered.
+ */
+ UNSET_FLAG(subgrp->sflags,
+ SUBGRP_STATUS_DEFAULT_ORIGINATE);
subgroup_default_originate(subgrp, 0);
}
}
diff --git a/bgpd/bgp_updgrp.h b/bgpd/bgp_updgrp.h
index e3309ab7c5..473017c809 100644
--- a/bgpd/bgp_updgrp.h
+++ b/bgpd/bgp_updgrp.h
@@ -252,6 +252,13 @@ struct update_subgroup {
#define SUBGRP_STATUS_DEFAULT_ORIGINATE (1 << 0)
#define SUBGRP_STATUS_FORCE_UPDATES (1 << 1)
#define SUBGRP_STATUS_TABLE_REPARSING (1 << 2)
+/*
+ * This flag has been added to ensure that the SNT counters
+ * get incremented and decremented only during the creation
+ * and deletion workflows of default originate,
+ * not during the update workflow.
+ */
+#define SUBGRP_STATUS_PEER_DEFAULT_ORIGINATED (1 << 3)
uint16_t flags;
#define SUBGRP_FLAG_NEEDS_REFRESH (1 << 0)
diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c
index 518ce485af..0f7f2f4c02 100644
--- a/bgpd/bgp_updgrp_adv.c
+++ b/bgpd/bgp_updgrp_adv.c
@@ -796,8 +796,11 @@ void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)
struct peer *peer;
struct bgp_adj_out *adj;
route_map_result_t ret = RMAP_DENYMATCH;
+ route_map_result_t new_ret = RMAP_DENYMATCH;
afi_t afi;
safi_t safi;
+ int pref = 65536;
+ int new_pref = 0;
if (!subgrp)
return;
@@ -853,34 +856,45 @@ void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw)
tmp_pi.attr = &tmp_attr;
- ret = route_map_apply_ext(
+ new_ret = route_map_apply_ext(
peer->default_rmap[afi][safi].map,
- bgp_dest_get_prefix(dest), pi, &tmp_pi);
-
- if (ret == RMAP_DENYMATCH) {
- bgp_attr_flush(&tmp_attr);
- continue;
- } else {
- new_attr = bgp_attr_intern(&tmp_attr);
-
+ bgp_dest_get_prefix(dest), pi, &tmp_pi,
+ &new_pref);
+
+ if (new_ret == RMAP_PERMITMATCH) {
+ if (new_pref < pref) {
+ pref = new_pref;
+ bgp_attr_flush(new_attr);
+ new_attr = bgp_attr_intern(
+ tmp_pi.attr);
+ bgp_attr_flush(tmp_pi.attr);
+ }
subgroup_announce_reset_nhop(
(peer_cap_enhe(peer, afi, safi)
? AF_INET6
: AF_INET),
new_attr);
-
- break;
- }
- }
- if (ret == RMAP_PERMITMATCH) {
- bgp_dest_unlock_node(dest);
- break;
+ ret = new_ret;
+ } else
+ bgp_attr_flush(&tmp_attr);
}
}
bgp->peer_self->rmap_type = 0;
- if (ret == RMAP_DENYMATCH)
+ if (ret == RMAP_DENYMATCH) {
+ /*
+			 * If it is an implicit withdraw due to a routemap
+			 * deny operation, we need to set the flag back.
+			 * This is a conversion of an update flow to a
+			 * withdraw flow.
+ */
+ if (!withdraw &&
+ (!CHECK_FLAG(subgrp->sflags,
+ SUBGRP_STATUS_DEFAULT_ORIGINATE)))
+ SET_FLAG(subgrp->sflags,
+ SUBGRP_STATUS_DEFAULT_ORIGINATE);
withdraw = 1;
+ }
}
/* Check if the default route is in local BGP RIB which is
diff --git a/bgpd/bgp_updgrp_packet.c b/bgpd/bgp_updgrp_packet.c
index c4a3ca7500..88a81f255d 100644
--- a/bgpd/bgp_updgrp_packet.c
+++ b/bgpd/bgp_updgrp_packet.c
@@ -1142,7 +1142,12 @@ void subgroup_default_update_packet(struct update_subgroup *subgrp,
(void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, &vecarr);
subgroup_trigger_write(subgrp);
- subgrp->scount++;
+
+ if (!CHECK_FLAG(subgrp->sflags,
+ SUBGRP_STATUS_PEER_DEFAULT_ORIGINATED)) {
+ subgrp->scount++;
+ SET_FLAG(subgrp->sflags, SUBGRP_STATUS_PEER_DEFAULT_ORIGINATED);
+ }
}
void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)
@@ -1235,7 +1240,12 @@ void subgroup_default_withdraw_packet(struct update_subgroup *subgrp)
(void)bpacket_queue_add(SUBGRP_PKTQ(subgrp), s, NULL);
subgroup_trigger_write(subgrp);
- subgrp->scount--;
+
+ if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_PEER_DEFAULT_ORIGINATED)) {
+ subgrp->scount--;
+ UNSET_FLAG(subgrp->sflags,
+ SUBGRP_STATUS_PEER_DEFAULT_ORIGINATED);
+ }
}
static void
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 0bfcf5163f..122830343c 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -5275,9 +5275,13 @@ int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi,
{
struct peer *member;
struct listnode *node, *nnode;
+ struct update_subgroup *subgrp;
/* Set flag and configuration on peer. */
peer_af_flag_set(peer, afi, safi, PEER_FLAG_DEFAULT_ORIGINATE);
+
+ subgrp = peer_subgroup(peer, afi, safi);
+
if (rmap) {
if (!peer->default_rmap[afi][safi].name
|| strcmp(rmap, peer->default_rmap[afi][safi].name) != 0) {
@@ -5285,6 +5289,17 @@ int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi,
XFREE(MTYPE_ROUTE_MAP_NAME,
peer->default_rmap[afi][safi].name);
+ /*
+ * When there is a change in route-map policy,
+ * this flow gets triggered. Since, the default
+ * route is already originated, the flag is set.
+ * The flag should be unset here,
+ * to trigger the flow of sending update message.
+ */
+ if (subgrp)
+ UNSET_FLAG(subgrp->sflags,
+ SUBGRP_STATUS_DEFAULT_ORIGINATE);
+
route_map_counter_decrement(peer->default_rmap[afi][safi].map);
peer->default_rmap[afi][safi].name =
XSTRDUP(MTYPE_ROUTE_MAP_NAME, rmap);
@@ -5296,6 +5311,15 @@ int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi,
XFREE(MTYPE_ROUTE_MAP_NAME,
peer->default_rmap[afi][safi].name);
+ /*
+ * This is triggered in case of route-map deletion.
+ * The flag needs to be unset, to trigger the flow
+ * of sending an update message.
+ */
+ if (subgrp)
+ UNSET_FLAG(subgrp->sflags,
+ SUBGRP_STATUS_DEFAULT_ORIGINATE);
+
route_map_counter_decrement(peer->default_rmap[afi][safi].map);
peer->default_rmap[afi][safi].name = NULL;
peer->default_rmap[afi][safi].map = NULL;
diff --git a/configure.ac b/configure.ac
index 330752a79f..b7e17d3565 100644
--- a/configure.ac
+++ b/configure.ac
@@ -635,6 +635,8 @@ AC_ARG_ENABLE([isisd],
AS_HELP_STRING([--disable-isisd], [do not build isisd]))
AC_ARG_ENABLE([pimd],
AS_HELP_STRING([--disable-pimd], [do not build pimd]))
+AC_ARG_ENABLE([pim6d],
+ AS_HELP_STRING([--enable-pim6d], [build pim6d]))
AC_ARG_ENABLE([pbrd],
AS_HELP_STRING([--disable-pbrd], [do not build pbrd]))
AC_ARG_ENABLE([sharpd],
@@ -1751,6 +1753,10 @@ AS_IF([test "$enable_pimd" != "no"], [
AC_DEFINE([HAVE_PIMD], [1], [pimd])
])
+AS_IF([test "$enable_pim6d" = "yes"], [
+ AC_DEFINE([HAVE_PIM6D], [1], [pim6d])
+])
+
AS_IF([test "$enable_pbrd" != "no"], [
AC_DEFINE([HAVE_PBRD], [1], [pbrd])
])
@@ -2711,6 +2717,7 @@ AM_CONDITIONAL([BABELD], [test "$enable_babeld" != "no"])
AM_CONDITIONAL([OSPF6D], [test "$enable_ospf6d" != "no"])
AM_CONDITIONAL([ISISD], [test "$enable_isisd" != "no"])
AM_CONDITIONAL([PIMD], [test "$enable_pimd" != "no"])
+AM_CONDITIONAL([PIM6D], [test "$enable_pim6d" = "yes"])
AM_CONDITIONAL([PBRD], [test "$enable_pbrd" != "no"])
AM_CONDITIONAL([SHARPD], [test "$enable_sharpd" = "yes"])
AM_CONDITIONAL([STATICD], [test "$enable_staticd" != "no"])
diff --git a/debian/control b/debian/control
index 7c90979a22..e8bf1a8ffa 100644
--- a/debian/control
+++ b/debian/control
@@ -19,7 +19,7 @@ Build-Depends: bison,
libpcre2-dev,
libpython3-dev,
libreadline-dev,
- librtr-dev (>= 0.8.0) <!pkg.frr.nortrlib>,
+ librtr-dev (>= 0.8.0~) <!pkg.frr.nortrlib>,
libsnmp-dev,
libssh-dev <!pkg.frr.nortrlib>,
libyang2-dev,
diff --git a/debian/rules b/debian/rules
index 7a719b7c60..fdb458e6a8 100755
--- a/debian/rules
+++ b/debian/rules
@@ -27,6 +27,12 @@ else
CONF_LUA=--enable-scripting
endif
+ifeq ($(filter pkg.frr.pim6d,$(DEB_BUILD_PROFILES)),)
+ CONF_PIM6=--disable-pim6d
+else
+ CONF_PIM6=--enable-pim6d
+endif
+
export PYTHON=python3
%:
@@ -46,6 +52,7 @@ override_dh_auto_configure:
\
$(CONF_RPKI) \
$(CONF_LUA) \
+ $(CONF_PIM6) \
--with-libpam \
--enable-doc \
--enable-doc-html \
diff --git a/doc/developer/packaging-debian.rst b/doc/developer/packaging-debian.rst
index a81e052490..9aeb78c4fd 100644
--- a/doc/developer/packaging-debian.rst
+++ b/doc/developer/packaging-debian.rst
@@ -64,6 +64,10 @@ buster.)
+================+===================+=========================================+
| pkg.frr.rtrlib | pkg.frr.nortrlib | builds frr-rpki-rtrlib package (or not) |
+----------------+-------------------+-----------------------------------------+
+ | pkg.frr.lua | pkg.frr.nolua | builds lua scripting extension |
+ +----------------+-------------------+-----------------------------------------+
+ | pkg.frr.pim6d | pkg.frr.nopim6d | builds pim6d (work in progress) |
+ +----------------+-------------------+-----------------------------------------+
* the ``-uc -us`` options to disable signing the packages with your GPG key
diff --git a/doc/user/pimv6.rst b/doc/user/pimv6.rst
index ca7d3872bf..9bbd6abd80 100644
--- a/doc/user/pimv6.rst
+++ b/doc/user/pimv6.rst
@@ -389,6 +389,12 @@ Clear commands reset various variables.
Rescan PIMv6 OIL (output interface list).
+.. clicmd:: clear ipv6 pim [vrf NAME] bsr-data
+
+ This command will clear the BSM scope data struct. This command also
+ removes the next hop tracking for the bsr and resets the upstreams
+ for the dynamically learnt RPs.
+
PIMv6 Debug Commands
====================
diff --git a/lib/prefix.h b/lib/prefix.h
index 3a768572c4..7b2f889874 100644
--- a/lib/prefix.h
+++ b/lib/prefix.h
@@ -352,7 +352,7 @@ union prefixconstptr {
#define PREFIX_STRLEN 80
/*
- * Longest possible length of a (S,G) string is 36 bytes
+ * Longest possible length of a (S,G) string is 34 bytes
* 123.123.123.123 = 15 * 2
* (,) = 3
* NULL Character at end = 1
diff --git a/lib/resolver.c b/lib/resolver.c
index 93fa84bbe9..0d64ad86e4 100644..100755
--- a/lib/resolver.c
+++ b/lib/resolver.c
@@ -245,6 +245,9 @@ void resolver_resolve(struct resolver_query *query, int af, vrf_id_t vrf_id,
{
int ret;
+ if (hostname == NULL)
+ return;
+
if (query->callback != NULL) {
flog_err(
EC_LIB_RESOLVER,
diff --git a/lib/routemap.c b/lib/routemap.c
index 7fd5a96e5b..9529b79419 100644
--- a/lib/routemap.c
+++ b/lib/routemap.c
@@ -2540,7 +2540,8 @@ void route_map_notify_pentry_dependencies(const char *affected_name,
*/
route_map_result_t route_map_apply_ext(struct route_map *map,
const struct prefix *prefix,
- void *match_object, void *set_object)
+ void *match_object, void *set_object,
+ int *pref)
{
static int recursion = 0;
enum route_map_cmd_result_t match_ret = RMAP_NOMATCH;
@@ -2676,7 +2677,7 @@ route_map_result_t route_map_apply_ext(struct route_map *map,
ret = route_map_apply_ext(
nextrm, prefix,
match_object,
- set_object);
+ set_object, NULL);
recursion--;
}
@@ -2721,6 +2722,13 @@ route_map_apply_end:
(map ? map->name : "null"), prefix,
route_map_result_str(ret));
+ if (pref) {
+ if (index != NULL && ret == RMAP_PERMITMATCH)
+ *pref = index->pref;
+ else
+ *pref = 65536;
+ }
+
return (ret);
}
diff --git a/lib/routemap.h b/lib/routemap.h
index 13dafe6849..ad391981e0 100644
--- a/lib/routemap.h
+++ b/lib/routemap.h
@@ -482,9 +482,9 @@ struct route_map *route_map_lookup_warn_noexist(struct vty *vty, const char *nam
extern route_map_result_t route_map_apply_ext(struct route_map *map,
const struct prefix *prefix,
void *match_object,
- void *set_object);
+ void *set_object, int *pref);
#define route_map_apply(map, prefix, object) \
- route_map_apply_ext(map, prefix, object, object)
+ route_map_apply_ext(map, prefix, object, object, NULL)
extern void route_map_add_hook(void (*func)(const char *));
extern void route_map_delete_hook(void (*func)(const char *));
diff --git a/lib/typesafe.h b/lib/typesafe.h
index 06fdc52e78..50c410ad24 100644
--- a/lib/typesafe.h
+++ b/lib/typesafe.h
@@ -308,6 +308,25 @@ struct dlist_head {
static inline void typesafe_dlist_add(struct dlist_head *head,
struct dlist_item *prev, struct dlist_item *item)
{
+ /* SA on clang-11 thinks this can happen, but in reality -assuming no
+ * memory corruption- it can't. DLIST uses a "closed" ring, i.e. the
+ * termination at the end of the list is not NULL but rather a pointer
+ * back to the head. (This eliminates special-casing the first or last
+ * item.)
+ *
+ * Sadly, can't use assert() here since the libfrr assert / xref code
+ * uses typesafe lists itself... that said, if an assert tripped here
+ * we'd already be way past some memory corruption, so we might as
+ * well just take the SEGV. (In the presence of corruption, we'd see
+ * random SEGVs from places that make no sense at all anyway, an
+ * assert might actually be a red herring.)
+ *
+ * ("assume()" tells the compiler to produce code as if the condition
+ * will always hold; it doesn't have any actual effect here, it'll
+ * just SEGV out on "item->next->prev = item".)
+ */
+ assume(prev->next != NULL);
+
item->next = prev->next;
item->next->prev = item;
item->prev = prev;
diff --git a/pimd/pim6_cmd.c b/pimd/pim6_cmd.c
index 1a2829f962..b7a832681d 100644
--- a/pimd/pim6_cmd.c
+++ b/pimd/pim6_cmd.c
@@ -270,7 +270,7 @@ DEFPY (interface_ipv6_pim_drprio,
DEFPY (interface_no_ipv6_pim_drprio,
interface_no_ipv6_pim_drprio_cmd,
- "no ip pim drpriority [(1-4294967295)]",
+ "no ipv6 pim drpriority [(1-4294967295)]",
NO_STR
IPV6_STR
PIM_STR
@@ -1072,14 +1072,15 @@ DEFPY (show_ipv6_pim_neighbor_vrf_all,
DEFPY (show_ipv6_pim_nexthop,
show_ipv6_pim_nexthop_cmd,
- "show ipv6 pim [vrf NAME] nexthop",
+ "show ipv6 pim [vrf NAME] nexthop [json$json]",
SHOW_STR
IPV6_STR
PIM_STR
VRF_CMD_HELP_STR
- "PIM cached nexthop rpf information\n")
+ "PIM cached nexthop rpf information\n"
+ JSON_STR)
{
- return pim_show_nexthop_cmd_helper(vrf, vty);
+ return pim_show_nexthop_cmd_helper(vrf, vty, !!json);
}
DEFPY (show_ipv6_pim_nexthop_lookup,
@@ -1264,7 +1265,7 @@ DEFPY (clear_ipv6_mroute,
"clear ipv6 mroute [vrf NAME]$name",
CLEAR_STR
IPV6_STR
- "Reset multicast routes\n"
+ MROUTE_STR
VRF_CMD_HELP_STR)
{
struct vrf *v = pim_cmd_lookup(vty, name);
@@ -1308,6 +1309,26 @@ DEFPY (clear_ipv6_mroute_count,
return clear_ip_mroute_count_command(vty, name);
}
+DEFPY (clear_ipv6_pim_bsr_db,
+ clear_ipv6_pim_bsr_db_cmd,
+ "clear ipv6 pim [vrf NAME] bsr-data",
+ CLEAR_STR
+ IPV6_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset pim bsr data\n")
+{
+ struct vrf *v;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+ if (!v)
+ return CMD_WARNING;
+
+ pim_bsm_clear(v->info);
+
+ return CMD_SUCCESS;
+}
+
DEFPY (debug_pimv6,
debug_pimv6_cmd,
"[no] debug pimv6",
@@ -1577,6 +1598,8 @@ void pim_cmd_init(void)
install_element(ENABLE_NODE, &clear_ipv6_mroute_cmd);
install_element(ENABLE_NODE, &clear_ipv6_pim_oil_cmd);
install_element(ENABLE_NODE, &clear_ipv6_mroute_count_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_pim_bsr_db_cmd);
+
install_element(ENABLE_NODE, &debug_pimv6_cmd);
install_element(ENABLE_NODE, &debug_pimv6_nht_cmd);
install_element(ENABLE_NODE, &debug_pimv6_nht_det_cmd);
diff --git a/pimd/pim6_stubs.c b/pimd/pim6_stubs.c
deleted file mode 100644
index 8213b9e97f..0000000000
--- a/pimd/pim6_stubs.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * PIMv6 temporary stubs
- * Copyright (C) 2022 David Lamparter for NetDEF, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; see the file COPYING; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <zebra.h>
-
-#include "pimd.h"
-#include "pim_nht.h"
-#include "pim_zlookup.h"
-#include "pim_pim.h"
-#include "pim_register.h"
-#include "pim_cmd.h"
-#include "pim_bsm.h"
-
-/*
- * NH lookup / NHT
- */
-void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr addr)
-{
-}
-
-void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr)
-{
-}
-
-bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
-{
- return false;
-}
-
-void pim_bsm_proc_free(struct pim_instance *pim)
-{
-}
-
-void pim_bsm_proc_init(struct pim_instance *pim)
-{
-}
-
-struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
- struct prefix *grp)
-{
- return NULL;
-}
-
-void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
-{
-}
-
-int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
- uint32_t buf_size, bool no_fwd)
-{
- return 0;
-}
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
index 8ef3c43a99..1f7dd2f3f9 100644
--- a/pimd/pim_bsm.c
+++ b/pimd/pim_bsm.c
@@ -130,11 +130,7 @@ int pim_bsm_rpinfo_cmp(const struct bsm_rpinfo *node1,
return 1;
if (node1->hash > node2->hash)
return -1;
- if (node1->rp_address.s_addr < node2->rp_address.s_addr)
- return 1;
- if (node1->rp_address.s_addr > node2->rp_address.s_addr)
- return -1;
- return 0;
+ return pim_addr_cmp(node2->rp_address, node1->rp_address);
}
static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt,
@@ -173,11 +169,10 @@ static void pim_on_bs_timer(struct thread *t)
__func__, scope->sz_id);
pim_nht_bsr_del(scope->pim, scope->current_bsr);
-
/* Reset scope zone data */
scope->accept_nofwd_bsm = false;
scope->state = ACCEPT_ANY;
- scope->current_bsr.s_addr = INADDR_ANY;
+ scope->current_bsr = PIMADDR_ANY;
scope->current_bsr_prio = 0;
scope->current_bsr_first_ts = 0;
scope->current_bsr_last_ts = 0;
@@ -353,10 +348,9 @@ static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time)
THREAD_OFF(bsrp->g2rp_timer);
if (PIM_DEBUG_BSM)
zlog_debug(
- "%s : starting g2rp timer for grp: %pFX - rp: %pI4 with timeout %d secs(Actual Hold time : %d secs)",
- __func__, &bsrp->bsgrp_node->group,
- &bsrp->rp_address, hold_time,
- bsrp->rp_holdtime);
+ "%s : starting g2rp timer for grp: %pFX - rp: %pPAs with timeout %d secs(Actual Hold time : %d secs)",
+ __func__, &bsrp->bsgrp_node->group, &bsrp->rp_address,
+ hold_time, bsrp->rp_holdtime);
thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
&bsrp->g2rp_timer);
@@ -374,7 +368,7 @@ static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)
return;
if (PIM_DEBUG_BSM)
- zlog_debug("%s : stopping g2rp timer for grp: %pFX - rp: %pI4",
+ zlog_debug("%s : stopping g2rp timer for grp: %pFX - rp: %pPAs",
__func__, &bsrp->bsgrp_node->group,
&bsrp->rp_address);
@@ -462,8 +456,7 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
route_unlock_node(rn);
if (active && pend) {
- if ((active->rp_address.s_addr
- != pend->rp_address.s_addr))
+ if (pim_addr_cmp(active->rp_address, pend->rp_address))
pim_rp_change(pim, pend->rp_address,
bsgrp_node->group, RP_SRC_BSR);
}
@@ -531,18 +524,17 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list);
}
-static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr,
+static bool is_preferred_bsr(struct pim_instance *pim, pim_addr bsr,
uint32_t bsr_prio)
{
- if (bsr.s_addr == pim->global_scope.current_bsr.s_addr)
+ if (!pim_addr_cmp(bsr, pim->global_scope.current_bsr))
return true;
if (bsr_prio > pim->global_scope.current_bsr_prio)
return true;
else if (bsr_prio == pim->global_scope.current_bsr_prio) {
- if (ntohl(bsr.s_addr)
- >= ntohl(pim->global_scope.current_bsr.s_addr))
+ if (pim_addr_cmp(bsr, pim->global_scope.current_bsr) >= 0)
return true;
else
return false;
@@ -550,10 +542,10 @@ static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr,
return false;
}
-static void pim_bsm_update(struct pim_instance *pim, struct in_addr bsr,
+static void pim_bsm_update(struct pim_instance *pim, pim_addr bsr,
uint32_t bsr_prio)
{
- if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
+ if (pim_addr_cmp(bsr, pim->global_scope.current_bsr)) {
pim_nht_bsr_del(pim, pim->global_scope.current_bsr);
pim_nht_bsr_add(pim, bsr);
@@ -571,7 +563,7 @@ void pim_bsm_clear(struct pim_instance *pim)
struct route_node *rn;
struct route_node *rpnode;
struct bsgrp_node *bsgrp;
- struct prefix nht_p;
+ pim_addr nht_p;
struct prefix g_all;
struct rp_info *rp_all;
struct pim_upstream *up;
@@ -583,7 +575,7 @@ void pim_bsm_clear(struct pim_instance *pim)
/* Reset scope zone data */
pim->global_scope.accept_nofwd_bsm = false;
pim->global_scope.state = ACCEPT_ANY;
- pim->global_scope.current_bsr.s_addr = INADDR_ANY;
+ pim->global_scope.current_bsr = PIMADDR_ANY;
pim->global_scope.current_bsr_prio = 0;
pim->global_scope.current_bsr_first_ts = 0;
pim->global_scope.current_bsr_last_ts = 0;
@@ -617,16 +609,14 @@ void pim_bsm_clear(struct pim_instance *pim)
}
/* Deregister addr with Zebra NHT */
- nht_p.family = AF_INET;
- nht_p.prefixlen = IPV4_MAX_BITLEN;
- nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+ nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP) {
- zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
+ zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
__func__, &nht_p);
}
- pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
+ pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
if (!pim_get_all_mcast_group(&g_all))
return;
@@ -634,7 +624,7 @@ void pim_bsm_clear(struct pim_instance *pim)
rp_all = pim_rp_find_match_group(pim, &g_all);
if (rp_all == rp_info) {
- pim_addr_to_prefix(&rp_all->rp.rpf_addr, PIMADDR_ANY);
+ rp_all->rp.rpf_addr = PIMADDR_ANY;
rp_all->i_am_rp = 0;
} else {
/* Delete the rp_info from rp-list */
@@ -944,7 +934,6 @@ bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
struct pim_interface *pim_ifp;
struct bsm_scope *scope;
struct bsm_frag *bsfrag;
- char neigh_src_str[INET_ADDRSTRLEN];
uint32_t pim_mtu;
bool no_fwd = true;
bool ret = false;
@@ -982,13 +971,13 @@ bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
if (!pim_ifp->ucast_bsm_accept) {
dst_addr = qpim_all_pim_routers_addr;
if (PIM_DEBUG_BSM)
- zlog_debug("%s: Sending BSM mcast to %s", __func__,
- neigh_src_str);
+ zlog_debug("%s: Sending BSM mcast to %pPA", __func__,
+ &neigh->source_addr);
} else {
dst_addr = neigh->source_addr;
if (PIM_DEBUG_BSM)
- zlog_debug("%s: Sending BSM ucast to %s", __func__,
- neigh_src_str);
+ zlog_debug("%s: Sending BSM ucast to %pPA", __func__,
+ &neigh->source_addr);
}
pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
pim_hello_require(ifp);
@@ -1040,7 +1029,7 @@ struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
return bsgrp;
}
-static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,
+static uint32_t hash_calc_on_grp_rp(struct prefix group, pim_addr rp,
uint8_t hashmasklen)
{
uint64_t temp;
@@ -1058,13 +1047,24 @@ static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,
/* in_addr stores ip in big endian, hence network byte order
* convert to uint32 before processing hash
*/
+#if PIM_IPV == 4
grpaddr = ntohl(group.u.prefix4.s_addr);
+#else
+ grpaddr = group.u.prefix6.s6_addr32[0] ^ group.u.prefix6.s6_addr32[1] ^
+ group.u.prefix6.s6_addr32[2] ^ group.u.prefix6.s6_addr32[3];
+#endif
/* Avoid shifting by 32 bit on a 32 bit register */
if (hashmasklen)
grpaddr = grpaddr & ((mask << (32 - hashmasklen)));
else
grpaddr = grpaddr & mask;
+
+#if PIM_IPV == 4
rp_add = ntohl(rp.s_addr);
+#else
+ rp_add = rp.s6_addr32[0] ^ rp.s6_addr32[1] ^ rp.s6_addr32[2] ^
+ rp.s6_addr32[3];
+#endif
temp = 1103515245 * ((1103515245 * (uint64_t)grpaddr + 12345) ^ rp_add)
+ 12345;
hash = temp & (0x7fffffff);
@@ -1083,8 +1083,7 @@ static bool pim_install_bsm_grp_rp(struct pim_instance *pim,
bsm_rpinfo->rp_prio = rp->rp_pri;
bsm_rpinfo->rp_holdtime = rp->rp_holdtime;
- memcpy(&bsm_rpinfo->rp_address, &rp->rpaddr.addr,
- sizeof(struct in_addr));
+ bsm_rpinfo->rp_address = rp->rpaddr.addr;
bsm_rpinfo->elapse_time = 0;
/* Back pointer to the group node. */
@@ -1142,6 +1141,7 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
int frag_rp_cnt = 0;
int offset = 0;
int ins_count = 0;
+ pim_addr grp_addr;
while (buflen > offset) {
if (offset + (int)sizeof(struct bsmmsg_grpinfo) > buflen) {
@@ -1153,31 +1153,28 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
}
/* Extract Group tlv from BSM */
memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo));
+ grp_addr = grpinfo.group.addr;
- if (PIM_DEBUG_BSM) {
- char grp_str[INET_ADDRSTRLEN];
-
- pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str,
- sizeof(grp_str));
+ if (PIM_DEBUG_BSM)
zlog_debug(
- "%s, Group %s Rpcount:%d Fragment-Rp-count:%d",
- __func__, grp_str, grpinfo.rp_count,
+ "%s, Group %pPAs Rpcount:%d Fragment-Rp-count:%d",
+ __func__, &grp_addr, grpinfo.rp_count,
grpinfo.frag_rp_count);
- }
buf += sizeof(struct bsmmsg_grpinfo);
offset += sizeof(struct bsmmsg_grpinfo);
- group.family = AF_INET;
- if (grpinfo.group.mask > IPV4_MAX_BITLEN) {
+ group.family = PIM_AF;
+ if (grpinfo.group.mask > PIM_MAX_BITLEN) {
if (PIM_DEBUG_BSM)
zlog_debug(
- "%s, v4 prefix length specified: %d is too long",
+ "%s, prefix length specified: %d is too long",
__func__, grpinfo.group.mask);
return false;
}
+
+ pim_addr_to_prefix(&group, grp_addr);
group.prefixlen = grpinfo.group.mask;
- group.u.prefix4.s_addr = grpinfo.group.addr.s_addr;
/* Get the Group node for the BSM rp table */
bsgrp = pim_bsm_get_bsgrp_node(scope, &group);
@@ -1189,14 +1186,10 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
if (!bsgrp)
continue;
- if (PIM_DEBUG_BSM) {
- char grp_str[INET_ADDRSTRLEN];
-
- pim_inet4_dump("<Group?>", grpinfo.group.addr,
- grp_str, sizeof(grp_str));
- zlog_debug("%s, Rp count is zero for group: %s",
- __func__, grp_str);
- }
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s, Rp count is zero for group: %pPAs",
+ __func__, &grp_addr);
old_rpinfo = bsm_rpinfos_first(bsgrp->bsrp_list);
if (old_rpinfo)
@@ -1249,13 +1242,12 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
offset += sizeof(struct bsmmsg_rpinfo);
if (PIM_DEBUG_BSM) {
- char rp_str[INET_ADDRSTRLEN];
+ pim_addr rp_addr;
- pim_inet4_dump("<Rpaddr?>", rpinfo.rpaddr.addr,
- rp_str, sizeof(rp_str));
+ rp_addr = rpinfo.rpaddr.addr;
zlog_debug(
- "%s, Rp address - %s; pri:%d hold:%d",
- __func__, rp_str, rpinfo.rp_pri,
+ "%s, Rp address - %pPAs; pri:%d hold:%d",
+ __func__, &rp_addr, rpinfo.rp_pri,
rpinfo.rp_holdtime);
}
@@ -1287,8 +1279,8 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
struct pim_interface *pim_ifp = NULL;
struct bsm_frag *bsfrag;
struct pim_instance *pim;
- char bsr_str[INET_ADDRSTRLEN];
uint16_t frag_tag;
+ pim_addr bsr_addr;
bool empty_bsm = false;
/* BSM Packet acceptance validation */
@@ -1330,16 +1322,17 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
}
bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
- pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str,
- sizeof(bsr_str));
- if (bshdr->hm_len > IPV4_MAX_BITLEN) {
- zlog_warn("Bad hashmask length for IPv4; got %hhu, expected value in range 0-32",
- bshdr->hm_len);
+ if (bshdr->hm_len > PIM_MAX_BITLEN) {
+ zlog_warn(
+ "Bad hashmask length for %s; got %hhu, expected value in range 0-32",
+ PIM_AF_NAME, bshdr->hm_len);
pim->bsm_dropped++;
return -1;
}
pim->global_scope.hashMasklen = bshdr->hm_len;
frag_tag = ntohs(bshdr->frag_tag);
+ /* NB: bshdr->bsr_addr.addr is packed/unaligned => memcpy */
+ memcpy(&bsr_addr, &bshdr->bsr_addr.addr, sizeof(bsr_addr));
/* Identify empty BSM */
if ((buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN) < PIM_BSM_GRP_LEN)
@@ -1361,7 +1354,7 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
}
/* Drop if bsr is not preferred bsr */
- if (!is_preferred_bsr(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio)) {
+ if (!is_preferred_bsr(pim, bsr_addr, bshdr->bsr_prio)) {
if (PIM_DEBUG_BSM)
zlog_debug("%s : Received a non-preferred BSM",
__func__);
@@ -1377,30 +1370,25 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
} else {
if (PIM_DEBUG_BSM)
zlog_debug(
- "%s : nofwd_bsm received on %s when accpt_nofwd_bsm false",
- __func__, bsr_str);
+ "%s : nofwd_bsm received on %pPAs when accpt_nofwd_bsm false",
+ __func__, &bsr_addr);
pim->bsm_dropped++;
pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
return -1;
}
}
-#if PIM_IPV == 4
- if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr))
-#else
- if (0)
-#endif
- {
+ if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr)) {
/* Multicast BSMs are only accepted if source interface & IP
* match RPF towards the BSR's IP address, or they have
* no-forward set
*/
- if (!no_fwd && !pim_nht_bsr_rpf_check(pim, bshdr->bsr_addr.addr,
- ifp, sg->src)) {
+ if (!no_fwd &&
+ !pim_nht_bsr_rpf_check(pim, bsr_addr, ifp, sg->src)) {
if (PIM_DEBUG_BSM)
zlog_debug(
- "BSM check: RPF to BSR %s is not %pPA%%%s",
- bsr_str, &sg->src, ifp->name);
+ "BSM check: RPF to BSR %pPAs is not %pPA%%%s",
+ &bsr_addr, &sg->src, ifp->name);
pim->bsm_dropped++;
return -1;
}
@@ -1459,7 +1447,7 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
}
/* update the scope information from bsm */
- pim_bsm_update(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio);
+ pim_bsm_update(pim, bsr_addr, bshdr->bsr_prio);
if (!no_fwd) {
pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz);
diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h
index 910067109e..90bd2f0877 100644
--- a/pimd/pim_bsm.h
+++ b/pimd/pim_bsm.h
@@ -61,7 +61,7 @@ struct bsm_scope {
int sz_id; /* scope zone id */
enum ncbsr_state state; /* non candidate BSR state */
bool accept_nofwd_bsm; /* no fwd bsm accepted for scope */
- struct in_addr current_bsr; /* current elected BSR for the sz */
+ pim_addr current_bsr; /* current elected BSR for the sz */
uint32_t current_bsr_prio; /* current BSR priority */
int64_t current_bsr_first_ts; /* current BSR elected time */
int64_t current_bsr_last_ts; /* Last BSM received from E-BSR */
@@ -185,18 +185,30 @@ struct bsm_hdr {
uint16_t frag_tag;
uint8_t hm_len;
uint8_t bsr_prio;
+#if PIM_IPV == 4
struct pim_encoded_ipv4_unicast bsr_addr;
+#else
+ struct pim_encoded_ipv6_unicast bsr_addr;
+#endif
} __attribute__((packed));
struct bsmmsg_grpinfo {
+#if PIM_IPV == 4
struct pim_encoded_group_ipv4 group;
+#else
+ struct pim_encoded_group_ipv6 group;
+#endif
uint8_t rp_count;
uint8_t frag_rp_count;
uint16_t reserved;
} __attribute__((packed));
struct bsmmsg_rpinfo {
+#if PIM_IPV == 4
struct pim_encoded_ipv4_unicast rpaddr;
+#else
+ struct pim_encoded_ipv6_unicast rpaddr;
+#endif
uint16_t rp_holdtime;
uint8_t rp_pri;
uint8_t reserved;
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index 91cc3aa79b..f62b90a9d6 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -860,12 +860,11 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
frr_each (bsm_frags, pim->global_scope.bsm_frags, bsfrag) {
char grp_str[PREFIX_STRLEN];
- char rp_str[INET_ADDRSTRLEN];
- char bsr_str[INET_ADDRSTRLEN];
struct bsmmsg_grpinfo *group;
- struct bsmmsg_rpinfo *rpaddr;
+ struct bsmmsg_rpinfo *bsm_rpinfo;
struct prefix grp;
struct bsm_hdr *hdr;
+ pim_addr bsr_addr;
uint32_t offset = 0;
uint8_t *buf;
uint32_t len = 0;
@@ -879,17 +878,16 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
len -= PIM_MSG_HEADER_LEN;
hdr = (struct bsm_hdr *)buf;
+ /* NB: bshdr->bsr_addr.addr is packed/unaligned => memcpy */
+ memcpy(&bsr_addr, &hdr->bsr_addr.addr, sizeof(bsr_addr));
/* BSM starts with bsr header */
buf += sizeof(struct bsm_hdr);
len -= sizeof(struct bsm_hdr);
- pim_inet4_dump("<BSR Address?>", hdr->bsr_addr.addr, bsr_str,
- sizeof(bsr_str));
-
-
if (uj) {
- json_object_string_add(json, "BSR address", bsr_str);
+ json_object_string_addf(json, "BSR address", "%pPA",
+ &bsr_addr);
json_object_int_add(json, "BSR priority",
hdr->bsr_prio);
json_object_int_add(json, "Hashmask Length",
@@ -901,7 +899,7 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
vty_out(vty, "------------------\n");
vty_out(vty, "%-15s %-15s %-15s %-15s\n", "BSR-Address",
"BSR-Priority", "Hashmask-len", "Fragment-Tag");
- vty_out(vty, "%-15s %-15d %-15d %-15d\n", bsr_str,
+ vty_out(vty, "%-15pPA %-15d %-15d %-15d\n", &bsr_addr,
hdr->bsr_prio, hdr->hm_len,
ntohs(hdr->frag_tag));
}
@@ -913,9 +911,16 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
if (group->group.family == PIM_MSG_ADDRESS_FAMILY_IPV4)
grp.family = AF_INET;
+ else if (group->group.family ==
+ PIM_MSG_ADDRESS_FAMILY_IPV6)
+ grp.family = AF_INET6;
grp.prefixlen = group->group.mask;
- grp.u.prefix4.s_addr = group->group.addr.s_addr;
+#if PIM_IPV == 4
+ grp.u.prefix4 = group->group.addr;
+#else
+ grp.u.prefix6 = group->group.addr;
+#endif
prefix2str(&grp, grp_str, sizeof(grp_str));
@@ -954,31 +959,35 @@ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
"RpAddress HoldTime Priority\n");
while (frag_rp_cnt--) {
- rpaddr = (struct bsmmsg_rpinfo *)buf;
+ pim_addr rp_addr;
+
+ bsm_rpinfo = (struct bsmmsg_rpinfo *)buf;
+ /* unaligned, again */
+ memcpy(&rp_addr, &bsm_rpinfo->rpaddr,
+ sizeof(rp_addr));
buf += sizeof(struct bsmmsg_rpinfo);
offset += sizeof(struct bsmmsg_rpinfo);
- pim_inet4_dump("<Rp addr?>",
- rpaddr->rpaddr.addr, rp_str,
- sizeof(rp_str));
-
if (uj) {
json_row = json_object_new_object();
- json_object_string_add(
- json_row, "Rp Address", rp_str);
+ json_object_string_addf(
+ json_row, "Rp Address", "%pPA",
+ &rp_addr);
json_object_int_add(
json_row, "Rp HoldTime",
- ntohs(rpaddr->rp_holdtime));
+ ntohs(bsm_rpinfo->rp_holdtime));
json_object_int_add(json_row,
"Rp Priority",
- rpaddr->rp_pri);
- json_object_object_add(
- json_group, rp_str, json_row);
+ bsm_rpinfo->rp_pri);
+ json_object_object_addf(
+ json_group, json_row, "%pPA",
+ &rp_addr);
} else {
- vty_out(vty, "%-15s %-12d %d\n", rp_str,
- ntohs(rpaddr->rp_holdtime),
- rpaddr->rp_pri);
+ vty_out(vty, "%-15pPA %-12d %d\n",
+ &rp_addr,
+ ntohs(bsm_rpinfo->rp_holdtime),
+ bsm_rpinfo->rp_pri);
}
}
vty_out(vty, "\n");
@@ -998,24 +1007,17 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,
struct bsgrp_node *bsgrp;
struct bsm_rpinfo *bsm_rp;
struct route_node *rn;
- char bsr_str[INET_ADDRSTRLEN];
json_object *json = NULL;
json_object *json_group = NULL;
json_object *json_row = NULL;
- if (pim->global_scope.current_bsr.s_addr == INADDR_ANY)
- strlcpy(bsr_str, "0.0.0.0", sizeof(bsr_str));
-
- else
- pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr, bsr_str,
- sizeof(bsr_str));
-
if (uj) {
json = json_object_new_object();
- json_object_string_add(json, "BSR Address", bsr_str);
- } else {
- vty_out(vty, "BSR Address %s\n", bsr_str);
- }
+ json_object_string_addf(json, "BSR Address", "%pPA",
+ &pim->global_scope.current_bsr);
+ } else
+ vty_out(vty, "BSR Address %pPA\n",
+ &pim->global_scope.current_bsr);
for (rn = route_top(pim->global_scope.bsrp_table); rn;
rn = route_next(rn)) {
@@ -1045,27 +1047,24 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,
}
frr_each (bsm_rpinfos, bsgrp->bsrp_list, bsm_rp) {
- char rp_str[INET_ADDRSTRLEN];
-
- pim_inet4_dump("<Rp Address?>", bsm_rp->rp_address,
- rp_str, sizeof(rp_str));
-
if (uj) {
json_row = json_object_new_object();
- json_object_string_add(json_row, "Rp Address",
- rp_str);
+ json_object_string_addf(json_row, "Rp Address",
+ "%pPA",
+ &bsm_rp->rp_address);
json_object_int_add(json_row, "Rp HoldTime",
bsm_rp->rp_holdtime);
json_object_int_add(json_row, "Rp Priority",
bsm_rp->rp_prio);
json_object_int_add(json_row, "Hash Val",
bsm_rp->hash);
- json_object_object_add(json_group, rp_str,
- json_row);
+ json_object_object_addf(json_group, json_row,
+ "%pPA",
+ &bsm_rp->rp_address);
} else {
- vty_out(vty, "%-15s %-15u %-15u %-15u\n",
- rp_str, bsm_rp->rp_prio,
+ vty_out(vty, "%-15pPA %-15u %-15u %-15u\n",
+ &bsm_rp->rp_address, bsm_rp->rp_prio,
bsm_rp->rp_holdtime, bsm_rp->hash);
}
}
@@ -1086,26 +1085,23 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim,
}
frr_each (bsm_rpinfos, bsgrp->partial_bsrp_list, bsm_rp) {
- char rp_str[INET_ADDRSTRLEN];
-
- pim_inet4_dump("<Rp Addr?>", bsm_rp->rp_address, rp_str,
- sizeof(rp_str));
-
if (uj) {
json_row = json_object_new_object();
- json_object_string_add(json_row, "Rp Address",
- rp_str);
+ json_object_string_addf(json_row, "Rp Address",
+ "%pPA",
+ &bsm_rp->rp_address);
json_object_int_add(json_row, "Rp HoldTime",
bsm_rp->rp_holdtime);
json_object_int_add(json_row, "Rp Priority",
bsm_rp->rp_prio);
json_object_int_add(json_row, "Hash Val",
bsm_rp->hash);
- json_object_object_add(json_group, rp_str,
- json_row);
+ json_object_object_addf(json_group, json_row,
+ "%pPA",
+ &bsm_rp->rp_address);
} else {
- vty_out(vty, "%-15s %-15u %-15u %-15u\n",
- rp_str, bsm_rp->rp_prio,
+ vty_out(vty, "%-15pPA %-15u %-15u %-15u\n",
+ &bsm_rp->rp_address, bsm_rp->rp_prio,
bsm_rp->rp_holdtime, bsm_rp->hash);
}
}
@@ -1451,11 +1447,9 @@ static void pim_show_bsr(struct pim_instance *pim,
char last_bsm_seen[10];
time_t now;
char bsr_state[20];
- char bsr_str[PREFIX_STRLEN];
json_object *json = NULL;
- if (pim->global_scope.current_bsr.s_addr == INADDR_ANY) {
- strlcpy(bsr_str, "0.0.0.0", sizeof(bsr_str));
+ if (pim_addr_is_any(pim->global_scope.current_bsr)) {
pim_time_uptime(uptime, sizeof(uptime),
pim->global_scope.current_bsr_first_ts);
pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen),
@@ -1463,8 +1457,6 @@ static void pim_show_bsr(struct pim_instance *pim,
}
else {
- pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr,
- bsr_str, sizeof(bsr_str));
now = pim_time_monotonic_sec();
pim_time_uptime(uptime, sizeof(uptime),
(now - pim->global_scope.current_bsr_first_ts));
@@ -1486,9 +1478,11 @@ static void pim_show_bsr(struct pim_instance *pim,
strlcpy(bsr_state, "", sizeof(bsr_state));
}
+
if (uj) {
json = json_object_new_object();
- json_object_string_add(json, "bsr", bsr_str);
+ json_object_string_addf(json, "bsr", "%pPA",
+ &pim->global_scope.current_bsr);
json_object_int_add(json, "priority",
pim->global_scope.current_bsr_prio);
json_object_int_add(json, "fragmentTag",
@@ -1500,7 +1494,8 @@ static void pim_show_bsr(struct pim_instance *pim,
else {
vty_out(vty, "PIMv2 Bootstrap information\n");
- vty_out(vty, "Current preferred BSR address: %s\n", bsr_str);
+ vty_out(vty, "Current preferred BSR address: %pPA\n",
+ &pim->global_scope.current_bsr);
vty_out(vty,
"Priority Fragment-Tag State UpTime\n");
vty_out(vty, " %-12d %-12d %-13s %7s\n",
@@ -1662,7 +1657,7 @@ DEFPY (clear_ip_mroute,
"clear ip mroute [vrf NAME]$name",
CLEAR_STR
IP_STR
- "Reset multicast routes\n"
+ MROUTE_STR
VRF_CMD_HELP_STR)
{
struct vrf *v = pim_cmd_lookup(vty, name);
@@ -2782,14 +2777,15 @@ DEFPY (show_ip_pim_rpf_vrf_all,
DEFPY (show_ip_pim_nexthop,
show_ip_pim_nexthop_cmd,
- "show ip pim [vrf NAME] nexthop",
+ "show ip pim [vrf NAME] nexthop [json$json]",
SHOW_STR
IP_STR
PIM_STR
VRF_CMD_HELP_STR
- "PIM cached nexthop rpf information\n")
+ "PIM cached nexthop rpf information\n"
+ JSON_STR)
{
- return pim_show_nexthop_cmd_helper(vrf, vty);
+ return pim_show_nexthop_cmd_helper(vrf, vty, !!json);
}
DEFPY (show_ip_pim_nexthop_lookup,
@@ -4166,26 +4162,21 @@ DEFPY_HIDDEN (pim_test_sg_keepalive,
"The Group we are resetting\n")
{
struct pim_upstream *up;
+ struct vrf *vrf;
struct pim_instance *pim;
pim_sgaddr sg;
sg.src = source;
sg.grp = group;
- if (!name)
- pim = pim_get_pim_instance(VRF_DEFAULT);
- else {
- struct vrf *vrf = vrf_lookup_by_name(name);
-
- if (!vrf) {
- vty_out(vty, "%% Vrf specified: %s does not exist\n",
- name);
- return CMD_WARNING;
- }
-
- pim = pim_get_pim_instance(vrf->vrf_id);
+ vrf = vrf_lookup_by_name(name ? name : VRF_DEFAULT_NAME);
+ if (!vrf) {
+ vty_out(vty, "%% Vrf specified: %s does not exist\n", name);
+ return CMD_WARNING;
}
+ pim = vrf->info;
+
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
return CMD_WARNING;
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index 03689bea8d..f2974edae2 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -898,13 +898,9 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
}
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
- char rpf_addr_str[PREFIX_STRLEN];
const char *rpf_ifname;
struct pim_rpf *rpf = &up->rpf;
- pim_addr_dump("<rpf?>", &rpf->rpf_addr, rpf_addr_str,
- sizeof(rpf_addr_str));
-
rpf_ifname =
rpf->source_nexthop.interface ? rpf->source_nexthop
.interface->name
@@ -932,8 +928,8 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
json_object_string_add(json_row, "group", grp_str);
json_object_string_add(json_row, "rpfInterface",
rpf_ifname);
- json_object_string_add(json_row, "rpfAddress",
- rpf_addr_str);
+ json_object_string_addf(json_row, "rpfAddress", "%pPA",
+ &rpf->rpf_addr);
json_object_string_addf(
json_row, "ribNexthop", "%pPAs",
&rpf->source_nexthop.mrib_nexthop_addr);
@@ -947,9 +943,9 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json)
} else {
vty_out(vty,
- "%-15pPAs %-15pPAs %-16s %-15s %-15pPAs %6d %4d\n",
+ "%-15pPAs %-15pPAs %-16s %-15pPA %-15pPAs %6d %4d\n",
&up->sg.src, &up->sg.grp, rpf_ifname,
- rpf_addr_str,
+ &rpf->rpf_addr,
&rpf->source_nexthop.mrib_nexthop_addr,
rpf->source_nexthop.mrib_route_metric,
rpf->source_nexthop.mrib_metric_preference);
@@ -1352,9 +1348,9 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
if (!up->t_join_timer && up->rpf.source_nexthop.interface) {
struct pim_neighbor *nbr;
- nbr = pim_neighbor_find_prefix(
+ nbr = pim_neighbor_find(
up->rpf.source_nexthop.interface,
- &up->rpf.rpf_addr);
+ up->rpf.rpf_addr);
if (nbr)
pim_time_timer_to_hhmmss(join_timer,
sizeof(join_timer),
@@ -1418,7 +1414,7 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
rpg = RP(pim, up->sg.grp);
json_object_string_addf(json_row, "rpfAddress",
- "%pFX", &rpg->rpf_addr);
+ "%pPA", &rpg->rpf_addr);
} else {
json_object_string_add(json_row, "rpfAddress",
src_str);
@@ -1534,15 +1530,11 @@ void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj)
"Source Group RpfIface RibNextHop RpfAddress \n");
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
- char rpf_addr_str[PREFIX_STRLEN];
struct pim_rpf *rpf;
const char *rpf_ifname;
rpf = &up->rpf;
- pim_addr_dump("<rpf?>", &rpf->rpf_addr, rpf_addr_str,
- sizeof(rpf_addr_str));
-
rpf_ifname =
rpf->source_nexthop.interface ? rpf->source_nexthop
.interface->name
@@ -1573,14 +1565,15 @@ void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj)
json_object_string_addf(
json_row, "ribNexthop", "%pPAs",
&rpf->source_nexthop.mrib_nexthop_addr);
- json_object_string_add(json_row, "rpfAddress",
- rpf_addr_str);
+ json_object_string_addf(json_row, "rpfAddress", "%pPA",
+ &rpf->rpf_addr);
json_object_object_add(json_group, src_str, json_row);
} else {
- vty_out(vty, "%-15pPAs %-15pPAs %-16s %-15pPA %-15s\n",
+ vty_out(vty,
+ "%-15pPAs %-15pPAs %-16s %-15pPA %-15pPA\n",
&up->sg.src, &up->sg.grp, rpf_ifname,
&rpf->source_nexthop.mrib_nexthop_addr,
- rpf_addr_str);
+ &rpf->rpf_addr);
}
}
@@ -1679,7 +1672,7 @@ int pim_show_join_cmd_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
return CMD_WARNING;
}
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -1780,7 +1773,7 @@ int pim_show_jp_agg_list_cmd_helper(const char *vrf, struct vty *vty)
vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
return CMD_WARNING;
}
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -2635,40 +2628,90 @@ void ip_pim_ssm_show_group_range(struct pim_instance *pim, struct vty *vty,
vty_out(vty, "SSM group range : %s\n", range_str);
}
-struct pnc_cache_walk_data {
+struct vty_pnc_cache_walk_data {
struct vty *vty;
struct pim_instance *pim;
};
-static int pim_print_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg)
+struct json_pnc_cache_walk_data {
+ json_object *json_obj;
+ struct pim_instance *pim;
+};
+
+static int pim_print_vty_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg)
{
struct pim_nexthop_cache *pnc = bucket->data;
- struct pnc_cache_walk_data *cwd = arg;
+ struct vty_pnc_cache_walk_data *cwd = arg;
struct vty *vty = cwd->vty;
struct pim_instance *pim = cwd->pim;
struct nexthop *nh_node = NULL;
ifindex_t first_ifindex;
struct interface *ifp = NULL;
- char buf[PREFIX_STRLEN];
for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
first_ifindex = nh_node->ifindex;
+
ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
- vty_out(vty, "%-15s ",
- inet_ntop(AF_INET, &pnc->rpf.rpf_addr.u.prefix4, buf,
- sizeof(buf)));
+ vty_out(vty, "%-15pPA ", &pnc->rpf.rpf_addr);
vty_out(vty, "%-16s ", ifp ? ifp->name : "NULL");
+#if PIM_IPV == 4
vty_out(vty, "%pI4 ", &nh_node->gate.ipv4);
+#else
+ vty_out(vty, "%pI6 ", &nh_node->gate.ipv6);
+#endif
vty_out(vty, "\n");
}
return CMD_SUCCESS;
}
+static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet,
+ void *arg)
+{
+ struct pim_nexthop_cache *pnc = backet->data;
+ struct json_pnc_cache_walk_data *cwd = arg;
+ struct pim_instance *pim = cwd->pim;
+ struct nexthop *nh_node = NULL;
+ ifindex_t first_ifindex;
+ struct interface *ifp = NULL;
+ char addr_str[PIM_ADDRSTRLEN];
+ json_object *json_row = NULL;
+ json_object *json_ifp = NULL;
+ json_object *json_arr = NULL;
+
+ for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
+ first_ifindex = nh_node->ifindex;
+ ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
+ snprintfrr(addr_str, sizeof(addr_str), "%pPA",
+ &pnc->rpf.rpf_addr);
+ json_object_object_get_ex(cwd->json_obj, addr_str, &json_row);
+ if (!json_row) {
+ json_row = json_object_new_object();
+ json_object_string_addf(json_row, "address", "%pPA",
+ &pnc->rpf.rpf_addr);
+ json_object_object_addf(cwd->json_obj, json_row, "%pPA",
+ &pnc->rpf.rpf_addr);
+ json_arr = json_object_new_array();
+ json_object_object_add(json_row, "nexthops", json_arr);
+ }
+ json_ifp = json_object_new_object();
+ json_object_string_add(json_ifp, "interface",
+ ifp ? ifp->name : "NULL");
+#if PIM_IPV == 4
+ json_object_string_addf(json_ifp, "nexthop", "%pI4",
+ &nh_node->gate.ipv4);
+#else
+ json_object_string_addf(json_ifp, "nexthop", "%pI6",
+ &nh_node->gate.ipv6);
+#endif
+ json_object_array_add(json_arr, json_ifp);
+ }
+ return CMD_SUCCESS;
+}
+
int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
pim_addr source, pim_addr group)
{
- struct prefix nht_p;
int result = 0;
pim_addr vif_source;
struct prefix grp;
@@ -2698,11 +2741,11 @@ int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group))
return CMD_SUCCESS;
- pim_addr_to_prefix(&nht_p, vif_source);
pim_addr_to_prefix(&grp, group);
memset(&nexthop, 0, sizeof(nexthop));
- result = pim_ecmp_nexthop_lookup(v->info, &nexthop, &nht_p, &grp, 0);
+ result =
+ pim_ecmp_nexthop_lookup(v->info, &nexthop, vif_source, &grp, 0);
if (!result) {
vty_out(vty,
@@ -2718,7 +2761,7 @@ int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
return CMD_SUCCESS;
}
-int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty)
+int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty, bool uj)
{
struct vrf *v;
@@ -2727,23 +2770,35 @@ int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty)
if (!v)
return CMD_WARNING;
- pim_show_nexthop(v->info, vty);
+ pim_show_nexthop(v->info, vty, uj);
return CMD_SUCCESS;
}
-void pim_show_nexthop(struct pim_instance *pim, struct vty *vty)
+void pim_show_nexthop(struct pim_instance *pim, struct vty *vty, bool uj)
{
- struct pnc_cache_walk_data cwd;
+ struct vty_pnc_cache_walk_data cwd;
+ struct json_pnc_cache_walk_data jcwd;
cwd.vty = vty;
cwd.pim = pim;
- vty_out(vty, "Number of registered addresses: %lu\n",
- pim->rpf_hash->count);
- vty_out(vty, "Address Interface Nexthop\n");
- vty_out(vty, "---------------------------------------------\n");
+ jcwd.pim = pim;
+
+ if (uj) {
+ jcwd.json_obj = json_object_new_object();
+ } else {
+ vty_out(vty, "Number of registered addresses: %lu\n",
+ pim->rpf_hash->count);
+ vty_out(vty, "Address Interface Nexthop\n");
+ vty_out(vty, "---------------------------------------------\n");
+ }
- hash_walk(pim->rpf_hash, pim_print_pnc_cache_walkcb, &cwd);
+ if (uj) {
+ hash_walk(pim->rpf_hash, pim_print_json_pnc_cache_walkcb,
+ &jcwd);
+ vty_json(vty, jcwd.json_obj);
+ } else
+ hash_walk(pim->rpf_hash, pim_print_vty_pnc_cache_walkcb, &cwd);
}
int pim_show_neighbors_cmd_helper(const char *vrf, struct vty *vty,
@@ -4043,7 +4098,7 @@ int pim_show_rpf_helper(const char *vrf, struct vty *vty, bool json)
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4099,7 +4154,7 @@ int pim_show_rp_helper(const char *vrf, struct vty *vty, const char *group_str,
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4170,7 +4225,7 @@ int pim_show_secondary_helper(const char *vrf, struct vty *vty)
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4193,7 +4248,7 @@ int pim_show_statistics_helper(const char *vrf, struct vty *vty,
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4222,7 +4277,7 @@ int pim_show_upstream_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
return CMD_WARNING;
}
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4286,7 +4341,7 @@ int pim_show_upstream_join_desired_helper(const char *vrf, struct vty *vty,
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4308,7 +4363,7 @@ int pim_show_upstream_rpf_helper(const char *vrf, struct vty *vty, bool uj)
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4332,7 +4387,7 @@ int pim_show_state_helper(const char *vrf, struct vty *vty,
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4386,7 +4441,7 @@ int pim_show_multicast_helper(const char *vrf, struct vty *vty)
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4421,7 +4476,7 @@ int pim_show_multicast_count_helper(const char *vrf, struct vty *vty, bool json)
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4478,7 +4533,7 @@ int pim_show_mroute_helper(const char *vrf, struct vty *vty, pim_addr s_or_g,
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4541,7 +4596,7 @@ int pim_show_mroute_count_helper(const char *vrf, struct vty *vty, bool json)
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4597,7 +4652,7 @@ int pim_show_mroute_summary_helper(const char *vrf, struct vty *vty, bool json)
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
@@ -4683,10 +4738,10 @@ void pim_show_interface_traffic(struct pim_instance *pim, struct vty *vty,
pim_ifp->pim_ifstat_join_recv);
json_object_int_add(json_row, "joinTx",
pim_ifp->pim_ifstat_join_send);
- json_object_int_add(json_row, "pruneTx",
- pim_ifp->pim_ifstat_prune_send);
json_object_int_add(json_row, "pruneRx",
pim_ifp->pim_ifstat_prune_recv);
+ json_object_int_add(json_row, "pruneTx",
+ pim_ifp->pim_ifstat_prune_send);
json_object_int_add(json_row, "registerRx",
pim_ifp->pim_ifstat_reg_recv);
json_object_int_add(json_row, "registerTx",
@@ -4833,7 +4888,7 @@ int pim_show_interface_traffic_helper(const char *vrf, const char *if_name,
if (!v)
return CMD_WARNING;
- pim = pim_get_pim_instance(v->vrf_id);
+ pim = v->info;
if (!pim) {
vty_out(vty, "%% Unable to find pim instance\n");
diff --git a/pimd/pim_cmd_common.h b/pimd/pim_cmd_common.h
index 67e092e079..1e770e6c8d 100644
--- a/pimd/pim_cmd_common.h
+++ b/pimd/pim_cmd_common.h
@@ -23,9 +23,6 @@
struct pim_upstream;
struct pim_instance;
-/* duplicated from pim_instance.h - needed to avoid dependency mess */
-struct pim_instance *pim_get_pim_instance(vrf_id_t vrf_id);
-
const char *pim_cli_get_vrf_name(struct vty *vty);
int pim_process_join_prune_cmd(struct vty *vty, const char *jpi_str);
int pim_process_no_join_prune_cmd(struct vty *vty);
@@ -107,8 +104,8 @@ void ip_pim_ssm_show_group_range(struct pim_instance *pim, struct vty *vty,
bool uj);
int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
pim_addr source, pim_addr group);
-int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty);
-void pim_show_nexthop(struct pim_instance *pim, struct vty *vty);
+int pim_show_nexthop_cmd_helper(const char *vrf, struct vty *vty, bool uj);
+void pim_show_nexthop(struct pim_instance *pim, struct vty *vty, bool uj);
int pim_show_neighbors_cmd_helper(const char *vrf, struct vty *vty,
const char *json, const char *interface);
int pim_show_neighbors_vrf_all_cmd_helper(struct vty *vty, const char *json,
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index ebe33f6c16..73b6ca951a 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -635,9 +635,7 @@ void pim_if_addr_add(struct connected *ifc)
with RNH address to receive update and add the
interface as nexthop. */
memset(&rpf, 0, sizeof(struct pim_rpf));
- rpf.rpf_addr.family = AF_INET;
- rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
- rpf.rpf_addr.u.prefix4 = ifc->address->u.prefix4;
+ rpf.rpf_addr = pim_addr_from_prefix(ifc->address);
pnc = pim_nexthop_cache_find(pim_ifp->pim, &rpf);
if (pnc)
pim_sendmsg_zebra_rnh(pim_ifp->pim, zclient,
diff --git a/pimd/pim_ifchannel.c b/pimd/pim_ifchannel.c
index e2d2ab97c9..ac2eee0a30 100644
--- a/pimd/pim_ifchannel.c
+++ b/pimd/pim_ifchannel.c
@@ -685,8 +685,7 @@ static void on_ifjoin_prune_pending_timer(struct thread *t)
struct pim_rpf rpf;
rpf.source_nexthop.interface = ifp;
- pim_addr_to_prefix(&rpf.rpf_addr,
- pim_ifp->primary_address);
+ rpf.rpf_addr = pim_ifp->primary_address;
pim_jp_agg_single_upstream_send(
&rpf, ch->upstream, 0);
}
@@ -751,7 +750,7 @@ static void check_recv_upstream(int is_join, struct interface *recv_ifp,
return;
}
- rpf_addr = pim_addr_from_prefix(&up->rpf.rpf_addr);
+ rpf_addr = up->rpf.rpf_addr;
/* upstream directed to RPF'(S,G) ? */
if (pim_addr_cmp(upstream, rpf_addr)) {
diff --git a/pimd/pim_join.c b/pimd/pim_join.c
index 1b722382b9..8c7ae80d2b 100644
--- a/pimd/pim_join.c
+++ b/pimd/pim_join.c
@@ -86,7 +86,7 @@ static void recv_join(struct interface *ifp, struct pim_neighbor *neigh,
* If the RP sent in the message is not
* our RP for the group, drop the message
*/
- rpf_addr = pim_addr_from_prefix(&rp->rpf_addr);
+ rpf_addr = rp->rpf_addr;
if (pim_addr_cmp(sg->src, rpf_addr)) {
zlog_warn(
"%s: Specified RP(%pPAs) in join is different than our configured RP(%pPAs)",
@@ -427,7 +427,6 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
size_t packet_left = 0;
size_t packet_size = 0;
size_t group_size = 0;
- pim_addr rpf_addr;
if (rpf->source_nexthop.interface)
pim_ifp = rpf->source_nexthop.interface->info;
@@ -436,9 +435,8 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
return -1;
}
- rpf_addr = pim_addr_from_prefix(&rpf->rpf_addr);
- on_trace(__func__, rpf->source_nexthop.interface, rpf_addr);
+ on_trace(__func__, rpf->source_nexthop.interface, rpf->rpf_addr);
if (!pim_ifp) {
zlog_warn("%s: multicast not enabled on interface %s", __func__,
@@ -446,11 +444,11 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
return -1;
}
- if (pim_addr_is_any(rpf_addr)) {
+ if (pim_addr_is_any(rpf->rpf_addr)) {
if (PIM_DEBUG_PIM_J_P)
zlog_debug(
"%s: upstream=%pPA is myself on interface %s",
- __func__, &rpf_addr,
+ __func__, &rpf->rpf_addr,
rpf->source_nexthop.interface->name);
return 0;
}
@@ -473,7 +471,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
memset(msg, 0, sizeof(*msg));
pim_msg_addr_encode_ucast((uint8_t *)&msg->addr,
- rpf_addr);
+ rpf->rpf_addr);
msg->reserved = 0;
msg->holdtime = htons(PIM_JP_HOLDTIME);
@@ -492,7 +490,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
if (PIM_DEBUG_PIM_J_P)
zlog_debug(
"%s: sending (G)=%pPAs to upstream=%pPA on interface %s",
- __func__, &group->group, &rpf_addr,
+ __func__, &group->group, &rpf->rpf_addr,
rpf->source_nexthop.interface->name);
group_size = pim_msg_get_jp_group_size(group->sources);
@@ -516,7 +514,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
memset(msg, 0, sizeof(*msg));
pim_msg_addr_encode_ucast((uint8_t *)&msg->addr,
- rpf_addr);
+ rpf->rpf_addr);
msg->reserved = 0;
msg->holdtime = htons(PIM_JP_HOLDTIME);
diff --git a/pimd/pim_jp_agg.c b/pimd/pim_jp_agg.c
index 16774a03f5..44ebbb4dea 100644
--- a/pimd/pim_jp_agg.c
+++ b/pimd/pim_jp_agg.c
@@ -110,7 +110,6 @@ pim_jp_agg_get_interface_upstream_switch_list(struct pim_rpf *rpf)
struct pim_interface *pim_ifp;
struct pim_iface_upstream_switch *pius;
struct listnode *node, *nnode;
- pim_addr rpf_addr;
if (!ifp)
return NULL;
@@ -121,18 +120,16 @@ pim_jp_agg_get_interface_upstream_switch_list(struct pim_rpf *rpf)
if (!pim_ifp)
return NULL;
- rpf_addr = pim_addr_from_prefix(&rpf->rpf_addr);
-
for (ALL_LIST_ELEMENTS(pim_ifp->upstream_switch_list, node, nnode,
pius)) {
- if (!pim_addr_cmp(pius->address, rpf_addr))
+ if (!pim_addr_cmp(pius->address, rpf->rpf_addr))
break;
}
if (!pius) {
pius = XCALLOC(MTYPE_PIM_JP_AGG_GROUP,
sizeof(struct pim_iface_upstream_switch));
- pius->address = rpf_addr;
+ pius->address = rpf->rpf_addr;
pius->us = list_new();
listnode_add_sort(pim_ifp->upstream_switch_list, pius);
}
diff --git a/pimd/pim_msg.c b/pimd/pim_msg.c
index b67849fd8f..7e57b405f2 100644
--- a/pimd/pim_msg.c
+++ b/pimd/pim_msg.c
@@ -279,7 +279,7 @@ size_t pim_msg_build_jp_groups(struct pim_jp_groups *grp,
struct pim_rpf *rpf = pim_rp_g(pim, source->up->sg.grp);
bits = PIM_ENCODE_SPARSE_BIT | PIM_ENCODE_WC_BIT
| PIM_ENCODE_RPT_BIT;
- stosend = pim_addr_from_prefix(&rpf->rpf_addr);
+ stosend = rpf->rpf_addr;
/* Only Send SGRpt in case of *,G Join */
if (source->is_join)
up = source->up;
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 4fff1b31db..408e86b698 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -334,10 +334,10 @@ static bool is_pim_interface(const struct lyd_node *dnode)
pim_enable_dnode =
yang_dnode_getf(dnode,
"%s/frr-pim:pim/address-family[address-family='%s']/pim-enable",
- if_xpath, "frr-routing:ipv4");
+ if_xpath, FRR_PIM_AF_XPATH_VAL);
igmp_enable_dnode = yang_dnode_getf(dnode,
"%s/frr-gmp:gmp/address-family[address-family='%s']/enable",
- if_xpath, "frr-routing:ipv4");
+ if_xpath, FRR_PIM_AF_XPATH_VAL);
if (((pim_enable_dnode) &&
(yang_dnode_get_bool(pim_enable_dnode, "."))) ||
diff --git a/pimd/pim_neighbor.c b/pimd/pim_neighbor.c
index 0bbed1f99f..6d6dbb6465 100644
--- a/pimd/pim_neighbor.c
+++ b/pimd/pim_neighbor.c
@@ -263,7 +263,7 @@ static void on_neighbor_jp_timer(struct thread *t)
neigh->upstream_jp_agg->count);
rpf.source_nexthop.interface = neigh->interface;
- pim_addr_to_prefix(&rpf.rpf_addr, neigh->source_addr);
+ rpf.rpf_addr = neigh->source_addr;
pim_joinprune_send(&rpf, neigh->upstream_jp_agg);
thread_add_timer(router->master, on_neighbor_jp_timer, neigh,
diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c
index 90eb8ae407..2e5c0598c0 100644
--- a/pimd/pim_nht.c
+++ b/pimd/pim_nht.c
@@ -52,11 +52,11 @@
void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
struct pim_nexthop_cache *pnc, int command)
{
- struct prefix *p;
+ struct prefix p;
int ret;
- p = &(pnc->rpf.rpf_addr);
- ret = zclient_send_rnh(zclient, command, p, SAFI_UNICAST, false, false,
+ pim_addr_to_prefix(&p, pnc->rpf.rpf_addr);
+ ret = zclient_send_rnh(zclient, command, &p, SAFI_UNICAST, false, false,
pim->vrf->vrf_id);
if (ret == ZCLIENT_SEND_FAILURE)
zlog_warn("sendmsg_nexthop: zclient_send_message() failed");
@@ -65,7 +65,7 @@ void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
zlog_debug(
"%s: NHT %sregistered addr %pFX(%s) with Zebra ret:%d ",
__func__,
- (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", p,
+ (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p,
pim->vrf->name, ret);
return;
@@ -98,7 +98,7 @@ static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim,
pnc->rp_list = list_new();
pnc->rp_list->cmp = pim_rp_list_cmp;
- snprintfrr(hash_name, sizeof(hash_name), "PNC %pFX(%s) Upstream Hash",
+ snprintfrr(hash_name, sizeof(hash_name), "PNC %pPA(%s) Upstream Hash",
&pnc->rpf.rpf_addr, pim->vrf->name);
pnc->upstream_hash = hash_create_size(8192, pim_upstream_hash_key,
pim_upstream_equal, hash_name);
@@ -107,7 +107,7 @@ static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim,
}
static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
- struct prefix *addr)
+ pim_addr addr)
{
struct pim_nexthop_cache *pnc = NULL;
struct pim_rpf rpf;
@@ -115,7 +115,7 @@ static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
zclient = pim_zebra_zclient_get();
memset(&rpf, 0, sizeof(rpf));
- rpf.rpf_addr = *addr;
+ rpf.rpf_addr = addr;
pnc = pim_nexthop_cache_find(pim, &rpf);
if (!pnc) {
@@ -124,8 +124,8 @@ static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
ZEBRA_NEXTHOP_REGISTER);
if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug(
- "%s: NHT cache and zebra notification added for %pFX(%s)",
- __func__, addr, pim->vrf->name);
+ "%s: NHT cache and zebra notification added for %pPA(%s)",
+ __func__, &addr, pim->vrf->name);
}
return pnc;
@@ -134,7 +134,7 @@ static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
/* TBD: this does several distinct things and should probably be split up.
* (checking state vs. returning pnc vs. adding upstream vs. adding rp)
*/
-int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
+int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
struct pim_upstream *up, struct rp_info *rp,
struct pim_nexthop_cache *out_pnc)
{
@@ -143,7 +143,7 @@ int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
pnc = pim_nht_get(pim, addr);
- assertf(up || rp, "addr=%pFX", addr);
+ assertf(up || rp, "addr=%pPA", &addr);
if (rp != NULL) {
ch_node = listnode_lookup(pnc->rp_list, rp);
@@ -163,28 +163,21 @@ int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
return 0;
}
-#if PIM_IPV == 4
-void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr addr)
+void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr)
{
struct pim_nexthop_cache *pnc;
- struct prefix pfx;
-
- pfx.family = AF_INET;
- pfx.prefixlen = IPV4_MAX_BITLEN;
- pfx.u.prefix4 = addr;
- pnc = pim_nht_get(pim, &pfx);
+ pnc = pim_nht_get(pim, addr);
pnc->bsr_count++;
}
-#endif /* PIM_IPV == 4 */
static void pim_nht_drop_maybe(struct pim_instance *pim,
struct pim_nexthop_cache *pnc)
{
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
- "%s: NHT %pFX(%s) rp_list count:%d upstream count:%ld BSR count:%u",
+ "%s: NHT %pPA(%s) rp_list count:%d upstream count:%ld BSR count:%u",
__func__, &pnc->rpf.rpf_addr, pim->vrf->name,
pnc->rp_list->count, pnc->upstream_hash->count,
pnc->bsr_count);
@@ -206,7 +199,7 @@ static void pim_nht_drop_maybe(struct pim_instance *pim,
}
}
-void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
+void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr,
struct pim_upstream *up, struct rp_info *rp)
{
struct pim_nexthop_cache *pnc = NULL;
@@ -214,11 +207,11 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
struct pim_upstream *upstream = NULL;
/* Remove from RPF hash if it is the last entry */
- lookup.rpf.rpf_addr = *addr;
+ lookup.rpf.rpf_addr = addr;
pnc = hash_lookup(pim->rpf_hash, &lookup);
if (!pnc) {
- zlog_warn("attempting to delete nonexistent NHT entry %pFX",
- addr);
+ zlog_warn("attempting to delete nonexistent NHT entry %pPA",
+ &addr);
return;
}
@@ -247,8 +240,7 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
pim_nht_drop_maybe(pim, pnc);
}
-#if PIM_IPV == 4
-void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr)
+void pim_nht_bsr_del(struct pim_instance *pim, pim_addr addr)
{
struct pim_nexthop_cache *pnc = NULL;
struct pim_nexthop_cache lookup;
@@ -258,28 +250,26 @@ void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr)
* is 0.0.0.0 as that the BSR has not been registered
* for tracking yet.
*/
- if (addr.s_addr == INADDR_ANY)
+ if (pim_addr_is_any(addr))
return;
- lookup.rpf.rpf_addr.family = AF_INET;
- lookup.rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
- lookup.rpf.rpf_addr.u.prefix4 = addr;
+ lookup.rpf.rpf_addr = addr;
pnc = hash_lookup(pim->rpf_hash, &lookup);
if (!pnc) {
- zlog_warn("attempting to delete nonexistent NHT BSR entry %pI4",
+ zlog_warn("attempting to delete nonexistent NHT BSR entry %pPA",
&addr);
return;
}
- assertf(pnc->bsr_count > 0, "addr=%pI4", &addr);
+ assertf(pnc->bsr_count > 0, "addr=%pPA", &addr);
pnc->bsr_count--;
pim_nht_drop_maybe(pim, pnc);
}
-bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
+bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
struct interface *src_ifp, pim_addr src_ip)
{
struct pim_nexthop_cache *pnc = NULL;
@@ -288,9 +278,7 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
struct nexthop *nh;
struct interface *ifp;
- lookup.rpf.rpf_addr.family = AF_INET;
- lookup.rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
- lookup.rpf.rpf_addr.u.prefix4 = bsr_addr;
+ lookup.rpf.rpf_addr = bsr_addr;
pnc = hash_lookup(pim->rpf_hash, &lookup);
if (!pnc || !CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED)) {
@@ -396,12 +384,11 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
if (!nbr)
continue;
- return nh->ifindex == src_ifp->ifindex
- && nhaddr.s_addr == src_ip.s_addr;
+ return nh->ifindex == src_ifp->ifindex &&
+ (!pim_addr_cmp(nhaddr, src_ip));
}
return false;
}
-#endif /* PIM_IPV == 4 */
void pim_rp_nexthop_del(struct rp_info *rp_info)
{
@@ -427,7 +414,7 @@ static void pim_update_rp_nh(struct pim_instance *pim,
// Compute PIM RPF using cached nexthop
if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
- &rp_info->rp.rpf_addr,
+ rp_info->rp.rpf_addr,
&rp_info->group, 1))
pim_rp_nexthop_del(rp_info);
}
@@ -497,9 +484,8 @@ uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
static int pim_ecmp_nexthop_search(struct pim_instance *pim,
struct pim_nexthop_cache *pnc,
- struct pim_nexthop *nexthop,
- struct prefix *src, struct prefix *grp,
- int neighbor_needed)
+ struct pim_nexthop *nexthop, pim_addr src,
+ struct prefix *grp, int neighbor_needed)
{
struct pim_neighbor *nbrs[router->multipath], *nbr = NULL;
struct interface *ifps[router->multipath];
@@ -510,7 +496,6 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
uint8_t nh_iter = 0, found = 0;
uint32_t i, num_nbrs = 0;
pim_addr nh_addr = nexthop->mrib_nexthop_addr;
- pim_addr src_addr = pim_addr_from_prefix(src);
pim_addr grp_addr = pim_addr_from_prefix(grp);
if (!pnc || !pnc->nexthop_num || !nexthop)
@@ -544,7 +529,7 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
if (curr_route_valid &&
!pim_if_connected_to_source(nexthop->interface,
- src_addr)) {
+ src)) {
nbr = pim_neighbor_find(
nexthop->interface,
nexthop->mrib_nexthop_addr);
@@ -565,7 +550,7 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s: (%pPA,%pPA)(%s) current nexthop %s is valid, skipping new path selection",
- __func__, &src_addr,
+ __func__, &src,
&grp_addr,
pim->vrf->name,
nexthop->interface->name);
@@ -590,12 +575,12 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
pim_addr nhaddr = nh_node->gate.ipv6;
#endif
nbrs[i] = pim_neighbor_find(ifps[i], nhaddr);
- if (nbrs[i] ||
- pim_if_connected_to_source(ifps[i], src_addr))
+ if (nbrs[i] || pim_if_connected_to_source(ifps[i], src))
num_nbrs++;
}
}
if (pim->ecmp_enable) {
+ struct prefix src_pfx;
uint32_t consider = pnc->nexthop_num;
if (neighbor_needed && num_nbrs < consider)
@@ -605,7 +590,8 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
return 0;
// PIM ECMP flag is enable then choose ECMP path.
- hash_val = pim_compute_ecmp_hash(src, grp);
+ pim_addr_to_prefix(&src_pfx, src);
+ hash_val = pim_compute_ecmp_hash(&src_pfx, grp);
mod_val = hash_val % consider;
}
@@ -617,8 +603,8 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s %s: could not find interface for ifindex %d (address %pPA(%s))",
- __FILE__, __func__, first_ifindex,
- &src_addr, pim->vrf->name);
+ __FILE__, __func__, first_ifindex, &src,
+ pim->vrf->name);
if (nh_iter == mod_val)
mod_val++; // Select nexthpath
nh_iter++;
@@ -629,15 +615,14 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
zlog_debug(
"%s: multicast not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
__func__, ifp->name, pim->vrf->name,
- first_ifindex, &src_addr);
+ first_ifindex, &src);
if (nh_iter == mod_val)
mod_val++; // Select nexthpath
nh_iter++;
continue;
}
- if (neighbor_needed &&
- !pim_if_connected_to_source(ifp, src_addr)) {
+ if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) {
nbr = nbrs[nh_iter];
if (!nbr && !if_is_loopback(ifp)) {
if (PIM_DEBUG_PIM_NHT)
@@ -661,14 +646,14 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
#endif
nexthop->mrib_metric_preference = pnc->distance;
nexthop->mrib_route_metric = pnc->metric;
- nexthop->last_lookup = src_addr;
+ nexthop->last_lookup = src;
nexthop->last_lookup_time = pim_time_monotonic_usec();
nexthop->nbr = nbr;
found = 1;
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s: (%pPA,%pPA)(%s) selected nhop interface %s addr %pPAs mod_val %u iter %d ecmp %d",
- __func__, &src_addr, &grp_addr,
+ __func__, &src, &grp_addr,
pim->vrf->name, ifp->name, &nh_addr,
mod_val, nh_iter, pim->ecmp_enable);
}
@@ -708,12 +693,12 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)
}
if (cmd == ZEBRA_NEXTHOP_UPDATE) {
- prefix_copy(&rpf.rpf_addr, &match);
+ rpf.rpf_addr = pim_addr_from_prefix(&match);
pnc = pim_nexthop_cache_find(pim, &rpf);
if (!pnc) {
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
- "%s: Skipping NHT update, addr %pFX is not in local cached DB.",
+ "%s: Skipping NHT update, addr %pPA is not in local cached DB.",
__func__, &rpf.rpf_addr);
return 0;
}
@@ -740,12 +725,10 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)
*/
#if PIM_IPV == 4
nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- nexthop->gate.ipv4 =
- pnc->rpf.rpf_addr.u.prefix4;
+ nexthop->gate.ipv4 = pnc->rpf.rpf_addr;
#else
nexthop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
- nexthop->gate.ipv6 =
- pnc->rpf.rpf_addr.u.prefix6;
+ nexthop->gate.ipv6 = pnc->rpf.rpf_addr;
#endif
break;
#if PIM_IPV == 4
@@ -884,7 +867,7 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)
}
int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
- struct pim_nexthop *nexthop, struct prefix *src,
+ struct pim_nexthop *nexthop, pim_addr src,
struct prefix *grp, int neighbor_needed)
{
struct pim_nexthop_cache *pnc;
@@ -898,14 +881,13 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
uint8_t i = 0;
uint32_t hash_val = 0, mod_val = 0;
uint32_t num_nbrs = 0;
- pim_addr src_addr = pim_addr_from_prefix(src);
if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld",
- __func__, &src_addr, pim->vrf->name,
+ __func__, &src, pim->vrf->name,
nexthop->last_lookup_time);
- rpf.rpf_addr = *src;
+ rpf.rpf_addr = src;
pnc = pim_nexthop_cache_find(pim, &rpf);
if (pnc) {
@@ -917,13 +899,13 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
memset(nexthop_tab, 0,
sizeof(struct pim_zlookup_nexthop) * router->multipath);
num_ifindex =
- zclient_lookup_nexthop(pim, nexthop_tab, router->multipath,
- src_addr, PIM_NEXTHOP_LOOKUP_MAX);
+ zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src,
+ PIM_NEXTHOP_LOOKUP_MAX);
if (num_ifindex < 1) {
if (PIM_DEBUG_PIM_NHT)
zlog_warn(
"%s: could not find nexthop ifindex for address %pPA(%s)",
- __func__, &src_addr, pim->vrf->name);
+ __func__, &src, pim->vrf->name);
return 0;
}
@@ -940,14 +922,14 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
if (ifps[i]) {
nbrs[i] = pim_neighbor_find(
ifps[i], nexthop_tab[i].nexthop_addr);
- if (nbrs[i] ||
- pim_if_connected_to_source(ifps[i], src_addr))
+ if (nbrs[i] || pim_if_connected_to_source(ifps[i], src))
num_nbrs++;
}
}
// If PIM ECMP enable then choose ECMP path.
if (pim->ecmp_enable) {
+ struct prefix src_pfx;
uint32_t consider = num_ifindex;
if (neighbor_needed && num_nbrs < consider)
@@ -956,7 +938,8 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
if (consider == 0)
return 0;
- hash_val = pim_compute_ecmp_hash(src, grp);
+ pim_addr_to_prefix(&src_pfx, src);
+ hash_val = pim_compute_ecmp_hash(&src_pfx, grp);
mod_val = hash_val % consider;
if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug("%s: hash_val %u mod_val %u", __func__,
@@ -972,8 +955,8 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s %s: could not find interface for ifindex %d (address %pPA(%s))",
- __FILE__, __func__, first_ifindex,
- &src_addr, pim->vrf->name);
+ __FILE__, __func__, first_ifindex, &src,
+ pim->vrf->name);
if (i == mod_val)
mod_val++;
i++;
@@ -985,14 +968,13 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
zlog_debug(
"%s: multicast not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
__func__, ifp->name, pim->vrf->name,
- first_ifindex, &src_addr);
+ first_ifindex, &src);
if (i == mod_val)
mod_val++;
i++;
continue;
}
- if (neighbor_needed &&
- !pim_if_connected_to_source(ifp, src_addr)) {
+ if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) {
nbr = nbrs[i];
if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug("ifp name: %s(%s), pim nbr: %p",
@@ -1006,7 +988,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
__func__,
&nexthop_tab[i].nexthop_addr,
ifp->name, pim->vrf->name,
- &src_addr);
+ &src);
i++;
continue;
}
@@ -1017,7 +999,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
zlog_debug(
"%s: found nhop %pPA for addr %pPA interface %s(%s) metric %d dist %d",
__func__, &nexthop_tab[i].nexthop_addr,
- &src_addr, ifp->name, pim->vrf->name,
+ &src, ifp->name, pim->vrf->name,
nexthop_tab[i].route_metric,
nexthop_tab[i].protocol_distance);
/* update nexthop data */
@@ -1028,7 +1010,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
nexthop_tab[i].protocol_distance;
nexthop->mrib_route_metric =
nexthop_tab[i].route_metric;
- nexthop->last_lookup = src_addr;
+ nexthop->last_lookup = src;
nexthop->last_lookup_time = pim_time_monotonic_usec();
nexthop->nbr = nbr;
found = 1;
@@ -1042,24 +1024,19 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
return 0;
}
-int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
- struct prefix *src, struct prefix *grp)
+int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src,
+ struct prefix *grp)
{
struct pim_nexthop nhop;
int vif_index;
ifindex_t ifindex;
- pim_addr src_addr;
-
- if (PIM_DEBUG_PIM_NHT_DETAIL) {
- src_addr = pim_addr_from_prefix(src);
- }
memset(&nhop, 0, sizeof(nhop));
if (!pim_ecmp_nexthop_lookup(pim, &nhop, src, grp, 1)) {
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s: could not find nexthop ifindex for address %pPA(%s)",
- __func__, &src_addr, pim->vrf->name);
+ __func__, &src, pim->vrf->name);
return -1;
}
@@ -1069,7 +1046,7 @@ int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
"%s: found nexthop ifindex=%d (interface %s(%s)) for address %pPA",
__func__, ifindex,
ifindex2ifname(ifindex, pim->vrf->vrf_id),
- pim->vrf->name, &src_addr);
+ pim->vrf->name, &src);
vif_index = pim_if_find_vifindex_by_ifindex(pim, ifindex);
@@ -1077,7 +1054,7 @@ int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
if (PIM_DEBUG_PIM_NHT) {
zlog_debug(
"%s: low vif_index=%d(%s) < 1 nexthop for address %pPA",
- __func__, vif_index, pim->vrf->name, &src_addr);
+ __func__, vif_index, pim->vrf->name, &src);
}
return -2;
}
diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h
index d51f622ece..240e61d98f 100644
--- a/pimd/pim_nht.h
+++ b/pimd/pim_nht.h
@@ -54,28 +54,28 @@ struct pim_nexthop_cache {
};
int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS);
-int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
+int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
struct pim_upstream *up, struct rp_info *rp,
struct pim_nexthop_cache *out_pnc);
-void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
+void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr,
struct pim_upstream *up, struct rp_info *rp);
struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
struct pim_rpf *rpf);
uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp);
int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
- struct pim_nexthop *nexthop, struct prefix *src,
+ struct pim_nexthop *nexthop, pim_addr src,
struct prefix *grp, int neighbor_needed);
void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
struct pim_nexthop_cache *pnc, int command);
-int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
- struct prefix *src, struct prefix *grp);
+int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src,
+ struct prefix *grp);
void pim_rp_nexthop_del(struct rp_info *rp_info);
/* for RPF check on BSM message receipt */
-void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr bsr_addr);
-void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr bsr_addr);
+void pim_nht_bsr_add(struct pim_instance *pim, pim_addr bsr_addr);
+void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr);
/* RPF(bsr_addr) == src_ip%src_ifp? */
-bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
+bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
struct interface *src_ifp, pim_addr src_ip);
#endif
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
index 8403340d86..0eb49a7f91 100644
--- a/pimd/pim_register.c
+++ b/pimd/pim_register.c
@@ -139,7 +139,6 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
struct pim_instance *pim = pim_ifp->pim;
struct pim_upstream *up = NULL;
struct pim_rpf *rp;
- pim_addr rpf_addr;
pim_sgaddr sg;
struct listnode *up_node;
struct pim_upstream *child;
@@ -174,12 +173,11 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
rp = RP(pim_ifp->pim, sg.grp);
if (rp) {
- rpf_addr = pim_addr_from_prefix(&rp->rpf_addr);
/* As per RFC 7761, Section 4.9.4:
* A special wildcard value consisting of an address field of
* all zeros can be used to indicate any source.
*/
- if ((pim_addr_cmp(sg.src, rpf_addr) == 0) ||
+ if ((pim_addr_cmp(sg.src, rp->rpf_addr) == 0) ||
pim_addr_is_any(sg.src)) {
handling_star = true;
sg.src = PIMADDR_ANY;
@@ -284,11 +282,10 @@ void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
unsigned char *b1;
struct pim_interface *pinfo;
struct interface *ifp;
- pim_addr dst = pim_addr_from_prefix(&rpg->rpf_addr);
if (PIM_DEBUG_PIM_REG) {
zlog_debug("Sending %s %sRegister Packet to %pPA", up->sg_str,
- null_register ? "NULL " : "", &dst);
+ null_register ? "NULL " : "", &rpg->rpf_addr);
}
ifp = rpg->source_nexthop.interface;
@@ -310,7 +307,7 @@ void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
if (PIM_DEBUG_PIM_REG) {
zlog_debug("%s: Sending %s %sRegister Packet to %pPA on %s",
__func__, up->sg_str, null_register ? "NULL " : "",
- &dst, ifp->name);
+ &rpg->rpf_addr, ifp->name);
}
memset(buffer, 0, 10000);
@@ -327,13 +324,14 @@ void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
*/
src = pim_register_get_unicast_v6_addr(pinfo);
#endif
- pim_msg_build_header(src, dst, buffer, buf_size + PIM_MSG_REGISTER_LEN,
+ pim_msg_build_header(src, rpg->rpf_addr, buffer,
+ buf_size + PIM_MSG_REGISTER_LEN,
PIM_MSG_TYPE_REGISTER, false);
if (!pinfo->pim_passive_enable)
++pinfo->pim_ifstat_reg_send;
- if (pim_msg_send(pinfo->pim_sock_fd, src, dst, buffer,
+ if (pim_msg_send(pinfo->pim_sock_fd, src, rpg->rpf_addr, buffer,
buf_size + PIM_MSG_REGISTER_LEN, ifp)) {
if (PIM_DEBUG_PIM_TRACE) {
zlog_debug(
@@ -618,7 +616,7 @@ int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
}
}
- rp_addr = pim_addr_from_prefix(&(RP(pim, sg.grp))->rpf_addr);
+ rp_addr = (RP(pim, sg.grp))->rpf_addr;
if (i_am_rp && (!pim_addr_cmp(dest_addr, rp_addr))) {
sentRegisterStop = 0;
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
index 2b798b14b2..783c9b97e7 100644
--- a/pimd/pim_rp.c
+++ b/pimd/pim_rp.c
@@ -84,7 +84,7 @@ int pim_rp_list_cmp(void *v1, void *v2)
/*
* Sort by RP IP address
*/
- ret = prefix_cmp(&rp1->rp.rpf_addr, &rp2->rp.rpf_addr);
+ ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr);
if (ret)
return ret;
@@ -119,7 +119,7 @@ void pim_rp_init(struct pim_instance *pim)
XFREE(MTYPE_PIM_RP, rp_info);
return;
}
- pim_addr_to_prefix(&rp_info->rp.rpf_addr, PIMADDR_ANY);
+ rp_info->rp.rpf_addr = PIMADDR_ANY;
listnode_add(pim->rp_list, rp_info);
@@ -149,12 +149,9 @@ static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
{
struct listnode *node;
struct rp_info *rp_info;
- struct prefix rp_prefix;
-
- pim_addr_to_prefix(&rp_prefix, rp);
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
- if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
+ if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
return rp_info;
}
@@ -189,11 +186,9 @@ static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
{
struct listnode *node;
struct rp_info *rp_info;
- struct prefix rp_prefix;
- pim_addr_to_prefix(&rp_prefix, rp);
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
- if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
+ if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
prefix_same(&rp_info->group, group))
return rp_info;
}
@@ -344,11 +339,9 @@ static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
{
struct listnode *node;
struct pim_secondary_addr *sec_addr;
- pim_addr rpf_addr;
-
- rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+ pim_addr sec_paddr;
- if (!pim_addr_cmp(pim_ifp->primary_address, rpf_addr))
+ if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
return 1;
if (!pim_ifp->sec_addr_list) {
@@ -356,9 +349,11 @@ static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
}
for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
- if (prefix_same(&sec_addr->addr, &rp_info->rp.rpf_addr)) {
+ sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
+ /* If an RP address is one of our own addresses, it is enough to know
+ * that we are the RP; the prefix length does not matter here. */
+ if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr))
return 1;
- }
}
return 0;
@@ -388,7 +383,6 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
enum pim_rpf_result rpf_result;
pim_addr old_upstream_addr;
pim_addr new_upstream_addr;
- struct prefix nht_p;
old_upstream_addr = up->upstream_addr;
pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
@@ -408,12 +402,11 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
*/
if (!pim_addr_is_any(old_upstream_addr)) {
/* Deregister addr with Zebra NHT */
- pim_addr_to_prefix(&nht_p, old_upstream_addr);
if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
- "%s: Deregister upstream %s addr %pFX with Zebra NHT",
- __func__, up->sg_str, &nht_p);
- pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
+ "%s: Deregister upstream %s addr %pPA with Zebra NHT",
+ __func__, up->sg_str, &old_upstream_addr);
+ pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
}
/* Update the upstream address */
@@ -446,7 +439,7 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
struct listnode *node, *nnode;
struct rp_info *tmp_rp_info;
char buffer[BUFSIZ];
- struct prefix nht_p;
+ pim_addr nht_p;
struct route_node *rn = NULL;
struct pim_upstream *up;
bool upstream_updated = false;
@@ -456,7 +449,7 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
- pim_addr_to_prefix(&rp_info->rp.rpf_addr, rp_addr);
+ rp_info->rp.rpf_addr = rp_addr;
prefix_copy(&rp_info->group, &group);
rp_info->rp_src = rp_src_flag;
@@ -482,8 +475,8 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
*/
for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
tmp_rp_info)) {
- if (prefix_same(&rp_info->rp.rpf_addr,
- &tmp_rp_info->rp.rpf_addr)) {
+ if (!pim_addr_cmp(rp_info->rp.rpf_addr,
+ tmp_rp_info->rp.rpf_addr)) {
if (tmp_rp_info->plist)
pim_rp_del_config(pim, rp_addr, NULL,
tmp_rp_info->plist);
@@ -519,8 +512,8 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
tmp_rp_info)) {
if (tmp_rp_info->plist &&
- prefix_same(&rp_info->rp.rpf_addr,
- &tmp_rp_info->rp.rpf_addr)) {
+ (!pim_addr_cmp(rp_info->rp.rpf_addr,
+ tmp_rp_info->rp.rpf_addr))) {
pim_rp_del_config(pim, rp_addr, NULL,
tmp_rp_info->plist);
}
@@ -539,7 +532,7 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
nht_p = rp_all->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug(
- "%s: NHT Register rp_all addr %pFX grp %pFX ",
+ "%s: NHT Register rp_all addr %pPA grp %pFX ",
__func__, &nht_p, &rp_all->group);
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
@@ -565,12 +558,12 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
pim_rp_check_interfaces(pim, rp_all);
pim_rp_refresh_group_to_rp_mapping(pim);
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_all,
+ pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
NULL);
if (!pim_ecmp_nexthop_lookup(pim,
&rp_all->rp.source_nexthop,
- &nht_p, &rp_all->group, 1))
+ nht_p, &rp_all->group, 1))
return PIM_RP_NO_PATH;
return PIM_SUCCESS;
}
@@ -660,10 +653,10 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
/* Register addr with Zebra NHT */
nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
- zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
+ zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
__func__, &nht_p, &rp_info->group);
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
+ pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
+ if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
&rp_info->group, 1))
return PIM_RP_NO_PATH;
@@ -698,7 +691,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
struct prefix g_all;
struct rp_info *rp_info;
struct rp_info *rp_all;
- struct prefix nht_p;
+ pim_addr nht_p;
struct route_node *rn;
bool was_plist = false;
struct rp_info *trp_info;
@@ -753,9 +746,9 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
/* Deregister addr with Zebra NHT */
nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
- zlog_debug("%s: Deregister RP addr %pFX with Zebra ", __func__,
+ zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
&nht_p);
- pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
+ pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
if (!pim_get_all_mcast_group(&g_all))
return PIM_RP_BAD_ADDRESS;
@@ -769,7 +762,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
*/
pim_addr rpf_addr;
- rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+ rpf_addr = rp_info->rp.rpf_addr;
if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
pim_addr_is_any(up->sg.src)) {
struct prefix grp;
@@ -782,7 +775,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
}
}
}
- pim_addr_to_prefix(&rp_all->rp.rpf_addr, PIMADDR_ANY);
+ rp_all->rp.rpf_addr = PIMADDR_ANY;
rp_all->i_am_rp = 0;
return PIM_SUCCESS;
}
@@ -817,7 +810,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
*/
pim_addr rpf_addr;
- rpf_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+ rpf_addr = rp_info->rp.rpf_addr;
if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
pim_addr_is_any(up->sg.src)) {
struct prefix grp;
@@ -851,7 +844,7 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
struct prefix group, enum rp_source rp_src_flag)
{
- struct prefix nht_p;
+ pim_addr nht_p;
struct route_node *rn;
int result = 0;
struct rp_info *rp_info = NULL;
@@ -873,7 +866,7 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
return result;
}
- old_rp_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+ old_rp_addr = rp_info->rp.rpf_addr;
if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
if (rp_info->rp_src != rp_src_flag) {
rp_info->rp_src = rp_src_flag;
@@ -882,24 +875,21 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
}
}
- nht_p.family = PIM_AF;
- nht_p.prefixlen = PIM_MAX_BITLEN;
-
/* Deregister old RP addr with Zebra NHT */
if (!pim_addr_is_any(old_rp_addr)) {
nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
- zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
+ zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
__func__, &nht_p);
- pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
+ pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
}
pim_rp_nexthop_del(rp_info);
listnode_delete(pim->rp_list, rp_info);
/* Update the new RP address*/
- pim_addr_to_prefix(&rp_info->rp.rpf_addr, new_rp_addr);
+ rp_info->rp.rpf_addr = new_rp_addr;
rp_info->rp_src = rp_src_flag;
rp_info->i_am_rp = 0;
@@ -926,11 +916,11 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
/* Register new RP addr with Zebra NHT */
nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
- zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
+ zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
__func__, &nht_p, &rp_info->group);
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
+ pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
+ if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
&rp_info->group, 1)) {
route_unlock_node(rn);
return PIM_RP_NO_PATH;
@@ -949,7 +939,7 @@ void pim_rp_setup(struct pim_instance *pim)
{
struct listnode *node;
struct rp_info *rp_info;
- struct prefix nht_p;
+ pim_addr nht_p;
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
@@ -957,9 +947,9 @@ void pim_rp_setup(struct pim_instance *pim)
nht_p = rp_info->rp.rpf_addr;
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+ pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
- &nht_p, &rp_info->group, 1))
+ nht_p, &rp_info->group, 1))
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug(
"Unable to lookup nexthop for rp specified");
@@ -994,12 +984,9 @@ void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
i_am_rp_changed = true;
rp_info->i_am_rp = 1;
- if (PIM_DEBUG_PIM_NHT_RP) {
- char rp[PREFIX_STRLEN];
- pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
- rp, sizeof(rp));
- zlog_debug("%s: %s: i am rp", __func__, rp);
- }
+ if (PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("%s: %pPA: i am rp", __func__,
+ &rp_info->rp.rpf_addr);
}
}
@@ -1032,16 +1019,15 @@ void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
if (old_i_am_rp != rp_info->i_am_rp) {
i_am_rp_changed = true;
if (PIM_DEBUG_PIM_NHT_RP) {
- char rp[PREFIX_STRLEN];
- pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
- rp, sizeof(rp));
- if (rp_info->i_am_rp) {
- zlog_debug("%s: %s: i am rp", __func__,
- rp);
- } else {
- zlog_debug("%s: %s: i am no longer rp",
- __func__, rp);
- }
+ if (rp_info->i_am_rp)
+ zlog_debug("%s: %pPA: i am rp",
+ __func__,
+ &rp_info->rp.rpf_addr);
+ else
+ zlog_debug(
+ "%s: %pPA: i am no longer rp",
+ __func__,
+ &rp_info->rp.rpf_addr);
}
}
}
@@ -1088,18 +1074,18 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
rp_info = pim_rp_find_match_group(pim, &g);
if (rp_info) {
- struct prefix nht_p;
+ pim_addr nht_p;
/* Register addr with Zebra NHT */
nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug(
- "%s: NHT Register RP addr %pFX grp %pFX with Zebra",
+ "%s: NHT Register RP addr %pPA grp %pFX with Zebra",
__func__, &nht_p, &rp_info->group);
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+ pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
pim_rpf_set_refresh_time(pim);
(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
- &nht_p, &rp_info->group, 1);
+ nht_p, &rp_info->group, 1);
return (&rp_info->rp);
}
@@ -1137,7 +1123,7 @@ int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
}
if (pim_addr_is_any(source))
- *up = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+ *up = rp_info->rp.rpf_addr;
else
*up = source;
@@ -1159,7 +1145,7 @@ int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
if (rp_info->rp_src == RP_SRC_BSR)
continue;
- rp_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+ rp_addr = rp_info->rp.rpf_addr;
if (rp_info->plist)
vty_out(vty,
"%s" PIM_AF_NAME
@@ -1215,10 +1201,10 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
* entry for the previous RP
*/
if (prev_rp_info &&
- prefix_cmp(&prev_rp_info->rp.rpf_addr,
- &rp_info->rp.rpf_addr)) {
+ (pim_addr_cmp(prev_rp_info->rp.rpf_addr,
+ rp_info->rp.rpf_addr))) {
json_object_object_addf(
- json, json_rp_rows, "%pFXh",
+ json, json_rp_rows, "%pPA",
&prev_rp_info->rp.rpf_addr);
json_rp_rows = NULL;
}
@@ -1227,7 +1213,7 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
json_rp_rows = json_object_new_array();
json_row = json_object_new_object();
- json_object_string_addf(json_row, "rpAddress", "%pFXh",
+ json_object_string_addf(json_row, "rpAddress", "%pPA",
&rp_info->rp.rpf_addr);
if (rp_info->rp.source_nexthop.interface)
json_object_string_add(
@@ -1257,7 +1243,7 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
json_object_array_add(json_rp_rows, json_row);
} else {
- vty_out(vty, "%-15pFXh ", &rp_info->rp.rpf_addr);
+ vty_out(vty, "%-15pPA ", &rp_info->rp.rpf_addr);
if (rp_info->plist)
vty_out(vty, "%-18s ", rp_info->plist);
@@ -1284,7 +1270,7 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
if (json) {
if (prev_rp_info && json_rp_rows)
- json_object_object_addf(json, json_rp_rows, "%pFXh",
+ json_object_object_addf(json, json_rp_rows, "%pPA",
&prev_rp_info->rp.rpf_addr);
}
}
@@ -1294,7 +1280,7 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
struct listnode *node = NULL;
struct rp_info *rp_info = NULL;
struct nexthop *nh_node = NULL;
- struct prefix nht_p;
+ pim_addr nht_p;
struct pim_nexthop_cache pnc;
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
@@ -1303,8 +1289,7 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
nht_p = rp_info->rp.rpf_addr;
memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
- if (!pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info,
- &pnc))
+ if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc))
continue;
for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
@@ -1329,7 +1314,7 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
#endif
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug(
- "%s: addr %pFXh new nexthop addr %pPAs interface %s",
+ "%s: addr %pPA new nexthop addr %pPAs interface %s",
__func__, &nht_p, &nbr->source_addr,
ifp1->name);
}
diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c
index bd4dd31a2c..a28278c581 100644
--- a/pimd/pim_rpf.c
+++ b/pimd/pim_rpf.c
@@ -203,11 +203,10 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
{
struct pim_rpf *rpf = &up->rpf;
struct pim_rpf saved;
- struct prefix nht_p;
- struct prefix src, grp;
+ pim_addr src;
+ struct prefix grp;
bool neigh_needed = true;
uint32_t saved_mrib_route_metric;
- pim_addr rpf_addr;
if (PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags))
return PIM_RPF_OK;
@@ -226,25 +225,22 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
old->rpf_addr = saved.rpf_addr;
}
- pim_addr_to_prefix(&nht_p, up->upstream_addr);
-
- pim_addr_to_prefix(&src, up->upstream_addr); // RP or Src address
+ src = up->upstream_addr; // RP or Src address
pim_addr_to_prefix(&grp, up->sg.grp);
if ((pim_addr_is_any(up->sg.src) && I_am_RP(pim, up->sg.grp)) ||
PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
neigh_needed = false;
- pim_find_or_track_nexthop(pim, &nht_p, up, NULL, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, &src, &grp,
- neigh_needed)) {
+ pim_find_or_track_nexthop(pim, up->upstream_addr, up, NULL, NULL);
+ if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, src, &grp,
+ neigh_needed)) {
/* Route is Deleted in Zebra, reset the stored NH data */
pim_upstream_rpf_clear(pim, up);
pim_rpf_cost_change(pim, up, saved_mrib_route_metric);
return PIM_RPF_FAILURE;
}
- rpf_addr = pim_rpf_find_rpf_addr(up);
- pim_addr_to_prefix(&rpf->rpf_addr, rpf_addr);
+ rpf->rpf_addr = pim_rpf_find_rpf_addr(up);
if (pim_rpf_addr_is_inaddr_any(rpf) && PIM_DEBUG_ZEBRA) {
/* RPF'(S,G) not found */
@@ -287,7 +283,7 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
}
/* detect change in RPF'(S,G) */
- if (!prefix_same(&saved.rpf_addr, &rpf->rpf_addr) ||
+ if (pim_addr_cmp(saved.rpf_addr, rpf->rpf_addr) ||
saved.source_nexthop.interface != rpf->source_nexthop.interface) {
pim_rpf_cost_change(pim, up, saved_mrib_route_metric);
return PIM_RPF_CHANGED;
@@ -321,7 +317,7 @@ void pim_upstream_rpf_clear(struct pim_instance *pim,
router->infinite_assert_metric.metric_preference;
up->rpf.source_nexthop.mrib_route_metric =
router->infinite_assert_metric.route_metric;
- pim_addr_to_prefix(&up->rpf.rpf_addr, PIMADDR_ANY);
+ up->rpf.rpf_addr = PIMADDR_ANY;
pim_upstream_mroute_iif_update(up->channel_oil, __func__);
}
}
@@ -375,15 +371,7 @@ static pim_addr pim_rpf_find_rpf_addr(struct pim_upstream *up)
int pim_rpf_addr_is_inaddr_any(struct pim_rpf *rpf)
{
- pim_addr rpf_addr = pim_addr_from_prefix(&rpf->rpf_addr);
-
- switch (rpf->rpf_addr.family) {
- case AF_INET:
- case AF_INET6:
- return pim_addr_is_any(rpf_addr);
- default:
- return 0;
- }
+ return pim_addr_is_any(rpf->rpf_addr);
}
int pim_rpf_is_same(struct pim_rpf *rpf1, struct pim_rpf *rpf2)
@@ -399,10 +387,10 @@ unsigned int pim_rpf_hash_key(const void *arg)
const struct pim_nexthop_cache *r = arg;
#if PIM_IPV == 4
- return jhash_1word(r->rpf.rpf_addr.u.prefix4.s_addr, 0);
+ return jhash_1word(r->rpf.rpf_addr.s_addr, 0);
#else
- return jhash2(r->rpf.rpf_addr.u.prefix6.s6_addr32,
- array_size(r->rpf.rpf_addr.u.prefix6.s6_addr32), 0);
+ return jhash2(r->rpf.rpf_addr.s6_addr32,
+ array_size(r->rpf.rpf_addr.s6_addr32), 0);
#endif
}
@@ -413,5 +401,5 @@ bool pim_rpf_equal(const void *arg1, const void *arg2)
const struct pim_nexthop_cache *r2 =
(const struct pim_nexthop_cache *)arg2;
- return prefix_same(&r1->rpf.rpf_addr, &r2->rpf.rpf_addr);
+ return (!pim_addr_cmp(r1->rpf.rpf_addr, r2->rpf.rpf_addr));
}
diff --git a/pimd/pim_rpf.h b/pimd/pim_rpf.h
index a2289b4cc5..2ddb9832f6 100644
--- a/pimd/pim_rpf.h
+++ b/pimd/pim_rpf.h
@@ -49,7 +49,7 @@ struct pim_nexthop {
struct pim_rpf {
struct pim_nexthop source_nexthop;
- struct prefix rpf_addr; /* RPF'(S,G) */
+ pim_addr rpf_addr; /* RPF'(S,G) */
};
enum pim_rpf_result { PIM_RPF_OK = 0, PIM_RPF_CHANGED, PIM_RPF_FAILURE };
diff --git a/pimd/pim_str.h b/pimd/pim_str.h
index be8b6a9f4f..4481776097 100644
--- a/pimd/pim_str.h
+++ b/pimd/pim_str.h
@@ -27,16 +27,25 @@
#include "prefix.h"
#include "pim_addr.h"
-#include "pim_addr.h"
-
+#if PIM_IPV == 4
/*
- * Longest possible length of a (S,G) string is 36 bytes
+ * Longest possible length of a IPV4 (S,G) string is 34 bytes
* 123.123.123.123 = 16 * 2
* (,) = 3
* NULL Character at end = 1
- * (123.123.123.123,123,123,123,123)
+ * (123.123.123.123,123.123.123.123)
*/
#define PIM_SG_LEN PREFIX_SG_STR_LEN
+#else
+/*
+ * Longest possible length of a IPV6 (S,G) string is 94 bytes
+ * INET6_ADDRSTRLEN * 2 = 46 * 2
+ * (,) = 3
+ * NULL Character at end = 1
+ */
+#define PIM_SG_LEN 96
+#endif
+
#define pim_inet4_dump prefix_mcast_inet4_dump
void pim_addr_dump(const char *onfail, struct prefix *p, char *buf,
diff --git a/pimd/pim_tib.c b/pimd/pim_tib.c
index a69177a6e5..3b73c430b0 100644
--- a/pimd/pim_tib.c
+++ b/pimd/pim_tib.c
@@ -34,7 +34,7 @@ tib_sg_oil_setup(struct pim_instance *pim, pim_sgaddr sg, struct interface *oif)
struct pim_interface *pim_oif = oif->info;
int input_iface_vif_index = 0;
pim_addr vif_source;
- struct prefix src, grp;
+ struct prefix grp;
struct pim_nexthop nexthop;
struct pim_upstream *up = NULL;
@@ -43,20 +43,19 @@ tib_sg_oil_setup(struct pim_instance *pim, pim_sgaddr sg, struct interface *oif)
return pim_channel_oil_add(pim, &sg, __func__);
}
- pim_addr_to_prefix(&src, vif_source); // RP or Src addr
pim_addr_to_prefix(&grp, sg.grp);
up = pim_upstream_find(pim, &sg);
if (up) {
memcpy(&nexthop, &up->rpf.source_nexthop,
sizeof(struct pim_nexthop));
- pim_ecmp_nexthop_lookup(pim, &nexthop, &src, &grp, 0);
+ pim_ecmp_nexthop_lookup(pim, &nexthop, vif_source, &grp, 0);
if (nexthop.interface)
input_iface_vif_index = pim_if_find_vifindex_by_ifindex(
pim, nexthop.interface->ifindex);
} else
input_iface_vif_index =
- pim_ecmp_fib_lookup_if_vif_index(pim, &src, &grp);
+ pim_ecmp_fib_lookup_if_vif_index(pim, vif_source, &grp);
if (PIM_DEBUG_ZEBRA)
zlog_debug("%s: NHT %pSG vif_source %pPAs vif_index:%d",
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
index d447357c69..25e7e52705 100644
--- a/pimd/pim_upstream.c
+++ b/pimd/pim_upstream.c
@@ -192,7 +192,6 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
struct listnode *node, *nnode;
struct pim_ifchannel *ch;
bool notify_msdp = false;
- struct prefix nht_p;
if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
@@ -267,12 +266,11 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
*/
if (!pim_addr_is_any(up->upstream_addr)) {
/* Deregister addr with Zebra NHT */
- pim_addr_to_prefix(&nht_p, up->upstream_addr);
if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
- "%s: Deregister upstream %s addr %pFX with Zebra NHT",
- __func__, up->sg_str, &nht_p);
- pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
+ "%s: Deregister upstream %s addr %pPA with Zebra NHT",
+ __func__, up->sg_str, &up->upstream_addr);
+ pim_delete_tracked_nexthop(pim, up->upstream_addr, up, NULL);
}
XFREE(MTYPE_PIM_UPSTREAM, up);
@@ -290,16 +288,13 @@ void pim_upstream_send_join(struct pim_upstream *up)
}
if (PIM_DEBUG_PIM_TRACE) {
- char rpf_str[PREFIX_STRLEN];
- pim_addr_dump("<rpf?>", &up->rpf.rpf_addr, rpf_str,
- sizeof(rpf_str));
- zlog_debug("%s: RPF'%s=%s(%s) for Interface %s", __func__,
- up->sg_str, rpf_str,
+ zlog_debug("%s: RPF'%s=%pPA(%s) for Interface %s", __func__,
+ up->sg_str, &up->rpf.rpf_addr,
pim_upstream_state2str(up->join_state),
up->rpf.source_nexthop.interface->name);
if (pim_rpf_addr_is_inaddr_any(&up->rpf)) {
- zlog_debug("%s: can't send join upstream: RPF'%s=%s",
- __func__, up->sg_str, rpf_str);
+ zlog_debug("%s: can't send join upstream: RPF'%s=%pPA",
+ __func__, up->sg_str, &up->rpf.rpf_addr);
/* warning only */
}
}
@@ -345,8 +340,8 @@ static void join_timer_stop(struct pim_upstream *up)
THREAD_OFF(up->t_join_timer);
if (up->rpf.source_nexthop.interface)
- nbr = pim_neighbor_find_prefix(up->rpf.source_nexthop.interface,
- &up->rpf.rpf_addr);
+ nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+ up->rpf.rpf_addr);
if (nbr)
pim_jp_agg_remove_group(nbr->upstream_jp_agg, up, nbr);
@@ -359,8 +354,8 @@ void join_timer_start(struct pim_upstream *up)
struct pim_neighbor *nbr = NULL;
if (up->rpf.source_nexthop.interface) {
- nbr = pim_neighbor_find_prefix(up->rpf.source_nexthop.interface,
- &up->rpf.rpf_addr);
+ nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+ up->rpf.rpf_addr);
if (PIM_DEBUG_PIM_EVENTS) {
zlog_debug(
@@ -428,7 +423,7 @@ void pim_update_suppress_timers(uint32_t suppress_time)
}
}
-void pim_upstream_join_suppress(struct pim_upstream *up, struct prefix rpf,
+void pim_upstream_join_suppress(struct pim_upstream *up, pim_addr rpf,
int holdtime)
{
long t_joinsuppress_msec;
@@ -451,23 +446,19 @@ void pim_upstream_join_suppress(struct pim_upstream *up, struct prefix rpf,
pim_time_timer_remain_msec(up->t_join_timer);
else {
/* Remove it from jp agg from the nbr for suppression */
- nbr = pim_neighbor_find_prefix(up->rpf.source_nexthop.interface,
- &up->rpf.rpf_addr);
+ nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+ up->rpf.rpf_addr);
if (nbr) {
join_timer_remain_msec =
pim_time_timer_remain_msec(nbr->jp_timer);
}
}
- if (PIM_DEBUG_PIM_TRACE) {
- char rpf_str[INET_ADDRSTRLEN];
-
- pim_addr_dump("<rpf?>", &rpf, rpf_str, sizeof(rpf_str));
+ if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
- "%s %s: detected Join%s to RPF'(S,G)=%s: join_timer=%ld msec t_joinsuppress=%ld msec",
- __FILE__, __func__, up->sg_str, rpf_str,
+ "%s %s: detected Join%s to RPF'(S,G)=%pPA: join_timer=%ld msec t_joinsuppress=%ld msec",
+ __FILE__, __func__, up->sg_str, &rpf,
join_timer_remain_msec, t_joinsuppress_msec);
- }
if (join_timer_remain_msec < t_joinsuppress_msec) {
if (PIM_DEBUG_PIM_TRACE) {
@@ -507,8 +498,8 @@ void pim_upstream_join_timer_decrease_to_t_override(const char *debug_label,
/* upstream join tracked with neighbor jp timer */
struct pim_neighbor *nbr;
- nbr = pim_neighbor_find_prefix(up->rpf.source_nexthop.interface,
- &up->rpf.rpf_addr);
+ nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+ up->rpf.rpf_addr);
if (nbr)
join_timer_remain_msec =
pim_time_timer_remain_msec(nbr->jp_timer);
@@ -517,17 +508,11 @@ void pim_upstream_join_timer_decrease_to_t_override(const char *debug_label,
join_timer_remain_msec = t_override_msec + 1;
}
- if (PIM_DEBUG_PIM_TRACE) {
- char rpf_str[INET_ADDRSTRLEN];
-
- pim_addr_dump("<rpf?>", &up->rpf.rpf_addr, rpf_str,
- sizeof(rpf_str));
-
+ if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
- "%s: to RPF'%s=%s: join_timer=%ld msec t_override=%d msec",
- debug_label, up->sg_str, rpf_str,
+ "%s: to RPF'%s=%pPA: join_timer=%ld msec t_override=%d msec",
+ debug_label, up->sg_str, &up->rpf.rpf_addr,
join_timer_remain_msec, t_override_msec);
- }
if (join_timer_remain_msec > t_override_msec) {
if (PIM_DEBUG_PIM_TRACE) {
@@ -842,9 +827,7 @@ void pim_upstream_fill_static_iif(struct pim_upstream *up,
up->rpf.source_nexthop.mrib_metric_preference =
ZEBRA_CONNECT_DISTANCE_DEFAULT;
up->rpf.source_nexthop.mrib_route_metric = 0;
- up->rpf.rpf_addr.family = AF_INET;
- up->rpf.rpf_addr.u.prefix4.s_addr = PIM_NET_INADDR_ANY;
-
+ up->rpf.rpf_addr = PIMADDR_ANY;
}
static struct pim_upstream *pim_upstream_new(struct pim_instance *pim,
@@ -903,7 +886,7 @@ static struct pim_upstream *pim_upstream_new(struct pim_instance *pim,
router->infinite_assert_metric.metric_preference;
up->rpf.source_nexthop.mrib_route_metric =
router->infinite_assert_metric.route_metric;
- pim_addr_to_prefix(&up->rpf.rpf_addr, PIMADDR_ANY);
+ up->rpf.rpf_addr = PIMADDR_ANY;
up->ifchannels = list_new();
up->ifchannels->cmp = (int (*)(void *, void *))pim_ifchannel_compare;
@@ -1079,7 +1062,7 @@ struct pim_upstream *pim_upstream_add(struct pim_instance *pim, pim_sgaddr *sg,
if (PIM_DEBUG_PIM_TRACE) {
if (up)
- zlog_debug("%s(%s): %s, iif %pFX (%s) found: %d: ref_count: %d",
+ zlog_debug("%s(%s): %s, iif %pPA (%s) found: %d: ref_count: %d",
__func__, name,
up->sg_str, &up->rpf.rpf_addr, up->rpf.source_nexthop.interface ?
up->rpf.source_nexthop.interface->name : "Unknown" ,
@@ -1283,7 +1266,7 @@ void pim_upstream_rpf_genid_changed(struct pim_instance *pim,
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
pim_addr rpf_addr;
- rpf_addr = pim_addr_from_prefix(&up->rpf.rpf_addr);
+ rpf_addr = up->rpf.rpf_addr;
if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
diff --git a/pimd/pim_upstream.h b/pimd/pim_upstream.h
index 8feffb8fdb..3841d1af7b 100644
--- a/pimd/pim_upstream.h
+++ b/pimd/pim_upstream.h
@@ -317,7 +317,7 @@ void pim_upstream_update_join_desired(struct pim_instance *pim,
struct pim_upstream *up);
void pim_update_suppress_timers(uint32_t suppress_time);
-void pim_upstream_join_suppress(struct pim_upstream *up, struct prefix rpf,
+void pim_upstream_join_suppress(struct pim_upstream *up, pim_addr rpf,
int holdtime);
void pim_upstream_join_timer_decrease_to_t_override(const char *debug_label,
diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c
index 3565be35bd..5b63d04936 100644
--- a/pimd/pim_vxlan.c
+++ b/pimd/pim_vxlan.c
@@ -303,7 +303,6 @@ static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
struct pim_upstream *up;
struct pim_interface *term_ifp;
int flags = 0;
- struct prefix nht_p;
struct pim_instance *pim = vxlan_sg->pim;
if (vxlan_sg->up) {
@@ -353,9 +352,8 @@ static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
* iif
*/
if (!PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags)) {
- pim_addr_to_prefix(&nht_p, up->upstream_addr);
- pim_delete_tracked_nexthop(vxlan_sg->pim, &nht_p, up,
- NULL);
+ pim_delete_tracked_nexthop(vxlan_sg->pim,
+ up->upstream_addr, up, NULL);
}
/* We are acting FHR; clear out use_rpt setting if any */
pim_upstream_update_use_rpt(up, false /*update_mroute*/);
diff --git a/pimd/pim_zebra.c b/pimd/pim_zebra.c
index 7f217d9c2e..2c76fd6868 100644
--- a/pimd/pim_zebra.c
+++ b/pimd/pim_zebra.c
@@ -255,7 +255,7 @@ void pim_zebra_update_all_interfaces(struct pim_instance *pim)
struct pim_rpf rpf;
rpf.source_nexthop.interface = ifp;
- pim_addr_to_prefix(&rpf.rpf_addr, us->address);
+ rpf.rpf_addr = us->address;
pim_joinprune_send(&rpf, us->us);
pim_jp_agg_clear_group(us->us);
}
@@ -269,8 +269,8 @@ void pim_zebra_upstream_rpf_changed(struct pim_instance *pim,
if (old->source_nexthop.interface) {
struct pim_neighbor *nbr;
- nbr = pim_neighbor_find_prefix(old->source_nexthop.interface,
- &old->rpf_addr);
+ nbr = pim_neighbor_find(old->source_nexthop.interface,
+ old->rpf_addr);
if (nbr)
pim_jp_agg_remove_group(nbr->upstream_jp_agg, up, nbr);
diff --git a/pimd/subdir.am b/pimd/subdir.am
index 1424b2a45b..aa06b86479 100644
--- a/pimd/subdir.am
+++ b/pimd/subdir.am
@@ -22,6 +22,7 @@ pim_common = \
pimd/pim_assert.c \
pimd/pim_bfd.c \
pimd/pim_br.c \
+ pimd/pim_bsm.c \
pimd/pim_cmd_common.c \
pimd/pim_errors.c \
pimd/pim_hello.c \
@@ -64,7 +65,6 @@ pim_common = \
pimd_pimd_SOURCES = \
$(pim_common) \
- pimd/pim_bsm.c \
pimd/pim_cmd.c \
pimd/pim_igmp.c \
pimd/pim_igmp_mtrace.c \
@@ -90,7 +90,6 @@ pimd_pim6d_SOURCES = \
$(pim_common) \
pimd/pim6_main.c \
pimd/pim6_mld.c \
- pimd/pim6_stubs.c \
pimd/pim6_cmd.c \
# end
@@ -171,17 +170,11 @@ clippy_scan += \
pimd_pimd_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4
pimd_pimd_LDADD = lib/libfrr.la $(LIBCAP)
-if PIMD
-if DEV_BUILD
-#
-# pim6d is only enabled for --enable-dev-build, and NOT installed currently
-# (change noinst_ to sbin_ below to install it.)
-#
-noinst_PROGRAMS += pimd/pim6d
+if PIM6D
+sbin_PROGRAMS += pimd/pim6d
pimd_pim6d_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=6
pimd_pim6d_LDADD = lib/libfrr.la $(LIBCAP)
endif
-endif
pimd_test_igmpv3_join_LDADD = lib/libfrr.la
pimd_test_igmpv3_join_SOURCES = pimd/test_igmpv3_join.c
diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in
index 9d3d2b0643..135c065b15 100644
--- a/redhat/frr.spec.in
+++ b/redhat/frr.spec.in
@@ -24,6 +24,7 @@
%{!?with_pam: %global with_pam 0 }
%{!?with_pbrd: %global with_pbrd 1 }
%{!?with_pimd: %global with_pimd 1 }
+%{!?with_pim6d: %global with_pim6d 0 }
%{!?with_vrrpd: %global with_vrrpd 1 }
%{!?with_rtadv: %global with_rtadv 1 }
%{!?with_watchfrr: %global with_watchfrr 1 }
@@ -81,6 +82,7 @@
# if CentOS / RedHat and version < 7, then disable PIMd (too old, won't work)
%if 0%{?rhel} && 0%{?rhel} < 7
%global with_pimd 0
+ %global with_pim6d 0
%endif
# misc internal defines
@@ -102,6 +104,12 @@
%define daemon_pimd ""
%endif
+%if %{with_pim6d}
+ %define daemon_pim6d pim6d
+%else
+ %define daemon_pim6d ""
+%endif
+
%if %{with_pbrd}
%define daemon_pbrd pbrd
%else
@@ -150,7 +158,7 @@
%define daemon_pathd ""
%endif
-%define all_daemons %{daemon_list} %{daemon_ldpd} %{daemon_pimd} %{daemon_nhrpd} %{daemon_eigrpd} %{daemon_babeld} %{daemon_watchfrr} %{daemon_pbrd} %{daemon_bfdd} %{daemon_vrrpd} %{daemon_pathd}
+%define all_daemons %{daemon_list} %{daemon_ldpd} %{daemon_pimd} %{daemon_pim6d} %{daemon_nhrpd} %{daemon_eigrpd} %{daemon_babeld} %{daemon_watchfrr} %{daemon_pbrd} %{daemon_bfdd} %{daemon_vrrpd} %{daemon_pathd}
#release sub-revision (the two digits after the CONFDATE)
%{!?release_rev: %global release_rev 01 }
@@ -342,6 +350,11 @@ routing state through standard SNMP MIBs.
%else
--disable-pimd \
%endif
+%if %{with_pim6d}
+ --enable-pim6d \
+%else
+ --disable-pim6d \
+%endif
%if %{with_pbrd}
--enable-pbrd \
%else
@@ -666,6 +679,9 @@ fi
%if %{with_pimd}
%{_sbindir}/pimd
%endif
+%if %{with_pim6d}
+ %{_sbindir}/pim6d
+%endif
%if %{with_pbrd}
%{_sbindir}/pbrd
%endif
diff --git a/snapcraft/snapcraft.yaml.in b/snapcraft/snapcraft.yaml.in
index bf3902d9fa..9729be7b92 100644
--- a/snapcraft/snapcraft.yaml.in
+++ b/snapcraft/snapcraft.yaml.in
@@ -1,5 +1,5 @@
name: frr
-version: @VERSION@
+version: '@VERSION@'
summary: FRRouting BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM/LDP/EIGRP/BFD routing daemon
description: BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM/LDP/EIGRP/BFD routing daemon
FRRouting (FRR) is free software which manages TCP/IP based routing
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py
new file mode 100644
index 0000000000..95511568c6
--- /dev/null
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py
@@ -0,0 +1,2537 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+Following tests are covered.
+10. Verify default-originate route after BGP and FRR process restart
+11. Verify default-originate route after shut/no shut and clear BGP neighbor
+"""
+import os
+import sys
+import time
+import pytest
+from lib.topolog import logger
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ modify_as_number,
+ clear_bgp,
+ verify_bgp_rib,
+ get_dut_as_number,
+ verify_rib_default_route,
+ verify_fib_default_route,
+)
+from lib.common_config import (
+ interface_status,
+ verify_prefix_lists,
+ verify_fib_routes,
+ kill_router_daemons,
+ start_router_daemons,
+ shutdown_bringup_interface,
+ step,
+ required_linux_kernel_version,
+ stop_router,
+ start_router,
+ create_route_maps,
+ create_prefix_lists,
+ get_frr_ipv6_linklocal,
+ start_topology,
+ write_test_header,
+ check_address_types,
+ write_test_footer,
+ reset_config_on_routers,
+ create_static_routes,
+ check_router_status,
+)
+
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+KEEPALIVETIMER = 1
+HOLDDOWNTIMER = 3
+# Global variables
+NETWORK1_1 = {"ipv4": "198.51.1.1/32", "ipv6": "2001:DB8::1:1/128"}
+NETWORK2_1 = {"ipv4": "198.51.1.2/32", "ipv6": "2001:DB8::1:2/128"}
+DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+r0_connected_address_ipv4 = "192.168.0.0/24"
+r0_connected_address_ipv6 = "fd00::/64"
+r1_connected_address_ipv4 = "192.168.1.0/24"
+r1_connected_address_ipv6 = "fd00:0:0:1::/64"
+r3_connected_address_ipv4 = "192.168.2.0/24"
+r3_connected_address_ipv6 = "fd00:0:0:2::/64"
+r4_connected_address_ipv4 = "192.168.3.0/24"
+r4_connected_address_ipv6 = "fd00:0:0:3::/64"
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.15")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_default_originate_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls micronet initialization functions.
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global ADDR_TYPES
+ global BGP_CONVERGENCE
+ global DEFAULT_ROUTES
+ global DEFAULT_ROUTE_NXT_HOP_R1, DEFAULT_ROUTE_NXT_HOP_R3
+ global R0_NETWORK_LOOPBACK, R0_NETWORK_LOOPBACK_NXTHOP, R1_NETWORK_LOOPBACK
+ global R0_NETWORK_CONNECTED, R0_NETWORK_CONNECTED_NXTHOP, R1_NETWORK_CONNECTED, R1_NETWORK_CONNECTED_NXTHOP
+ global R4_NETWORK_LOOPBACK, R4_NETWORK_LOOPBACK_NXTHOP, R3_NETWORK_LOOPBACK
+ global R4_NETWORK_CONNECTED, R4_NETWORK_CONNECTED_NXTHOP, R3_NETWORK_CONNECTED, R3_NETWORK_CONNECTED_NXTHOP
+
+ ADDR_TYPES = check_address_types()
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+ # These are the global variables used throughout the file; they are achieved only after building the topology.
+
+ r0_loopback_address_ipv4 = topo["routers"]["r0"]["links"]["lo"]["ipv4"]
+ r0_loopback_address_ipv4_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+ "ipv4"
+ ].split("/")[0]
+ r0_loopback_address_ipv6 = topo["routers"]["r0"]["links"]["lo"]["ipv6"]
+ r0_loopback_address_ipv6_nxt_hop = topo["routers"]["r0"]["links"]["r1"][
+ "ipv6"
+ ].split("/")[0]
+
+ r1_loopback_address_ipv4 = topo["routers"]["r1"]["links"]["lo"]["ipv4"]
+ r1_loopback_address_ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+ "ipv4"
+ ].split("/")[0]
+ r1_loopback_address_ipv6 = topo["routers"]["r1"]["links"]["lo"]["ipv6"]
+ r1_loopback_address_ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"][
+ "ipv6"
+ ].split("/")[0]
+
+ r4_loopback_address_ipv4 = topo["routers"]["r4"]["links"]["lo"]["ipv4"]
+ r4_loopback_address_ipv4_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+ "ipv4"
+ ].split("/")[0]
+ r4_loopback_address_ipv6 = topo["routers"]["r4"]["links"]["lo"]["ipv6"]
+ r4_loopback_address_ipv6_nxt_hop = topo["routers"]["r4"]["links"]["r3"][
+ "ipv6"
+ ].split("/")[0]
+
+ r3_loopback_address_ipv4 = topo["routers"]["r3"]["links"]["lo"]["ipv4"]
+ r3_loopback_address_ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+ "ipv4"
+ ].split("/")[0]
+ r3_loopback_address_ipv6 = topo["routers"]["r3"]["links"]["lo"]["ipv6"]
+ r3_loopback_address_ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"][
+ "ipv6"
+ ].split("/")[0]
+
+ R0_NETWORK_LOOPBACK = {
+ "ipv4": r0_loopback_address_ipv4,
+ "ipv6": r0_loopback_address_ipv6,
+ }
+ R0_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r0_loopback_address_ipv4_nxt_hop,
+ "ipv6": r0_loopback_address_ipv6_nxt_hop,
+ }
+
+ R1_NETWORK_LOOPBACK = {
+ "ipv4": r1_loopback_address_ipv4,
+ "ipv6": r1_loopback_address_ipv6,
+ }
+
+ R0_NETWORK_CONNECTED = {
+ "ipv4": r0_connected_address_ipv4,
+ "ipv6": r0_connected_address_ipv6,
+ }
+ R0_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r0_loopback_address_ipv4_nxt_hop,
+ "ipv6": r0_loopback_address_ipv6_nxt_hop,
+ }
+
+ R1_NETWORK_CONNECTED = {
+ "ipv4": r1_connected_address_ipv4,
+ "ipv6": r1_connected_address_ipv6,
+ }
+ R1_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r1_loopback_address_ipv4_nxt_hop,
+ "ipv6": r1_loopback_address_ipv6_nxt_hop,
+ }
+
+ R4_NETWORK_LOOPBACK = {
+ "ipv4": r4_loopback_address_ipv4,
+ "ipv6": r4_loopback_address_ipv6,
+ }
+ R4_NETWORK_LOOPBACK_NXTHOP = {
+ "ipv4": r4_loopback_address_ipv4_nxt_hop,
+ "ipv6": r4_loopback_address_ipv6_nxt_hop,
+ }
+
+ R3_NETWORK_LOOPBACK = {
+ "ipv4": r3_loopback_address_ipv4,
+ "ipv6": r3_loopback_address_ipv6,
+ }
+ R4_NETWORK_CONNECTED = {
+ "ipv4": r4_connected_address_ipv4,
+ "ipv6": r4_connected_address_ipv6,
+ }
+ R4_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r4_loopback_address_ipv4_nxt_hop,
+ "ipv6": r4_loopback_address_ipv6_nxt_hop,
+ }
+
+ R3_NETWORK_CONNECTED = {
+ "ipv4": r3_connected_address_ipv4,
+ "ipv6": r3_connected_address_ipv6,
+ }
+ R3_NETWORK_CONNECTED_NXTHOP = {
+ "ipv4": r3_loopback_address_ipv4_nxt_hop,
+ "ipv6": r3_loopback_address_ipv6_nxt_hop,
+ }
+
+ # populating the nexthop for default routes
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_R1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_R3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_verify_default_originate_after_BGP_and_FRR_restart_p2(request):
+ """
+ Summary: "Verify default-originate route after BGP and FRR process restart "
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure EBGP between R0 to R1 and IBGP between R1 to R2")
+ step("Configure EBGP between R2 to R3 and IBGP between R3 to R4")
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": 999,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert (
+ BGP_CONVERGENCE is True
+    ), " Failed convergence after changing the AS number :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Configure IPv4 and IPv6 static route (Sv4 , Sv6) on R0 and (S1v4, S1v6)on R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the static route on R0 \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the static route on R4 \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R0")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Failed Route {} not found in R0 FIB \n Error: {}".format(
+ tc_name, NETWORK1_1, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Failed Route {} not found in R4 FIB \n Error: {}".format(
+ tc_name, NETWORK2_1, result
+ )
+
+ step(
+ "Configure redistribute connected and static on R0 (R0-R1) on R4 ( R4-R3) IPv4 and IPv6 address family"
+ )
+ redistribute_static = {
+ "r0": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"},
+ {"redist_type": "connected"},
+ ]
+ }
+ },
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the static route \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R1")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Failed : Redistributed routes from R0 is not learned in Router R1 RIB \n Error: {}".format(
+ tc_name, result
+ )
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R1_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R1_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R1_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Failed : Redistributed routes from R0 is not learned in Router R1 FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R3")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Failed : Redistributed routes from R4 is not learned in Router R3 RIB \n Error: {}".format(
+ tc_name, result
+ )
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R3_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R3_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R3_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Redistributed routes from R4 is not learned in Router R3 FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure IPv4 and IPv6 prefix-list on R1 for (Sv4 , Sv6) route")
+ input_dict_3 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ }
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ }
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the prefix lists \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify the Prefix - lists")
+ input_dict = {"r3": {"prefix_lists": ["Pv4", "Pv6"]}}
+ result = verify_prefix_lists(tgen, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to verify the prefix lists in router R3 \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure IPv4 (RMv4) and IPv6 (RMv6) route-map on R1")
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the route-map \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ " Configure default originate with route-map RMv4 and RMv6 for IPv4 and IPv6 bgp neighbors on R1 ( R1-R2) "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the default-originate in R1 towards R2 \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 default route received on R2 with R1 nexthop ")
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Failed : Default routes are not learned in R2 FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Failed : Default routes are not learned in R2 RIB\n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure redistribute connected and static on R1 IPv4 and IPv6 address family"
+ )
+ redistribute_static = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {"redistribute": [{"redist_type": "connected"}]}
+ },
+ "ipv6": {
+ "unicast": {"redistribute": [{"redist_type": "connected"}]}
+ },
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify IPv4 and IPv6 static and loopback route advertised from R4 and R0 are received on R2"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(" Configure default-originate on R3 for R3 to R2 IPv4 and IPv6 BGP neighbors ")
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    STEP = """After configuring the Default Originate From R3 --> R2
+    Both Default routes from R1 and R3 Should present in R2 BGP RIB.
+    'The Default Route from iBGP is preferred over EBGP' thus
+    Default Route From R1->r2 should only present in R2 FIB """
+ step(STEP)
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert (
+ result is not True
+    ), "Testcase {} : Failed \n IBGP default route should be preferred over EBGP default-originate \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify the default route from R1 is received both on RIB and FIB on R2")
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=False,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify the static and loopback route advertised from R0 and R4 are received on R2 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(" BGP Daemon restart operation")
+ routers = ["r1", "r2"]
+ for dut in routers:
+ step(
+ "Restart BGPD process on {}, when all the processes are running use watchfrr ".format(
+ dut
+ )
+ )
+ kill_router_daemons(tgen, dut, ["bgpd"])
+ start_router_daemons(tgen, dut, ["bgpd"])
+
+    step("After restarting the BGP daemon Verify the default originate ")
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert (
+ result is not True
+    ), "Testcase {} : Failed \n IBGP default route should be preferred over EBGP \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+        "Verify the default route from R1 is received both on RIB and FIB on R2"
+ )
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=False,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify the static and loopback route advertised from R0 and R4 are received on R2 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(" Restarting FRR routers operation")
+ """
+    NOTE : Verify that iBGP default route is preferred over eBGP default route
+ """
+ routers = ["r1", "r2"]
+ for dut in routers:
+ step(
+ "Restart FRR router process on {}, when all the processes are running use watchfrr ".format(
+ dut
+ )
+ )
+
+ stop_router(tgen, dut)
+ start_router(tgen, dut)
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert (
+ result is True
+ ), " Testcase {} : After Restarting {} Convergence Failed".format(tc_name, dut)
+
+ step("After restarting the FRR Router Verify the default originate ")
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+        "Verify the default route from R1 is received both on RIB and FIB on R2"
+ )
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert (
+ result is not True
+    ), "Testcase {} : Failed\n IBGP default route should be preferred over EBGP default route \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify the static and loopback route advertised from R0 and R4 are received on R2 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+def test_verify_default_originate_after_shut_no_shut_bgp_neighbor_p1(request):
+ """
+ Summary: "Verify default-originate route after shut/no shut and clear BGP neighbor "
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure EBGP between R0 to R1 and IBGP between R1 to R2")
+ step("Configure EBGP between R2 to R3 and IBGP between R3 to R4")
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": 999,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Configure one IPv4 and one IPv6 static route on R0 and R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify IPv4 and IPv6 static route configured on R0 and R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure redistribute connected and static on R0 (R0-R1) on R4 ( R4-R3) IPv4 and IPv6 address family"
+ )
+ redistribute_static = {
+ "r0": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static",
+ },
+ {
+ "redist_type": "connected",
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static",
+ },
+ {
+ "redist_type": "connected",
+ },
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static",
+ },
+ {
+ "redist_type": "connected",
+ },
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static",
+ },
+ {
+ "redist_type": "connected",
+ },
+ ]
+ }
+ },
+ }
+ }
+ },
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "connected",
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "connected",
+ }
+ ]
+ }
+ },
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify IPv4 and IPv6 static route configured on R1 from R0")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify IPv4 and IPv6 static route configured on R3 from R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure default-originate on R1 for R1 to R2 neighbor for IPv4 and IPv6 peer "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify IPv4 and IPv6 bgp default route received on R2 nexthop as R1")
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ step(
+ "After configuring default-originate command , verify default routes are advertised on R2 from R0 and R4"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure default-originate on R3 for R3 to R2 neighbor for IPv4 and IPv6 peer"
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {"unicast": {"default_originate": {"r2": {}}}},
+ "ipv6": {"unicast": {"default_originate": {"r2": {}}}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    STEP = """After configuring the Default Originate From R3 --> R2
+    Both Default routes from R1 and R3 Should present in R2 BGP RIB.
+    'The Default Route from iBGP is preferred over EBGP' thus
+    Default Route From R1->r2 should only present in R2 FIB """
+ step(STEP)
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert (
+ result is not True
+    ), "Testcase {} : Failed \n IBGP default route should be preferred over EBGP \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify the default route from R1 is received both on RIB and FIB on R2")
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "After configuring default-originate command , verify static ,connected and loopback routes are advertised on R2 from R0 and R4"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+    # Update the topology with the current AS numbers to avoid conflicts when configuring the AS
+ updated_topo = topo
+ updated_topo["routers"]["r0"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r0")
+ updated_topo["routers"]["r1"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r1")
+ updated_topo["routers"]["r2"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r2")
+ updated_topo["routers"]["r3"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r3")
+ updated_topo["routers"]["r4"]["bgp"]["local_as"] = get_dut_as_number(tgen, "r4")
+
+ step(
+ "Shut R1 to R2 IPv4 and IPv6 BGP neighbor from R1 IPv4 and IPv6 address family "
+ )
+
+ local_as = get_dut_as_number(tgen, dut="r1")
+ shut_neighbor = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {"shutdown": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {"shutdown": True}}}
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ interface = topo["routers"]["r2"]["links"]["r1"]["interface"]
+ input_dict = {"r2": {"interface_list": [interface], "status": "down"}}
+
+ result = interface_status(tgen, topo, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Bring down interface failed ! \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify IPv4 and IPv6 default static and loopback route which received from R1 are deleted from R2"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_bgp_rib(
+ tgen, addr_type, "r2", static_routes_input, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n after shutting down interface routes are not expected \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(
+ tgen, addr_type, "r2", static_routes_input, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n after shutting down interface routes are not expected \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify that No impact on IPv4 IPv6 and default route received from R3 ")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "No-Shut R1 to R2 IPv4 and IPv6 BGP neighbor from R1 IPv4 and IPv6 address family "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ shut_neighbor = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {"shutdown": False}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {"shutdown": False}}}
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ interface = topo["routers"]["r2"]["links"]["r1"]["interface"]
+ input_dict = {"r2": {"interface_list": [interface], "status": "up"}}
+
+ result = interface_status(tgen, topo, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Bring up interface failed ! \n Error: {}".format(tc_name, result)
+
+ step(
+ "After no shut Verify IPv4 and IPv6 bgp default route next hop as R1 , static ,connected and loopback received on R2 from r0 and r4 "
+ )
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "Shut R3 to R2 IPv4 and IPv6 BGP neighbor from R2 IPv4 and IPv6 address family"
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ shut_neighbor = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r3": {"shutdown": True}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r3": {"shutdown": True}}}
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ interface = topo["routers"]["r2"]["links"]["r3"]["interface"]
+ input_dict = {"r2": {"interface_list": [interface], "status": "down"}}
+
+ result = interface_status(tgen, topo, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Bring down interface failed ! \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify IPv4 and IPv6 default static and loopback route which received from R3 are deleted from R2 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_bgp_rib(
+ tgen, addr_type, "r2", static_routes_input, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed\n After shutting down the interface routes are not expected \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_routes(
+ tgen, addr_type, "r2", static_routes_input, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After shutting down the interface routes are not expected \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that Default route is removed i.e advertised from R3")
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After shutting down the interface Default route are not expected \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n After shutting down the interface Default route are not expected \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that No impact on IPv4 IPv6 and default route received from R1")
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "No-Shut R3 to R2 IPv4 and IPv6 BGP neighbor from R2 IPv4 and IPv6 address family"
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ shut_neighbor = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r3": {"shutdown": False}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r3": {"shutdown": False}}}
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ interface = topo["routers"]["r2"]["links"]["r3"]["interface"]
+ input_dict = {"r2": {"interface_list": [interface], "status": "up"}}
+
+ result = interface_status(tgen, topo, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Bring up interface failed ! \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that a static ,connected and loopback routes are received from R0 and R4 on R2 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step("verify that default route is received on R2 from R1")
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that default route is received on R2 from R3")
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step("Clear IPv4 and IP6 BGP session from R2 and R1 one by one ")
+ routers = ["r1", "r2"]
+ for dut in routers:
+ for addr_type in ADDR_TYPES:
+
+ clear_bgp(tgen, addr_type, dut)
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step("verify that default route is received on R2 from R3")
+
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ DEFAULT_ROUTE_NXT_HOP = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify the static , loopback and connected routes received from r0 and r4"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Shut BGP neighbor interface R2 (R2 to R1) link ")
+ intf_r2_r1 = topo["routers"]["r2"]["links"]["r1"]["interface"]
+ shutdown_bringup_interface(tgen, "r2", intf_r2_r1, False)
+
+ step("Verify the bgp Convergence ")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo, expected=False)
+ assert (
+ BGP_CONVERGENCE is not True
+ ), " :Failed After shutting interface BGP convergence is expected to be faileed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Verify that default route from R1 got deleted from BGP and RIB table")
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed\n After shuting interface default route should be removed from RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("No - Shut BGP neighbor interface R2 (R2 to R1) link ")
+ intf_r2_r1 = topo["routers"]["r2"]["links"]["r1"]["interface"]
+ shutdown_bringup_interface(tgen, "r2", intf_r2_r1, True)
+
+ step("Verify the bgp Convergence ")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step("verify that default route is received on R2 from R3")
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify the static , loopback and connected routes received from r0 and r4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Shut link from R3 to R2 from R3")
+ intf_r3_r2 = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ shutdown_bringup_interface(tgen, "r3", intf_r3_r2, False)
+
+ step("Verify the bgp Convergence ")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo, expected=False)
+ assert (
+ BGP_CONVERGENCE is not True
+ ), " :Failed \nAfter Shuting the interface BGP convegence is expected to be failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Verify that default route from R3 got deleted from BGP and RIB table")
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step("No-Shut link from R3 to R2 from R3")
+
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+
+ DEFAULT_ROUTE_NXT_HOP_1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_nxt_hop}
+
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+
+ DEFAULT_ROUTE_NXT_HOP_3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_nxt_hop}
+
+ intf_r3_r2 = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ shutdown_bringup_interface(tgen, "r3", intf_r3_r2, True)
+
+ step("Verify the bgp Convergence ")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo, expected=True)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step("verify that default route is received on R2 from R3")
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify the static , loopback and connected routes received from r0 and r4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R0_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R0_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R0_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_LOOPBACK[addr_type]],
+ "next_hop": R4_NETWORK_LOOPBACK_NXTHOP[addr_type],
+ },
+ {
+ "network": [R4_NETWORK_CONNECTED[addr_type]],
+ "next_hop": R4_NETWORK_CONNECTED_NXTHOP[addr_type],
+ },
+ ]
+ }
+ }
+
+ result = verify_fib_routes(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py b/tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py
new file mode 100644
index 0000000000..272a7fe291
--- /dev/null
+++ b/tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py
@@ -0,0 +1,2102 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+Following scenarios are covered.
+1. When there is change in route-map policy associated with default-originate, changes does not reflect.
+2. When route-map associated with default-originate is deleted, default route doesn't get withdrawn
+3. Update message is not being sent when only route-map is removed from the default-originate config.
+4. SNT counter gets incremented on change of every policy associated with default-originate
+5. Route-map with multiple match clauses causes inconsistencies with default-originate.
+6. BGP-Default originate behaviour with BGP attributes
+"""
+import os
+import sys
+import time
+import pytest
+from copy import deepcopy
+from lib.topolog import logger
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ get_prefix_count_route,
+ modify_as_number,
+ verify_bgp_rib,
+ get_dut_as_number,
+ verify_rib_default_route,
+ verify_fib_default_route,
+)
+from lib.common_config import (
+ verify_fib_routes,
+ step,
+ required_linux_kernel_version,
+ create_route_maps,
+ interface_status,
+ create_prefix_lists,
+ get_frr_ipv6_linklocal,
+ start_topology,
+ write_test_header,
+ verify_prefix_lists,
+ check_address_types,
+ write_test_footer,
+ reset_config_on_routers,
+ create_static_routes,
+ check_router_status,
+ delete_route_maps,
+)
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+NETWORK1_1 = {"ipv4": "198.51.1.1/32", "ipv6": "2001:DB8::1:1/128"}
+DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.15")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_default_originate_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global ADDR_TYPES
+ global BGP_CONVERGENCE
+ global DEFAULT_ROUTES
+ global DEFAULT_ROUTE_NXT_HOP_R1, DEFAULT_ROUTE_NXT_HOP_R3
+ ADDR_TYPES = check_address_types()
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ interface = topo["routers"]["r1"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_R1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ interface = topo["routers"]["r3"]["links"]["r2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r3", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_R3 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_default_originate_delete_conditional_routemap(request):
+ """
+ "scenerio covered":
+ 1. When there is change in route-map policy associated with default-originate, changes does not reflect.
+ 2. When route-map associated with default-originate is deleted, default route doesn't get withdrawn
+ 3. Update message is not being sent when only route-map is removed from the default-originate config.
+ 4. SNT counter gets incremented on change of every policy associated with default-originate
+ 5. Route-map with multiple match clauses causes inconsistencies with default-originate.
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPv4 and IPv6 , IBGP neighbor between R1 and R2")
+ step("Configure IPv4 and IPv6 , EBGP neighbor between R1 and R0")
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": 999,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": 1000,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 2000,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 3000,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+
+ step("After changing the BGP remote as , Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert (
+ BGP_CONVERGENCE is True
+ ), "Complete convergence is expected after changing ASN ....! ERROR :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Configure 1 IPv4 and 1 IPv6 Static route on R0 with next-hop as Null0")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the static route \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are configured and up on R0")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r0", static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : routes {} not found in R0 FIB \n Error: {}".format(
+ tc_name, static_routes_input, result
+ )
+
+ step(
+ "Configure redistribute static on IPv4 and IPv6 address family on R0 for R0 to R1 neighbor "
+ )
+ redistribute_static = {
+ "r0": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure redistribute configuration....! \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("verify IPv4 and IPv6 static route are received on R1")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Failed... Routes {} expected in r1 FIB after configuring the redistribute config on R0 \n Error: {}".format(
+ tc_name, static_routes_input, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", static_routes_input)
+ assert (
+ result is True
+ ), "Testcase {} : Failed... Routes {} expected in r1 RIB after configuring the redistribute config on R0\n Error: {}".format(
+ tc_name, static_routes_input, result
+ )
+
+ step(
+ "Configure IPv4 prefix-list 'Pv4' and and IPv6 prefix-list 'Pv6' on R1 to match BGP route Sv41, IPv6 route Sv61 permit "
+ )
+ input_dict_3 = {
+ "r1": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv4": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ },
+ ]
+ },
+ "ipv6": {
+ "Pv6": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ },
+ ]
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the prefix list \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure IPV4 and IPv6 route-map (RMv4 and RMv6) matching prefix-list (Pv4 and Pv6) respectively on R1"
+ )
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ "set": {
+ "path": {
+ "as_num": "5555",
+ "as_action": "prepend",
+ }
+ },
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ "set": {
+ "path": {
+ "as_num": "5555",
+ "as_action": "prepend",
+ }
+ },
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the route map \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure default-originate with route-map (RMv4 and RMv6) on R1, on BGP IPv4 and IPv6 address family "
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the default originate \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "After configuring default-originate command , verify default routes are advertised on R2 "
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ metric=0,
+ expected_aspath="5555",
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the default originate \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the default originate \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Changing the as-path policy of the existing route-map")
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ "set": {
+ "path": {
+ "as_num": "6666",
+ "as_action": "prepend",
+ }
+ },
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ "set": {
+ "path": {
+ "as_num": "6666",
+ "as_action": "prepend",
+ }
+ },
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the route map \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify prefix sent count on R1 towards R2 \n Send count shoud not be incremented on change of existing (AS-path) policy "
+ )
+ snapshot = get_prefix_count_route(
+ tgen, topo, dut="r1", peer="r2", link="r1", sent=True, received=False
+ )
+
+ ipv4_prefix_count = False
+ ipv6_prefix_count = False
+ if snapshot["ipv4_count"] == 2:
+ ipv4_prefix_count = True
+ if snapshot["ipv6_count"] == 2:
+ ipv6_prefix_count = True
+
+ assert (
+ ipv4_prefix_count is True
+ ), "Testcase {} : Failed Error: Expected sent Prefix is 2 but obtained {} ".format(
+ tc_name, ipv4_prefix_count
+ )
+ assert (
+ ipv6_prefix_count is True
+ ), "Testcase {} : Failed Error: Expected sent Prefix is 2 but obtained {} ".format(
+ tc_name, ipv6_prefix_count
+ )
+
+ step(
+ "After changing the as-path policy verify the new policy is advertised to router R2"
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ metric=0,
+ expected_aspath="6666",
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Default route with expected attributes is not found in BGP RIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Default route with expected attributes is not found in BGP FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove the as-path policy from the route-map")
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ "set": {
+ "path": {
+ "as_num": "6666",
+ "as_action": "prepend",
+ "delete": True,
+ }
+ },
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ "set": {
+ "path": {
+ "as_num": "6666",
+ "as_action": "prepend",
+ "delete": True,
+ }
+ },
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the route map \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "After removing the route policy (AS-Path) verify that as-path is removed in r2 "
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ )
+ assert result is True, "Testcase {} : Failed ... ! \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed .... !\n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Delete the route-map ")
+
+ delete_routemap = {"r1": {"route_maps": ["RMv4", "RMv6"]}}
+ result = delete_route_maps(tgen, delete_routemap)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to delete the route-map\n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "After deleting route-map , verify the default route in FIB and RIB are removed "
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ metric=0,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : After removing the route-map the default-route is not removed from R2 RIB\n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : After removing the route-map the default-route is not removed from R2 FIB \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Create route-map with with sequnce number 10 ")
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ "set": {
+ "path": {
+ "as_num": "9999",
+ "as_action": "prepend",
+ }
+ },
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ "set": {
+ "path": {
+ "as_num": "9999",
+ "as_action": "prepend",
+ }
+ },
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the route map \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "After Configuring the route-map the dut is expected to receive the route policy (as-path) as 99999"
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ metric=0,
+ expected_aspath="9999",
+ )
+ assert result is True, "Testcase {} : Failed...! \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert result is True, "Testcase {} : Failed ...!\n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Create another route-map with seq number less than the previous i. <10 ")
+ input_dict_3 = {
+ "r1": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {"ipv4": {"prefix_lists": "Pv4"}},
+ "set": {
+ "path": {
+ "as_num": "7777",
+ "as_action": "prepend",
+ }
+ },
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {"ipv6": {"prefix_lists": "Pv6"}},
+ "set": {
+ "path": {
+ "as_num": "7777",
+ "as_action": "prepend",
+ }
+ },
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert (
+ result is True
+ ), "Testcase {} : Failed to configure the route map \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "On creating new route-map the route-map with lower seq id should be considered "
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ metric=0,
+ expected_aspath="7777",
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Route-map with lowest prefix is not considered \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R1,
+ expected=True,
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Route-map with lowest prefix is not considered \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_default_originate_after_BGP_attributes_p1(request):
+ """
+ "Verify different BGP attributes with default-originate route "
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ global topo
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+ step("Configure IPv4 and IPv6 , EBGP neighbor between R3 and R2")
+ step("Configure IPv4 and IPv6 IBGP neighbor between R3 and R4")
+ r0_local_as = topo['routers']['r0']['bgp']['local_as']
+ r1_local_as = topo['routers']['r1']['bgp']['local_as']
+ r2_local_as = topo['routers']['r2']['bgp']['local_as']
+ r3_local_as = topo['routers']['r3']['bgp']['local_as']
+ r4_local_as = topo['routers']['r4']['bgp']['local_as']
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": r0_local_as,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": r1_local_as,
+ }
+ },
+ "r2": {
+ "bgp": {
+ "local_as": r2_local_as,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 4000,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step(
+ "Configure one IPv4 and one IPv6, Static route on R4 with next-hop as Null0 IPv4 route Sv41, IPv6 route Sv61 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step("Verify IPv4 and IPv6 static routes configured on R4 in FIB")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure redistribute static knob on R4 , for R4 to R3 neighbor for IPv4 and IPv6 address family "
+ )
+ redistribute_static = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify After configuring redistribute static , verify route received in BGP table of R3"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ NOTE = """Configure 2 IPv4 prefix-list Pv41 Pv42 and and 2 IPv6 prefix-list Pv61 Pv62 on R3 to match BGP IPv4 route Sv41, 200.1.1.1/24 , IPv6 route Sv61 and 200::1/64"""
+ step(NOTE)
+ input_dict_3 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "Pv41": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv4"],
+ "action": "permit",
+ }
+ ],
+ "Pv42": [
+ {"seqid": "1", "network": "200.1.1.1/24", "action": "permit"}
+ ],
+ },
+ "ipv6": {
+ "Pv61": [
+ {
+ "seqid": "1",
+ "network": NETWORK1_1["ipv6"],
+ "action": "permit",
+ }
+ ],
+ "Pv62": [
+ {"seqid": " 1", "network": "200::1/64", "action": "permit"}
+ ],
+ },
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify IPv4 and IPv6 Prefix list got configured on R3")
+ input_dict = {"r3": {"prefix_lists": ["Pv41", "Pv61", "Pv42", "Pv62"]}}
+ result = verify_prefix_lists(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure 2 sequence of route-map for IPv4 seq1 permit Pv41 and seq2 permit Pv42 and for IPv6 seq1 permit Pv61 , seq2 permit Pv62 on R3"
+ )
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv4": {"prefix_lists": "Pv41"}},
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "match": {"ipv4": {"prefix_lists": "Pv42"}},
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "Pv61"}},
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "match": {"ipv6": {"prefix_lists": "Pv62"}},
+ },
+ ],
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Apply on route-map seq1 set as-path prepend to 200 and route-map seq2 set as-path prepend to 300 for IPv4 and IPv6 route-map "
+ )
+ route_map = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "path": {
+ "as_num": "200",
+ "as_action": "prepend",
+ }
+ }
+
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "path": {
+ "as_num": "300",
+ "as_action": "prepend",
+ }
+ }
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "path": {
+ "as_num": "200",
+ "as_action": "prepend",
+ }
+ }
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "path": {
+ "as_num": "300",
+ "as_action": "prepend",
+ }
+ }
+ },
+ ],
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, route_map)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ " Configure default-originate with IPv4 and IPv6 route-map on R3 for R3-R2 IPv4 and IPv6 BGP neighbor"
+ )
+
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify IPv4 and IPv6 default route received on R2 with both the AS path on R2"
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        metric=0,
+        expected_aspath="4000 200",
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Modify AS prepend path adding one more value 500 in route-map sequence 1 and 600 for route-map sequence 2 for IPv4 and IPv6 route-map"
+ )
+ route_map = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "path": {
+ "as_num": "500",
+ "as_action": "prepend",
+ }
+ }
+
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "path": {
+ "as_num": "600",
+ "as_action": "prepend",
+ }
+ }
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "path": {
+ "as_num": "500",
+ "as_action": "prepend",
+ }
+ }
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "path": {
+ "as_num": "600",
+ "as_action": "prepend",
+ }
+ }
+ },
+ ],
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, route_map)
+    assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+
+ step("As path 500 added to IPv4 and IPv6 default -originate route received on R2")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ metric=0,
+ expected_aspath="4000 500",
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Apply on route-map seq1 set metric value to 70 and route-map seq2 set metric 80 IPv4 and IPv6 route-map"
+ )
+ route_map = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "metric": 70,
+ },
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "metric": 80,
+ },
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "metric": 70,
+ },
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "metric": 80,
+ },
+ },
+ ],
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, route_map)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify Configured metric value received on R2 along with as-path for IPv4 and IPv6 default routes "
+ )
+
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
+    result = verify_rib_default_route(
+        tgen,
+        topo,
+        dut="r2",
+        routes=DEFAULT_ROUTES,
+        expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+        metric=70,
+        expected_aspath="4000 500",
+    )
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+
+ step(
+ "Modify route-map seq1 configure metric 50 and route-map seq2 configure metric 100 IPv4 and IPv6 route-map "
+ )
+ route_map = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "metric": 50,
+ },
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "metric": 100,
+ },
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "metric": 50,
+ },
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "metric": 100,
+ },
+ },
+ ],
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, route_map)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify Configured metric value received on R2 along with as-path for IPv4 and IPv6 default routes "
+ )
+
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ metric=50,
+ expected_aspath="4000 500",
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Delete AS-prepend from IP4 and IPv6 route-map configured on R3 ")
+ route_map = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+
+ "set": {
+ "path": {
+ "as_num": "500",
+ "as_action": "prepend",
+ "delete": True,
+ },
+ "delete": True,
+ },
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "path": {
+ "as_num": "600",
+ "as_action": "prepend",
+ "delete": True,
+ },
+ "delete": True,
+ },
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "path": {
+ "as_num": "500",
+ "as_action": "prepend",
+ "delete": True,
+ },
+ "delete": True,
+ },
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "path": {
+ "as_num": "600",
+ "as_action": "prepend",
+ "delete": True,
+ },
+ "delete": True,
+ },
+ },
+ ],
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, route_map)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify AS-prepend is deleted from default originate route and metric value only present on R2 for IPv4 and IPv6 default routes "
+ )
+
+
+
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ metric=50,
+ expected_aspath="4000",
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+
+ step("Delete metric value from IP4 and IPv6 route-map configured on R3 ")
+ route_map = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {"metric": 50, "delete": True},
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {"metric": 100, "delete": True},
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {"metric": 50, "delete": True},
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {"metric": 100, "delete": True},
+ },
+ ],
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, route_map)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify Metric value deleted from IPv4 and IPv6 default route on R2 ,verify default routes "
+ )
+
+
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ metric=0,
+ expected_aspath="4000",
+ )
+
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ step("Change IPv4 and IPv6 , EBGP to IBGP neighbor between R3 and R2")
+ step("Change IPv4 and IPv6 IBGP to EBGP neighbor between R3 and R4")
+ r0_local_as = topo['routers']['r0']['bgp']['local_as']
+ r1_local_as = topo['routers']['r1']['bgp']['local_as']
+ r2_local_as = topo['routers']['r2']['bgp']['local_as']
+ r3_local_as = topo['routers']['r3']['bgp']['local_as']
+ r4_local_as = topo['routers']['r4']['bgp']['local_as']
+ input_dict = {
+ "r0": {
+ "bgp": {
+ "local_as": r0_local_as,
+ }
+ },
+ "r1": {
+ "bgp": {
+ "local_as": r1_local_as,
+ }
+ },
+
+ "r2": {
+ "bgp": {
+ "local_as": 1111,
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 1111,
+ }
+ },
+ "r4": {
+ "bgp": {
+ "local_as": 5555,
+ }
+ },
+ }
+ result = modify_as_number(tgen, topo, input_dict)
+ try:
+ assert result is True
+ except AssertionError:
+ logger.info("Expected behaviour: {}".format(result))
+ logger.info("BGP config is not created because of invalid ASNs")
+ step("After changing the BGP AS Path Verify the BGP Convergence")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+ step(
+ "Configure one IPv4 and one IPv6, Static route on R4 with next-hop as Null0 IPv4 route Sv41, IPv6 route Sv61 "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step("Verify IPv4 and IPv6 static routes configured on R4 in FIB")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure redistribute static knob on R4 , for R4 to R3 neighbor for IPv4 and IPv6 address family "
+ )
+ redistribute_static = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify After configuring redistribute static , verify route received in BGP table of R3"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ " Configure default-originate with IPv4 and IPv6 route-map on R3 for R3-R2 IPv4 and IPv6 BGP neighbor"
+ )
+ local_as = get_dut_as_number(tgen, dut="r3")
+ default_originate_config = {
+ "r3": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv4"}}}
+ },
+ "ipv6": {
+ "unicast": {"default_originate": {"r2": {"route_map": "RMv6"}}}
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify IPv4 and IPv6 default route received on R2 with both the AS path on R2"
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Configure local -preference to 50 on IPv4 and IPv6 route map seq1 and 60 on seq2"
+ )
+ route_map = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "locPrf": 50,
+ },
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "locPrf": 60,
+ },
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "locPrf": 50,
+ },
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "locPrf": 60,
+ },
+ },
+ ],
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, route_map)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify Configured metric value received on R2 along with as-path for IPv4 and IPv6 default routes "
+ )
+
+
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ locPrf=50,
+ )
+
+
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Modify local preference value to 150 on IPv4 and IPv6 route map seq1 and 160 on seq2"
+ )
+ route_map = {
+ "r3": {
+ "route_maps": {
+ "RMv4": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "locPrf": 150,
+ },
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "locPrf": 160,
+ },
+ },
+ ],
+ "RMv6": [
+ {
+ "action": "permit",
+ "seq_id": "1",
+ "set": {
+ "locPrf": 150,
+ },
+ },
+ {
+ "action": "permit",
+ "seq_id": "2",
+ "set": {
+ "locPrf": 160,
+ },
+ },
+ ],
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, route_map)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify Modified local-preference value received on R2 for IPv4 and IPv6 default routes "
+ )
+
+
+
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ locPrf=150,
+ )
+
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+    # updating the topology with the updated AS numbers to avoid conflicts when configuring the AS
+ updated_topo = topo
+ updated_topo['routers']['r0']['bgp']['local_as']=get_dut_as_number(tgen,"r0")
+ updated_topo['routers']['r1']['bgp']['local_as']=get_dut_as_number(tgen,"r1")
+ updated_topo['routers']['r2']['bgp']['local_as']=get_dut_as_number(tgen,"r2")
+ updated_topo['routers']['r3']['bgp']['local_as']=get_dut_as_number(tgen,"r3")
+ updated_topo['routers']['r4']['bgp']['local_as']=get_dut_as_number(tgen,"r4")
+
+ step("Shut IPv4/IPv6 BGP neighbor from R4 ( R4-R3) using 'neighbor x.x.x.x shut' command ")
+ local_as = get_dut_as_number(tgen, dut="r4")
+ shut_neighbor = {
+ "r4": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {"shutdown":True}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {"shutdown":True}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ interface = topo['routers']['r3']['links']['r4']['interface']
+ input_dict = {
+ "r1": {
+ "interface_list": [interface],
+ "status": "down"
+ }
+ }
+
+ result = interface_status(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Shut down the interface failed ! \n Error: {}".format(tc_name, result)
+
+ step("After shutting the interface verify the BGP convergence")
+ result = verify_bgp_convergence(tgen,topo,expected=False)
+ assert result is not True, "Testcase {} : Failed \n After shutting Down BGP convergence should Fail and return False \n Error: {}".format(tc_name, result)
+
+ step("verify default route deleted from R2 ")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: After Shut down interface the default route is NOT expected but found in RIB -> {}".format( tc_name, result)
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: After Shut down interface the default route is NOT expected but found in FIB -> {}".format( tc_name, result)
+
+
+ step("no Shut IPv4/IPv6 BGP neighbor from R4 ( R4-R3) using 'neighbor x.x.x.x shut' command ")
+ local_as = get_dut_as_number(tgen, dut="r4")
+ shut_neighbor = {
+ "r4": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {"shutdown":False}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {"shutdown":False}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, updated_topo, shut_neighbor)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ interface = topo['routers']['r3']['links']['r4']['interface']
+ input_dict = {
+ "r1": {
+ "interface_list": [interface],
+ "status": "up"
+ }
+ }
+
+ result = interface_status(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Bring up interface failed ! \n Error: {}".format(tc_name, result)
+
+ step("After no shutting the interface verify the BGP convergence")
+ result = verify_bgp_convergence(tgen,topo,expected=True)
+ assert result is True, "Testcase {} : Failed \n After shutting Down BGP convergence should Fail and return False \n Error: {}".format(tc_name, result)
+
+ step("After no shut neighbor , verify default route relearn on R2")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=True)
+ assert result is True, "Testcase {} : Failed \n Error: After no Shut down interface the default route is expected but found in RIB -> {}".format( tc_name, result)
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected=True)
+ assert result is True, "Testcase {} : Failed \n Error: After Shut down interface the default route is expected but found in FIB -> {}".format( tc_name, result)
+
+
+
+ step("Remove IPv4/IPv6 static route configure on R4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "delete": True
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step("Verify IPv4 and IPv6 static routes removed on R4 in FIB")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_bgp_rib(tgen, addr_type, "r4", static_routes_input, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("After removing static route , verify default route removed on R2")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected= False)
+ assert result is not True, "Testcase {} : Failed \n Error: After removing static the default route is NOT expected but found in RIB -> {}".format( tc_name, result)
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected= False)
+ assert result is not True, "Testcase {} : Failed \n Error: After removing static the default route is NOT expected but found in FIB -> {}".format( tc_name, result)
+
+
+ step("Configuring the static route back in r4")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step("Verify IPv4 and IPv6 static routes configured on R4 in FIB")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r4", static_routes_input, expected=True)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_bgp_rib(tgen, addr_type, "r4", static_routes_input, expected=True)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("After adding static route back , verify default route learned on R2")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected= True)
+ assert result is True, "Testcase {} : Failed \n Error: After removing static the default route is expected but found in RIB -> {}".format( tc_name, result)
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected= True)
+ assert result is True, "Testcase {} : Failed \n Error: After removing static the default route is expected but found in FIB -> {}".format( tc_name, result)
+
+ step("Deactivate IPv4 and IPv6 neighbor configured from R4 ( R4-R3)")
+
+ configure_bgp_on_r1 = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r4": {"deactivate": "ipv4"}}}
+ }
+ },
+
+ },"ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r4": {"deactivate": "ipv6"}}}
+ }
+ },
+
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, updated_topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("After deactivating the BGP neighbor , verify default route removed on R2")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected= False)
+ assert result is not True, "Testcase {} : Failed \n Error: After Deactivating the BGP neighbor the default route is NOT expected but found in RIB -> {}".format( tc_name, result)
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected= False)
+ assert result is not True, "Testcase {} : Failed \n Error: After Deactivating the BGP neighbor the default route is NOT expected but found in FIB -> {}".format( tc_name, result)
+
+ step("Activate IPv4 and IPv6 neighbor configured from R4 ( R4-R3)")
+
+ configure_bgp_on_r1 = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r4": {"activate": "ipv4"}}}
+ }
+ },
+
+ },"ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r4": {"activate": "ipv6"}}}
+ }
+ },
+
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, updated_topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp convergence.")
+ bgp_convergence = verify_bgp_convergence(tgen, updated_topo)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+ step("After Activating the BGP neighbor , verify default route learned on R2")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected= True)
+ assert result is True, "Testcase {} : Failed \n Error: After Deactivating the BGP neighbor the default route is expected but found in RIB -> {}".format( tc_name, result)
+
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_R3,
+ expected= True)
+ assert result is True, "Testcase {} : Failed \n Error: After Deactivating the BGP neighbor the default route is expected but found in FIB -> {}".format( tc_name, result)
+ write_test_footer(tc_name)
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tools/etc/rsyslog.d/45-frr.conf b/tools/etc/rsyslog.d/45-frr.conf
index 469e95ed73..9da15b9f92 100644
--- a/tools/etc/rsyslog.d/45-frr.conf
+++ b/tools/etc/rsyslog.d/45-frr.conf
@@ -5,6 +5,7 @@
$outchannel frr_log,/var/log/frr/frr.log
if $programname == 'babeld' or
$programname == 'bgpd' or
+ $programname == 'bfdd' or
$programname == 'eigrpd' or
$programname == 'frr' or
$programname == 'isisd' or
@@ -24,6 +25,7 @@ if $programname == 'babeld' or
if $programname == 'babeld' or
$programname == 'bgpd' or
+ $programname == 'bfdd' or
$programname == 'eigrpd' or
$programname == 'frr' or
$programname == 'isisd' or
diff --git a/tools/frrcommon.sh.in b/tools/frrcommon.sh.in
index 3344ff4954..6eb3223faa 100755
--- a/tools/frrcommon.sh.in
+++ b/tools/frrcommon.sh.in
@@ -208,7 +208,7 @@ daemon_stop() {
[ -r "$pidfile" ] || fail="pid file not found"
$all && [ -n "$fail" ] && return 0
- [ -z "$fail" ] && pid="$(cat \"$pidfile\")"
+ [ -z "$fail" ] && pid="$(cat "$pidfile")"
[ -z "$fail" -a -z "$pid" ] && fail="pid file is empty"
[ -n "$fail" ] || kill -0 "$pid" 2>/dev/null || fail="pid $pid not running"
@@ -242,7 +242,7 @@ daemon_status() {
pidfile="$V_PATH/$daemon${inst:+-$inst}.pid"
[ -r "$pidfile" ] || return 3
- pid="$(cat \"$pidfile\")"
+ pid="$(cat "$pidfile")"
[ -z "$pid" ] && return 1
kill -0 "$pid" 2>/dev/null || return 1
return 0
diff --git a/zebra/debug_nl.c b/zebra/debug_nl.c
index f8b866cd25..a16d442521 100644
--- a/zebra/debug_nl.c
+++ b/zebra/debug_nl.c
@@ -543,6 +543,8 @@ const char *rtm_rta2str(int type)
return "MFC_STATS";
case RTA_NH_ID:
return "NH_ID";
+ case RTA_EXPIRES:
+ return "EXPIRES";
default:
return "UNKNOWN";
}
@@ -1070,9 +1072,11 @@ next_rta:
static void nlroute_dump(struct rtmsg *rtm, size_t msglen)
{
+ struct rta_mfc_stats *mfc_stats;
struct rtattr *rta;
size_t plen;
uint32_t u32v;
+ uint64_t u64v;
/* Get the first attribute and go from there. */
rta = RTM_RTA(rtm);
@@ -1095,6 +1099,11 @@ next_rta:
zlog_debug(" %u", u32v);
break;
+ case RTA_EXPIRES:
+ u64v = *(uint64_t *)RTA_DATA(rta);
+ zlog_debug(" %" PRIu64, u64v);
+ break;
+
case RTA_GATEWAY:
case RTA_DST:
case RTA_SRC:
@@ -1113,6 +1122,14 @@ next_rta:
}
break;
+ case RTA_MFC_STATS:
+ mfc_stats = (struct rta_mfc_stats *)RTA_DATA(rta);
+ zlog_debug(" pkts=%ju bytes=%ju wrong_if=%ju",
+ (uintmax_t)mfc_stats->mfcs_packets,
+ (uintmax_t)mfc_stats->mfcs_bytes,
+ (uintmax_t)mfc_stats->mfcs_wrong_if);
+ break;
+
default:
/* NOTHING: unhandled. */
break;
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index ce06f1683d..1d9b59cf73 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -1044,7 +1044,6 @@ static int netlink_route_change_read_multicast(struct nlmsghdr *h,
struct rtmsg *rtm;
struct rtattr *tb[RTA_MAX + 1];
struct mcast_route_data *m;
- struct mcast_route_data mr;
int iif = 0;
int count;
int oif[256];
@@ -1053,12 +1052,8 @@ static int netlink_route_change_read_multicast(struct nlmsghdr *h,
vrf_id_t vrf;
int table;
- if (mroute)
- m = mroute;
- else {
- memset(&mr, 0, sizeof(mr));
- m = &mr;
- }
+ assert(mroute);
+ m = mroute;
rtm = NLMSG_DATA(h);
@@ -1165,9 +1160,19 @@ int netlink_route_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
return 0;
}
- if (!(rtm->rtm_family == AF_INET ||
- rtm->rtm_family == AF_INET6 ||
- rtm->rtm_family == RTNL_FAMILY_IPMR )) {
+ switch (rtm->rtm_family) {
+ case AF_INET:
+ case AF_INET6:
+ break;
+
+ case RTNL_FAMILY_IPMR:
+ case RTNL_FAMILY_IP6MR:
+ /* notifications on IPMR are irrelevant to zebra, we only care
+ * about responses to RTM_GETROUTE requests we sent.
+ */
+ return 0;
+
+ default:
flog_warn(
EC_ZEBRA_UNKNOWN_FAMILY,
"Invalid address family: %u received from kernel route change: %s",
@@ -1193,10 +1198,14 @@ int netlink_route_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
return -1;
}
+ /* these are "magic" kernel-managed *unicast* routes used for
+ * outputting locally generated multicast traffic (which uses unicast
+	 * handling on Linux because ~reasons~).
+ */
if (rtm->rtm_type == RTN_MULTICAST)
- netlink_route_change_read_multicast(h, ns_id, startup);
- else
- netlink_route_change_read_unicast(h, ns_id, startup);
+ return 0;
+
+ netlink_route_change_read_unicast(h, ns_id, startup);
return 0;
}
@@ -2324,7 +2333,7 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in)
struct mcast_route_data *mr = (struct mcast_route_data *)in;
struct {
struct nlmsghdr n;
- struct ndmsg ndm;
+ struct rtmsg rtm;
char buf[256];
} req;
@@ -2334,17 +2343,17 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in)
zns = zvrf->zns;
memset(&req, 0, sizeof(req));
- req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
req.n.nlmsg_flags = NLM_F_REQUEST;
req.n.nlmsg_pid = zns->netlink_cmd.snl.nl_pid;
req.n.nlmsg_type = RTM_GETROUTE;
- nl_attr_put32(&req.n, sizeof(req), RTA_IIF, mroute->ifindex);
- nl_attr_put32(&req.n, sizeof(req), RTA_OIF, mroute->ifindex);
-
if (mroute->family == AF_INET) {
- req.ndm.ndm_family = RTNL_FAMILY_IPMR;
+ req.rtm.rtm_family = RTNL_FAMILY_IPMR;
+ req.rtm.rtm_dst_len = IPV4_MAX_BITLEN;
+ req.rtm.rtm_src_len = IPV4_MAX_BITLEN;
+
nl_attr_put(&req.n, sizeof(req), RTA_SRC,
&mroute->src.ipaddr_v4,
sizeof(mroute->src.ipaddr_v4));
@@ -2352,7 +2361,10 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in)
&mroute->grp.ipaddr_v4,
sizeof(mroute->grp.ipaddr_v4));
} else {
- req.ndm.ndm_family = RTNL_FAMILY_IP6MR;
+ req.rtm.rtm_family = RTNL_FAMILY_IP6MR;
+ req.rtm.rtm_dst_len = IPV6_MAX_BITLEN;
+ req.rtm.rtm_src_len = IPV6_MAX_BITLEN;
+
nl_attr_put(&req.n, sizeof(req), RTA_SRC,
&mroute->src.ipaddr_v6,
sizeof(mroute->src.ipaddr_v6));
@@ -2375,8 +2387,13 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in)
* and the kernel goes screw you and the delicious cookies you
* are trying to give me. So now we have this little hack.
*/
- actual_table = (zvrf->table_id == RT_TABLE_MAIN) ? RT_TABLE_DEFAULT :
- zvrf->table_id;
+ if (mroute->family == AF_INET)
+ actual_table = (zvrf->table_id == RT_TABLE_MAIN)
+ ? RT_TABLE_DEFAULT
+ : zvrf->table_id;
+ else
+ actual_table = zvrf->table_id;
+
nl_attr_put32(&req.n, sizeof(req), RTA_TABLE, actual_table);
suc = netlink_talk(netlink_route_change_read_multicast, &req.n,
diff --git a/zebra/zebra_evpn_neigh.c b/zebra/zebra_evpn_neigh.c
index d463411dea..6d90a603f7 100644
--- a/zebra/zebra_evpn_neigh.c
+++ b/zebra/zebra_evpn_neigh.c
@@ -702,11 +702,6 @@ struct zebra_neigh *zebra_evpn_proc_sync_neigh_update(
n->flags |= ZEBRA_NEIGH_LOCAL_INACTIVE;
}
- if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_PROXY_ADVERT))
- SET_FLAG(n->flags, ZEBRA_NEIGH_ES_PEER_PROXY);
- else
- SET_FLAG(n->flags, ZEBRA_NEIGH_ES_PEER_ACTIVE);
-
if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_PROXY_ADVERT)) {
SET_FLAG(n->flags, ZEBRA_NEIGH_ES_PEER_PROXY);
/* if the neigh was peer-active previously we