summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bgpd/bgp_evpn.c1
-rw-r--r--bgpd/bgp_mplsvpn.c3
-rw-r--r--bgpd/bgp_snmp_bgp4v2.c717
-rw-r--r--bgpd/bgp_snmp_bgp4v2.h5
-rw-r--r--bgpd/bgp_vty.c132
-rw-r--r--bgpd/bgp_vty.h16
-rw-r--r--doc/accords/cli-colors44
-rw-r--r--doc/developer/fpm.rst16
-rw-r--r--doc/user/zebra.rst2
-rw-r--r--lib/link_state.c2
-rw-r--r--ospf6d/ospf6_nssa.c48
-rw-r--r--tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py22
-rw-r--r--tests/topotests/bgp_ecmp_topo3/test_ibgp_ecmp_topo3.py18
-rw-r--r--tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py20
-rw-r--r--tests/topotests/bgp_vrf_leaking_5549_routes/ce1/bgpd.conf23
-rw-r--r--tests/topotests/bgp_vrf_leaking_5549_routes/ce1/zebra.conf13
-rw-r--r--tests/topotests/bgp_vrf_leaking_5549_routes/pe1/bgpd.conf41
-rw-r--r--tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/default_ipv4_vpn.json32
-rw-r--r--tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/vrf10_ipv4_unicast.json32
-rw-r--r--tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/vrf20_ipv4_unicast.json34
-rw-r--r--tests/topotests/bgp_vrf_leaking_5549_routes/pe1/zebra.conf10
-rwxr-xr-xtests/topotests/bgp_vrf_leaking_5549_routes/test_bgp_vrf_leaking.py121
-rw-r--r--tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py267
-rw-r--r--tests/topotests/lib/common_config.py68
-rwxr-xr-xtests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py35
-rwxr-xr-xtests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py7
-rw-r--r--tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py6
-rw-r--r--tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py6
-rwxr-xr-xtests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_ospf_topo2.py9
-rwxr-xr-xtests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_static_routes_topo1.py5
-rwxr-xr-xtests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py5
-rwxr-xr-xtests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py5
-rwxr-xr-xtests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py5
-rwxr-xr-xtests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py5
-rwxr-xr-xtests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo4.py5
-rwxr-xr-xtests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py5
-rwxr-xr-xtests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp1.py8
-rwxr-xr-xtests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp2.py6
-rw-r--r--tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py64
-rw-r--r--tests/topotests/ospf_basic_functionality/ospf_rte_calc.json2
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_authentication.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_chaos.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_lan.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_nssa.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py6
-rw-r--r--tests/topotests/ospf_basic_functionality/test_ospf_single_area.py6
-rw-r--r--tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py6
-rw-r--r--tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py6
-rw-r--r--tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py6
-rw-r--r--tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py6
-rw-r--r--tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py6
-rw-r--r--tests/topotests/ospfv3_basic_functionality/test_ospfv3_authentication.py59
-rw-r--r--tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py6
-rw-r--r--tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py6
-rw-r--r--tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa.py6
-rw-r--r--tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py124
-rw-r--r--tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py6
-rw-r--r--tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py6
-rw-r--r--tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py6
-rw-r--r--zebra/debug.c8
-rw-r--r--zebra/debug.h4
-rw-r--r--zebra/dplane_fpm_nl.c137
-rw-r--r--zebra/rt_netlink.c30
-rw-r--r--zebra/rt_netlink.h4
-rw-r--r--zebra/zebra_dplane.c188
-rw-r--r--zebra/zebra_dplane.h18
-rw-r--r--zebra/zebra_rib.c117
-rw-r--r--zebra/zebra_router.c11
-rw-r--r--zebra/zebra_router.h8
-rw-r--r--zebra/zebra_vty.c370
76 files changed, 2298 insertions, 783 deletions
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index eab70bfdaf..7a8a91b00b 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -6912,6 +6912,7 @@ static void bgp_evpn_remote_ip_hash_del(struct bgpevpn *vpn,
if (ip->macip_path_list->count == 0) {
bgp_evpn_remote_ip_process_nexthops(vpn, &ip->addr, false);
hash_release(vpn->remote_ip_hash, ip);
+ list_delete(&ip->macip_path_list);
XFREE(MTYPE_EVPN_REMOTE_IP, ip);
}
}
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 12b68f2607..0270695c2f 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -1505,7 +1505,8 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */
} else {
if (!CHECK_FLAG(from_bgp->af_flags[afi][SAFI_UNICAST],
BGP_CONFIG_VRF_TO_VRF_EXPORT)) {
- if (afi == AFI_IP) {
+ if (afi == AFI_IP &&
+ !BGP_ATTR_NEXTHOP_AFI_IP6(path_vrf->attr)) {
/*
* For ipv4, copy to multiprotocol
* nexthop field
diff --git a/bgpd/bgp_snmp_bgp4v2.c b/bgpd/bgp_snmp_bgp4v2.c
index 1be28e0b2d..2d70aa94d3 100644
--- a/bgpd/bgp_snmp_bgp4v2.c
+++ b/bgpd/bgp_snmp_bgp4v2.c
@@ -380,6 +380,397 @@ static uint8_t *bgpv2PeerErrorsTable(struct variable *v, oid name[],
return NULL;
}
+static uint8_t *bgpv2PeerEventTimesTable(struct variable *v, oid name[],
+					 size_t *length, int exact,
+					 size_t *var_len,
+					 WriteMethod **write_method)
+{
+	struct ipaddr ip = {};
+	struct peer *peer;
+
+	if (smux_header_table(v, name, length, exact, var_len, write_method) ==
+	    MATCH_FAILED)
+		return NULL;
+
+	peer = bgpv2PeerTable_lookup(v, name, length, exact, &ip);
+	if (!peer)
+		return NULL;
+
+	switch (v->magic) {
+	case BGP4V2_PEER_FSM_ESTABLISHED_TIME:
+		if (peer->uptime)
+			return SNMP_INTEGER(monotime(NULL) - peer->uptime);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_PEER_PEER_IN_UPDATES_ELAPSED_TIME:
+		if (peer->update_time)
+			return SNMP_INTEGER(monotime(NULL) - peer->update_time);
+		else
+			return SNMP_INTEGER(0);
+	default:
+		break;
+	}
+
+	return NULL;
+}
+
+static struct bgp_path_info *
+bgp4v2PathAttrLookup(struct variable *v, oid name[], size_t *length,
+		     struct bgp *bgp, struct prefix *addr, int exact)
+{
+	oid *offset;
+	int offsetlen;
+	struct bgp_path_info *path;
+	struct bgp_dest *dest;
+	union sockunion su;
+	unsigned int len;
+	struct ipaddr paddr = {};
+	size_t namelen = v ? v->namelen : BGP4V2_NLRI_ENTRY_OFFSET;
+	sa_family_t family = name[namelen - 1] == 4 ? AF_INET : AF_INET6;
+	afi_t afi = AFI_IP;
+	size_t afi_len = IN_ADDR_SIZE;
+
+	if (family == AF_INET6) {
+		afi = AFI_IP6;
+		afi_len = IN6_ADDR_SIZE;
+	}
+
+#define BGP_NLRI_ENTRY_OFFSET (afi_len + 1 + afi_len)
+
+	sockunion_init(&su);
+
+	if (exact) {
+		if (*length - namelen != BGP_NLRI_ENTRY_OFFSET)
+			return NULL;
+
+		/* Set OID offset for prefix */
+		offset = name + namelen;
+		if (family == AF_INET)
+			oid2in_addr(offset, afi_len, &addr->u.prefix4);
+		else
+			oid2in6_addr(offset, &addr->u.prefix6);
+		offset += afi_len;
+
+		/* Prefix length */
+		addr->prefixlen = *offset;
+		addr->family = family;
+		offset++;
+
+		/* Peer address */
+		su.sin.sin_family = family;
+		if (family == AF_INET)
+			oid2in_addr(offset, afi_len, &su.sin.sin_addr);
+		else
+			oid2in6_addr(offset, &su.sin6.sin6_addr);
+
+		/* Lookup node */
+		dest = bgp_node_lookup(bgp->rib[afi][SAFI_UNICAST], addr);
+		if (dest) {
+			for (path = bgp_dest_get_bgp_path_info(dest); path;
+			     path = path->next)
+				if (sockunion_same(&path->peer->su, &su))
+					break;
+			/* fix: drop bgp_node_lookup()'s lock on ALL paths */
+			bgp_dest_unlock_node(dest);
+			return path; /* NULL when no peer matched */
+		}
+
+		return NULL;
+	}
+
+	offset = name + namelen;
+	offsetlen = *length - namelen;
+	len = offsetlen;
+
+	if (offsetlen == 0) {
+		dest = bgp_table_top(bgp->rib[afi][SAFI_UNICAST]);
+	} else {
+		if (len > afi_len)
+			len = afi_len;
+
+		if (family == AF_INET)
+			oid2in_addr(offset, len, &addr->u.prefix4);
+		else
+			oid2in6_addr(offset, &addr->u.prefix6);
+
+		offset += afi_len;
+		offsetlen -= afi_len;
+
+		if (offsetlen > 0)
+			addr->prefixlen = *offset;
+		else
+			addr->prefixlen = len * 8;
+
+		dest = bgp_node_get(bgp->rib[afi][SAFI_UNICAST], addr);
+
+		offset++;
+		offsetlen--;
+	}
+
+	if (offsetlen > 0) {
+		len = offsetlen;
+		if (len > afi_len)
+			len = afi_len;
+
+		if (family == AF_INET)
+			oid2in_addr(offset, len, &paddr.ip._v4_addr);
+		else
+			oid2in6_addr(offset, &paddr.ip._v6_addr);
+	} else {
+		if (family == AF_INET)
+			memset(&paddr.ip._v4_addr, 0, afi_len);
+		else
+			memset(&paddr.ip._v6_addr, 0, afi_len);
+	}
+
+	if (!dest)
+		return NULL;
+
+	while ((dest = bgp_route_next(dest))) {
+		struct bgp_path_info *min = NULL;
+
+		for (path = bgp_dest_get_bgp_path_info(dest); path;
+		     path = path->next) {
+			sa_family_t path_family =
+				sockunion_family(&path->peer->su);
+
+			if (path_family == AF_INET &&
+			    IPV4_ADDR_CMP(&paddr.ip._v4_addr,
+					  &path->peer->su.sin.sin_addr) < 0) {
+				if (!min ||
+				    (min &&
+				     IPV4_ADDR_CMP(
+					     &path->peer->su.sin.sin_addr,
+					     &min->peer->su.sin.sin_addr) < 0))
+					min = path;
+			} else if (path_family == AF_INET6 &&
+				   IPV6_ADDR_CMP(
+					   &paddr.ip._v6_addr,
+					   &path->peer->su.sin6.sin6_addr) <
+					   0) {
+				if (!min ||
+				    (min &&
+				     IPV6_ADDR_CMP(
+					     &path->peer->su.sin6.sin6_addr,
+					     &min->peer->su.sin6.sin6_addr) <
+					     0))
+					min = path;
+			}
+		}
+
+		if (min) {
+			const struct prefix *rn_p = bgp_dest_get_prefix(dest);
+
+			*length = namelen + BGP_NLRI_ENTRY_OFFSET;
+
+			offset = name + namelen;
+
+			if (family == AF_INET)
+				oid_copy_in_addr(offset, &rn_p->u.prefix4);
+			else
+				oid_copy_in6_addr(offset, &rn_p->u.prefix6);
+
+			offset += afi_len;
+			*offset = rn_p->prefixlen;
+			offset++;
+
+			if (family == AF_INET) {
+				oid_copy_in_addr(offset,
+						 &min->peer->su.sin.sin_addr);
+				addr->u.prefix4 = rn_p->u.prefix4;
+			} else {
+				oid_copy_in6_addr(
+					offset, &min->peer->su.sin6.sin6_addr);
+				addr->u.prefix6 = rn_p->u.prefix6;
+			}
+
+			addr->prefixlen = rn_p->prefixlen;
+
+			bgp_dest_unlock_node(dest);
+
+			return min;
+		}
+
+		if (family == AF_INET)
+			memset(&paddr.ip._v4_addr, 0, afi_len);
+		else
+			memset(&paddr.ip._v6_addr, 0, afi_len);
+	}
+
+	return NULL;
+}
+
+static uint8_t *bgp4v2PathAttrTable(struct variable *v, oid name[],
+				    size_t *length, int exact, size_t *var_len,
+				    WriteMethod **write_method)
+{
+	struct bgp *bgp;
+	struct bgp_path_info *path;
+	struct peer_af *paf = NULL;
+	struct prefix addr = {};
+	const struct prefix *prefix = NULL;
+	enum bgp_af_index index;
+
+	bgp = bgp_get_default();
+	if (!bgp)
+		return NULL;
+
+	if (smux_header_table(v, name, length, exact, var_len, write_method) ==
+	    MATCH_FAILED)
+		return NULL;
+
+	path = bgp4v2PathAttrLookup(v, name, length, bgp, &addr, exact);
+	if (!path)
+		return NULL;
+
+	prefix = bgp_dest_get_prefix(path->net);
+
+	AF_FOREACH (index) {
+		paf = path->peer->peer_af_array[index];
+		if (paf)
+			break;
+	}
+
+	switch (v->magic) {
+	case BGP4V2_NLRI_INDEX:
+		return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_AFI:
+		if (paf)
+			return SNMP_INTEGER(paf->afi);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_SAFI:
+		if (paf)
+			return SNMP_INTEGER(paf->safi);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_PREFIX_TYPE:
+		if (paf)
+			return SNMP_INTEGER(paf->afi);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_PREFIX:
+		if (prefix->family == AF_INET6)
+			return SNMP_IP6ADDRESS(prefix->u.prefix6);
+		else
+			return SNMP_IPADDRESS(prefix->u.prefix4);
+	case BGP4V2_NLRI_PREFIX_LEN:
+		return SNMP_INTEGER(prefix->prefixlen);
+	case BGP4V2_NLRI_BEST:
+		if (CHECK_FLAG(path->flags, BGP_PATH_SELECTED))
+			return SNMP_INTEGER(1);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_CALC_LOCAL_PREF:
+		if (CHECK_FLAG(path->attr->flag,
+			       ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)))
+			return SNMP_INTEGER(path->attr->local_pref);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_ORIGIN:
+		switch (path->attr->origin) {
+		case BGP_ORIGIN_IGP:
+			return SNMP_INTEGER(1);
+		case BGP_ORIGIN_EGP:
+			return SNMP_INTEGER(2);
+		case BGP_ORIGIN_INCOMPLETE:
+			return SNMP_INTEGER(3);
+		default:
+			return SNMP_INTEGER(0);
+		}
+	case BGP4V2_NLRI_NEXT_HOP_ADDR_TYPE:
+		switch (path->attr->mp_nexthop_len) {
+		case BGP_ATTR_NHLEN_IPV4:
+			return SNMP_INTEGER(1);
+		case BGP_ATTR_NHLEN_IPV6_GLOBAL:
+			return SNMP_INTEGER(2);
+		case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
+			if (path->attr->mp_nexthop_prefer_global)
+				return SNMP_INTEGER(2);
+			else
+				return SNMP_INTEGER(4);
+		default:
+			return SNMP_INTEGER(1);
+		}
+	case BGP4V2_NLRI_NEXT_HOP_ADDR:
+		switch (path->attr->mp_nexthop_len) {
+		case BGP_ATTR_NHLEN_IPV4:
+			return SNMP_IPADDRESS(path->attr->mp_nexthop_global_in);
+		case BGP_ATTR_NHLEN_IPV6_GLOBAL:
+			return SNMP_IP6ADDRESS(path->attr->mp_nexthop_global);
+		case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
+			if (path->attr->mp_nexthop_prefer_global)
+				return SNMP_IP6ADDRESS(
+					path->attr->mp_nexthop_global);
+			else
+				return SNMP_IP6ADDRESS(
+					path->attr->mp_nexthop_local);
+		default:
+			return SNMP_IPADDRESS(path->attr->nexthop);
+		}
+		break;
+	case BGP4V2_NLRI_LINK_LOCAL_NEXT_HOP_ADDR_TYPE:
+	case BGP4V2_NLRI_LINK_LOCAL_NEXT_HOP_ADDR:
+		/* Not properly defined in specification what should be here. */
+		break;
+	case BGP4V2_NLRI_LOCAL_PREF_PRESENT:
+		if (CHECK_FLAG(path->attr->flag,
+			       ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)))
+			return SNMP_INTEGER(1);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_LOCAL_PREF:
+		if (CHECK_FLAG(path->attr->flag,
+			       ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)))
+			return SNMP_INTEGER(path->attr->local_pref);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_MED_PRESENT:
+		if (CHECK_FLAG(path->attr->flag,
+			       ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)))
+			return SNMP_INTEGER(1);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_MED:
+		if (CHECK_FLAG(path->attr->flag,
+			       ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)))
+			return SNMP_INTEGER(path->attr->med); /* fix: was local_pref */
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_ATOMIC_AGGREGATE:
+		if (CHECK_FLAG(path->attr->flag,
+			       ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE)))
+			return SNMP_INTEGER(1);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_AGGREGATOR_PRESENT:
+		if (CHECK_FLAG(path->attr->flag,
+			       ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)))
+			return SNMP_INTEGER(1);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_AGGREGATOR_AS:
+		if (CHECK_FLAG(path->attr->flag,
+			       ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)))
+			return SNMP_INTEGER(path->attr->aggregator_as);
+		else
+			return SNMP_INTEGER(0);
+	case BGP4V2_NLRI_AGGREGATOR_ADDR:
+		if (CHECK_FLAG(path->attr->flag,
+			       ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)))
+			return SNMP_IPADDRESS(path->attr->aggregator_addr);
+		else
+			return SNMP_IPADDRESS(bgp_empty_addr);
+	case BGP4V2_NLRI_AS_PATH_CALC_LENGTH:
+		return SNMP_INTEGER(path->attr->aspath->segments ? path->attr->aspath->segments->length : 0); /* fix: NULL-safe for empty AS path */
+	case BGP4V2_NLRI_AS_PATH:
+		return aspath_snmp_pathseg(path->attr->aspath, var_len);
+	case BGP4V2_NLRI_PATH_ATTR_UNKNOWN:
+		*var_len = 0;
+		return NULL;
+	}
+	return NULL;
+}
+
static struct variable bgpv2_variables[] = {
/* bgp4V2PeerEntry */
{BGP4V2_PEER_INSTANCE,
@@ -671,6 +1062,332 @@ static struct variable bgpv2_variables[] = {
bgpv2PeerErrorsTable,
6,
{1, 3, 1, BGP4V2_PEER_LAST_ERROR_SENT_DATA, 2, 16}},
+ /* bgp4V2PeerEventTimesEntry */
+ {BGP4V2_PEER_FSM_ESTABLISHED_TIME,
+ ASN_UNSIGNED,
+ RONLY,
+ bgpv2PeerEventTimesTable,
+ 6,
+ {1, 4, 1, BGP4V2_PEER_FSM_ESTABLISHED_TIME, 1, 4}},
+ {BGP4V2_PEER_FSM_ESTABLISHED_TIME,
+ ASN_UNSIGNED,
+ RONLY,
+ bgpv2PeerEventTimesTable,
+ 6,
+ {1, 4, 1, BGP4V2_PEER_FSM_ESTABLISHED_TIME, 2, 16}},
+ {BGP4V2_PEER_PEER_IN_UPDATES_ELAPSED_TIME,
+ ASN_UNSIGNED,
+ RONLY,
+ bgpv2PeerEventTimesTable,
+ 6,
+ {1, 4, 1, BGP4V2_PEER_PEER_IN_UPDATES_ELAPSED_TIME, 1, 4}},
+ {BGP4V2_PEER_PEER_IN_UPDATES_ELAPSED_TIME,
+ ASN_UNSIGNED,
+ RONLY,
+ bgpv2PeerEventTimesTable,
+ 6,
+ {1, 4, 1, BGP4V2_PEER_PEER_IN_UPDATES_ELAPSED_TIME, 2, 16}},
+ /* bgp4V2NlriTable */
+ {BGP4V2_NLRI_INDEX,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_INDEX, 1, 4}},
+ {BGP4V2_NLRI_INDEX,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_INDEX, 2, 16}},
+ {BGP4V2_NLRI_AFI,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AFI, 1, 4}},
+ {BGP4V2_NLRI_AFI,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AFI, 2, 16}},
+ {BGP4V2_NLRI_SAFI,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_SAFI, 1, 4}},
+ {BGP4V2_NLRI_SAFI,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_SAFI, 2, 16}},
+ {BGP4V2_NLRI_PREFIX_TYPE,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_PREFIX_TYPE, 1, 4}},
+ {BGP4V2_NLRI_PREFIX_TYPE,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_PREFIX_TYPE, 2, 16}},
+ {BGP4V2_NLRI_PREFIX,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_PREFIX, 1, 4}},
+ {BGP4V2_NLRI_PREFIX,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_PREFIX, 2, 16}},
+ {BGP4V2_NLRI_PREFIX_LEN,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_PREFIX_LEN, 1, 4}},
+ {BGP4V2_NLRI_PREFIX_LEN,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_PREFIX_LEN, 2, 16}},
+ {BGP4V2_NLRI_BEST,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_BEST, 1, 4}},
+ {BGP4V2_NLRI_BEST,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_BEST, 2, 16}},
+ {BGP4V2_NLRI_CALC_LOCAL_PREF,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_CALC_LOCAL_PREF, 1, 4}},
+ {BGP4V2_NLRI_CALC_LOCAL_PREF,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_CALC_LOCAL_PREF, 2, 16}},
+ {BGP4V2_NLRI_ORIGIN,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_ORIGIN, 1, 4}},
+ {BGP4V2_NLRI_ORIGIN,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_ORIGIN, 2, 16}},
+ {BGP4V2_NLRI_NEXT_HOP_ADDR_TYPE,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_NEXT_HOP_ADDR_TYPE, 1, 4}},
+ {BGP4V2_NLRI_NEXT_HOP_ADDR_TYPE,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_NEXT_HOP_ADDR_TYPE, 2, 16}},
+ {BGP4V2_NLRI_NEXT_HOP_ADDR,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_NEXT_HOP_ADDR, 1, 4}},
+ {BGP4V2_NLRI_NEXT_HOP_ADDR,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_NEXT_HOP_ADDR, 2, 16}},
+ {BGP4V2_NLRI_LINK_LOCAL_NEXT_HOP_ADDR_TYPE,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_LINK_LOCAL_NEXT_HOP_ADDR_TYPE, 1, 4}},
+ {BGP4V2_NLRI_LINK_LOCAL_NEXT_HOP_ADDR_TYPE,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_LINK_LOCAL_NEXT_HOP_ADDR_TYPE, 2, 16}},
+ {BGP4V2_NLRI_LINK_LOCAL_NEXT_HOP_ADDR,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_LINK_LOCAL_NEXT_HOP_ADDR, 1, 4}},
+ {BGP4V2_NLRI_LINK_LOCAL_NEXT_HOP_ADDR,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_LINK_LOCAL_NEXT_HOP_ADDR, 2, 16}},
+ {BGP4V2_NLRI_LOCAL_PREF_PRESENT,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_LOCAL_PREF_PRESENT, 1, 4}},
+ {BGP4V2_NLRI_LOCAL_PREF_PRESENT,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_LOCAL_PREF_PRESENT, 2, 16}},
+ {BGP4V2_NLRI_LOCAL_PREF,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_LOCAL_PREF, 1, 4}},
+ {BGP4V2_NLRI_LOCAL_PREF,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_LOCAL_PREF, 2, 16}},
+ {BGP4V2_NLRI_MED_PRESENT,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_MED_PRESENT, 1, 4}},
+ {BGP4V2_NLRI_MED_PRESENT,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_MED_PRESENT, 2, 16}},
+ {BGP4V2_NLRI_MED,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_MED, 1, 4}},
+ {BGP4V2_NLRI_MED,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_MED, 2, 16}},
+ {BGP4V2_NLRI_ATOMIC_AGGREGATE,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_ATOMIC_AGGREGATE, 1, 4}},
+ {BGP4V2_NLRI_ATOMIC_AGGREGATE,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_ATOMIC_AGGREGATE, 2, 16}},
+ {BGP4V2_NLRI_AGGREGATOR_PRESENT,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AGGREGATOR_PRESENT, 1, 4}},
+ {BGP4V2_NLRI_AGGREGATOR_PRESENT,
+ ASN_INTEGER,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AGGREGATOR_PRESENT, 2, 16}},
+ {BGP4V2_NLRI_AGGREGATOR_AS,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AGGREGATOR_AS, 1, 4}},
+ {BGP4V2_NLRI_AGGREGATOR_AS,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AGGREGATOR_AS, 2, 16}},
+ {BGP4V2_NLRI_AGGREGATOR_ADDR,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AGGREGATOR_ADDR, 1, 4}},
+ {BGP4V2_NLRI_AGGREGATOR_ADDR,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AGGREGATOR_ADDR, 2, 16}},
+ {BGP4V2_NLRI_AS_PATH_CALC_LENGTH,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AS_PATH_CALC_LENGTH, 1, 4}},
+ {BGP4V2_NLRI_AS_PATH_CALC_LENGTH,
+ ASN_UNSIGNED,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AS_PATH_CALC_LENGTH, 2, 16}},
+ {BGP4V2_NLRI_AS_PATH_STRING,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AS_PATH_STRING, 1, 4}},
+ {BGP4V2_NLRI_AS_PATH_STRING,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AS_PATH_STRING, 2, 16}},
+ {BGP4V2_NLRI_AS_PATH,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AS_PATH, 1, 4}},
+ {BGP4V2_NLRI_AS_PATH,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_AS_PATH, 2, 16}},
+ {BGP4V2_NLRI_PATH_ATTR_UNKNOWN,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_PATH_ATTR_UNKNOWN, 1, 4}},
+ {BGP4V2_NLRI_PATH_ATTR_UNKNOWN,
+ ASN_OCTET_STR,
+ RONLY,
+ bgp4v2PathAttrTable,
+ 6,
+ {1, 9, 1, BGP4V2_NLRI_PATH_ATTR_UNKNOWN, 2, 16}},
};
int bgp_snmp_bgp4v2_init(struct thread_master *tm)
diff --git a/bgpd/bgp_snmp_bgp4v2.h b/bgpd/bgp_snmp_bgp4v2.h
index 6980db9f8d..8b474c3887 100644
--- a/bgpd/bgp_snmp_bgp4v2.h
+++ b/bgpd/bgp_snmp_bgp4v2.h
@@ -61,7 +61,10 @@
#define BGP4V2_PEER_FSM_ESTABLISHED_TIME 1
#define BGP4V2_PEER_PEER_IN_UPDATES_ELAPSED_TIME 2
-/* bgp4V2NlriEntry */
+/* bgp4V2NlriEntry
+ * offset 1.3.6.1.3.5.1.1.9.1.x.(1|2).(4|16) = 13
+ */
+#define BGP4V2_NLRI_ENTRY_OFFSET 13
#define BGP4V2_NLRI_INDEX 1
#define BGP4V2_NLRI_AFI 2
#define BGP4V2_NLRI_SAFI 3
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index e51977f0f0..219362cd04 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -158,9 +158,7 @@ static struct peer_group *listen_range_exists(struct bgp *bgp,
struct prefix *range, int exact);
static void bgp_show_global_graceful_restart_mode_vty(struct vty *vty,
- struct bgp *bgp,
- bool use_json,
- json_object *json);
+ struct bgp *bgp);
static int bgp_show_neighbor_graceful_restart_afi_all(struct vty *vty,
enum show_type type,
@@ -11856,7 +11854,6 @@ static void bgp_show_peer_afi_orf_cap(struct vty *vty, struct peer *p,
static void bgp_show_neighnor_graceful_restart_flags(struct vty *vty,
struct peer *p,
- bool use_json,
json_object *json)
{
bool rbit = false;
@@ -11869,7 +11866,7 @@ static void bgp_show_neighnor_graceful_restart_flags(struct vty *vty,
nbit = CHECK_FLAG(p->cap, PEER_CAP_GRACEFUL_RESTART_N_BIT_RCV);
}
- if (use_json) {
+ if (json) {
json_object_boolean_add(json, "rBit", rbit);
json_object_boolean_add(json, "nBit", nbit);
} else {
@@ -11880,12 +11877,11 @@ static void bgp_show_neighnor_graceful_restart_flags(struct vty *vty,
static void bgp_show_neighbor_graceful_restart_remote_mode(struct vty *vty,
struct peer *peer,
- bool use_json,
json_object *json)
{
const char *mode = "NotApplicable";
- if (!use_json)
+ if (!json)
vty_out(vty, "\n Remote GR Mode: ");
if (CHECK_FLAG(peer->cap, PEER_CAP_RESTART_ADV)
@@ -11908,20 +11904,19 @@ static void bgp_show_neighbor_graceful_restart_remote_mode(struct vty *vty,
}
}
- if (use_json) {
+ if (json)
json_object_string_add(json, "remoteGrMode", mode);
- } else
+ else
vty_out(vty, mode, "\n");
}
static void bgp_show_neighbor_graceful_restart_local_mode(struct vty *vty,
struct peer *p,
- bool use_json,
json_object *json)
{
const char *mode = "Invalid";
- if (!use_json)
+ if (!json)
vty_out(vty, " Local GR Mode: ");
if (bgp_peer_gr_mode_get(p) == PEER_HELPER)
@@ -11941,15 +11936,14 @@ static void bgp_show_neighbor_graceful_restart_local_mode(struct vty *vty,
mode = "Invalid*";
}
- if (use_json) {
+ if (json)
json_object_string_add(json, "localGrMode", mode);
- } else {
+ else
vty_out(vty, mode, "\n");
- }
}
static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
- struct vty *vty, struct peer *peer, bool use_json, json_object *json)
+ struct vty *vty, struct peer *peer, json_object *json)
{
afi_t afi;
safi_t safi;
@@ -11966,7 +11960,7 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
!CHECK_FLAG(peer->cap, PEER_CAP_RESTART_RCV))
continue;
- if (use_json) {
+ if (json) {
json_afi_safi = json_object_new_object();
json_endofrib_status = json_object_new_object();
json_timer = json_object_new_object();
@@ -11977,7 +11971,7 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
else
eor_flag = false;
- if (!use_json) {
+ if (!json) {
vty_out(vty, " %s:\n",
get_afi_safi_str(afi, safi, false));
@@ -11988,25 +11982,25 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
CHECK_FLAG(peer->af_cap[afi][safi],
PEER_CAP_RESTART_AF_PRESERVE_RCV)) {
- if (use_json) {
+ if (json) {
json_object_boolean_true_add(json_afi_safi,
"fBit");
} else
vty_out(vty, "True\n");
} else {
- if (use_json)
+ if (json)
json_object_boolean_false_add(json_afi_safi,
"fBit");
else
vty_out(vty, "False\n");
}
- if (!use_json)
+ if (!json)
vty_out(vty, " End-of-RIB sent: ");
if (CHECK_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_EOR_SEND)) {
- if (use_json) {
+ if (json) {
json_object_boolean_true_add(
json_endofrib_status, "endOfRibSend");
@@ -12019,7 +12013,7 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
PRINT_EOR(eor_flag);
}
} else {
- if (use_json) {
+ if (json) {
json_object_boolean_false_add(
json_endofrib_status, "endOfRibSend");
json_object_boolean_false_add(
@@ -12033,25 +12027,25 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
}
}
- if (!use_json)
+ if (!json)
vty_out(vty, " End-of-RIB received: ");
if (CHECK_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_EOR_RECEIVED)) {
- if (use_json)
+ if (json)
json_object_boolean_true_add(
json_endofrib_status, "endOfRibRecv");
else
vty_out(vty, "Yes\n");
} else {
- if (use_json)
+ if (json)
json_object_boolean_false_add(
json_endofrib_status, "endOfRibRecv");
else
vty_out(vty, "No\n");
}
- if (use_json) {
+ if (json) {
json_object_int_add(json_timer, "stalePathTimer",
peer->bgp->stalepath_time);
@@ -12111,7 +12105,7 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
peer->bgp->gr_info[afi][safi]
.t_select_deferral));
}
- if (use_json) {
+ if (json) {
json_object_object_add(json_afi_safi, "endOfRibStatus",
json_endofrib_status);
json_object_object_add(json_afi_safi, "timers",
@@ -12125,10 +12119,9 @@ static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
static void bgp_show_neighbor_graceful_restart_time(struct vty *vty,
struct peer *p,
- bool use_json,
json_object *json)
{
- if (use_json) {
+ if (json) {
json_object *json_timer = NULL;
json_timer = json_object_new_object();
@@ -12164,7 +12157,7 @@ static void bgp_show_neighbor_graceful_restart_time(struct vty *vty,
}
static void bgp_show_peer_gr_status(struct vty *vty, struct peer *p,
- bool use_json, json_object *json)
+ json_object *json)
{
char dn_flag[2] = {0};
/* '*' + v6 address of neighbor */
@@ -12174,7 +12167,7 @@ static void bgp_show_peer_gr_status(struct vty *vty, struct peer *p,
dn_flag[0] = '*';
if (p->conf_if) {
- if (use_json)
+ if (json)
json_object_string_addf(json, "neighborAddr", "%pSU",
&p->su);
else
@@ -12184,7 +12177,7 @@ static void bgp_show_peer_gr_status(struct vty *vty, struct peer *p,
snprintf(neighborAddr, sizeof(neighborAddr), "%s%s", dn_flag,
p->host);
- if (use_json)
+ if (json)
json_object_string_add(json, "neighborAddr",
neighborAddr);
else
@@ -12192,7 +12185,7 @@ static void bgp_show_peer_gr_status(struct vty *vty, struct peer *p,
}
/* more gr info in new format */
- BGP_SHOW_PEER_GR_CAPABILITY(vty, p, use_json, json);
+ BGP_SHOW_PEER_GR_CAPABILITY(vty, p, json);
}
static void bgp_show_peer_afi(struct vty *vty, struct peer *p, afi_t afi,
@@ -14177,7 +14170,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
thread_timer_remain_second(p->t_gr_stale) *
1000);
/* more gr info in new format */
- BGP_SHOW_PEER_GR_CAPABILITY(vty, p, use_json, json_grace);
+ BGP_SHOW_PEER_GR_CAPABILITY(vty, p, json_grace);
json_object_object_add(json_neigh, "gracefulRestartInfo",
json_grace);
} else {
@@ -14223,7 +14216,7 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
thread_timer_remain_second(p->t_gr_stale));
/* more gr info in new format */
- BGP_SHOW_PEER_GR_CAPABILITY(vty, p, use_json, NULL);
+ BGP_SHOW_PEER_GR_CAPABILITY(vty, p, NULL);
}
if (use_json) {
@@ -14696,20 +14689,14 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp,
enum show_type type,
union sockunion *su,
const char *conf_if, afi_t afi,
- bool use_json)
+ json_object *json)
{
struct listnode *node, *nnode;
struct peer *peer;
int find = 0;
safi_t safi = SAFI_UNICAST;
- json_object *json = NULL;
json_object *json_neighbor = NULL;
- if (use_json) {
- json = json_object_new_object();
- json_neighbor = json_object_new_object();
- }
-
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE))
@@ -14718,15 +14705,15 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp,
if ((peer->afc[afi][safi]) == 0)
continue;
+ if (json)
+ json_neighbor = json_object_new_object();
+
if (type == show_all) {
- bgp_show_peer_gr_status(vty, peer, use_json,
- json_neighbor);
+ bgp_show_peer_gr_status(vty, peer, json_neighbor);
- if (use_json) {
+ if (json)
json_object_object_add(json, peer->host,
json_neighbor);
- json_neighbor = NULL;
- }
} else if (type == show_peer) {
if (conf_if) {
@@ -14736,41 +14723,33 @@ static int bgp_show_neighbor_graceful_restart(struct vty *vty, struct bgp *bgp,
&& !strcmp(peer->hostname, conf_if))) {
find = 1;
bgp_show_peer_gr_status(vty, peer,
- use_json,
json_neighbor);
}
} else {
if (sockunion_same(&peer->su, su)) {
find = 1;
bgp_show_peer_gr_status(vty, peer,
- use_json,
json_neighbor);
}
}
- if (use_json && find)
+ if (json && find)
json_object_object_add(json, peer->host,
json_neighbor);
}
- if (find) {
- json_neighbor = NULL;
+ if (find)
break;
- }
}
if (type == show_peer && !find) {
- if (use_json)
+ if (json)
json_object_boolean_true_add(json, "bgpNoSuchNeighbor");
else
vty_out(vty, "%% No such neighbor\n");
}
- if (use_json) {
- if (json_neighbor)
- json_object_free(json_neighbor);
- vty_json(vty, json);
- } else {
+
+ if (!json)
vty_out(vty, "\n");
- }
return CMD_SUCCESS;
}
@@ -14883,7 +14862,7 @@ static int bgp_show_neighbor(struct vty *vty, struct bgp *bgp,
static void bgp_show_neighbor_graceful_restart_vty(struct vty *vty,
enum show_type type,
const char *ip_str,
- afi_t afi, bool use_json)
+ afi_t afi, json_object *json)
{
int ret;
@@ -14895,21 +14874,20 @@ static void bgp_show_neighbor_graceful_restart_vty(struct vty *vty,
if (!bgp)
return;
- if (!use_json)
- bgp_show_global_graceful_restart_mode_vty(vty, bgp, use_json,
- NULL);
+ if (!json)
+ bgp_show_global_graceful_restart_mode_vty(vty, bgp);
if (ip_str) {
ret = str2sockunion(ip_str, &su);
if (ret < 0)
- bgp_show_neighbor_graceful_restart(
- vty, bgp, type, NULL, ip_str, afi, use_json);
+ bgp_show_neighbor_graceful_restart(vty, bgp, type, NULL,
+ ip_str, afi, json);
else
bgp_show_neighbor_graceful_restart(vty, bgp, type, &su,
- NULL, afi, use_json);
+ NULL, afi, json);
} else
bgp_show_neighbor_graceful_restart(vty, bgp, type, NULL, NULL,
- afi, use_json);
+ afi, json);
}
static void bgp_show_all_instances_neighbors_vty(struct vty *vty,
@@ -15229,9 +15207,7 @@ DEFUN (show_ip_bgp_lcommunity_info,
/* Graceful Restart */
static void bgp_show_global_graceful_restart_mode_vty(struct vty *vty,
- struct bgp *bgp,
- bool use_json,
- json_object *json)
+ struct bgp *bgp)
{
@@ -15266,22 +15242,32 @@ static int bgp_show_neighbor_graceful_restart_afi_all(struct vty *vty,
const char *ip_str,
afi_t afi, bool use_json)
{
+ json_object *json = NULL;
+
+ if (use_json)
+ json = json_object_new_object();
+
if ((afi == AFI_MAX) && (ip_str == NULL)) {
afi = AFI_IP;
while ((afi != AFI_L2VPN) && (afi < AFI_MAX)) {
bgp_show_neighbor_graceful_restart_vty(
- vty, type, ip_str, afi, use_json);
+ vty, type, ip_str, afi, json);
afi++;
}
} else if (afi != AFI_MAX) {
bgp_show_neighbor_graceful_restart_vty(vty, type, ip_str, afi,
- use_json);
+ json);
} else {
+ if (json)
+ json_object_free(json);
return CMD_ERR_INCOMPLETE;
}
+ if (json)
+ vty_json(vty, json);
+
return CMD_SUCCESS;
}
/* Graceful Restart */
diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h
index 9526b50fb9..019789dff8 100644
--- a/bgpd/bgp_vty.h
+++ b/bgpd/bgp_vty.h
@@ -56,18 +56,14 @@ struct bgp;
"V AS LocalAS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc\n"
#define BGP_SHOW_SUMMARY_HEADER_FAILED "EstdCnt DropCnt ResetTime Reason\n"
-#define BGP_SHOW_PEER_GR_CAPABILITY(vty, p, use_json, json) \
+#define BGP_SHOW_PEER_GR_CAPABILITY(vty, p, json) \
do { \
- bgp_show_neighbor_graceful_restart_local_mode(vty, p, \
- use_json, json); \
- bgp_show_neighbor_graceful_restart_remote_mode( \
- vty, p, use_json, json); \
- bgp_show_neighnor_graceful_restart_flags(vty, p, use_json, \
- json); \
- bgp_show_neighbor_graceful_restart_time(vty, p, use_json, \
- json); \
+ bgp_show_neighbor_graceful_restart_local_mode(vty, p, json); \
+ bgp_show_neighbor_graceful_restart_remote_mode(vty, p, json); \
+ bgp_show_neighnor_graceful_restart_flags(vty, p, json); \
+ bgp_show_neighbor_graceful_restart_time(vty, p, json); \
bgp_show_neighbor_graceful_restart_capability_per_afi_safi( \
- vty, p, use_json, json); \
+ vty, p, json); \
} while (0)
#define VTY_BGP_GR_DEFINE_LOOP_VARIABLE \
diff --git a/doc/accords/cli-colors b/doc/accords/cli-colors
new file mode 100644
index 0000000000..04bdfc7fae
--- /dev/null
+++ b/doc/accords/cli-colors
@@ -0,0 +1,44 @@
+Adding colors to FRR CLI output
+===============================
+
+
+There were multiple approaches/attempts to get colored output for the CLI into
+FRR, most recently End of 2022 in PR #12497. After some discussion, some items
+crystallized out:
+
+First, generally speaking, colors (or other rich output formatting) must be
+used sparingly. In particular, e.g. "every IP address" is not something to
+color. The output formatting needs to have an actual purpose to improve UX,
+not turn it into a Christmas tree.
+
+In the long run, the CLI will hopefully become a YANG frontend. In that case,
+the CLI frontend component is a great place to apply all kinds of output/UI/UX
+features. However, this is a long way off.
+
+That said, an implementation in the current vtysh+daemon ecosystem is not out
+of the question, especially if the use of colors/formatting is limited to
+important places (which is desirable anyway - see general statement above.)
+We don't want to litter formatting all over every single vty_out call.
+
+A color option on a per-command/DEFUN level (i.e. the way `[json]` is done) was
+rejected. The decision to color output must take information from vtysh's
+environment into account, notably the TERM environment variable, the NO_COLOR
+environment variable, and whether stdout is a terminal or not. An explicit
+`--color` switch (or `terminal color` vtysh command, or other similar things)
+is needed too. To be clear, the switch must not be on individual commands, it
+needs to be on the vtysh session level.
+
+Lastly, the output pager needs to work with this.
+
+
+Suggested implementation
+------------------------
+
+(not part of the consensus / accord, only to record discussion)
+
+As far as discussion went, the most promising approach to actually implement
+this is to put some type of unconditional formatting tag into daemon's vty_out
+calls. This would be some escape-like sequence - an actual ANSI color code
+itself is not particularly readable or pretty, though that would work as well.
+vtysh would then, while passing through the output from the daemons, replace or
+remove these tags according to terminal/user settings.
diff --git a/doc/developer/fpm.rst b/doc/developer/fpm.rst
index 9849869133..56d33671d2 100644
--- a/doc/developer/fpm.rst
+++ b/doc/developer/fpm.rst
@@ -101,3 +101,19 @@ Data
^^^^
The netlink or protobuf message payload.
+
+
+Route Status Notification from ASIC
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The dplane_fpm_nl has the ability to read route netlink messages
+from the underlying fpm implementation that can tell zebra
+whether or not the route has been Offloaded/Failed or Trapped.
+The end developer must send the data up the same socket that has
+been created to listen for FPM messages from Zebra. The data sent
+must have a Frame Header with Version set to 1, Message Type set to 1
+and an appropriate message Length. The message data must contain
+a RTM_NEWROUTE netlink message that sends the prefix and nexthops
+associated with the route. Finally rtm_flags must contain
+RTM_F_OFFLOAD, RTM_F_TRAP and/or RTM_F_OFFLOAD_FAILED to signify
+what has happened to the route in the ASIC.
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 230a4f43b0..3608f828e8 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -1342,7 +1342,7 @@ zebra Terminal Mode Commands
total number of route nodes in the table. Which will be higher than
the actual number of routes that are held.
-.. clicmd:: show nexthop-group rib [ID] [vrf NAME] [singleton [ip|ip6]] [type]
+.. clicmd:: show nexthop-group rib [ID] [vrf NAME] [singleton [ip|ip6]] [type] [json]
Display nexthop groups created by zebra. The [vrf NAME] option
is only meaningful if you have started zebra with the --vrfwnetns
diff --git a/lib/link_state.c b/lib/link_state.c
index ab5a8515b5..1b79c79216 100644
--- a/lib/link_state.c
+++ b/lib/link_state.c
@@ -346,7 +346,7 @@ struct ls_prefix *ls_prefix_new(struct ls_node_id adv, struct prefix p)
if (adv.origin == UNKNOWN)
return NULL;
- new = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_attributes));
+ new = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_prefix));
new->adv = adv;
new->pref = p;
diff --git a/ospf6d/ospf6_nssa.c b/ospf6d/ospf6_nssa.c
index f35c9df4a5..2921046837 100644
--- a/ospf6d/ospf6_nssa.c
+++ b/ospf6d/ospf6_nssa.c
@@ -1090,7 +1090,25 @@ static void ospf6_check_and_originate_type7_lsa(struct ospf6_area *area)
ospf6_nssa_lsa_originate(aggr->route, area, true);
}
}
+}
+
+static void ospf6_ase_lsa_refresh(struct ospf6 *o)
+{
+ struct ospf6_lsa *old;
+ for (struct ospf6_route *route = ospf6_route_head(o->external_table);
+ route; route = ospf6_route_next(route)) {
+ old = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_AS_EXTERNAL),
+ route->path.origin.id, o->router_id,
+ o->lsdb);
+ if (old) {
+ THREAD_OFF(old->refresh);
+ thread_add_event(master, ospf6_lsa_refresh, old, 0,
+ &old->refresh);
+ } else {
+ ospf6_as_external_lsa_originate(route, o);
+ }
+ }
}
void ospf6_area_nssa_update(struct ospf6_area *area)
@@ -1134,6 +1152,36 @@ void ospf6_area_nssa_update(struct ospf6_area *area)
if (IS_OSPF6_DEBUG_NSSA)
zlog_debug("Normal area %s", area->name);
ospf6_nssa_flush_area(area);
+
+ /* Check if router is ABR */
+ if (ospf6_check_and_set_router_abr(area->ospf6)) {
+ if (IS_OSPF6_DEBUG_NSSA)
+ zlog_debug("Router is ABR area %s", area->name);
+ ospf6_schedule_abr_task(area->ospf6);
+ ospf6_ase_lsa_refresh(area->ospf6);
+ } else {
+ uint16_t type;
+ struct ospf6_lsa *lsa = NULL;
+
+ /*
+ * Refresh all type-5 LSAs so they get installed
+ * in the converted areas
+ */
+ if (IS_OSPF6_DEBUG_NSSA)
+ zlog_debug("Refresh type-5 LSAs, area %s",
+ area->name);
+
+ type = htons(OSPF6_LSTYPE_AS_EXTERNAL);
+ for (ALL_LSDB_TYPED_ADVRTR(area->ospf6->lsdb, type,
+ area->ospf6->router_id,
+ lsa)) {
+ if (IS_OSPF6_DEBUG_NSSA)
+ ospf6_lsa_header_print(lsa);
+ THREAD_OFF(lsa->refresh);
+ thread_add_event(master, ospf6_lsa_refresh, lsa,
+ 0, &lsa->refresh);
+ }
+ }
}
}
diff --git a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py
index ec66c8caef..7ded0ce8b4 100644
--- a/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py
+++ b/tests/topotests/bgp_as_allow_in/test_bgp_as_allow_in.py
@@ -76,15 +76,6 @@ from lib.topojson import build_topo_from_json, build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/bgp_as_allow_in.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
@@ -92,13 +83,6 @@ NETWORK = {"ipv4": "2.2.2.2/32", "ipv6": "22:22::2/128"}
NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
-def build_topo(tgen):
- """Build function"""
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
@@ -118,7 +102,11 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(build_topo, mod.__name__)
+ json_file = "{}/bgp_as_allow_in.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
diff --git a/tests/topotests/bgp_ecmp_topo3/test_ibgp_ecmp_topo3.py b/tests/topotests/bgp_ecmp_topo3/test_ibgp_ecmp_topo3.py
index 9b6480c0d3..b7f6b7aca7 100644
--- a/tests/topotests/bgp_ecmp_topo3/test_ibgp_ecmp_topo3.py
+++ b/tests/topotests/bgp_ecmp_topo3/test_ibgp_ecmp_topo3.py
@@ -39,8 +39,7 @@ sys.path.append(os.path.join(CWD, "../../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib.topogen import get_topogen
-from lib import topojson
+from lib.topogen import Topogen, get_topogen
from lib.common_config import (
write_test_header,
@@ -51,11 +50,12 @@ from lib.common_config import (
reset_config_on_routers,
shutdown_bringup_interface,
apply_raw_config,
+ start_topology,
)
from lib.topolog import logger
+from lib.topojson import build_config_from_json
from lib.bgp import create_router_bgp, verify_bgp_convergence
-
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
@@ -78,9 +78,19 @@ def setup_module(mod):
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
- tgen = topojson.setup_module_from_json(mod.__file__)
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/ibgp_ecmp_topo3.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
topo = tgen.json_topo
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py
index 391c272dbc..f193317b1e 100644
--- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py
+++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py
@@ -75,15 +75,6 @@ from lib.topojson import build_topo_from_json, build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/bgp_vrf_dynamic_route_leak_topo2.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
NETWORK1_1 = {"ipv4": "11.11.11.1/32", "ipv6": "11:11::1/128"}
NETWORK3_3 = {"ipv4": "50.50.50.5/32", "ipv6": "50:50::5/128"}
@@ -92,13 +83,6 @@ NETWORK3_4 = {"ipv4": "50.50.50.50/32", "ipv6": "50:50::50/128"}
PREFERRED_NEXT_HOP = "global"
-def build_topo(tgen):
- """Build function"""
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
@@ -114,7 +98,9 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(build_topo, mod.__name__)
+ json_file = "{}/bgp_vrf_dynamic_route_leak_topo2.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
diff --git a/tests/topotests/bgp_vrf_leaking_5549_routes/ce1/bgpd.conf b/tests/topotests/bgp_vrf_leaking_5549_routes/ce1/bgpd.conf
new file mode 100644
index 0000000000..66493f0fea
--- /dev/null
+++ b/tests/topotests/bgp_vrf_leaking_5549_routes/ce1/bgpd.conf
@@ -0,0 +1,23 @@
+frr defaults traditional
+!
+hostname ce1
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+router bgp 65002
+ bgp router-id 192.0.2.2
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ neighbor eth0 interface
+ neighbor eth0 remote-as external
+ neighbor eth0 timers connect 1
+ !
+ address-family ipv4 unicast
+ neighbor eth0 activate
+ redistribute connected
+ exit-address-family
+ !
+!
diff --git a/tests/topotests/bgp_vrf_leaking_5549_routes/ce1/zebra.conf b/tests/topotests/bgp_vrf_leaking_5549_routes/ce1/zebra.conf
new file mode 100644
index 0000000000..a163295844
--- /dev/null
+++ b/tests/topotests/bgp_vrf_leaking_5549_routes/ce1/zebra.conf
@@ -0,0 +1,13 @@
+log file zebra.log
+!
+hostname ce1
+!
+interface lo
+ ip address 172.16.0.1/32
+!
+interface eth0
+ ipv6 nd ra-interval 1
+ no ipv6 nd suppress-ra
+!
+line vty
+!
diff --git a/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/bgpd.conf b/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/bgpd.conf
new file mode 100644
index 0000000000..c5c99270e7
--- /dev/null
+++ b/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/bgpd.conf
@@ -0,0 +1,41 @@
+frr defaults traditional
+!
+hostname pe1
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+router bgp 65001
+ bgp router-id 192.0.2.1
+ !
+!
+router bgp 65001 vrf vrf10
+ bgp router-id 192.0.2.1
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ neighbor eth0 interface
+ neighbor eth0 remote-as external
+ neighbor eth0 timers connect 1
+ !
+ address-family ipv4 unicast
+ neighbor eth0 activate
+ rd vpn export 65001:10
+ rt vpn both 0:10
+ import vpn
+ export vpn
+ exit-address-family
+ !
+!
+router bgp 65001 vrf vrf20
+ bgp router-id 192.0.2.1
+ !
+ address-family ipv4 unicast
+ rd vpn export 65001:20
+ rt vpn both 0:10
+ import vpn
+ export vpn
+ exit-address-family
+ !
+!
diff --git a/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/default_ipv4_vpn.json b/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/default_ipv4_vpn.json
new file mode 100644
index 0000000000..9516016fc2
--- /dev/null
+++ b/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/default_ipv4_vpn.json
@@ -0,0 +1,32 @@
+{
+ "vrfName": "default",
+ "routerId": "192.0.2.1",
+ "localAS": 65001,
+ "routes": {
+ "routeDistinguishers": {
+ "65001:10": {
+ "172.16.0.1/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "172.16.0.1",
+ "prefixLen": 32,
+ "network": "172.16.0.1\/32",
+ "path": "65002",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf10",
+ "nexthops": [
+ {
+ "hostname": "pe1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/vrf10_ipv4_unicast.json b/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/vrf10_ipv4_unicast.json
new file mode 100644
index 0000000000..768bffbe9d
--- /dev/null
+++ b/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/vrf10_ipv4_unicast.json
@@ -0,0 +1,32 @@
+{
+ "vrfName": "vrf10",
+ "routerId": "192.0.2.1",
+ "localAS": 65001,
+ "routes": {
+ "172.16.0.1/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "172.16.0.1",
+ "prefixLen": 32,
+ "network": "172.16.0.1\/32",
+ "path": "65002",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "hostname": "ce1",
+ "afi": "ipv6",
+ "scope": "global",
+ "used": true
+ },
+ {
+ "hostname": "ce1",
+ "afi": "ipv6",
+ "scope": "link-local"
+ }
+ ]
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/vrf20_ipv4_unicast.json b/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/vrf20_ipv4_unicast.json
new file mode 100644
index 0000000000..1e93715270
--- /dev/null
+++ b/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/results/vrf20_ipv4_unicast.json
@@ -0,0 +1,34 @@
+{
+ "vrfName": "vrf20",
+ "routerId": "192.0.2.1",
+ "localAS": 65001,
+ "routes": {
+ "172.16.0.1/32": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "172.16.0.1",
+ "prefixLen": 32,
+ "network": "172.16.0.1\/32",
+ "path": "65002",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf10",
+ "nexthops": [
+ {
+ "hostname": "pe1",
+ "afi": "ipv6",
+ "scope": "global",
+ "used": true
+ },
+ {
+ "hostname": "pe1",
+ "afi": "ipv6",
+ "scope": "link-local"
+ }
+ ]
+ }
+ ]
+ }
+}
diff --git a/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/zebra.conf b/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/zebra.conf
new file mode 100644
index 0000000000..d40041ab3c
--- /dev/null
+++ b/tests/topotests/bgp_vrf_leaking_5549_routes/pe1/zebra.conf
@@ -0,0 +1,10 @@
+log file zebra.log
+!
+hostname pe1
+!
+interface eth0 vrf vrf10
+ ipv6 nd ra-interval 1
+ no ipv6 nd suppress-ra
+!
+line vty
+!
diff --git a/tests/topotests/bgp_vrf_leaking_5549_routes/test_bgp_vrf_leaking.py b/tests/topotests/bgp_vrf_leaking_5549_routes/test_bgp_vrf_leaking.py
new file mode 100755
index 0000000000..dd27ad3ed1
--- /dev/null
+++ b/tests/topotests/bgp_vrf_leaking_5549_routes/test_bgp_vrf_leaking.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2022, LINE Corporation
+# Authored by Ryoga Saito <ryoga.saito@linecorp.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+import os
+import re
+import sys
+import json
+import functools
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.common_config import required_linux_kernel_version
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ tgen.add_router("pe1")
+ tgen.add_router("ce1")
+
+ tgen.add_link(tgen.gears["pe1"], tgen.gears["ce1"], "eth0", "eth0")
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ for rname, router in tgen.routers().items():
+ router.load_config(TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, '{}/zebra.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_BGP,
+ os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
+
+ tgen.gears["pe1"].run("ip link add vrf10 type vrf table 10")
+ tgen.gears["pe1"].run("ip link set vrf10 up")
+ tgen.gears["pe1"].run("ip link add vrf20 type vrf table 20")
+ tgen.gears["pe1"].run("ip link set vrf20 up")
+ tgen.gears["pe1"].run("ip link set eth0 master vrf10")
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def open_json_file(path):
+ try:
+ with open(path, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(path)
+
+
+def check_vrf10_rib(output):
+ expected = open_json_file("%s/pe1/results/vrf10_ipv4_unicast.json" % CWD)
+ actual = json.loads(output)
+ return topotest.json_cmp(actual, expected)
+
+
+def check_default_vpn_rib(output):
+ expected = open_json_file("%s/pe1/results/default_ipv4_vpn.json" % CWD)
+ actual = json.loads(output)
+ return topotest.json_cmp(actual, expected)
+
+
+def check_vrf20_rib(output):
+ expected = open_json_file("%s/pe1/results/vrf20_ipv4_unicast.json" % CWD)
+ actual = json.loads(output)
+ return topotest.json_cmp(actual, expected)
+
+
+def check(name, command, checker):
+ tgen = get_topogen()
+ router = tgen.gears[name]
+
+ def _check():
+ try:
+ return checker(router.vtysh_cmd(command))
+ except:
+ return False
+
+ logger.info('[+] check {} "{}"'.format(name, command))
+ _, result = topotest.run_and_expect(_check, None, count=10, wait=0.5)
+ assert result is None, "Failed"
+
+
+def test_rib():
+ check("pe1", "show bgp vrf vrf10 ipv4 unicast json", check_vrf10_rib)
+ check("pe1", "show bgp ipv4 vpn json", check_default_vpn_rib)
+ check("pe1", "show bgp vrf vrf20 ipv4 unicast json", check_vrf20_rib)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
index a641fec584..b6a6037128 100644
--- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
+++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
@@ -71,7 +71,7 @@ from lib.common_config import (
configure_brctl,
create_interface_in_kernel,
kill_router_daemons,
- start_router_daemons
+ start_router_daemons,
)
from lib.topolog import logger
@@ -86,15 +86,6 @@ from lib.topojson import build_topo_from_json, build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/evpn_type5_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
NETWORK1_1 = {"ipv4": "10.1.1.1/32", "ipv6": "10::1/128"}
NETWORK1_2 = {"ipv4": "40.1.1.1/32", "ipv6": "40::1/128"}
@@ -135,10 +126,6 @@ BRCTL = {
}
-def build_topo(tgen):
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
@@ -154,7 +141,10 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(build_topo, mod.__name__)
+ json_file = "{}/evpn_type5_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ topo = tgen.json_topo
+
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
@@ -1775,34 +1765,41 @@ def test_evpn_address_family_with_graceful_restart_p0(request):
for addr_type in ADDR_TYPES:
input_dict_1 = {
"r3": {
- "static_routes": [{
- "network": NETWORK1_2[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "RED"
- }]
+ "static_routes": [
+ {
+ "network": NETWORK1_2[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "RED",
+ }
+ ]
+ },
+ "r4": {
+ "static_routes": [
+ {
+ "network": NETWORK1_3[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "BLUE",
+ },
+ {
+ "network": NETWORK1_4[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "GREEN",
+ },
+ ]
},
- "r4":{
- "static_routes": [{
- "network": NETWORK1_3[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "BLUE"
- },
- {
- "network": NETWORK1_4[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "GREEN"
- }]
- }
}
result = create_static_routes(tgen, input_dict_1)
- assert result is True, 'Testcase {} : Failed \n Error: {}'.format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
- step("Redistribute static in (IPv4 and IPv6) address-family "
- "on Edge-1 for all VRFs.")
+ step(
+ "Redistribute static in (IPv4 and IPv6) address-family "
+ "on Edge-1 for all VRFs."
+ )
- input_dict_2={}
+ input_dict_2 = {}
for dut in ["r3", "r4"]:
temp = {dut: {"bgp": []}}
input_dict_2.update(temp)
@@ -1821,108 +1818,116 @@ def test_evpn_address_family_with_graceful_restart_p0(request):
"vrf": vrf,
"address_family": {
"ipv4": {
- "unicast": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
- "unicast": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
- }
- }
- })
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
+ },
+ },
+ }
+ )
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
- step("Verify on router Edge-1 that EVPN routes corresponding to "
- "all VRFs are received from both routers DCG-1 and DCG-2")
+ step(
+ "Verify on router Edge-1 that EVPN routes corresponding to "
+ "all VRFs are received from both routers DCG-1 and DCG-2"
+ )
for addr_type in ADDR_TYPES:
input_routes = {
"r3": {
- "static_routes": [{
- "network": NETWORK1_2[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "RED"
- }]
+ "static_routes": [
+ {
+ "network": NETWORK1_2[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "RED",
+ }
+ ]
+ },
+ "r4": {
+ "static_routes": [
+ {
+ "network": NETWORK1_3[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "BLUE",
+ },
+ {
+ "network": NETWORK1_4[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "GREEN",
+ },
+ ]
},
- "r4":{
- "static_routes": [{
- "network": NETWORK1_3[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "BLUE"
- },
- {
- "network": NETWORK1_4[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "GREEN"
- }]
- }
}
result = verify_rib(tgen, addr_type, "e1", input_routes)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
- step("Configure DCG-2 as GR restarting node for EVPN session between"
+ step(
+ "Configure DCG-2 as GR restarting node for EVPN session between"
" DCG-2 and EDGE-1, following by a session reset using 'clear bgp *'"
- " command.")
+ " command."
+ )
input_dict_gr = {
"d2": {
- "bgp":
- [
+ "bgp": [
{
"local_as": "200",
"graceful-restart": {
"graceful-restart": True,
- }
+ },
}
]
}
}
result = create_router_bgp(tgen, topo, input_dict_gr)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step("Verify that DCG-2 changes it's role to GR-restarting router "
- "and EDGE-1 becomes the GR-helper.")
+ step(
+ "Verify that DCG-2 changes it's role to GR-restarting router "
+ "and EDGE-1 becomes the GR-helper."
+ )
step("Kill BGPd daemon on DCG-2.")
kill_router_daemons(tgen, "d2", ["bgpd"])
- step("Verify that EDGE-1 keep stale entries for EVPN RT-5 routes "
- "received from DCG-2 before the restart.")
+ step(
+ "Verify that EDGE-1 keep stale entries for EVPN RT-5 routes "
+ "received from DCG-2 before the restart."
+ )
for addr_type in ADDR_TYPES:
input_routes = {
- "r4":{
- "static_routes": [{
- "network": NETWORK1_3[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "BLUE"
- },
- {
- "network": NETWORK1_4[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "GREEN"
- }]
+ "r4": {
+ "static_routes": [
+ {
+ "network": NETWORK1_3[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "BLUE",
+ },
+ {
+ "network": NETWORK1_4[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "GREEN",
+ },
+ ]
}
}
result = verify_evpn_routes(tgen, topo, "e1", input_routes)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
- step("Verify that DCG-2 keeps BGP routes in Zebra until BGPd "
- "comes up or end of 'rib-stale-time'")
+ step(
+ "Verify that DCG-2 keeps BGP routes in Zebra until BGPd "
+ "comes up or end of 'rib-stale-time'"
+ )
step("Start BGPd daemon on DCG-2.")
start_router_daemons(tgen, "d2", ["bgpd"])
@@ -1930,44 +1935,52 @@ def test_evpn_address_family_with_graceful_restart_p0(request):
step("Verify that EDGE-1 removed all the stale entries.")
for addr_type in ADDR_TYPES:
input_routes = {
- "r4":{
- "static_routes": [{
- "network": NETWORK1_3[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "BLUE"
- },
- {
- "network": NETWORK1_4[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "GREEN"
- }]
+ "r4": {
+ "static_routes": [
+ {
+ "network": NETWORK1_3[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "BLUE",
+ },
+ {
+ "network": NETWORK1_4[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "GREEN",
+ },
+ ]
}
}
result = verify_evpn_routes(tgen, topo, "e1", input_routes)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
- step("Verify that DCG-2 refresh zebra with EVPN routes. "
- "(no significance of 'rib-stale-time'")
+ step(
+ "Verify that DCG-2 refresh zebra with EVPN routes. "
+ "(no significance of 'rib-stale-time'"
+ )
for addr_type in ADDR_TYPES:
input_routes = {
- "r4":{
- "static_routes": [{
- "network": NETWORK1_3[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "BLUE"
- },
- {
- "network": NETWORK1_4[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type],
- "vrf": "GREEN"
- }]
+ "r4": {
+ "static_routes": [
+ {
+ "network": NETWORK1_3[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "BLUE",
+ },
+ {
+ "network": NETWORK1_4[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "GREEN",
+ },
+ ]
}
}
result = verify_rib(tgen, addr_type, "d2", input_routes)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 737226c7fe..676a5704e5 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -961,7 +961,7 @@ def generate_support_bundle():
return True
-def start_topology(tgen, daemon=None):
+def start_topology(tgen):
"""
Starting topology, create tmp files which are loaded to routers
to start daemons and then start routers
@@ -1009,38 +1009,70 @@ def start_topology(tgen, daemon=None):
except IOError as err:
logger.error("I/O error({0}): {1}".format(err.errno, err.strerror))
- # Loading empty zebra.conf file to router, to start the zebra daemon
+ topo = tgen.json_topo
+ feature = set()
+
+ if "feature" in topo:
+ feature.update(topo["feature"])
+
+ if rname in topo["routers"]:
+ for key in topo["routers"][rname].keys():
+ feature.add(key)
+
+ for val in topo["routers"][rname]["links"].values():
+ if "pim" in val:
+ feature.add("pim")
+ break
+ for val in topo["routers"][rname]["links"].values():
+ if "pim6" in val:
+ feature.add("pim6")
+ break
+ for val in topo["routers"][rname]["links"].values():
+ if "ospf6" in val:
+ feature.add("ospf6")
+ break
+ if "switches" in topo and rname in topo["switches"]:
+ for val in topo["switches"][rname]["links"].values():
+ if "ospf" in val:
+ feature.add("ospf")
+ break
+ if "ospf6" in val:
+ feature.add("ospf6")
+ break
+
+ # Loading empty zebra.conf file to router, to start the zebra daemon
router.load_config(
TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(tgen.logdir, rname)
)
- # Loading empty bgpd.conf file to router, to start the bgp daemon
- router.load_config(
- TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(tgen.logdir, rname)
- )
+ # Loading empty bgpd.conf file to router, to start the bgp daemon
+ if "bgp" in feature:
+ router.load_config(
+ TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(tgen.logdir, rname)
+ )
- if daemon and "ospfd" in daemon:
- # Loading empty ospf.conf file to router, to start the bgp daemon
+ # Loading empty pimd.conf file to router, to start the pim daemon
+ if "pim" in feature:
router.load_config(
- TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(tgen.logdir, rname)
+ TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(tgen.logdir, rname)
)
- if daemon and "ospf6d" in daemon:
- # Loading empty ospf.conf file to router, to start the bgp daemon
+ # Loading empty pim6d.conf file to router, to start the pim6 daemon
+ if "pim6" in feature:
router.load_config(
- TopoRouter.RD_OSPF6, "{}/{}/ospf6d.conf".format(tgen.logdir, rname)
+ TopoRouter.RD_PIM6, "{}/{}/pim6d.conf".format(tgen.logdir, rname)
)
- if daemon and "pimd" in daemon:
- # Loading empty pimd.conf file to router, to start the pim deamon
+ if "ospf" in feature:
+ # Loading empty ospfd.conf file to router, to start the ospf daemon
router.load_config(
- TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(tgen.logdir, rname)
+ TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(tgen.logdir, rname)
)
- if daemon and "pim6d" in daemon:
- # Loading empty pimd.conf file to router, to start the pim6d deamon
+ if "ospf6" in feature:
+ # Loading empty ospf6d.conf file to router, to start the ospf6 daemon
router.load_config(
- TopoRouter.RD_PIM6, "{}/{}/pim6d.conf".format(tgen.logdir, rname)
+ TopoRouter.RD_OSPF6, "{}/{}/ospf6d.conf".format(tgen.logdir, rname)
)
# Starting routers
diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
index dd8818e92c..285f0dcebc 100755
--- a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
+++ b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
@@ -86,7 +86,6 @@ from lib.common_config import (
socat_send_mld_join,
socat_send_pim6_traffic,
kill_socat,
- topo_daemons,
)
from lib.pim import (
create_pim_config,
@@ -162,12 +161,9 @@ def setup_module(mod):
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, TOPO)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
@@ -284,8 +280,9 @@ def test_pim6_add_delete_static_RP_p0(request):
shutdown_bringup_interface(tgen, "r1", intf, ifaceaction=False)
step("Enable PIM6 between r1 and r2")
- step("Enable MLD on r1 interface and send MLD " "join {} to r1".\
- format(GROUP_RANGE_1))
+ step(
+ "Enable MLD on r1 interface and send MLD " "join {} to r1".format(GROUP_RANGE_1)
+ )
step("Configure r2 loopback interface as RP")
input_dict = {
"r2": {
@@ -488,8 +485,11 @@ def test_pim6_SPT_RPT_path_same_p1(request):
shutdown_bringup_interface(tgen, "r3", intf, ifaceaction=False)
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
- step("Configure RP on r2 (loopback interface) for the group range {}".\
- format(GROUP_ADDRESS_1))
+ step(
+ "Configure RP on r2 (loopback interface) for the group range {}".format(
+ GROUP_ADDRESS_1
+ )
+ )
input_dict = {
"r2": {
"pim6": {
@@ -507,7 +507,9 @@ def test_pim6_SPT_RPT_path_same_p1(request):
result = create_pim_config(tgen, TOPO, input_dict)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
- step("Enable MLD on r1 interface and send MLD join {} to R1".format(GROUP_ADDRESS_1))
+ step(
+ "Enable MLD on r1 interface and send MLD join {} to R1".format(GROUP_ADDRESS_1)
+ )
intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
result = socat_send_mld_join(
@@ -1088,8 +1090,11 @@ def test_pim6_send_join_on_higher_preffered_rp_p1(request):
step("Enable MLD on r1 interface")
step("Enable the PIM66 on all the interfaces of r1, r2, r3 and r4 routers")
- step("Configure RP on r2 (loopback interface) for the group range {}".\
- format(GROUP_RANGE_4))
+ step(
+ "Configure RP on r2 (loopback interface) for the group range {}".format(
+ GROUP_RANGE_4
+ )
+ )
input_dict = {
"r2": {
"pim6": {
@@ -1259,9 +1264,9 @@ def test_pim6_send_join_on_higher_preffered_rp_p1(request):
)
assert result is not True, (
"Testcase {} : Failed \n "
- "r1: rp-info is present for group {} \n Error: {}".format(tc_name,
- GROUP_RANGE_4,
- result)
+ "r1: rp-info is present for group {} \n Error: {}".format(
+ tc_name, GROUP_RANGE_4, result
+ )
)
step(
diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
index f366708ece..6113635783 100755
--- a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
+++ b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
@@ -75,7 +75,6 @@ from lib.common_config import (
socat_send_mld_join,
socat_send_pim6_traffic,
kill_socat,
- topo_daemons,
)
from lib.pim import (
create_pim_config,
@@ -165,12 +164,9 @@ def setup_module(mod):
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, TOPO)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
@@ -251,6 +247,7 @@ def verify_state_incremented(state_before, state_after):
#
#####################################################
+
def test_pim6_multiple_groups_same_RP_address_p2(request):
"""
Configure multiple groups (10 grps) with same RP address
diff --git a/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py b/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py
index bcf8e5b5f3..83ed8a6360 100644
--- a/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py
+++ b/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py
@@ -83,7 +83,6 @@ from lib.common_config import (
apply_raw_config,
run_frr_cmd,
required_linux_kernel_version,
- topo_daemons,
verify_rib,
)
@@ -168,12 +167,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
diff --git a/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py b/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py
index 8a4ef1d9c7..3da311a08f 100644
--- a/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py
+++ b/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py
@@ -67,7 +67,6 @@ from lib.common_config import (
reset_config_on_routers,
run_frr_cmd,
required_linux_kernel_version,
- topo_daemons,
verify_rib,
)
@@ -148,12 +147,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
diff --git a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_ospf_topo2.py b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_ospf_topo2.py
index d0422e2f72..7034696a8c 100755
--- a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_ospf_topo2.py
+++ b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_ospf_topo2.py
@@ -166,12 +166,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, tgen.json_topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start deamons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
@@ -919,7 +916,9 @@ def test_configuring_igmp_local_join_on_reciever_dr_non_dr_nodes_p1(request):
)
for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]):
- result = verify_igmp_groups(tgen, dut, intf, IGMP_JOIN_RANGE_3, expected=False)
+ result = verify_igmp_groups(
+ tgen, dut, intf, IGMP_JOIN_RANGE_3, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"IGMP groups are still present \n Error: {}".format(tc_name, result)
diff --git a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_static_routes_topo1.py b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_static_routes_topo1.py
index 4d17da5f61..7c6928f661 100755
--- a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_static_routes_topo1.py
+++ b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_static_routes_topo1.py
@@ -180,12 +180,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, tgen.json_topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start deamons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
diff --git a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py
index a5d2730373..181151649f 100755
--- a/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py
+++ b/tests/topotests/multicast_pim_dr_nondr_test/test_pim_dr_nondr_with_transit_router_topo3.py
@@ -185,12 +185,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, tgen.json_topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start deamons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
diff --git a/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py b/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py
index b46885c8a5..eb841d6504 100755
--- a/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py
+++ b/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py
@@ -175,12 +175,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, tgen.json_topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
diff --git a/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py b/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py
index 9228960776..2775464a54 100755
--- a/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py
+++ b/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py
@@ -172,12 +172,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
diff --git a/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py b/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py
index b71c2d65eb..721b30140b 100755
--- a/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py
+++ b/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo3.py
@@ -186,12 +186,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
diff --git a/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo4.py b/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo4.py
index e7551094ee..d209e42a81 100755
--- a/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo4.py
+++ b/tests/topotests/multicast_pim_sm_topo3/test_multicast_pim_sm_topo4.py
@@ -151,12 +151,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py
index 2a9fe32b08..e5182fbecf 100755
--- a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py
+++ b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py
@@ -223,12 +223,9 @@ def setup_module(mod):
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, TOPO)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp1.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp1.py
index 35303c3f2c..dbeaa9b8f9 100755
--- a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp1.py
+++ b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp1.py
@@ -128,7 +128,6 @@ from lib.common_config import (
kill_router_daemons,
start_router_daemons,
create_static_routes,
- topo_daemons,
)
from lib.pim import (
create_pim_config,
@@ -223,12 +222,9 @@ def setup_module(mod):
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, TOPO)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
@@ -1417,8 +1413,6 @@ def test_clear_pim_configuration_p1(request):
write_test_footer(tc_name)
-
-
if __name__ == "__main__":
ARGS = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(ARGS))
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp2.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp2.py
index 991d7d5fb6..ef638bc964 100755
--- a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp2.py
+++ b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp2.py
@@ -128,7 +128,6 @@ from lib.common_config import (
kill_router_daemons,
start_router_daemons,
create_static_routes,
- topo_daemons,
)
from lib.pim import (
create_pim_config,
@@ -223,12 +222,9 @@ def setup_module(mod):
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, TOPO)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
diff --git a/tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py b/tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py
index 8a505a86b5..a750c7fdba 100644
--- a/tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py
+++ b/tests/topotests/multicast_pim_uplink_topo1/test_multicast_pim_uplink_topo1.py
@@ -64,7 +64,6 @@ from lib.common_config import (
stop_router,
create_static_routes,
required_linux_kernel_version,
- topo_daemons,
)
from lib.bgp import create_router_bgp
from lib.pim import (
@@ -148,12 +147,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, tgen.json_topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start deamons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
@@ -349,8 +345,9 @@ def configure_static_routes_for_rp_reachability(tgen, topo):
}
result = create_static_routes(tgen, static_routes)
- assert result is True, "API {} : Failed Error: {}".\
- format(sys._getframe().f_code.co_name, result)
+ assert result is True, "API {} : Failed Error: {}".format(
+ sys._getframe().f_code.co_name, result
+ )
def verify_state_incremented(state_before, state_after):
@@ -1666,9 +1663,10 @@ def test_mroutes_updated_correctly_after_source_interface_shut_noshut_p1(request
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed "
- "Mroute IIF and OIF are same \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed " "Mroute IIF and OIF are same \n Error: {}".format(
+ tc_name, result
)
step("Shut and No shut source interface multiple time")
@@ -2339,9 +2337,10 @@ def test_mroutes_updated_after_sending_IGMP_prune_and_join_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed "
- " mroute are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed " " mroute are still present \n Error: {}".format(
+ tc_name, result
)
for data in input_dict_sg:
@@ -2354,9 +2353,10 @@ def test_mroutes_updated_after_sending_IGMP_prune_and_join_p1(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed "
- " mroute are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed " " mroute are still present \n Error: {}".format(
+ tc_name, result
)
step(
@@ -2795,10 +2795,11 @@ def test_mroutes_updated_after_changing_rp_config_p1(request):
intf_traffic = topo["routers"]["r4"]["links"]["r3-link1"]["interface"]
state_dict = {"r4": {intf_traffic: ["registerStopRx"]}}
state_before = verify_pim_interface_traffic(tgen, state_dict)
- assert isinstance(state_before, dict), \
- ("Testcase{} : Failed \n state_before is not dictionary \n "
- "Error: {}".\
- format(tc_name, result))
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+ tc_name, result
+ )
step("Change the RP to R3 loopback for same group range (225.1.1.1-5)")
@@ -2888,10 +2889,11 @@ def test_mroutes_updated_after_changing_rp_config_p1(request):
step("Verify pim interface traffic after changing RP")
state_after = verify_pim_interface_traffic(tgen, state_dict)
- assert isinstance(state_before, dict), \
- ("Testcase{} : Failed \n state_before is not dictionary \n "
- "Error: {}".\
- format(tc_name, result))
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+ tc_name, result
+ )
result = verify_state_incremented(state_before, state_after)
assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
@@ -3285,9 +3287,10 @@ def test_mroutes_after_restart_frr_services_p2(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {}: Failed "
- "mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {}: Failed " "mroutes are still present \n Error: {}".format(
+ tc_name, result
)
step("Stop FRR on R4 node")
@@ -3310,9 +3313,10 @@ def test_mroutes_after_restart_frr_services_p2(request):
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed "
- " Mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed " " Mroutes are still present \n Error: {}".format(
+ tc_name, result
)
step("Start FRR on R4 node")
diff --git a/tests/topotests/ospf_basic_functionality/ospf_rte_calc.json b/tests/topotests/ospf_basic_functionality/ospf_rte_calc.json
index 9062a09091..1fe076ea15 100644
--- a/tests/topotests/ospf_basic_functionality/ospf_rte_calc.json
+++ b/tests/topotests/ospf_basic_functionality/ospf_rte_calc.json
@@ -1,5 +1,5 @@
{
-
+ "feature": ["bgp"],
"ipv4base": "10.0.0.0",
"ipv4mask": 24,
"link_ip_start": {
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py
index f42bc47d46..5b163d28e4 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py
@@ -53,7 +53,6 @@ from lib.common_config import (
start_router_daemons,
create_route_maps,
shutdown_bringup_interface,
- topo_daemons,
create_prefix_lists,
create_route_maps,
create_interfaces_cfg,
@@ -142,12 +141,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py
index 2c9959c499..b890f9a8aa 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py
@@ -46,7 +46,6 @@ from lib.common_config import (
verify_rib,
create_static_routes,
step,
- topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -134,12 +133,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py
index 252481799c..8a94bf1178 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py
@@ -47,7 +47,6 @@ from lib.common_config import (
reset_config_on_routers,
step,
shutdown_bringup_interface,
- topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -102,12 +101,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
index a0ab828717..27c6954d2b 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py
@@ -43,7 +43,6 @@ from lib.common_config import (
write_test_footer,
reset_config_on_routers,
step,
- topo_daemons,
verify_rib,
stop_router,
start_router,
@@ -113,12 +112,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py
index 2b479db3c2..d6bcbd0fcc 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py
@@ -47,7 +47,6 @@ from lib.common_config import (
create_static_routes,
step,
shutdown_bringup_interface,
- topo_daemons,
)
from lib.topolog import logger
@@ -116,12 +115,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py
index 00feefc4d0..049c2b83f0 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py
@@ -45,7 +45,6 @@ from lib.common_config import (
verify_rib,
create_static_routes,
step,
- topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -117,12 +116,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py
index 497a8b900b..80ca0c8b04 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py
@@ -49,7 +49,6 @@ from lib.common_config import (
shutdown_bringup_interface,
stop_router,
start_router,
- topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -116,12 +115,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py
index 1917bd42f5..7391379bb9 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py
@@ -39,7 +39,6 @@ from lib.common_config import (
verify_rib,
create_static_routes,
step,
- topo_daemons,
)
from lib.topogen import Topogen, get_topogen
import os
@@ -114,12 +113,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py
index e131fba0c3..9e48f8e39b 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py
@@ -46,7 +46,6 @@ from lib.common_config import (
reset_config_on_routers,
step,
create_interfaces_cfg,
- topo_daemons,
retry,
run_frr_cmd,
)
@@ -105,12 +104,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py
index 22d768d9f6..c41985e0fe 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py
@@ -48,7 +48,6 @@ from lib.common_config import (
step,
create_route_maps,
verify_prefix_lists,
- topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -129,12 +128,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py
index 8bd81a3854..b9da460909 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py
@@ -48,7 +48,6 @@ from lib.common_config import (
create_static_routes,
step,
shutdown_bringup_interface,
- topo_daemons,
)
from lib.bgp import verify_bgp_convergence, create_router_bgp
from lib.topolog import logger
@@ -125,12 +124,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
index 21a7d83845..37f558b99c 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
@@ -48,7 +48,6 @@ from lib.common_config import (
reset_config_on_routers,
step,
create_interfaces_cfg,
- topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -110,12 +109,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py b/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py
index 07d4ca01a9..ade55321f9 100644
--- a/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py
+++ b/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py
@@ -17,7 +17,6 @@ from lib.common_config import (
write_test_footer,
reset_config_on_routers,
step,
- topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -48,12 +47,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py
index 58d37a368c..a7ab29d791 100644
--- a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py
+++ b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py
@@ -43,7 +43,6 @@ from lib.common_config import (
reset_config_on_routers,
step,
create_interfaces_cfg,
- topo_daemons,
scapy_send_raw_packet,
)
@@ -121,12 +120,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py
index 85646a8fab..b78fd235d7 100644
--- a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py
+++ b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py
@@ -43,7 +43,6 @@ from lib.common_config import (
reset_config_on_routers,
step,
create_interfaces_cfg,
- topo_daemons,
scapy_send_raw_packet,
)
@@ -121,12 +120,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py
index ec97c254d1..f4e366031f 100644
--- a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py
+++ b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py
@@ -43,7 +43,6 @@ from lib.common_config import (
reset_config_on_routers,
step,
create_interfaces_cfg,
- topo_daemons,
scapy_send_raw_packet,
)
@@ -121,12 +120,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
index 59ba8236c7..1a92c597be 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
@@ -55,7 +55,6 @@ from lib.common_config import (
shutdown_bringup_interface,
create_prefix_lists,
create_route_maps,
- topo_daemons,
create_interfaces_cfg,
)
from lib.topolog import logger
@@ -158,12 +157,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_authentication.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_authentication.py
index d32a05a88e..0fe0fd95b0 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_authentication.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_authentication.py
@@ -50,7 +50,6 @@ from lib.common_config import (
reset_config_on_routers,
step,
shutdown_bringup_interface,
- topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_topo_from_json, build_config_from_json
@@ -94,6 +93,7 @@ TESTCASES =
"""
+
def setup_module(mod):
"""
Sets up the pytest environment
@@ -112,12 +112,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
@@ -157,6 +154,7 @@ def teardown_module(mod):
# Test cases start here.
# ##################################
+
def test_ospf6_auth_trailer_tc1_md5(request):
"""
OSPFv3 Authentication Trailer - Verify ospfv3 authentication trailer
@@ -233,9 +231,7 @@ def test_ospf6_auth_trailer_tc1_md5(request):
tc_name, ospf6_covergence
)
- step(
- "Disable authentication on R2 "
- )
+ step("Disable authentication on R2 ")
r2_ospf6_auth = {
"r2": {
@@ -245,7 +241,7 @@ def test_ospf6_auth_trailer_tc1_md5(request):
"hash-algo": "md5",
"key": "ospf6",
"key-id": "10",
- "del_action": True
+ "del_action": True,
}
}
}
@@ -401,9 +397,7 @@ def test_ospf6_auth_trailer_tc2_sha256(request):
tc_name, ospf6_covergence
)
- step(
- "Disable authentication on R2 "
- )
+ step("Disable authentication on R2 ")
r2_ospf6_auth = {
"r2": {
@@ -413,7 +407,7 @@ def test_ospf6_auth_trailer_tc2_sha256(request):
"hash-algo": "hmac-sha-256",
"key": "ospf6",
"key-id": "10",
- "del_action": True
+ "del_action": True,
}
}
}
@@ -492,6 +486,7 @@ def test_ospf6_auth_trailer_tc2_sha256(request):
write_test_footer(tc_name)
+
def test_ospf6_auth_trailer_tc3_keychain_md5(request):
"""
OSPFv3 Authentication Trailer - Verify ospfv3 authentication trailer
@@ -583,21 +578,10 @@ def test_ospf6_auth_trailer_tc3_keychain_md5(request):
tc_name, ospf6_covergence
)
- step(
- "Disable authentication on R2 "
- )
+ step("Disable authentication on R2 ")
r2_ospf6_auth = {
- "r2": {
- "links": {
- "r1": {
- "ospf6": {
- "keychain": "auth",
- "del_action": True
- }
- }
- }
- }
+ "r2": {"links": {"r1": {"ospf6": {"keychain": "auth", "del_action": True}}}}
}
result = config_ospf6_interface(tgen, topo, r2_ospf6_auth)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
@@ -670,6 +654,7 @@ def test_ospf6_auth_trailer_tc3_keychain_md5(request):
write_test_footer(tc_name)
+
def test_ospf6_auth_trailer_tc4_keychain_sha256(request):
"""
OSPFv3 Authentication Trailer - Verify ospfv3 authentication trailer
@@ -761,21 +746,10 @@ def test_ospf6_auth_trailer_tc4_keychain_sha256(request):
tc_name, ospf6_covergence
)
- step(
- "Disable authentication on R2 "
- )
+ step("Disable authentication on R2 ")
r2_ospf6_auth = {
- "r2": {
- "links": {
- "r1": {
- "ospf6": {
- "keychain": "auth",
- "del_action": True
- }
- }
- }
- }
+ "r2": {"links": {"r1": {"ospf6": {"keychain": "auth", "del_action": True}}}}
}
result = config_ospf6_interface(tgen, topo, r2_ospf6_auth)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
@@ -848,6 +822,7 @@ def test_ospf6_auth_trailer_tc4_keychain_sha256(request):
write_test_footer(tc_name)
+
def test_ospf6_auth_trailer_tc5_md5_keymissmatch(request):
"""
OSPFv3 Authentication Trailer - Verify ospfv3 authentication trailer
@@ -963,6 +938,7 @@ def test_ospf6_auth_trailer_tc5_md5_keymissmatch(request):
write_test_footer(tc_name)
+
def test_ospf6_auth_trailer_tc6_sha256_mismatch(request):
"""
OSPFv3 Authentication Trailer - Verify ospfv3 authentication trailer
@@ -1073,6 +1049,7 @@ def test_ospf6_auth_trailer_tc6_sha256_mismatch(request):
write_test_footer(tc_name)
+
def test_ospf6_auth_trailer_tc7_keychain_md5_missmatch(request):
"""
OSPFv3 Authentication Trailer - Verify ospfv3 authentication trailer
@@ -1204,6 +1181,7 @@ def test_ospf6_auth_trailer_tc7_keychain_md5_missmatch(request):
write_test_footer(tc_name)
+
def test_ospf6_auth_trailer_tc8_keychain_sha256_missmatch(request):
"""
OSPFv3 Authentication Trailer - Verify ospfv3 authentication trailer
@@ -1335,6 +1313,7 @@ def test_ospf6_auth_trailer_tc8_keychain_sha256_missmatch(request):
write_test_footer(tc_name)
+
def test_ospf6_auth_trailer_tc9_keychain_not_configured(request):
"""
OSPFv3 Neighborship without Authentication Trailer -
@@ -1412,6 +1391,7 @@ def test_ospf6_auth_trailer_tc9_keychain_not_configured(request):
write_test_footer(tc_name)
+
def test_ospf6_auth_trailer_tc10_no_auth_trailer(request):
"""
OSPFv3 Neighborship without Authentication Trailer -
@@ -1441,6 +1421,7 @@ def test_ospf6_auth_trailer_tc10_no_auth_trailer(request):
write_test_footer(tc_name)
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py
index 75be0928ab..6bb88ebca3 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py
@@ -46,7 +46,6 @@ from lib.common_config import (
create_static_routes,
step,
shutdown_bringup_interface,
- topo_daemons,
get_frr_ipv6_linklocal,
)
from lib.topolog import logger
@@ -117,12 +116,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py
index ce880b413b..5cbfb0d6e1 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py
@@ -53,7 +53,6 @@ from lib.common_config import (
create_route_maps,
shutdown_bringup_interface,
create_interfaces_cfg,
- topo_daemons,
get_frr_ipv6_linklocal,
)
from lib.topolog import logger
@@ -130,12 +129,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa.py
index bdc4c139f7..c0d8d718cc 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa.py
@@ -7,7 +7,6 @@ from lib.common_config import (
write_test_footer,
reset_config_on_routers,
step,
- topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -62,12 +61,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py
index 7b41c80ce3..4cccd9734f 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py
@@ -57,7 +57,6 @@ from lib.common_config import (
verify_rib,
create_static_routes,
step,
- topo_daemons,
create_route_maps,
shutdown_bringup_interface,
create_interfaces_cfg,
@@ -139,12 +138,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
@@ -406,6 +402,124 @@ def test_ospfv3_nssa_tc26_p0(request):
write_test_footer(tc_name)
+def test_ospfv3_learning_tc15_p0(request):
+ """Verify OSPF can learn different types of LSA and processes them.
+
+ OSPF Learning : Edge learning different types of LSAs.
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ step("Configure area 1 as NSSA Area")
+
+ reset_config_on_routers(tgen)
+
+ step("Verify that Type 3 summary LSA is originated for the same Area 0")
+ ip = topo["routers"]["r1"]["links"]["r3-link0"]["ipv6"]
+ ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": ip_net,
+ "no_of_ip": 1,
+ "routeType": "Network",
+ "pathtype": "Inter-Area",
+ }
+ ]
+ }
+ }
+
+ dut = "r0"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf6"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r2": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 5, "next_hop": "Null0"}
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Redistribute static route in R2 ospf.")
+ dut = "r2"
+ red_static(dut)
+
+ step("Verify that Type 5 LSA is originated by R2.")
+ dut = "r0"
+ protocol = "ospf6"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 1, "routeType": "Network"}
+ ]
+ }
+ }
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_ospf6_neighbor(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Change area 1 as non nssa area (on the fly changing area" " type on DUT).")
+
+ for rtr in ["r1", "r2", "r3"]:
+ input_dict = {
+ rtr: {
+ "ospf6": {"area": [{"id": "0.0.0.2", "type": "nssa", "delete": True}]}
+ }
+ }
+ result = create_router_ospf(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that OSPF neighbours are reset after changing area type.")
+ step("Verify that ABR R2 originates type 5 LSA in area 1.")
+ step("Verify that R1 installs type 5 lsa in its database.")
+ step("Verify that route is calculated and installed in R1.")
+
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 1, "routeType": "Network"}
+ ]
+ }
+ }
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
# As per internal discussion, this script has to be removed as translator
# function is not supported, for more details kindly check this PR 2565570
def ospfv3_nssa_tc27_p0(request):
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py
index 0c9457b39e..ee15a5fe1c 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py
@@ -48,7 +48,6 @@ from lib.common_config import (
step,
create_route_maps,
verify_prefix_lists,
- topo_daemons,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -131,12 +130,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
index df3a0249ea..e5b20db6de 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
@@ -46,7 +46,6 @@ from lib.common_config import (
step,
shutdown_bringup_interface,
create_interfaces_cfg,
- topo_daemons,
get_frr_ipv6_linklocal,
check_router_status,
create_static_routes,
@@ -122,12 +121,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
index d318ec0906..6aee3b815d 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
@@ -48,7 +48,6 @@ from lib.common_config import (
reset_config_on_routers,
step,
create_interfaces_cfg,
- topo_daemons,
create_debug_log_config,
apply_raw_config,
)
@@ -116,12 +115,9 @@ def setup_module(mod):
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
- # get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
-
# Starting topology, create tmp files which are loaded to routers
# to start daemons and then start routers
- start_topology(tgen, daemons)
+ start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
diff --git a/zebra/debug.c b/zebra/debug.c
index 953f0423af..977af0e198 100644
--- a/zebra/debug.c
+++ b/zebra/debug.c
@@ -341,7 +341,7 @@ DEFPY(debug_zebra_dplane_dpdk, debug_zebra_dplane_dpdk_cmd,
SET_FLAG(zebra_debug_dplane_dpdk, ZEBRA_DEBUG_DPLANE_DPDK);
if (detail)
- SET_FLAG(zebra_debug_dplane,
+ SET_FLAG(zebra_debug_dplane_dpdk,
ZEBRA_DEBUG_DPLANE_DPDK_DETAIL);
}
@@ -740,10 +740,12 @@ static int config_write_debug(struct vty *vty)
write++;
}
- if (CHECK_FLAG(zebra_debug_dplane, ZEBRA_DEBUG_DPLANE_DPDK_DETAIL)) {
+ if (CHECK_FLAG(zebra_debug_dplane_dpdk,
+ ZEBRA_DEBUG_DPLANE_DPDK_DETAIL)) {
vty_out(vty, "debug zebra dplane dpdk detailed\n");
write++;
- } else if (CHECK_FLAG(zebra_debug_dplane, ZEBRA_DEBUG_DPLANE_DPDK)) {
+ } else if (CHECK_FLAG(zebra_debug_dplane_dpdk,
+ ZEBRA_DEBUG_DPLANE_DPDK)) {
vty_out(vty, "debug zebra dplane dpdk\n");
write++;
}
diff --git a/zebra/debug.h b/zebra/debug.h
index e0c6a9e2b9..514827707a 100644
--- a/zebra/debug.h
+++ b/zebra/debug.h
@@ -111,9 +111,9 @@ extern "C" {
(zebra_debug_dplane & ZEBRA_DEBUG_DPLANE_DETAILED)
#define IS_ZEBRA_DEBUG_DPLANE_DPDK \
- (zebra_debug_dplane & ZEBRA_DEBUG_DPLANE_DPDK)
+ (zebra_debug_dplane_dpdk & ZEBRA_DEBUG_DPLANE_DPDK)
#define IS_ZEBRA_DEBUG_DPLANE_DPDK_DETAIL \
- (zebra_debug_dplane & ZEBRA_DEBUG_DPLANE_DPDK_DETAIL)
+ (zebra_debug_dplane_dpdk & ZEBRA_DEBUG_DPLANE_DPDK_DETAIL)
#define IS_ZEBRA_DEBUG_MLAG (zebra_debug_mlag & ZEBRA_DEBUG_MLAG)
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index 6c95be29df..337113988e 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -51,6 +51,7 @@
#include "zebra/kernel_netlink.h"
#include "zebra/rt_netlink.h"
#include "zebra/debug.h"
+#include "fpm/fpm.h"
#define SOUTHBOUND_DEFAULT_ADDR INADDR_LOOPBACK
#define SOUTHBOUND_DEFAULT_PORT 2620
@@ -462,18 +463,17 @@ static void fpm_reconnect(struct fpm_nl_ctx *fnc)
static void fpm_read(struct thread *t)
{
struct fpm_nl_ctx *fnc = THREAD_ARG(t);
+ fpm_msg_hdr_t fpm;
ssize_t rv;
+ char buf[65535];
+ struct nlmsghdr *hdr;
+ struct zebra_dplane_ctx *ctx;
+ size_t available_bytes;
+ size_t hdr_available_bytes;
/* Let's ignore the input at the moment. */
rv = stream_read_try(fnc->ibuf, fnc->socket,
STREAM_WRITEABLE(fnc->ibuf));
- /* We've got an interruption. */
- if (rv == -2) {
- /* Schedule next read. */
- thread_add_read(fnc->fthread->master, fpm_read, fnc,
- fnc->socket, &fnc->t_read);
- return;
- }
if (rv == 0) {
atomic_fetch_add_explicit(&fnc->counters.connection_closes, 1,
memory_order_relaxed);
@@ -492,14 +492,131 @@ static void fpm_read(struct thread *t)
FPM_RECONNECT(fnc);
return;
}
- stream_reset(fnc->ibuf);
+
+ /* Schedule the next read */
+ thread_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket,
+ &fnc->t_read);
+
+ /* We've got an interruption. */
+ if (rv == -2)
+ return;
+
/* Account all bytes read. */
atomic_fetch_add_explicit(&fnc->counters.bytes_read, rv,
memory_order_relaxed);
- thread_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket,
- &fnc->t_read);
+ available_bytes = STREAM_READABLE(fnc->ibuf);
+ while (available_bytes) {
+ if (available_bytes < (ssize_t)FPM_MSG_HDR_LEN) {
+ stream_pulldown(fnc->ibuf);
+ return;
+ }
+
+ fpm.version = stream_getc(fnc->ibuf);
+ fpm.msg_type = stream_getc(fnc->ibuf);
+ fpm.msg_len = stream_getw(fnc->ibuf);
+
+ if (fpm.version != FPM_PROTO_VERSION &&
+ fpm.msg_type != FPM_MSG_TYPE_NETLINK) {
+ stream_reset(fnc->ibuf);
+ zlog_warn(
+ "%s: Received version/msg_type %u/%u, expected 1/1",
+ __func__, fpm.version, fpm.msg_type);
+
+ FPM_RECONNECT(fnc);
+ return;
+ }
+
+ /*
+ * If the passed in length doesn't even fill in the header
+ * something is wrong and reset.
+ */
+ if (fpm.msg_len < FPM_MSG_HDR_LEN) {
+ zlog_warn(
+ "%s: Received message length: %u that does not even fill the FPM header",
+ __func__, fpm.msg_len);
+ FPM_RECONNECT(fnc);
+ return;
+ }
+
+ /*
+ * If we have not received the whole payload, reset the stream
+ * back to the beginning of the header and move it to the
+ * top.
+ */
+ if (fpm.msg_len > available_bytes) {
+ stream_rewind_getp(fnc->ibuf, FPM_MSG_HDR_LEN);
+ stream_pulldown(fnc->ibuf);
+ return;
+ }
+
+ available_bytes -= FPM_MSG_HDR_LEN;
+
+ /*
+ * Place the data from the stream into a buffer
+ */
+ hdr = (struct nlmsghdr *)buf;
+ stream_get(buf, fnc->ibuf, fpm.msg_len - FPM_MSG_HDR_LEN);
+ hdr_available_bytes = fpm.msg_len - FPM_MSG_HDR_LEN;
+ available_bytes -= hdr_available_bytes;
+
+ /* Sanity check: must be at least header size. */
+ if (hdr->nlmsg_len < sizeof(*hdr)) {
+ zlog_warn(
+ "%s: [seq=%u] invalid message length %u (< %zu)",
+ __func__, hdr->nlmsg_seq, hdr->nlmsg_len,
+ sizeof(*hdr));
+ continue;
+ }
+ if (hdr->nlmsg_len > fpm.msg_len) {
+ zlog_warn(
+				"%s: Received an inner header length of %u that is greater than the fpm total length of %u",
+ __func__, hdr->nlmsg_len, fpm.msg_len);
+ FPM_RECONNECT(fnc);
+ }
+ /* Not enough bytes available. */
+ if (hdr->nlmsg_len > hdr_available_bytes) {
+ zlog_warn(
+ "%s: [seq=%u] invalid message length %u (> %zu)",
+ __func__, hdr->nlmsg_seq, hdr->nlmsg_len,
+ available_bytes);
+ continue;
+ }
+
+ if (!(hdr->nlmsg_flags & NLM_F_REQUEST)) {
+ if (IS_ZEBRA_DEBUG_FPM)
+ zlog_debug(
+ "%s: [seq=%u] not a request, skipping",
+ __func__, hdr->nlmsg_seq);
+
+ /*
+ * This request is a bust, go to the next one
+ */
+ continue;
+ }
+
+ switch (hdr->nlmsg_type) {
+ case RTM_NEWROUTE:
+ ctx = dplane_ctx_alloc();
+ dplane_ctx_set_op(ctx, DPLANE_OP_ROUTE_NOTIFY);
+ if (netlink_route_change_read_unicast_internal(
+ hdr, 0, false, ctx) != 1) {
+ dplane_ctx_fini(&ctx);
+ stream_pulldown(fnc->ibuf);
+ return;
+ }
+ break;
+ default:
+ if (IS_ZEBRA_DEBUG_FPM)
+ zlog_debug(
+ "%s: Received message type %u which is not currently handled",
+ __func__, hdr->nlmsg_type);
+ break;
+ }
+ }
+
+ stream_reset(fnc->ibuf);
}
static void fpm_write(struct thread *t)
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 2396dfe4d6..96ec90e549 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -692,8 +692,9 @@ static uint8_t parse_multipath_nexthops_unicast(ns_id_t ns_id,
}
/* Looking up routing table by netlink interface. */
-static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
- int startup)
+int netlink_route_change_read_unicast_internal(struct nlmsghdr *h,
+ ns_id_t ns_id, int startup,
+ struct zebra_dplane_ctx *ctx)
{
int len;
struct rtmsg *rtm;
@@ -768,9 +769,8 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
selfroute = is_selfroute(rtm->rtm_protocol);
- if (!startup && selfroute
- && h->nlmsg_type == RTM_NEWROUTE
- && !zrouter.asic_offloaded) {
+ if (!startup && selfroute && h->nlmsg_type == RTM_NEWROUTE &&
+ !zrouter.asic_offloaded && !ctx) {
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug("Route type: %d Received that we think we have originated, ignoring",
rtm->rtm_protocol);
@@ -988,8 +988,8 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
}
}
if (nhe_id || ng)
- rib_add_multipath(afi, SAFI_UNICAST, &p, &src_p, re, ng,
- startup);
+ dplane_rib_add_multipath(afi, SAFI_UNICAST, &p, &src_p,
+ re, ng, startup, ctx);
else {
/*
* I really don't see how this is possible
@@ -1004,6 +1004,13 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
XFREE(MTYPE_RE, re);
}
} else {
+ if (ctx) {
+ zlog_err(
+ "%s: %pFX RTM_DELROUTE received but received a context as well",
+ __func__, &p);
+ return 0;
+ }
+
if (nhe_id) {
rib_delete(afi, SAFI_UNICAST, vrf_id, proto, 0, flags,
&p, &src_p, NULL, nhe_id, table, metric,
@@ -1028,7 +1035,14 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
}
}
- return 0;
+ return 1;
+}
+
+static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
+ int startup)
+{
+ return netlink_route_change_read_unicast_internal(h, ns_id, startup,
+ NULL);
}
static struct mcast_route_data *mroute = NULL;
diff --git a/zebra/rt_netlink.h b/zebra/rt_netlink.h
index b1af4b20e1..fd2b79a2bf 100644
--- a/zebra/rt_netlink.h
+++ b/zebra/rt_netlink.h
@@ -122,6 +122,10 @@ netlink_put_lsp_update_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx);
extern enum netlink_msg_status
netlink_put_pw_update_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx);
+int netlink_route_change_read_unicast_internal(struct nlmsghdr *h,
+ ns_id_t ns_id, int startup,
+ struct zebra_dplane_ctx *ctx);
+
#ifdef NETLINK_DEBUG
const char *nlmsg_type2str(uint16_t type);
const char *af_type2str(int type);
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index c189408b57..84dae7f2d6 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -135,6 +135,8 @@ struct dplane_route_info {
uint32_t zd_mtu;
uint32_t zd_nexthop_mtu;
+ uint32_t zd_flags;
+
/* Nexthop hash entry info */
struct dplane_nexthop_info nhe;
@@ -1430,6 +1432,20 @@ uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
return ctx->u.rinfo.zd_old_instance;
}
+uint32_t dplane_ctx_get_flags(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.rinfo.zd_flags;
+}
+
+void dplane_ctx_set_flags(struct zebra_dplane_ctx *ctx, uint32_t flags)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.rinfo.zd_flags = flags;
+}
+
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -2766,25 +2782,16 @@ static int dplane_ctx_ns_init(struct zebra_dplane_ctx *ctx,
return AOK;
}
-/*
- * Initialize a context block for a route update from zebra data structs.
- */
-int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
- struct route_node *rn, struct route_entry *re)
+int dplane_ctx_route_init_basic(struct zebra_dplane_ctx *ctx,
+ enum dplane_op_e op, struct route_entry *re,
+ const struct prefix *p,
+ const struct prefix_ipv6 *src_p, afi_t afi,
+ safi_t safi)
{
int ret = EINVAL;
- const struct route_table *table = NULL;
- const struct rib_table_info *info;
- const struct prefix *p, *src_p;
- struct zebra_ns *zns;
- struct zebra_vrf *zvrf;
- struct nexthop *nexthop;
- struct zebra_l3vni *zl3vni;
- const struct interface *ifp;
- struct dplane_intf_extra *if_extra;
- if (!ctx || !rn || !re)
- goto done;
+ if (!ctx || !re)
+ return ret;
TAILQ_INIT(&ctx->u.rinfo.intf_extra_q);
@@ -2794,9 +2801,6 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
ctx->u.rinfo.zd_type = re->type;
ctx->u.rinfo.zd_old_type = re->type;
- /* Prefixes: dest, and optional source */
- srcdest_rnode_prefixes(rn, &p, &src_p);
-
prefix_copy(&(ctx->u.rinfo.zd_dest), p);
if (src_p)
@@ -2806,6 +2810,7 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
ctx->zd_table_id = re->table;
+ ctx->u.rinfo.zd_flags = re->flags;
ctx->u.rinfo.zd_metric = re->metric;
ctx->u.rinfo.zd_old_metric = re->metric;
ctx->zd_vrf_id = re->vrf_id;
@@ -2816,11 +2821,46 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
ctx->u.rinfo.zd_old_tag = re->tag;
ctx->u.rinfo.zd_distance = re->distance;
+ ctx->u.rinfo.zd_afi = afi;
+ ctx->u.rinfo.zd_safi = safi;
+
+ return AOK;
+}
+
+/*
+ * Initialize a context block for a route update from zebra data structs.
+ */
+int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
+ struct route_node *rn, struct route_entry *re)
+{
+ int ret = EINVAL;
+ const struct route_table *table = NULL;
+ const struct rib_table_info *info;
+ const struct prefix *p;
+ const struct prefix_ipv6 *src_p;
+ struct zebra_ns *zns;
+ struct zebra_vrf *zvrf;
+ struct nexthop *nexthop;
+ struct zebra_l3vni *zl3vni;
+ const struct interface *ifp;
+ struct dplane_intf_extra *if_extra;
+
+ if (!ctx || !rn || !re)
+ return ret;
+
+ /*
+ * Let's grab the data from the route_node
+ * so that we can call a helper function
+ */
+
+ /* Prefixes: dest, and optional source */
+ srcdest_rnode_prefixes(rn, &p, (const struct prefix **)&src_p);
table = srcdest_rnode_table(rn);
info = table->info;
- ctx->u.rinfo.zd_afi = info->afi;
- ctx->u.rinfo.zd_safi = info->safi;
+ if (dplane_ctx_route_init_basic(ctx, op, re, p, src_p, info->afi,
+ info->safi) != AOK)
+ return ret;
/* Copy nexthops; recursive info is included too */
copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop),
@@ -2875,8 +2915,7 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
/* Don't need some info when capturing a system notification */
if (op == DPLANE_OP_SYS_ROUTE_ADD ||
op == DPLANE_OP_SYS_ROUTE_DELETE) {
- ret = AOK;
- goto done;
+ return AOK;
}
/* Extract ns info - can't use pointers to 'core' structs */
@@ -2897,14 +2936,12 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
* If its a delete we only use the prefix anyway, so this only
* matters for INSTALL/UPDATE.
*/
- if (zebra_nhg_kernel_nexthops_enabled()
- && (((op == DPLANE_OP_ROUTE_INSTALL)
- || (op == DPLANE_OP_ROUTE_UPDATE))
- && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)
- && !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED))) {
- ret = ENOENT;
- goto done;
- }
+ if (zebra_nhg_kernel_nexthops_enabled() &&
+ (((op == DPLANE_OP_ROUTE_INSTALL) ||
+ (op == DPLANE_OP_ROUTE_UPDATE)) &&
+ !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED) &&
+ !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED)))
+ return ENOENT;
re->nhe_installed_id = nhe->id;
}
@@ -2916,10 +2953,7 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
re->dplane_sequence = zebra_router_get_next_sequence();
ctx->zd_seq = re->dplane_sequence;
- ret = AOK;
-
-done:
- return ret;
+ return AOK;
}
static int dplane_ctx_tc_qdisc_init(struct zebra_dplane_ctx *ctx,
@@ -3031,7 +3065,7 @@ int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
int ret = EINVAL;
if (!ctx || !nhe)
- goto done;
+ return ret;
ctx->zd_op = op;
ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
@@ -3066,7 +3100,6 @@ int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
ret = AOK;
-done:
return ret;
}
@@ -3088,7 +3121,7 @@ int dplane_ctx_intf_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
bool set_pdown, unset_pdown;
if (!ctx || !ifp)
- goto done;
+ return ret;
ctx->zd_op = op;
ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
@@ -3133,7 +3166,6 @@ int dplane_ctx_intf_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
ret = AOK;
-done:
return ret;
}
@@ -3161,10 +3193,8 @@ int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
/* This may be called to create/init a dplane context, not necessarily
* to copy an lsp object.
*/
- if (lsp == NULL) {
- ret = AOK;
- goto done;
- }
+ if (lsp == NULL)
+ return ret;
if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
zlog_debug("init dplane ctx %s: in-label %u ecmp# %d",
@@ -3207,7 +3237,7 @@ int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
}
if (ret != AOK)
- goto done;
+ return ret;
/* Capture backup nhlfes/nexthops */
frr_each(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) {
@@ -3228,11 +3258,6 @@ int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
}
- /* On error the ctx will be cleaned-up, so we don't need to
- * deal with any allocated nhlfe or nexthop structs here.
- */
-done:
-
return ret;
}
@@ -3293,11 +3318,11 @@ static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
if (table == NULL)
- goto done;
+ return ret;
rn = route_node_match(table, &p);
if (rn == NULL)
- goto done;
+ return ret;
re = NULL;
RNODE_FOREACH_RE(rn, re) {
@@ -3365,10 +3390,7 @@ static int dplane_ctx_pw_init(struct zebra_dplane_ctx *ctx,
}
route_unlock_node(rn);
- ret = AOK;
-
-done:
- return ret;
+ return AOK;
}
/**
@@ -3943,12 +3965,11 @@ enum zebra_dplane_result dplane_route_add(struct route_node *rn,
enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
if (rn == NULL || re == NULL)
- goto done;
+ return ret;
ret = dplane_route_update_internal(rn, re, NULL,
DPLANE_OP_ROUTE_INSTALL);
-done:
return ret;
}
@@ -3962,11 +3983,11 @@ enum zebra_dplane_result dplane_route_update(struct route_node *rn,
enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
if (rn == NULL || re == NULL)
- goto done;
+ return ret;
ret = dplane_route_update_internal(rn, re, old_re,
DPLANE_OP_ROUTE_UPDATE);
-done:
+
return ret;
}
@@ -3979,12 +4000,11 @@ enum zebra_dplane_result dplane_route_delete(struct route_node *rn,
enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
if (rn == NULL || re == NULL)
- goto done;
+ return ret;
ret = dplane_route_update_internal(rn, re, NULL,
DPLANE_OP_ROUTE_DELETE);
-done:
return ret;
}
@@ -3997,18 +4017,16 @@ enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
/* Ignore this event unless a provider plugin has requested it. */
- if (!zdplane_info.dg_sys_route_notifs) {
- ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
- goto done;
- }
+ if (!zdplane_info.dg_sys_route_notifs)
+ return ZEBRA_DPLANE_REQUEST_SUCCESS;
+
if (rn == NULL || re == NULL)
- goto done;
+ return ret;
ret = dplane_route_update_internal(rn, re, NULL,
DPLANE_OP_SYS_ROUTE_ADD);
-done:
return ret;
}
@@ -4021,18 +4039,15 @@ enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
/* Ignore this event unless a provider plugin has requested it. */
- if (!zdplane_info.dg_sys_route_notifs) {
- ret = ZEBRA_DPLANE_REQUEST_SUCCESS;
- goto done;
- }
+ if (!zdplane_info.dg_sys_route_notifs)
+ return ZEBRA_DPLANE_REQUEST_SUCCESS;
if (rn == NULL || re == NULL)
- goto done;
+ return ret;
ret = dplane_route_update_internal(rn, re, NULL,
DPLANE_OP_SYS_ROUTE_DELETE);
-done:
return ret;
}
@@ -6287,6 +6302,20 @@ kernel_dplane_process_ipset_entry(struct zebra_dplane_provider *prov,
dplane_provider_enqueue_out_ctx(prov, ctx);
}
+void dplane_rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
+ struct prefix_ipv6 *src_p, struct route_entry *re,
+ struct nexthop_group *ng, int startup,
+ struct zebra_dplane_ctx *ctx)
+{
+ if (!ctx)
+ rib_add_multipath(afi, safi, p, src_p, re, ng, startup);
+ else {
+ dplane_ctx_route_init_basic(ctx, dplane_ctx_get_op(ctx), re, p,
+ src_p, afi, safi);
+ dplane_provider_enqueue_to_zebra(ctx);
+ }
+}
+
/*
* Kernel provider callback
*/
@@ -6463,7 +6492,7 @@ int dplane_clean_ctx_queue(bool (*context_cb)(struct zebra_dplane_ctx *ctx,
TAILQ_INIT(&work_list);
if (context_cb == NULL)
- goto done;
+ return AOK;
/* Walk the pending context queue under the dplane lock. */
DPLANE_LOCK();
@@ -6487,9 +6516,7 @@ int dplane_clean_ctx_queue(bool (*context_cb)(struct zebra_dplane_ctx *ctx,
dplane_ctx_fini(&ctx);
}
-done:
-
- return 0;
+ return AOK;
}
/* Indicates zebra shutdown/exit is in progress. Some operations may be
@@ -6553,10 +6580,8 @@ static bool dplane_work_pending(void)
}
DPLANE_UNLOCK();
- if (ctx != NULL) {
- ret = true;
- goto done;
- }
+ if (ctx != NULL)
+ return true;
while (prov) {
@@ -6579,7 +6604,6 @@ static bool dplane_work_pending(void)
if (ctx != NULL)
ret = true;
-done:
return ret;
}
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index b9fd176de7..51f6f3d897 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -380,6 +380,8 @@ route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx);
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance);
uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx);
+uint32_t dplane_ctx_get_flags(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_flags(struct zebra_dplane_ctx *ctx, uint32_t flags);
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx);
@@ -908,6 +910,12 @@ dplane_pbr_ipset_entry_delete(struct zebra_pbr_ipset_entry *ipset);
int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
struct route_node *rn, struct route_entry *re);
+int dplane_ctx_route_init_basic(struct zebra_dplane_ctx *ctx,
+ enum dplane_op_e op, struct route_entry *re,
+ const struct prefix *p,
+ const struct prefix_ipv6 *src_p, afi_t afi,
+ safi_t safi);
+
/* Encode next hop information into data plane context. */
int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
struct nhg_hash_entry *nhe);
@@ -1073,6 +1081,16 @@ void zebra_dplane_pre_finish(void);
void zebra_dplane_finish(void);
void zebra_dplane_shutdown(void);
+/*
+ * Decision point for sending a routing update either directly
+ * to the zebra master pthread (the old path) or through the
+ * dplane to the master pthread for handling
+ */
+void dplane_rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
+ struct prefix_ipv6 *src_p, struct route_entry *re,
+ struct nexthop_group *ng, int startup,
+ struct zebra_dplane_ctx *ctx);
+
#ifdef __cplusplus
}
#endif
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 656588bb82..b86780276b 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -1523,8 +1523,7 @@ static bool rib_route_match_ctx(const struct route_entry *re,
}
done:
-
- return (result);
+ return result;
}
static void zebra_rib_fixup_system(struct route_node *rn)
@@ -2261,10 +2260,8 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx)
}
/* Ensure we clear the QUEUED flag */
- if (!zrouter.asic_offloaded) {
- UNSET_FLAG(re->status, ROUTE_ENTRY_QUEUED);
- UNSET_FLAG(re->status, ROUTE_ENTRY_ROUTE_REPLACING);
- }
+ UNSET_FLAG(re->status, ROUTE_ENTRY_QUEUED);
+ UNSET_FLAG(re->status, ROUTE_ENTRY_ROUTE_REPLACING);
/* Is this a notification that ... matters? We mostly care about
* the route that is currently selected for installation; we may also
@@ -2307,6 +2304,19 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx)
dplane_ctx_get_type(ctx)));
}
goto done;
+ } else {
+ uint32_t flags = dplane_ctx_get_flags(ctx);
+
+ if (CHECK_FLAG(flags, ZEBRA_FLAG_OFFLOADED)) {
+ UNSET_FLAG(re->flags, ZEBRA_FLAG_OFFLOAD_FAILED);
+ SET_FLAG(re->flags, ZEBRA_FLAG_OFFLOADED);
+ }
+ if (CHECK_FLAG(flags, ZEBRA_FLAG_OFFLOAD_FAILED)) {
+ UNSET_FLAG(re->flags, ZEBRA_FLAG_OFFLOADED);
+ SET_FLAG(re->flags, ZEBRA_FLAG_OFFLOAD_FAILED);
+ }
+ if (CHECK_FLAG(flags, ZEBRA_FLAG_TRAPPED))
+ SET_FLAG(re->flags, ZEBRA_FLAG_TRAPPED);
}
/* We'll want to determine whether the installation status of the
@@ -2340,55 +2350,70 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx)
/* Various fib transitions: changed nexthops; from installed to
* not-installed; or not-installed to installed.
*/
- if (start_count > 0 && end_count > 0) {
- if (debug_p)
- zlog_debug(
- "%s(%u:%u):%pRN applied nexthop changes from dplane notification",
- VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx),
- dplane_ctx_get_table(ctx), rn);
+ if (zrouter.asic_notification_nexthop_control) {
+ if (start_count > 0 && end_count > 0) {
+ if (debug_p)
+ zlog_debug(
+ "%s(%u:%u):%pRN applied nexthop changes from dplane notification",
+ VRF_LOGNAME(vrf),
+ dplane_ctx_get_vrf(ctx),
+ dplane_ctx_get_table(ctx), rn);
- /* Changed nexthops - update kernel/others */
- dplane_route_notif_update(rn, re,
- DPLANE_OP_ROUTE_UPDATE, ctx);
+ /* Changed nexthops - update kernel/others */
+ dplane_route_notif_update(rn, re,
+ DPLANE_OP_ROUTE_UPDATE, ctx);
- } else if (start_count == 0 && end_count > 0) {
- if (debug_p)
- zlog_debug(
- "%s(%u:%u):%pRN installed transition from dplane notification",
- VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx),
- dplane_ctx_get_table(ctx), rn);
+ } else if (start_count == 0 && end_count > 0) {
+ if (debug_p)
+ zlog_debug(
+ "%s(%u:%u):%pRN installed transition from dplane notification",
+ VRF_LOGNAME(vrf),
+ dplane_ctx_get_vrf(ctx),
+ dplane_ctx_get_table(ctx), rn);
- /* We expect this to be the selected route, so we want
- * to tell others about this transition.
- */
- SET_FLAG(re->status, ROUTE_ENTRY_INSTALLED);
+ /* We expect this to be the selected route, so we want
+ * to tell others about this transition.
+ */
+ SET_FLAG(re->status, ROUTE_ENTRY_INSTALLED);
- /* Changed nexthops - update kernel/others */
- dplane_route_notif_update(rn, re, DPLANE_OP_ROUTE_UPDATE, ctx);
+ /* Changed nexthops - update kernel/others */
+ dplane_route_notif_update(rn, re,
+ DPLANE_OP_ROUTE_UPDATE, ctx);
- /* Redistribute, lsp, and nht update */
- redistribute_update(rn, re, NULL);
+ /* Redistribute, lsp, and nht update */
+ redistribute_update(rn, re, NULL);
- } else if (start_count > 0 && end_count == 0) {
- if (debug_p)
- zlog_debug(
- "%s(%u:%u):%pRN un-installed transition from dplane notification",
- VRF_LOGNAME(vrf), dplane_ctx_get_vrf(ctx),
- dplane_ctx_get_table(ctx), rn);
+ } else if (start_count > 0 && end_count == 0) {
+ if (debug_p)
+ zlog_debug(
+ "%s(%u:%u):%pRN un-installed transition from dplane notification",
+ VRF_LOGNAME(vrf),
+ dplane_ctx_get_vrf(ctx),
+ dplane_ctx_get_table(ctx), rn);
- /* Transition from _something_ installed to _nothing_
- * installed.
- */
- /* We expect this to be the selected route, so we want
- * to tell others about this transistion.
- */
- UNSET_FLAG(re->status, ROUTE_ENTRY_INSTALLED);
+ /* Transition from _something_ installed to _nothing_
+ * installed.
+ */
+			/* We expect this to be the selected route, so we want
+			 * to tell others about this transition.
+			 */
+ UNSET_FLAG(re->status, ROUTE_ENTRY_INSTALLED);
- /* Changed nexthops - update kernel/others */
- dplane_route_notif_update(rn, re, DPLANE_OP_ROUTE_DELETE, ctx);
+ /* Changed nexthops - update kernel/others */
+ dplane_route_notif_update(rn, re,
+ DPLANE_OP_ROUTE_DELETE, ctx);
- /* Redistribute, lsp, and nht update */
- redistribute_delete(rn, re, NULL);
+ /* Redistribute, lsp, and nht update */
+ redistribute_delete(rn, re, NULL);
+ }
+ }
+
+ if (!zebra_router_notify_on_ack()) {
+ if (CHECK_FLAG(re->flags, ZEBRA_FLAG_OFFLOADED))
+ zsend_route_notify_owner_ctx(ctx, ZAPI_ROUTE_INSTALLED);
+ if (CHECK_FLAG(re->flags, ZEBRA_FLAG_OFFLOAD_FAILED))
+ zsend_route_notify_owner_ctx(ctx,
+ ZAPI_ROUTE_FAIL_INSTALL);
}
/* Make any changes visible for lsp and nexthop-tracking processing */
diff --git a/zebra/zebra_router.c b/zebra/zebra_router.c
index b8923ef57d..a9a7b66ce7 100644
--- a/zebra/zebra_router.c
+++ b/zebra/zebra_router.c
@@ -330,6 +330,17 @@ void zebra_router_init(bool asic_offload, bool notify_on_ack)
zrouter.asic_offloaded = asic_offload;
zrouter.notify_on_ack = notify_on_ack;
+ /*
+	 * If you start using asic_notification_nexthop_control,
+	 * come talk to the FRR community about what you are doing.
+	 * We would like to know.
+ */
+#if CONFDATE > 20251231
+ CPP_NOTICE(
+ "Remove zrouter.asic_notification_nexthop_control as that it's not being maintained or used");
+#endif
+ zrouter.asic_notification_nexthop_control = false;
+
#ifdef HAVE_SCRIPTING
zebra_script_init();
#endif
diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h
index 069437ef47..e0ef86f082 100644
--- a/zebra/zebra_router.h
+++ b/zebra/zebra_router.h
@@ -224,6 +224,14 @@ struct zebra_router {
bool asic_offloaded;
bool notify_on_ack;
+ /*
+	 * If the asic is notifying us about successful nexthop
+	 * allocation/control. Some developers have made their
+	 * asic take control of how many nexthops/ecmp they can
+	 * have and will report what is successful or not
+ */
+ bool asic_notification_nexthop_control;
+
bool supports_nhgs;
bool all_mc_forwardingv4, default_mc_forwardingv4;
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 91a0c1dd31..8ed8abe304 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -630,8 +630,7 @@ static void show_route_nexthop_helper(struct vty *vty,
case NEXTHOP_TYPE_IFINDEX:
vty_out(vty, " is directly connected, %s",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
+ ifindex2ifname(nexthop->ifindex, nexthop->vrf_id));
break;
case NEXTHOP_TYPE_BLACKHOLE:
vty_out(vty, " unreachable");
@@ -703,8 +702,10 @@ static void show_route_nexthop_helper(struct vty *vty,
seg6local_context2str(buf, sizeof(buf),
&nexthop->nh_srv6->seg6local_ctx,
nexthop->nh_srv6->seg6local_action);
- vty_out(vty, ", seg6local %s %s", seg6local_action2str(
- nexthop->nh_srv6->seg6local_action), buf);
+ vty_out(vty, ", seg6local %s %s",
+ seg6local_action2str(
+ nexthop->nh_srv6->seg6local_action),
+ buf);
inet_ntop(AF_INET6, &nexthop->nh_srv6->seg6_segs, buf,
sizeof(buf));
@@ -722,6 +723,7 @@ static void show_route_nexthop_helper(struct vty *vty,
}
}
+
/*
* Render a nexthop into a json object; the caller allocates and owns
* the json object memory.
@@ -806,9 +808,8 @@ static void show_nexthop_json_helper(json_object *json_nexthop,
json_nexthop, "reject");
break;
case BLACKHOLE_ADMINPROHIB:
- json_object_boolean_true_add(
- json_nexthop,
- "admin-prohibited");
+ json_object_boolean_true_add(json_nexthop,
+ "adminProhibited");
break;
case BLACKHOLE_NULL:
json_object_boolean_true_add(
@@ -827,7 +828,7 @@ static void show_nexthop_json_helper(json_object *json_nexthop,
if (nexthop->rparent)
json_object_boolean_true_add(json_nexthop, "resolver");
- if (nexthop->vrf_id != re->vrf_id)
+ if ((re == NULL || (nexthop->vrf_id != re->vrf_id)))
json_object_string_add(json_nexthop, "vrf",
vrf_id_to_name(nexthop->vrf_id));
@@ -840,8 +841,7 @@ static void show_nexthop_json_helper(json_object *json_nexthop,
"active");
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
- json_object_boolean_true_add(json_nexthop,
- "onLink");
+ json_object_boolean_true_add(json_nexthop, "onLink");
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_LINKDOWN))
json_object_boolean_true_add(json_nexthop, "linkDown");
@@ -1479,125 +1479,264 @@ DEFUN (ip_nht_default_route,
return CMD_SUCCESS;
}
-static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe)
+static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe,
+ json_object *json_nhe_hdr)
{
struct nexthop *nexthop = NULL;
struct nhg_connected *rb_node_dep = NULL;
struct nexthop_group *backup_nhg;
char up_str[MONOTIME_STRLEN];
char time_left[MONOTIME_STRLEN];
+ json_object *json_dependants = NULL;
+ json_object *json_depends = NULL;
+ json_object *json_nexthop_array = NULL;
+ json_object *json_nexthops = NULL;
+ json_object *json = NULL;
+ json_object *json_backup_nexthop_array = NULL;
+ json_object *json_backup_nexthops = NULL;
+
uptime2str(nhe->uptime, up_str, sizeof(up_str));
- vty_out(vty, "ID: %u (%s)\n", nhe->id, zebra_route_string(nhe->type));
- vty_out(vty, " RefCnt: %u", nhe->refcnt);
- if (thread_is_scheduled(nhe->timer))
- vty_out(vty, " Time to Deletion: %s",
- thread_timer_to_hhmmss(time_left, sizeof(time_left),
- nhe->timer));
- vty_out(vty, "\n");
+ if (json_nhe_hdr)
+ json = json_object_new_object();
+
+ if (json) {
+ json_object_string_add(json, "type",
+ zebra_route_string(nhe->type));
+ json_object_int_add(json, "refCount", nhe->refcnt);
+ if (thread_is_scheduled(nhe->timer))
+ json_object_string_add(
+ json, "timeToDeletion",
+ thread_timer_to_hhmmss(time_left,
+ sizeof(time_left),
+ nhe->timer));
+ json_object_string_add(json, "uptime", up_str);
+ json_object_string_add(json, "vrf",
+ vrf_id_to_name(nhe->vrf_id));
- vty_out(vty, " Uptime: %s\n", up_str);
- vty_out(vty, " VRF: %s\n", vrf_id_to_name(nhe->vrf_id));
+ } else {
+ vty_out(vty, "ID: %u (%s)\n", nhe->id,
+ zebra_route_string(nhe->type));
+ vty_out(vty, " RefCnt: %u", nhe->refcnt);
+ if (thread_is_scheduled(nhe->timer))
+ vty_out(vty, " Time to Deletion: %s",
+ thread_timer_to_hhmmss(time_left,
+ sizeof(time_left),
+ nhe->timer));
+ vty_out(vty, "\n");
+ vty_out(vty, " Uptime: %s\n", up_str);
+ vty_out(vty, " VRF: %s\n", vrf_id_to_name(nhe->vrf_id));
+ }
if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)) {
- vty_out(vty, " Valid");
- if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED))
- vty_out(vty, ", Installed");
- vty_out(vty, "\n");
+ if (json)
+ json_object_boolean_true_add(json, "valid");
+ else
+ vty_out(vty, " Valid");
+
+ if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED)) {
+ if (json)
+ json_object_boolean_true_add(json, "installed");
+ else
+ vty_out(vty, ", Installed");
+ }
+ if (!json)
+ vty_out(vty, "\n");
+ }
+ if (nhe->ifp) {
+ if (json)
+ json_object_int_add(json, "interfaceIndex",
+ nhe->ifp->ifindex);
+ else
+ vty_out(vty, " Interface Index: %d\n",
+ nhe->ifp->ifindex);
}
- if (nhe->ifp)
- vty_out(vty, " Interface Index: %d\n", nhe->ifp->ifindex);
if (!zebra_nhg_depends_is_empty(nhe)) {
- vty_out(vty, " Depends:");
+ if (json)
+ json_depends = json_object_new_array();
+ else
+ vty_out(vty, " Depends:");
frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) {
- vty_out(vty, " (%u)", rb_node_dep->nhe->id);
+ if (json_depends)
+ json_object_array_add(
+ json_depends,
+ json_object_new_int(
+ rb_node_dep->nhe->id));
+ else
+ vty_out(vty, " (%u)", rb_node_dep->nhe->id);
}
- vty_out(vty, "\n");
+ if (!json_depends)
+ vty_out(vty, "\n");
+ else
+ json_object_object_add(json, "depends", json_depends);
}
/* Output nexthops */
- for (ALL_NEXTHOPS(nhe->nhg, nexthop)) {
- if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
- vty_out(vty, " ");
- else
- /* Make recursive nexthops a bit more clear */
- vty_out(vty, " ");
+ if (json)
+ json_nexthop_array = json_object_new_array();
- show_route_nexthop_helper(vty, NULL, nexthop);
+
+ for (ALL_NEXTHOPS(nhe->nhg, nexthop)) {
+ if (json_nexthop_array) {
+ json_nexthops = json_object_new_object();
+ show_nexthop_json_helper(json_nexthops, nexthop, NULL);
+ } else {
+ if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ vty_out(vty, " ");
+ else
+ /* Make recursive nexthops a bit more clear */
+ vty_out(vty, " ");
+ show_route_nexthop_helper(vty, NULL, nexthop);
+ }
if (nhe->backup_info == NULL || nhe->backup_info->nhe == NULL) {
if (CHECK_FLAG(nexthop->flags,
- NEXTHOP_FLAG_HAS_BACKUP))
- vty_out(vty, " [backup %d]",
- nexthop->backup_idx[0]);
+ NEXTHOP_FLAG_HAS_BACKUP)) {
+ if (json)
+ json_object_int_add(
+ json_nexthops, "backup",
+ nexthop->backup_idx[0]);
+ else
+ vty_out(vty, " [backup %d]",
+ nexthop->backup_idx[0]);
+ }
+
+ if (!json)
+ vty_out(vty, "\n");
+ else
+ json_object_array_add(json_nexthop_array,
+ json_nexthops);
- vty_out(vty, "\n");
continue;
}
- /* TODO -- print more useful backup info */
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
- int i;
-
- vty_out(vty, "[backup");
- for (i = 0; i < nexthop->backup_num; i++)
- vty_out(vty, " %d", nexthop->backup_idx[i]);
-
- vty_out(vty, "]");
+ if (!json) {
+ /* TODO -- print more useful backup info */
+ if (CHECK_FLAG(nexthop->flags,
+ NEXTHOP_FLAG_HAS_BACKUP)) {
+ int i;
+
+ vty_out(vty, "[backup");
+ for (i = 0; i < nexthop->backup_num; i++)
+ vty_out(vty, " %d",
+ nexthop->backup_idx[i]);
+ vty_out(vty, "]");
+ }
+ vty_out(vty, "\n");
+ } else {
+ json_object_array_add(json_nexthop_array,
+ json_nexthops);
}
-
- vty_out(vty, "\n");
}
+ if (json)
+ json_object_object_add(json, "nexthops", json_nexthop_array);
+
/* Output backup nexthops (if any) */
backup_nhg = zebra_nhg_get_backup_nhg(nhe);
if (backup_nhg) {
- vty_out(vty, " Backups:\n");
+ if (json)
+ json_backup_nexthop_array = json_object_new_array();
+ else
+ vty_out(vty, " Backups:\n");
for (ALL_NEXTHOPS_PTR(backup_nhg, nexthop)) {
- if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
- vty_out(vty, " ");
- else
- /* Make recursive nexthops a bit more clear */
- vty_out(vty, " ");
+ if (json_backup_nexthop_array) {
+ json_backup_nexthops = json_object_new_object();
+ show_nexthop_json_helper(json_backup_nexthops,
+ nexthop, NULL);
+ json_object_array_add(json_backup_nexthop_array,
+ json_backup_nexthops);
+ } else {
- show_route_nexthop_helper(vty, NULL, nexthop);
- vty_out(vty, "\n");
+ if (!CHECK_FLAG(nexthop->flags,
+ NEXTHOP_FLAG_RECURSIVE))
+ vty_out(vty, " ");
+ else
+ /* Make recursive nexthops a bit more
+ * clear
+ */
+ vty_out(vty, " ");
+ show_route_nexthop_helper(vty, NULL, nexthop);
+ vty_out(vty, "\n");
+ }
}
+
+ if (json)
+ json_object_object_add(json, "backupNexthops",
+ json_backup_nexthop_array);
}
if (!zebra_nhg_dependents_is_empty(nhe)) {
- vty_out(vty, " Dependents:");
+ if (json)
+ json_dependants = json_object_new_array();
+ else
+ vty_out(vty, " Dependents:");
frr_each(nhg_connected_tree, &nhe->nhg_dependents,
rb_node_dep) {
- vty_out(vty, " (%u)", rb_node_dep->nhe->id);
+ if (json)
+ json_object_array_add(
+ json_dependants,
+ json_object_new_int(
+ rb_node_dep->nhe->id));
+ else
+ vty_out(vty, " (%u)", rb_node_dep->nhe->id);
}
- vty_out(vty, "\n");
+ if (json)
+ json_object_object_add(json, "dependents",
+ json_dependants);
+ else
+ vty_out(vty, "\n");
}
- if (nhe->nhg.nhgr.buckets)
- vty_out(vty,
- " Buckets: %u Idle Timer: %u Unbalanced Timer: %u Unbalanced time: %" PRIu64 "\n",
- nhe->nhg.nhgr.buckets, nhe->nhg.nhgr.idle_timer,
- nhe->nhg.nhgr.unbalanced_timer,
- nhe->nhg.nhgr.unbalanced_time);
+ if (nhe->nhg.nhgr.buckets) {
+ if (json) {
+ json_object_int_add(json, "buckets",
+ nhe->nhg.nhgr.buckets);
+ json_object_int_add(json, "idleTimer",
+ nhe->nhg.nhgr.idle_timer);
+ json_object_int_add(json, "unbalancedTimer",
+ nhe->nhg.nhgr.unbalanced_timer);
+ json_object_int_add(json, "unbalancedTime",
+ nhe->nhg.nhgr.unbalanced_time);
+ } else {
+ vty_out(vty,
+ " Buckets: %u Idle Timer: %u Unbalanced Timer: %u Unbalanced time: %" PRIu64
+ "\n",
+ nhe->nhg.nhgr.buckets, nhe->nhg.nhgr.idle_timer,
+ nhe->nhg.nhgr.unbalanced_timer,
+ nhe->nhg.nhgr.unbalanced_time);
+ }
+ }
+
+ if (json_nhe_hdr)
+ json_object_object_addf(json_nhe_hdr, json, "%u", nhe->id);
}
-static int show_nexthop_group_id_cmd_helper(struct vty *vty, uint32_t id)
+static int show_nexthop_group_id_cmd_helper(struct vty *vty, uint32_t id,
+ json_object *json)
{
struct nhg_hash_entry *nhe = NULL;
nhe = zebra_nhg_lookup_id(id);
if (nhe)
- show_nexthop_group_out(vty, nhe);
+ show_nexthop_group_out(vty, nhe, json);
else {
- vty_out(vty, "Nexthop Group ID: %u does not exist\n", id);
+ if (json)
+ vty_json(vty, json);
+ else
+ vty_out(vty, "Nexthop Group ID: %u does not exist\n",
+ id);
return CMD_WARNING;
}
+
+ if (json)
+ vty_json(vty, json);
+
return CMD_SUCCESS;
}
@@ -1608,6 +1747,7 @@ struct nhe_show_context {
vrf_id_t vrf_id;
afi_t afi;
int type;
+ json_object *json;
};
static int nhe_show_walker(struct hash_bucket *bucket, void *arg)
@@ -1626,7 +1766,7 @@ static int nhe_show_walker(struct hash_bucket *bucket, void *arg)
if (ctx->type && nhe->type != ctx->type)
goto done;
- show_nexthop_group_out(ctx->vty, nhe);
+ show_nexthop_group_out(ctx->vty, nhe, ctx->json);
done:
return HASHWALK_CONTINUE;
@@ -1634,7 +1774,7 @@ done:
static void show_nexthop_group_cmd_helper(struct vty *vty,
struct zebra_vrf *zvrf, afi_t afi,
- int type)
+ int type, json_object *json)
{
struct nhe_show_context ctx;
@@ -1642,6 +1782,7 @@ static void show_nexthop_group_cmd_helper(struct vty *vty,
ctx.afi = afi;
ctx.vrf_id = zvrf->vrf->vrf_id;
ctx.type = type;
+ ctx.json = json;
hash_walk(zrouter.nhgs_id, nhe_show_walker, &ctx);
}
@@ -1659,7 +1800,7 @@ static void if_nexthop_group_dump_vty(struct vty *vty, struct interface *ifp)
frr_each(nhg_connected_tree, &zebra_if->nhg_dependents,
rb_node_dep) {
vty_out(vty, " ");
- show_nexthop_group_out(vty, rb_node_dep->nhe);
+ show_nexthop_group_out(vty, rb_node_dep->nhe, NULL);
}
}
}
@@ -1698,29 +1839,36 @@ DEFPY (show_interface_nexthop_group,
return CMD_SUCCESS;
}
-DEFPY (show_nexthop_group,
- show_nexthop_group_cmd,
- "show nexthop-group rib <(0-4294967295)$id|[singleton <ip$v4|ipv6$v6>] [<kernel|zebra|bgp|sharp>$type_str] [vrf <NAME$vrf_name|all$vrf_all>]>",
- SHOW_STR
- "Show Nexthop Groups\n"
- "RIB information\n"
- "Nexthop Group ID\n"
- "Show Singleton Nexthop-Groups\n"
- IP_STR
- IP6_STR
- "Kernel (not installed via the zebra RIB)\n"
- "Zebra (implicitly created by zebra)\n"
- "Border Gateway Protocol (BGP)\n"
- "Super Happy Advanced Routing Protocol (SHARP)\n"
- VRF_FULL_CMD_HELP_STR)
+DEFPY(show_nexthop_group,
+ show_nexthop_group_cmd,
+ "show nexthop-group rib <(0-4294967295)$id|[singleton <ip$v4|ipv6$v6>] [<kernel|zebra|bgp|sharp>$type_str] [vrf <NAME$vrf_name|all$vrf_all>]> [json]",
+ SHOW_STR
+ "Show Nexthop Groups\n"
+ "RIB information\n"
+ "Nexthop Group ID\n"
+ "Show Singleton Nexthop-Groups\n"
+ IP_STR
+ IP6_STR
+ "Kernel (not installed via the zebra RIB)\n"
+ "Zebra (implicitly created by zebra)\n"
+ "Border Gateway Protocol (BGP)\n"
+ "Super Happy Advanced Routing Protocol (SHARP)\n"
+ VRF_FULL_CMD_HELP_STR
+ JSON_STR)
{
struct zebra_vrf *zvrf = NULL;
afi_t afi = AFI_UNSPEC;
int type = 0;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
+ json_object *json_vrf = NULL;
+
+ if (uj)
+ json = json_object_new_object();
if (id)
- return show_nexthop_group_id_cmd_helper(vty, id);
+ return show_nexthop_group_id_cmd_helper(vty, id, json);
if (v4)
afi = AFI_IP;
@@ -1736,8 +1884,11 @@ DEFPY (show_nexthop_group,
}
if (!vrf_is_backend_netns() && (vrf_name || vrf_all)) {
- vty_out(vty,
- "VRF subcommand does not make any sense in l3mdev based vrf's\n");
+ if (uj)
+ vty_json(vty, json);
+ else
+ vty_out(vty,
+ "VRF subcommand does not make any sense in l3mdev based vrf's\n");
return CMD_WARNING;
}
@@ -1750,11 +1901,21 @@ DEFPY (show_nexthop_group,
zvrf = vrf->info;
if (!zvrf)
continue;
+ if (uj)
+ json_vrf = json_object_new_object();
+ else
+ vty_out(vty, "VRF: %s\n", vrf->name);
- vty_out(vty, "VRF: %s\n", vrf->name);
- show_nexthop_group_cmd_helper(vty, zvrf, afi, type);
+ show_nexthop_group_cmd_helper(vty, zvrf, afi, type,
+ json_vrf);
+ if (uj)
+ json_object_object_add(json, vrf->name,
+ json_vrf);
}
+ if (uj)
+ vty_json(vty, json);
+
return CMD_SUCCESS;
}
@@ -1764,12 +1925,18 @@ DEFPY (show_nexthop_group,
zvrf = zebra_vrf_lookup_by_name(VRF_DEFAULT_NAME);
if (!zvrf) {
- vty_out(vty, "%% VRF '%s' specified does not exist\n",
- vrf_name);
+ if (uj)
+ vty_json(vty, json);
+ else
+ vty_out(vty, "%% VRF '%s' specified does not exist\n",
+ vrf_name);
return CMD_WARNING;
}
- show_nexthop_group_cmd_helper(vty, zvrf, afi, type);
+ show_nexthop_group_cmd_helper(vty, zvrf, afi, type, json);
+
+ if (uj)
+ vty_json(vty, json);
return CMD_SUCCESS;
}
@@ -4073,6 +4240,15 @@ DEFUN (show_zebra,
ttable_add_row(table, "ASIC offload|%s",
zrouter.asic_offloaded ? "Used" : "Unavailable");
+ /*
+ * Only display this row when the feature is actually in use.
+ *
+ * Rationale: this path is believed to be effectively dead code,
+ * so it is hidden unless something has enabled it.
+ */
+ if (zrouter.asic_notification_nexthop_control)
+ ttable_add_row(table, "ASIC offload and nexthop control|Used");
+
ttable_add_row(table, "RA|%s",
rtadv_compiled_in() ? "Compiled in" : "Not Compiled in");
ttable_add_row(table, "RFC 5549|%s",