-rw-r--r-- babeld/babel_interface.c | 12
-rw-r--r-- bgpd/bgp_attr.c | 9
-rw-r--r-- bgpd/bgp_damp.c | 155
-rw-r--r-- bgpd/bgp_damp.h | 11
-rw-r--r-- bgpd/bgp_filter.c | 92
-rw-r--r-- bgpd/bgp_fsm.c | 3
-rw-r--r-- bgpd/bgp_open.c | 3
-rw-r--r-- bgpd/bgp_route.c | 19
-rw-r--r-- bgpd/bgp_routemap.c | 37
-rw-r--r-- bgpd/bgp_vty.c | 2
-rw-r--r-- bgpd/bgp_zebra.c | 1
-rw-r--r-- bgpd/bgpd.c | 1
-rw-r--r-- doc/developer/scripting.rst | 534
-rw-r--r-- doc/user/bgp.rst | 28
-rw-r--r-- doc/user/filter.rst | 32
-rw-r--r-- doc/user/ospf6d.rst | 87
-rw-r--r-- doc/user/pbr.rst | 15
-rw-r--r-- doc/user/pim.rst | 6
-rw-r--r-- doc/user/routemap.rst | 4
-rw-r--r-- doc/user/zebra.rst | 17
-rw-r--r-- eigrpd/eigrp_cli.c | 13
-rw-r--r-- isisd/isis_circuit.c | 11
-rw-r--r-- lib/command.c | 32
-rw-r--r-- lib/filter.c | 257
-rw-r--r-- lib/frr_zmq.c | 24
-rw-r--r-- lib/frrlua.c | 30
-rw-r--r-- lib/frrlua.h | 6
-rw-r--r-- lib/frrscript.c | 205
-rw-r--r-- lib/frrscript.h | 154
-rw-r--r-- lib/if.c | 12
-rw-r--r-- lib/if.h | 3
-rw-r--r-- lib/pbr.h | 6
-rw-r--r-- lib/plist.c | 232
-rw-r--r-- lib/routemap.c | 214
-rw-r--r-- lib/vrf.c | 1
-rw-r--r-- nhrpd/nhrp_vty.c | 13
-rw-r--r-- ospf6d/ospf6_abr.c | 14
-rw-r--r-- ospf6d/ospf6_area.c | 2
-rw-r--r-- ospf6d/ospf6_asbr.c | 1219
-rw-r--r-- ospf6d/ospf6_asbr.h | 73
-rw-r--r-- ospf6d/ospf6_flood.c | 37
-rw-r--r-- ospf6d/ospf6_flood.h | 3
-rw-r--r-- ospf6d/ospf6_interface.c | 49
-rw-r--r-- ospf6d/ospf6_interface.h | 1
-rw-r--r-- ospf6d/ospf6_intra.c | 46
-rw-r--r-- ospf6d/ospf6_lsa.c | 35
-rw-r--r-- ospf6d/ospf6_lsa.h | 5
-rw-r--r-- ospf6d/ospf6_lsdb.c | 23
-rw-r--r-- ospf6d/ospf6_nssa.c | 57
-rw-r--r-- ospf6d/ospf6_route.c | 94
-rw-r--r-- ospf6d/ospf6_route.h | 86
-rw-r--r-- ospf6d/ospf6_spf.c | 2
-rw-r--r-- ospf6d/ospf6_top.c | 514
-rw-r--r-- ospf6d/ospf6_top.h | 17
-rw-r--r-- ospf6d/ospf6_zebra.c | 21
-rw-r--r-- ospf6d/ospf6d.h | 4
-rw-r--r-- ospf6d/subdir.am | 1
-rw-r--r-- ospfd/ospf_network.c | 2
-rw-r--r-- ospfd/ospf_vty.c | 76
-rw-r--r-- pbrd/pbr_map.h | 11
-rw-r--r-- pbrd/pbr_vty.c | 104
-rw-r--r-- pbrd/pbr_zebra.c | 5
-rw-r--r-- pimd/pim_cmd.c | 15
-rw-r--r-- pimd/pim_igmp.c | 46
-rw-r--r-- pimd/pim_igmp.h | 3
-rw-r--r-- pimd/pim_msdp.c | 1
-rw-r--r-- ripd/rip_interface.c | 12
-rw-r--r-- ripngd/ripng_interface.c | 13
-rw-r--r-- tests/lib/script1.lua | 55
-rw-r--r-- tests/lib/test_frrscript.c | 75
-rwxr-xr-x tests/topotests/bgp-evpn-overlay-index-gateway/test_bgp_evpn_overlay_index_gateway.py | 3
-rw-r--r-- tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py | 2
-rw-r--r-- tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py | 8
-rw-r--r-- tests/topotests/bgp_community_alias/test_bgp-community-alias.py | 2
-rw-r--r-- tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py | 2
-rw-r--r-- tests/topotests/bgp_conditional_advertisement/test_bgp_conditional_advertisement.py | 2
-rw-r--r-- tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py | 2
-rw-r--r-- tests/topotests/bgp_default_route/test_bgp_default-originate.py | 2
-rw-r--r-- tests/topotests/bgp_default_route_route_map_match/test_bgp_default-originate_route-map_match.py | 2
-rw-r--r-- tests/topotests/bgp_default_route_route_map_match2/test_bgp_default-originate_route-map_match2.py | 2
-rw-r--r-- tests/topotests/bgp_default_route_route_map_match_set/test_bgp_default-originate_route-map_match_set.py | 3
-rw-r--r-- tests/topotests/bgp_default_route_route_map_set/test_bgp_default-originate_route-map_set.py | 2
-rw-r--r-- tests/topotests/bgp_distance_change/test_bgp_distance_change.py | 2
-rw-r--r-- tests/topotests/bgp_dont_capability_negogiate/__init__.py | 0
-rw-r--r-- tests/topotests/bgp_dont_capability_negogiate/r1/bgpd.conf | 6
-rw-r--r-- tests/topotests/bgp_dont_capability_negogiate/r1/zebra.conf | 4
-rw-r--r-- tests/topotests/bgp_dont_capability_negogiate/r2/bgpd.conf | 7
-rw-r--r-- tests/topotests/bgp_dont_capability_negogiate/r2/zebra.conf | 7
-rw-r--r-- tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py | 110
-rw-r--r-- tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py | 2
-rw-r--r-- tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py | 2
-rw-r--r-- tests/topotests/bgp_evpn_mh/test_evpn_mh.py | 24
-rw-r--r-- tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py | 1
-rw-r--r-- tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py | 3
-rw-r--r-- tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py | 3
-rw-r--r-- tests/topotests/bgp_gshut/test_bgp_gshut.py | 2
-rw-r--r-- tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py | 3
-rw-r--r-- tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py | 3
-rw-r--r-- tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py | 3
-rw-r--r-- tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py | 3
-rw-r--r-- tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py | 3
-rwxr-xr-x tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py | 6
-rw-r--r-- tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py | 2
-rw-r--r-- tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py | 2
-rw-r--r-- tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py | 2
-rw-r--r-- tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py | 1
-rw-r--r-- tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py | 4
-rw-r--r-- tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py | 3
-rwxr-xr-x tests/topotests/bgp_peer_type_multipath_relax/test_bgp_peer-type_multipath-relax.py | 2
-rw-r--r-- tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py | 3
-rw-r--r-- tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py | 2
-rwxr-xr-x tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py | 2
-rw-r--r-- tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py | 2
-rw-r--r-- tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py | 3
-rw-r--r-- tests/topotests/bgp_route_map/test_route_map_topo1.py | 3
-rw-r--r-- tests/topotests/bgp_route_map/test_route_map_topo2.py | 3
-rw-r--r-- tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py | 3
-rw-r--r-- tests/topotests/bgp_set_local_preference_add_subtract/test_bgp_set_local-preference_add_subtract.py | 2
-rwxr-xr-x tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py | 2
-rw-r--r-- tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py | 2
-rw-r--r-- tests/topotests/bgp_tcp_mss/test_bgp_tcp_mss.py | 2
-rw-r--r-- tests/topotests/bgp_update_delay/test_bgp_update_delay.py | 2
-rw-r--r-- tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py | 3
-rw-r--r-- tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py | 3
-rw-r--r-- tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py | 2
-rw-r--r-- tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py | 3
-rw-r--r-- tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py | 2
-rw-r--r-- tests/topotests/evpn_pim_1/leaf1/pimd.conf | 1
-rw-r--r-- tests/topotests/evpn_pim_1/leaf2/pimd.conf | 1
-rw-r--r-- tests/topotests/evpn_pim_1/spine/pimd.conf | 1
-rw-r--r-- tests/topotests/evpn_pim_1/test_evpn_pim_topo1.py | 3
-rw-r--r-- tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py | 3
-rw-r--r-- tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py | 3
-rwxr-xr-x tests/topotests/isis_lsp_bits_topo1/test_isis_lsp_bits_topo1.py | 3
-rwxr-xr-x tests/topotests/isis_rlfa_topo1/test_isis_rlfa_topo1.py | 2
-rwxr-xr-x tests/topotests/isis_snmp/test_isis_snmp.py | 2
-rw-r--r-- tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py | 1
-rw-r--r-- tests/topotests/ldp_sync_isis_topo1/test_ldp_sync_isis_topo1.py | 2
-rw-r--r-- tests/topotests/lib/common_config.py | 2
-rw-r--r-- tests/topotests/lib/ospf.py | 865
-rwxr-xr-x tests/topotests/lib/scapy_sendpkt.py | 61
-rw-r--r-- tests/topotests/lib/topojson.py | 18
-rw-r--r-- tests/topotests/msdp_mesh_topo1/r1/pimd.conf | 1
-rw-r--r-- tests/topotests/msdp_mesh_topo1/r2/pimd.conf | 1
-rw-r--r-- tests/topotests/msdp_mesh_topo1/r3/pimd.conf | 1
-rw-r--r-- tests/topotests/msdp_topo1/r1/pimd.conf | 1
-rw-r--r-- tests/topotests/msdp_topo1/r2/pimd.conf | 1
-rw-r--r-- tests/topotests/msdp_topo1/r3/pimd.conf | 1
-rw-r--r-- tests/topotests/msdp_topo1/r4/pimd.conf | 1
-rw-r--r-- tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py | 3
-rw-r--r-- tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py | 3
-rwxr-xr-x tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py | 3
-rwxr-xr-x tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py | 3
-rwxr-xr-x tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py | 3
-rw-r--r-- tests/topotests/nhrp_topo/test_nhrp_topo.py | 4
-rw-r--r-- tests/topotests/ospf6_topo1/test_ospf6_topo1.py | 3
-rwxr-xr-x tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_authentication.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_lan.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_nssa.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py | 3
-rw-r--r-- tests/topotests/ospf_basic_functionality/test_ospf_single_area.py | 1
-rw-r--r-- tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py | 3
-rw-r--r-- tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py | 2
-rw-r--r-- tests/topotests/ospf_tilfa_topo1/test_ospf_tilfa_topo1.py | 2
-rw-r--r-- tests/topotests/ospf_topo2/test_ospf_topo2.py | 2
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_topo1.json | 198
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json | 347
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json | 137
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py | 1928
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py | 523
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py | 875
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py | 231
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py | 5
-rw-r--r-- tests/topotests/pim_acl/r1/pimd.conf | 1
-rw-r--r-- tests/topotests/pim_acl/r11/pimd.conf | 1
-rw-r--r-- tests/topotests/pim_acl/r12/pimd.conf | 1
-rw-r--r-- tests/topotests/pim_acl/r13/pimd.conf | 1
-rw-r--r-- tests/topotests/pim_acl/r14/pimd.conf | 1
-rw-r--r-- tests/topotests/pim_acl/r15/pimd.conf | 1
-rwxr-xr-x tests/topotests/pim_acl/test_pim_acl.py | 2
-rw-r--r-- tests/topotests/pim_basic/r1/pimd.conf | 1
-rw-r--r-- tests/topotests/pim_basic/rp/pimd.conf | 1
-rw-r--r-- tests/topotests/pim_basic_topo2/r2/pimd.conf | 1
-rw-r--r-- tests/topotests/pim_igmp_vrf/r1/pimd.conf | 1
-rw-r--r-- tests/topotests/pim_igmp_vrf/r11/pimd.conf | 1
-rw-r--r-- tests/topotests/pim_igmp_vrf/r12/pimd.conf | 1
-rwxr-xr-x tests/topotests/pim_igmp_vrf/test_pim_vrf.py | 2
-rw-r--r-- tests/topotests/route_scale/test_route_scale.py | 3
-rwxr-xr-x tests/topotests/simple_snmp_test/test_simple_snmp.py | 10
-rwxr-xr-x tests/topotests/srv6_locator/test_srv6_locator.py | 2
-rw-r--r-- tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py | 3
-rw-r--r-- tests/topotests/zebra_netlink/test_zebra_netlink.py | 3
-rw-r--r-- tests/topotests/zebra_opaque/test_zebra_opaque.py | 2
-rw-r--r-- tests/topotests/zebra_rib/test_zebra_rib.py | 8
-rwxr-xr-x tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py | 2
-rwxr-xr-x tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py | 2
-rw-r--r-- vrrpd/vrrp_vty.c | 11
-rw-r--r-- yang/frr-pim.yang | 2
-rw-r--r-- zebra/interface.c | 643
-rw-r--r-- zebra/kernel_netlink.c | 6
-rw-r--r-- zebra/kernel_netlink.h | 2
-rw-r--r-- zebra/redistribute.c | 6
-rw-r--r-- zebra/rule_netlink.c | 34
-rw-r--r-- zebra/zapi_msg.c | 6
-rw-r--r-- zebra/zebra_dplane.c | 18
-rw-r--r-- zebra/zebra_dplane.h | 2
-rw-r--r-- zebra/zebra_evpn_mh.c | 95
-rw-r--r-- zebra/zebra_evpn_mh.h | 3
-rw-r--r-- zebra/zebra_fpm.c | 4
-rw-r--r-- zebra/zebra_mpls.c | 34
-rw-r--r-- zebra/zebra_mpls.h | 6
-rw-r--r-- zebra/zebra_netns_notify.c | 2
-rw-r--r-- zebra/zebra_pbr.c | 9
-rw-r--r-- zebra/zebra_ptm.c | 21
-rw-r--r-- zebra/zebra_ptm.h | 3
-rw-r--r-- zebra/zebra_routemap_nb_config.c | 28
-rw-r--r-- zebra/zebra_vrf.h | 1
-rw-r--r-- zebra/zserv.c | 2
225 files changed, 9889 insertions, 1672 deletions
diff --git a/babeld/babel_interface.c b/babeld/babel_interface.c
index 43ed97cf17..c1e5ffde3c 100644
--- a/babeld/babel_interface.c
+++ b/babeld/babel_interface.c
@@ -59,15 +59,6 @@ static void babel_interface_free (babel_interface_nfo *bi);
static vector babel_enable_if; /* enable interfaces (by cmd). */
-static int interface_config_write(struct vty *vty);
-static struct cmd_node babel_interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = interface_config_write,
-};
-
int
babel_interface_up (ZAPI_CALLBACK_ARGS)
@@ -1257,8 +1248,7 @@ babel_if_init(void)
babel_enable_if = vector_init (1);
/* install interface node and commands */
- install_node(&babel_interface_node);
- if_cmd_init();
+ if_cmd_init(interface_config_write);
install_element(BABEL_NODE, &babel_network_cmd);
install_element(BABEL_NODE, &no_babel_network_cmd);
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index adf408220e..0870748f7e 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -1497,8 +1497,10 @@ static int bgp_attr_aspath(struct bgp_attr_parser_args *args)
* peer with AS4 => will get 4Byte ASnums
* otherwise, will get 16 Bit
*/
- attr->aspath = aspath_parse(peer->curr, length,
- CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV));
+ attr->aspath = aspath_parse(
+ peer->curr, length,
+ CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV)
+ && CHECK_FLAG(peer->cap, PEER_CAP_AS4_ADV));
/* In case of IBGP, length will be zero. */
if (!attr->aspath) {
@@ -3745,7 +3747,8 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer,
struct aspath *aspath;
int send_as4_path = 0;
int send_as4_aggregator = 0;
- bool use32bit = CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV);
+ bool use32bit = CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV)
+ && CHECK_FLAG(peer->cap, PEER_CAP_AS4_ADV);
if (!bgp)
bgp = peer->bgp;
diff --git a/bgpd/bgp_damp.c b/bgpd/bgp_damp.c
index 2a372c0ba4..dd9de5dc99 100644
--- a/bgpd/bgp_damp.c
+++ b/bgpd/bgp_damp.c
@@ -40,72 +40,33 @@
static void bgp_reuselist_add(struct reuselist *list,
struct bgp_damp_info *info)
{
- struct reuselist_node *new_node;
-
assert(info);
- new_node = XCALLOC(MTYPE_BGP_DAMP_REUSELIST, sizeof(*new_node));
- new_node->info = info;
- SLIST_INSERT_HEAD(list, new_node, entry);
+ SLIST_INSERT_HEAD(list, info, entry);
}
static void bgp_reuselist_del(struct reuselist *list,
- struct reuselist_node **node)
+ struct bgp_damp_info *info)
{
- if ((*node) == NULL)
- return;
- assert(list && node && *node);
- SLIST_REMOVE(list, (*node), reuselist_node, entry);
- XFREE(MTYPE_BGP_DAMP_REUSELIST, (*node));
- *node = NULL;
+ assert(info);
+ SLIST_REMOVE(list, info, bgp_damp_info, entry);
}
static void bgp_reuselist_switch(struct reuselist *source,
- struct reuselist_node *node,
+ struct bgp_damp_info *info,
struct reuselist *target)
{
- assert(source && target && node);
- SLIST_REMOVE(source, node, reuselist_node, entry);
- SLIST_INSERT_HEAD(target, node, entry);
-}
-
-static void bgp_reuselist_free(struct reuselist *list)
-{
- struct reuselist_node *rn;
-
- assert(list);
- while ((rn = SLIST_FIRST(list)) != NULL)
- bgp_reuselist_del(list, &rn);
-}
-
-static struct reuselist_node *bgp_reuselist_find(struct reuselist *list,
- struct bgp_damp_info *info)
-{
- struct reuselist_node *rn;
-
- assert(list && info);
- SLIST_FOREACH (rn, list, entry) {
- if (rn->info == info)
- return rn;
- }
- return NULL;
+ assert(source && target && info);
+ SLIST_REMOVE(source, info, bgp_damp_info, entry);
+ SLIST_INSERT_HEAD(target, info, entry);
}
static void bgp_damp_info_unclaim(struct bgp_damp_info *bdi)
{
- struct reuselist_node *node;
-
assert(bdi && bdi->config);
- if (bdi->index == BGP_DAMP_NO_REUSE_LIST_INDEX) {
- node = bgp_reuselist_find(&bdi->config->no_reuse_list, bdi);
- if (node)
- bgp_reuselist_del(&bdi->config->no_reuse_list, &node);
- } else {
- node = bgp_reuselist_find(&bdi->config->reuse_list[bdi->index],
- bdi);
- if (node)
- bgp_reuselist_del(&bdi->config->reuse_list[bdi->index],
- &node);
- }
+ if (bdi->index == BGP_DAMP_NO_REUSE_LIST_INDEX)
+ bgp_reuselist_del(&bdi->config->no_reuse_list, bdi);
+ else
+ bgp_reuselist_del(&bdi->config->reuse_list[bdi->index], bdi);
bdi->config = NULL;
}
@@ -174,16 +135,9 @@ static void bgp_reuse_list_add(struct bgp_damp_info *bdi,
}
/* Delete BGP dampening information from reuse list. */
-static void bgp_reuse_list_delete(struct bgp_damp_info *bdi,
- struct bgp_damp_config *bdc)
+static void bgp_reuse_list_delete(struct bgp_damp_info *bdi)
{
- struct reuselist *list;
- struct reuselist_node *rn;
-
- list = &bdc->reuse_list[bdi->index];
- rn = bgp_reuselist_find(list, bdi);
bgp_damp_info_unclaim(bdi);
- bgp_reuselist_del(list, &rn);
}
static void bgp_no_reuse_list_add(struct bgp_damp_info *bdi,
@@ -194,19 +148,9 @@ static void bgp_no_reuse_list_add(struct bgp_damp_info *bdi,
bgp_reuselist_add(&bdc->no_reuse_list, bdi);
}
-static void bgp_no_reuse_list_delete(struct bgp_damp_info *bdi,
- struct bgp_damp_config *bdc)
+static void bgp_no_reuse_list_delete(struct bgp_damp_info *bdi)
{
- struct reuselist_node *rn;
-
- assert(bdc && bdi);
- if (bdi->config == NULL) {
- bgp_damp_info_unclaim(bdi);
- return;
- }
- bdi->config = NULL;
- rn = bgp_reuselist_find(&bdc->no_reuse_list, bdi);
- bgp_reuselist_del(&bdc->no_reuse_list, &rn);
+ bgp_damp_info_unclaim(bdi);
}
/* Return decayed penalty value. */
@@ -232,7 +176,6 @@ static int bgp_reuse_timer(struct thread *t)
struct bgp_damp_config *bdc = THREAD_ARG(t);
struct bgp_damp_info *bdi;
struct reuselist plist;
- struct reuselist_node *node;
struct bgp *bgp;
time_t t_now, t_diff;
@@ -253,8 +196,7 @@ static int bgp_reuse_timer(struct thread *t)
assert(bdc->reuse_offset < bdc->reuse_list_size);
/* 3. if ( the saved list head pointer is non-empty ) */
- while ((node = SLIST_FIRST(&plist)) != NULL) {
- bdi = node->info;
+ while ((bdi = SLIST_FIRST(&plist)) != NULL) {
bgp = bdi->path->peer->bgp;
/* Set t-diff = t-now - t-updated. */
@@ -285,20 +227,18 @@ static int bgp_reuse_timer(struct thread *t)
}
if (bdi->penalty <= bdc->reuse_limit / 2.0) {
- bgp_damp_info_free(&bdi, bdc, 1, bdi->afi,
- bdi->safi);
- bgp_reuselist_del(&plist, &node);
+ bgp_reuselist_del(&plist, bdi);
+ bgp_damp_info_free(bdi, 1);
} else {
- node->info->index =
- BGP_DAMP_NO_REUSE_LIST_INDEX;
- bgp_reuselist_switch(&plist, node,
+ bdi->index = BGP_DAMP_NO_REUSE_LIST_INDEX;
+ bgp_reuselist_switch(&plist, bdi,
&bdc->no_reuse_list);
}
} else {
/* Re-insert into another list (See RFC2439 Section
* 4.8.6). */
bdi->index = bgp_reuse_index(bdi->penalty, bdc);
- bgp_reuselist_switch(&plist, node,
+ bgp_reuselist_switch(&plist, bdi,
&bdc->reuse_list[bdi->index]);
}
}
@@ -349,7 +289,14 @@ int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest,
(bgp_path_info_extra_get(path))->damp_info = bdi;
bgp_no_reuse_list_add(bdi, bdc);
} else {
- bgp_damp_info_claim(bdi, bdc);
+ if (bdi->config != bdc) {
+ bgp_damp_info_claim(bdi, bdc);
+ if (bdi->index == BGP_DAMP_NO_REUSE_LIST_INDEX)
+ bgp_reuselist_add(&bdc->no_reuse_list, bdi);
+ else
+ bgp_reuselist_add(&bdc->reuse_list[bdi->index],
+ bdi);
+ }
last_penalty = bdi->penalty;
/* 1. Set t-diff = t-now - t-updated. */
@@ -376,7 +323,7 @@ int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest,
if (CHECK_FLAG(bdi->path->flags, BGP_PATH_DAMPED)) {
/* If decay rate isn't equal to 0, reinsert brn. */
if (bdi->penalty != last_penalty) {
- bgp_reuse_list_delete(bdi, bdc);
+ bgp_reuse_list_delete(bdi);
bgp_reuse_list_add(bdi, bdc);
}
return BGP_DAMP_SUPPRESSED;
@@ -387,7 +334,7 @@ int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest,
if (bdi->penalty >= bdc->suppress_value) {
bgp_path_info_set_flag(dest, path, BGP_PATH_DAMPED);
bdi->suppress_time = t_now;
- bgp_no_reuse_list_delete(bdi, bdc);
+ bgp_no_reuse_list_delete(bdi);
bgp_reuse_list_add(bdi, bdc);
}
return BGP_DAMP_USED;
@@ -420,7 +367,7 @@ int bgp_damp_update(struct bgp_path_info *path, struct bgp_dest *dest,
else if (CHECK_FLAG(bdi->path->flags, BGP_PATH_DAMPED)
&& (bdi->penalty < bdc->reuse_limit)) {
bgp_path_info_unset_flag(dest, path, BGP_PATH_DAMPED);
- bgp_reuse_list_delete(bdi, bdc);
+ bgp_reuse_list_delete(bdi);
bgp_no_reuse_list_add(bdi, bdc);
bdi->suppress_time = 0;
status = BGP_DAMP_USED;
@@ -431,27 +378,26 @@ int bgp_damp_update(struct bgp_path_info *path, struct bgp_dest *dest,
bdi->t_updated = t_now;
else {
bgp_damp_info_unclaim(bdi);
- bgp_damp_info_free(&bdi, bdc, 0, afi, safi);
+ bgp_damp_info_free(bdi, 0);
}
return status;
}
-void bgp_damp_info_free(struct bgp_damp_info **bdi, struct bgp_damp_config *bdc,
- int withdraw, afi_t afi, safi_t safi)
+void bgp_damp_info_free(struct bgp_damp_info *bdi, int withdraw)
{
- assert(bdc && bdi && *bdi);
+ assert(bdi);
- if ((*bdi)->path == NULL) {
- XFREE(MTYPE_BGP_DAMP_INFO, (*bdi));
+ if (bdi->path == NULL) {
+ XFREE(MTYPE_BGP_DAMP_INFO, bdi);
return;
}
- (*bdi)->path->extra->damp_info = NULL;
- bgp_path_info_unset_flag((*bdi)->dest, (*bdi)->path,
+ bdi->path->extra->damp_info = NULL;
+ bgp_path_info_unset_flag(bdi->dest, bdi->path,
BGP_PATH_HISTORY | BGP_PATH_DAMPED);
- if ((*bdi)->lastrecord == BGP_RECORD_WITHDRAW && withdraw)
- bgp_path_info_delete((*bdi)->dest, (*bdi)->path);
+ if (bdi->lastrecord == BGP_RECORD_WITHDRAW && withdraw)
+ bgp_path_info_delete(bdi->dest, bdi->path);
}
static void bgp_damp_parameter_set(int hlife, int reuse, int sup, int maxsup,
@@ -548,15 +494,13 @@ void bgp_damp_info_clean(struct bgp *bgp, struct bgp_damp_config *bdc,
afi_t afi, safi_t safi)
{
struct bgp_damp_info *bdi;
- struct reuselist_node *rn;
struct reuselist *list;
unsigned int i;
bdc->reuse_offset = 0;
for (i = 0; i < bdc->reuse_list_size; ++i) {
list = &bdc->reuse_list[i];
- while ((rn = SLIST_FIRST(list)) != NULL) {
- bdi = rn->info;
+ while ((bdi = SLIST_FIRST(list)) != NULL) {
if (bdi->lastrecord == BGP_RECORD_UPDATE) {
bgp_aggregate_increment(bgp, &bdi->dest->p,
bdi->path, bdi->afi,
@@ -564,15 +508,14 @@ void bgp_damp_info_clean(struct bgp *bgp, struct bgp_damp_config *bdc,
bgp_process(bgp, bdi->dest, bdi->afi,
bdi->safi);
}
- bgp_reuselist_del(list, &rn);
- bgp_damp_info_free(&bdi, bdc, 1, afi, safi);
+ bgp_reuselist_del(list, bdi);
+ bgp_damp_info_free(bdi, 1);
}
}
- while ((rn = SLIST_FIRST(&bdc->no_reuse_list)) != NULL) {
- bdi = rn->info;
- bgp_reuselist_del(&bdc->no_reuse_list, &rn);
- bgp_damp_info_free(&bdi, bdc, 1, afi, safi);
+ while ((bdi = SLIST_FIRST(&bdc->no_reuse_list)) != NULL) {
+ bgp_reuselist_del(&bdc->no_reuse_list, bdi);
+ bgp_damp_info_free(bdi, 1);
}
/* Free decay array */
@@ -583,10 +526,6 @@ void bgp_damp_info_clean(struct bgp *bgp, struct bgp_damp_config *bdc,
XFREE(MTYPE_BGP_DAMP_ARRAY, bdc->reuse_index);
bdc->reuse_index_size = 0;
- /* Free reuse list array. */
- for (i = 0; i < bdc->reuse_list_size; ++i)
- bgp_reuselist_free(&bdc->reuse_list[i]);
-
XFREE(MTYPE_BGP_DAMP_ARRAY, bdc->reuse_list);
bdc->reuse_list_size = 0;
diff --git a/bgpd/bgp_damp.h b/bgpd/bgp_damp.h
index c03a0cc5c9..fc03b97c13 100644
--- a/bgpd/bgp_damp.h
+++ b/bgpd/bgp_damp.h
@@ -61,14 +61,11 @@ struct bgp_damp_info {
afi_t afi;
safi_t safi;
-};
-struct reuselist_node {
- SLIST_ENTRY(reuselist_node) entry;
- struct bgp_damp_info *info;
+ SLIST_ENTRY(bgp_damp_info) entry;
};
-SLIST_HEAD(reuselist, reuselist_node);
+SLIST_HEAD(reuselist, bgp_damp_info);
/* Specified parameter set configuration. */
struct bgp_damp_config {
@@ -148,9 +145,7 @@ extern int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest,
afi_t afi, safi_t safi, int attr_change);
extern int bgp_damp_update(struct bgp_path_info *path, struct bgp_dest *dest,
afi_t afi, safi_t saff);
-extern void bgp_damp_info_free(struct bgp_damp_info **path,
- struct bgp_damp_config *bdc, int withdraw,
- afi_t afi, safi_t safi);
+extern void bgp_damp_info_free(struct bgp_damp_info *bdi, int withdraw);
extern void bgp_damp_info_clean(struct bgp *bgp, struct bgp_damp_config *bdc,
afi_t afi, safi_t safi);
extern void bgp_damp_config_clean(struct bgp_damp_config *bdc);
diff --git a/bgpd/bgp_filter.c b/bgpd/bgp_filter.c
index 8d6691945f..641cc7605f 100644
--- a/bgpd/bgp_filter.c
+++ b/bgpd/bgp_filter.c
@@ -612,78 +612,118 @@ DEFUN (no_as_path_all,
return CMD_SUCCESS;
}
-static void as_list_show(struct vty *vty, struct as_list *aslist)
+static void as_list_show(struct vty *vty, struct as_list *aslist,
+ json_object *json)
{
struct as_filter *asfilter;
+ json_object *json_aslist = NULL;
- vty_out(vty, "AS path access list %s\n", aslist->name);
+ if (json) {
+ json_aslist = json_object_new_array();
+ json_object_object_add(json, aslist->name, json_aslist);
+ } else
+ vty_out(vty, "AS path access list %s\n", aslist->name);
for (asfilter = aslist->head; asfilter; asfilter = asfilter->next) {
- vty_out(vty, " %s %s\n", filter_type_str(asfilter->type),
- asfilter->reg_str);
+ if (json) {
+ json_object *json_asfilter = json_object_new_object();
+
+ json_object_int_add(json_asfilter, "sequenceNumber",
+ asfilter->seq);
+ json_object_string_add(json_asfilter, "type",
+ filter_type_str(asfilter->type));
+ json_object_string_add(json_asfilter, "regExp",
+ asfilter->reg_str);
+
+ json_object_array_add(json_aslist, json_asfilter);
+ } else
+ vty_out(vty, " %s %s\n",
+ filter_type_str(asfilter->type),
+ asfilter->reg_str);
}
}
-static void as_list_show_all(struct vty *vty)
+static void as_list_show_all(struct vty *vty, json_object *json)
{
struct as_list *aslist;
- struct as_filter *asfilter;
-
- for (aslist = as_list_master.str.head; aslist; aslist = aslist->next) {
- vty_out(vty, "AS path access list %s\n", aslist->name);
- for (asfilter = aslist->head; asfilter;
- asfilter = asfilter->next) {
- vty_out(vty, " %s %s\n",
- filter_type_str(asfilter->type),
- asfilter->reg_str);
- }
- }
+ for (aslist = as_list_master.str.head; aslist; aslist = aslist->next)
+ as_list_show(vty, aslist, json);
}
DEFUN (show_as_path_access_list,
show_bgp_as_path_access_list_cmd,
- "show bgp as-path-access-list WORD",
+ "show bgp as-path-access-list WORD [json]",
SHOW_STR
BGP_STR
"List AS path access lists\n"
- "AS path access list name\n")
+ "AS path access list name\n"
+ JSON_STR)
{
int idx_word = 3;
struct as_list *aslist;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
+
+ if (uj)
+ json = json_object_new_object();
aslist = as_list_lookup(argv[idx_word]->arg);
if (aslist)
- as_list_show(vty, aslist);
+ as_list_show(vty, aslist, json);
+
+ if (uj) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
return CMD_SUCCESS;
}
ALIAS (show_as_path_access_list,
show_ip_as_path_access_list_cmd,
- "show ip as-path-access-list WORD",
+ "show ip as-path-access-list WORD [json]",
SHOW_STR
IP_STR
"List AS path access lists\n"
- "AS path access list name\n")
+ "AS path access list name\n"
+ JSON_STR)
DEFUN (show_as_path_access_list_all,
show_bgp_as_path_access_list_all_cmd,
- "show bgp as-path-access-list",
+ "show bgp as-path-access-list [json]",
SHOW_STR
BGP_STR
- "List AS path access lists\n")
+ "List AS path access lists\n"
+ JSON_STR)
{
- as_list_show_all(vty);
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
+
+ if (uj)
+ json = json_object_new_object();
+
+ as_list_show_all(vty, json);
+
+ if (uj) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+
return CMD_SUCCESS;
}
ALIAS (show_as_path_access_list_all,
show_ip_as_path_access_list_all_cmd,
- "show ip as-path-access-list",
+ "show ip as-path-access-list [json]",
SHOW_STR
IP_STR
- "List AS path access lists\n")
+ "List AS path access lists\n"
+ JSON_STR)
static int config_write_as_list(struct vty *vty)
{
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 8d996e16eb..b62a42a4f6 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -1378,6 +1378,9 @@ int bgp_stop(struct peer *peer)
peer->fd = -1;
}
+ /* Reset capabilities. */
+ peer->cap = 0;
+
FOREACH_AFI_SAFI (afi, safi) {
/* Reset all negotiated variables */
peer->afc_nego[afi][safi] = 0;
diff --git a/bgpd/bgp_open.c b/bgpd/bgp_open.c
index 94d905127d..113017559e 100644
--- a/bgpd/bgp_open.c
+++ b/bgpd/bgp_open.c
@@ -1216,7 +1216,8 @@ int bgp_open_option_parse(struct peer *peer, uint8_t length, int *mp_capability)
/* Extended Message Support */
peer->max_packet_size =
- CHECK_FLAG(peer->cap, PEER_CAP_EXTENDED_MESSAGE_RCV)
+ (CHECK_FLAG(peer->cap, PEER_CAP_EXTENDED_MESSAGE_RCV)
+ && CHECK_FLAG(peer->cap, PEER_CAP_EXTENDED_MESSAGE_ADV))
? BGP_EXTENDED_MESSAGE_MAX_PACKET_SIZE
: BGP_STANDARD_MESSAGE_MAX_PACKET_SIZE;
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index d5bb53ad8d..4299ea3525 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -222,7 +222,10 @@ void bgp_path_info_extra_free(struct bgp_path_info_extra **extra)
e = *extra;
+ if (e->damp_info)
+ bgp_damp_info_free(e->damp_info, 0);
e->damp_info = NULL;
+
if (e->parent) {
struct bgp_path_info *bpi = (struct bgp_path_info *)e->parent;
@@ -10862,8 +10865,7 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi,
const char *com2alias =
bgp_community2alias(
communities[i]);
- if (strncmp(alias, com2alias,
- strlen(com2alias))
+ if (strcmp(alias, com2alias)
== 0) {
found = true;
break;
@@ -10878,8 +10880,7 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi,
const char *com2alias =
bgp_community2alias(
communities[i]);
- if (strncmp(alias, com2alias,
- strlen(com2alias))
+ if (strcmp(alias, com2alias)
== 0) {
found = true;
break;
@@ -14760,9 +14761,8 @@ static int bgp_clear_damp_route(struct vty *vty, const char *view_name,
if (pi->extra && pi->extra->damp_info) {
pi_temp = pi->next;
bgp_damp_info_free(
- &pi->extra->damp_info,
- &bgp->damp[afi][safi],
- 1, afi, safi);
+ pi->extra->damp_info,
+ 1);
pi = pi_temp;
} else
pi = pi->next;
@@ -14798,9 +14798,8 @@ static int bgp_clear_damp_route(struct vty *vty, const char *view_name,
bdi->safi);
}
bgp_damp_info_free(
- &pi->extra->damp_info,
- &bgp->damp[afi][safi],
- 1, afi, safi);
+ pi->extra->damp_info,
+ 1);
pi = pi_temp;
} else
pi = pi->next;
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index 5b1044754e..09dd71c020 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -366,38 +366,37 @@ static enum route_map_cmd_result_t
route_match_script(void *rule, const struct prefix *prefix, void *object)
{
const char *scriptname = rule;
+ const char *routematch_function = "route_match";
struct bgp_path_info *path = (struct bgp_path_info *)object;
- struct frrscript *fs = frrscript_load(scriptname, NULL);
+ struct frrscript *fs = frrscript_new(scriptname);
- if (!fs) {
- zlog_err("Issue loading script rule; defaulting to no match");
+ if (frrscript_load(fs, routematch_function, NULL)) {
+ zlog_err(
+ "Issue loading script or function; defaulting to no match");
return RMAP_NOMATCH;
}
- enum frrlua_rm_status lrm_status = LUA_RM_FAILURE,
- status_nomatch = LUA_RM_NOMATCH,
- status_match = LUA_RM_MATCH,
- status_match_and_change = LUA_RM_MATCH_AND_CHANGE;
-
struct attr newattr = *path->attr;
int result = frrscript_call(
- fs, ("RM_FAILURE", (long long *)&lrm_status),
- ("RM_NOMATCH", (long long *)&status_nomatch),
- ("RM_MATCH", (long long *)&status_match),
- ("RM_MATCH_AND_CHANGE", (long long *)&status_match_and_change),
- ("action", (long long *)&lrm_status), ("prefix", prefix),
- ("attributes", &newattr), ("peer", path->peer));
+ fs, routematch_function, ("prefix", prefix),
+ ("attributes", &newattr), ("peer", path->peer),
+ ("RM_FAILURE", LUA_RM_FAILURE), ("RM_NOMATCH", LUA_RM_NOMATCH),
+ ("RM_MATCH", LUA_RM_MATCH),
+ ("RM_MATCH_AND_CHANGE", LUA_RM_MATCH_AND_CHANGE));
if (result) {
zlog_err("Issue running script rule; defaulting to no match");
return RMAP_NOMATCH;
}
+ long long *action = frrscript_get_result(fs, routematch_function,
+ "action", lua_tointegerp);
+
int status = RMAP_NOMATCH;
- switch (lrm_status) {
+ switch (*action) {
case LUA_RM_FAILURE:
zlog_err(
"Executing route-map match script '%s' failed; defaulting to no match",
@@ -428,7 +427,9 @@ route_match_script(void *rule, const struct prefix *prefix, void *object)
break;
}
- frrscript_unload(fs);
+ XFREE(MTYPE_SCRIPT_RES, action);
+
+ frrscript_delete(fs);
return status;
}
@@ -1195,7 +1196,7 @@ route_match_alias(void *rule, const struct prefix *prefix, void *object)
for (int i = 0; i < num; i++) {
const char *com2alias =
bgp_community2alias(communities[i]);
- if (strncmp(alias, com2alias, strlen(com2alias)) == 0)
+ if (strcmp(alias, com2alias) == 0)
return RMAP_MATCH;
}
}
@@ -1206,7 +1207,7 @@ route_match_alias(void *rule, const struct prefix *prefix, void *object)
for (int i = 0; i < num; i++) {
const char *com2alias =
bgp_community2alias(communities[i]);
- if (strncmp(alias, com2alias, strlen(com2alias)) == 0)
+ if (strcmp(alias, com2alias) == 0)
return RMAP_MATCH;
}
}
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index dfdfab79a5..2b19f76c7b 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -18290,7 +18290,7 @@ static struct cmd_node bgp_ipv4_labeled_unicast_node = {
};
static struct cmd_node bgp_ipv6_unicast_node = {
- .name = "bgp ipv6",
+ .name = "bgp ipv6 unicast",
.node = BGP_IPV6_NODE,
.parent_node = BGP_NODE,
.prompt = "%s(config-router-af)# ",
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 2c7c087855..24652ee93a 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -2576,6 +2576,7 @@ static void bgp_encode_pbr_rule_action(struct stream *s,
stream_putl(s, pbr->unique);
else
stream_putl(s, pbra->unique);
+ stream_putc(s, 0); /* ip protocol being used */
if (pbr && pbr->flags & MATCH_IP_SRC_SET)
memcpy(&pfx, &(pbr->src), sizeof(struct prefix));
else {
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 6c9ec0ebaa..6f9be93757 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -1449,7 +1449,6 @@ void peer_xfer_config(struct peer *peer_dst, struct peer *peer_src)
/* peer flags apply */
peer_dst->flags = peer_src->flags;
- peer_dst->cap = peer_src->cap;
peer_dst->peer_gr_present_state = peer_src->peer_gr_present_state;
peer_dst->peer_gr_new_status_flag = peer_src->peer_gr_new_status_flag;
diff --git a/doc/developer/scripting.rst b/doc/developer/scripting.rst
index 1757d41feb..d543ed3560 100644
--- a/doc/developer/scripting.rst
+++ b/doc/developer/scripting.rst
@@ -14,8 +14,8 @@ is implemented using the standard Lua C bindings. The supported version of Lua
is 5.3.
C objects may be passed into Lua and Lua objects may be retrieved by C code via
-a marshalling system. In this way, arbitrary data from FRR may be passed to
-scripts. It is possible to pass C functions as well.
+an encoding/decoding system. In this way, arbitrary data from FRR may be passed to
+scripts.
The Lua environment is isolated from the C environment; user scripts cannot
access FRR's address space unless explicitly allowed by FRR.
@@ -53,150 +53,290 @@ Reasons against supporting multiple scripting languages:
with which a given script can be shared
General
-^^^^^^^
-
-FRR's concept of a script is somewhat abstracted away from the fact that it is
-Lua underneath. A script in has two things:
-
-- name
-- state
+-------
-In code:
+FRR's scripting functionality is provided in the form of Lua functions in Lua
+scripts (``.lua`` files). One Lua script may contain many Lua functions. These
+are respectively encapsulated in the following structures:
.. code-block:: c
struct frrscript {
- /* Script name */
- char *name;
+ /* Lua file name */
+ char *name;
- /* Lua state */
- struct lua_State *L;
+ /* hash of lua_function_states */
+ struct hash *lua_function_hash;
};
+ struct lua_function_state {
+ /* Lua function name */
+ char *name;
-``name`` is simply a string. Everything else is in ``state``, which is itself a
-Lua library object (``lua_State``). This is an opaque struct that is
-manipulated using ``lua_*`` functions. The basic ones are imported from
-``lua.h`` and the rest are implemented within FRR to fill our use cases. The
-thing to remember is that all operations beyond the initial loading the script
-take place on this opaque state object.
+ lua_State *L;
+ };
-There are four basic actions that can be done on a script:
-- load
-- execute
-- query state
-- unload
+`struct frrscript`: Since all Lua functions are contained within scripts, the
+following APIs manipulate this structure. ``name`` contains the Lua script
+name, and ``lua_function_hash`` maps Lua function names to their function states.
-They are typically done in this order.
+`struct lua_function_state` is an internal structure, but it essentially contains
+the name of the Lua function and its state (a stack), which is run using Lua
+library functions.
+In general, to run a Lua function, these steps must take place:
-Loading
-^^^^^^^
+- Initialization
+- Load
+- Call
+- Delete
+
+Initialization
+^^^^^^^^^^^^^^
+
+The ``frrscript`` object encapsulates the Lua function state(s) from
+one Lua script file. To create, use ``frrscript_new()`` which takes the
+name of the Lua script.
+The string ".lua" is appended to the script name, and the resultant filename
+will be used to look for the script when we want to load a Lua function from it.
+
+For example, to create ``frrscript`` for ``/etc/frr/scripts/bingus.lua``:
+
+.. code-block:: c
+
+ struct frrscript *fs = frrscript_new("bingus");
-A snippet of Lua code is referred to as a "chunk". These are simply text. FRR
-presently assumes chunks are located in individual files specific to one task.
-These files are stored in the scripts directory and must end in ``.lua``.
-A script object is created by loading a script. This is done with
-``frrscript_load()``. This function takes the name of the script and an
-optional callback function. The string ".lua" is appended to the script name,
-and the resultant filename is looked for in the scripts directory.
+The script is *not* read at this stage.
+This function cannot be used to test for a script's presence.
-For example, to load ``/etc/frr/scripts/bingus.lua``:
+Load
+^^^^
+
+The function to be called must first be loaded. Use ``frrscript_load()``
+which takes a ``frrscript`` object, the name of the Lua function
+and a callback function.
+
+For example, to load the Lua function ``on_foo``
+in ``/etc/frr/scripts/bingus.lua``:
.. code-block:: c
- struct frrscript *fs = frrscript_load("bingus", NULL);
+ int ret = frrscript_load(fs, "on_foo", NULL);
+
-During loading the script is validated for syntax and its initial environment
-is setup. By default this does not include the Lua standard library; there are
+This function returns 0 if and only if the Lua function was successfully loaded.
+A non-zero return could indicate either a missing Lua script, a missing
+Lua function, or an error when loading the function.
+
+During loading the script is validated for syntax and its environment
+is set up. By default this does not include the Lua standard library; there are
security issues to consider, though for practical purposes untrusted users
-should not be able to write the scripts directory anyway. If desired the Lua
-standard library may be added to the script environment using
-``luaL_openlibs(fs->L)`` after loading the script. Further information on
-setting up the script environment is in the Lua manual.
+should not be able to write the scripts directory anyway.
+
+Call
+^^^^
+After loading, Lua functions may be called.
-Executing
-^^^^^^^^^
+Input
+"""""
-After loading, scripts may be executed. A script may take input in the form of
-variable bindings set in its environment prior to being run, and may provide
-results by setting the value of variables. Arbitrary C values may be
-transferred into the script environment, including functions.
+Inputs to the Lua script should be given by providing a list of parenthesized
+pairs,
+where the first and second fields identify the name of the variable and the
+value it is bound to, respectively.
+The types of the values must have registered encoders (more below); the compiler
+will warn you otherwise.
-A typical execution call looks something like this:
+These variables are first encoded in-order, then provided as arguments
+to the Lua function. In the example, note that ``c`` is passed in as a value
+while ``a`` and ``b`` are passed in as pointers.
.. code-block:: c
- struct frrscript *fs = frrscript_load(...);
+ int a = 100, b = 200, c = 300;
+ frrscript_call(fs, "on_foo", ("a", &a), ("b", &b), ("c", c));
+
- int status_ok = 0, status_fail = 1;
- struct prefix p = ...;
+.. code-block:: lua
- int result = frrscript_call(fs,
- ("STATUS_FAIL", &status_fail),
- ("STATUS_OK", &status_ok),
- ("prefix", &p));
+ function on_foo(a, b, c)
+ -- a is 100, b is 200, c is 300
+ ...
-To execute a loaded script, we need to define the inputs. These inputs are
-passed in by binding values to variable names that will be accessible within the
-Lua environment. Basically, all communication with the script takes place via
-global variables within the script, and to provide inputs we predefine globals
-before the script runs. This is done by passing ``frrscript_call()`` a list of
-parenthesized pairs, where the first and second fields identify, respectively,
-the name of the global variable within the script environment and the value it
-is bound to.
+Output
+""""""
-The script is then executed and returns a general status code. In the success
-case this will be 0, otherwise it will be nonzero. The script itself does not
-determine this code, it is provided by the Lua interpreter.
+.. code-block:: c
+ int a = 100, b = 200, c = 300;
+ frrscript_call(fs, "on_foo", ("a", &a), ("b", &b), ("c", c));
+ // a is 500, b is 200, c is 300
-Querying State
-^^^^^^^^^^^^^^
+ int* d = frrscript_get_result(fs, "on_foo", "d", lua_tointegerp);
+ // d is 800
-.. todo::
- This section will be updated once ``frrscript_get_result`` has been
- updated to work with the new ``frrscript_call`` and the rest of the new API.
+.. code-block:: lua
+
+ function on_foo(a, b, c)
+ b = 600
+ return { ["a"] = 500, ["c"] = 700, ["d"] = 800 }
+ end
-Unloading
-^^^^^^^^^
+**Lua functions being called must return a single table of string names to
+values.**
+(Lua functions should return an empty table if there is no output.)
+The keys of the table are mapped back to names of variables in C. Note that
+the values in the table can also be tables. Since tables are Lua's primary
+data structure, this design lets us return any Lua value.
-To destroy a script and its associated state:
+After the Lua function returns, the names of variables to ``frrscript_call()``
+are matched against keys of the returned table, and then decoded. The types
+being decoded must have registered decoders (more below); the compiler will
+warn you otherwise.
+
+In the example, since ``a`` was in the returned table and ``b`` was not,
+``a`` was decoded and its value modified, while ``b`` was not decoded.
+``c`` was decoded as well, but its decoder is a noop.
+What modifications happen to a variable depends on whether its name was
+in the returned table and on the decoder's implementation.
+
+.. warning::
+ Always keep in mind that non const-qualified pointers in
+ ``frrscript_call()`` may be modified - this may be a source of bugs.
+ On the other hand, const-qualified pointers and other values cannot
+ be modified.
+
+
+.. tip::
+ You can make a copy of a data structure and pass that in instead,
+ so that modifications only happen to that copy.
+
+``frrscript_call()`` returns 0 if and only if the Lua function was successfully
+called. A non-zero return could indicate either a missing Lua script, a missing
+Lua function, or an error from the Lua interpreter.
+
+In the above example, ``d`` was not an input to ``frrscript_call()``, so its
+value must be explicitly retrieved with ``frrscript_get_result``.
+
+``frrscript_get_result()`` takes a
+decoder and string name which is used as a key to search the returned table.
+Returns the pointer to the decoded value, or NULL if it was not found.
+In the example, ``d`` is a "new" value in C space,
+so memory allocation might take place. Hence the caller is
+responsible for memory deallocation.
+
+
+Delete
+^^^^^^
+
+To delete a script and all the Lua states associated with it:
.. code-block:: c
- frrscript_unload(fs);
+ frrscript_delete(fs);
+
+
+A complete example
+""""""""""""""""""
+
+So, a typical execution call, with error checking, looks something like this:
+
+.. code-block:: c
+
+ struct frrscript *fs = frrscript_new("my_script"); // name *without* .lua
+
+ int ret = frrscript_load(fs, "on_foo", NULL);
+ if (ret != 0)
+ goto DONE; // Lua script or function might have not been found
+
+ int a = 100, b = 200, c = 300;
+ ret = frrscript_call(fs, "on_foo", ("a", &a), ("b", &b), ("c", c));
+ if (ret != 0)
+ goto DONE; // Lua function might have not successfully run
+
+ // a and b might be modified
+ assert(a == 500);
+ assert(b == 200);
+
+ // c could not have been modified
+ assert(c == 300);
+
+ // d is new
+ int* d = frrscript_get_result(fs, "on_foo", "d", lua_tointegerp);
+
+ if (!d)
+ goto DONE; // "d" might not have been in returned table
+ assert(*d == 800);
+ XFREE(MTYPE_SCRIPT_RES, d); // caller responsible for free
-.. _marshalling:
+ DONE:
+ frrscript_delete(fs);
-Marshalling
-^^^^^^^^^^^
+
+.. code-block:: lua
+
+ function on_foo(a, b, c)
+ b = 600
+ return { a = 500, c = 700, d = 800 }
+ end
+
+
+Note that ``{ a = ...`` is the same as ``{ ["a"] = ...``; it is Lua shorthand to
+use the variable name as the key in a table.
+
+Encoding and Decoding
+^^^^^^^^^^^^^^^^^^^^^
Earlier sections glossed over the types of values that can be passed into
-``frrscript_call`` and how data is passed between C and Lua. Lua, as a dynamically
-typed, garbage collected language, cannot directly use C values without some
-kind of marshalling / unmarshalling system to translate types between the two
-runtimes.
+``frrscript_call()`` and how data is passed between C and Lua. Lua, as a
+dynamically typed, garbage collected language, cannot directly use C values
+without some kind of encoding / decoding system to
+translate types between the two runtimes.
Lua communicates with C code using a stack. C code wishing to provide data to
-Lua scripts must provide a function that marshalls the C data into a Lua
+Lua scripts must provide a function that encodes the C data into a Lua
representation and pushes it on the stack. C code wishing to retrieve data from
-Lua must provide a corresponding unmarshalling function that retrieves a Lua
-value from the stack and converts it to the corresponding C type. These
-functions are known as encoders and decoders in FRR.
+Lua must provide a corresponding decoder function that retrieves a Lua
+value from the stack and converts it to the corresponding C type.
+
+Encoders and decoders are provided for common data types.
+Developers wishing to pass their own data structures between C and Lua need to
+create encoders and decoders for that data type.
+
+We try to keep them named consistently.
+There are three kinds of encoders and decoders:
+
+1. lua_push*: encodes a value onto the Lua stack.
+ Required for ``frrscript_call``.
+
+2. lua_decode*: decodes a value from the Lua stack.
+ Required for ``frrscript_call``.
+ Only non const-qualified pointers may be actually decoded (more below).
+
+3. lua_to*: allocates memory and decodes a value from the Lua stack.
+ Required for ``frrscript_get_result``.
-An encoder is a function that takes a ``lua_State *`` and a C type and pushes
-onto the Lua stack a value representing the C type. For C structs, the usual
-case, this will typically be a Lua table (tables are the only datastructure Lua
-has). For example, here is the encoder function for ``struct prefix``:
+This design allows both typesafe *modification* of C values and
+*allocation* of new C values.
+In the following sections, we will use the encoders/decoders for ``struct prefix`` as an example.
+
+Encoding
+""""""""
+
+An encoder function takes a ``lua_State *``, a C type and pushes that value onto
+the Lua state (a stack).
+For C structs, the usual case,
+this will typically be encoded to a Lua table, then pushed onto the Lua stack.
+
+Here is the encoder function for ``struct prefix``:
.. code-block:: c
@@ -204,8 +344,6 @@ has). For example, here is the encoder function for ``struct prefix``:
{
char buffer[PREFIX_STRLEN];
- zlog_debug("frrlua: pushing prefix table");
-
lua_newtable(L);
lua_pushstring(L, prefix2str(prefix, buffer, PREFIX_STRLEN));
lua_setfield(L, -2, "network");
@@ -215,7 +353,7 @@ has). For example, here is the encoder function for ``struct prefix``:
lua_setfield(L, -2, "family");
}
-This function pushes a single value onto the Lua stack. It is a table whose
+This function pushes a single value, a table, onto the Lua stack, whose
equivalent in Lua is:
.. code-block:: c
@@ -223,16 +361,23 @@ equivalent in Lua is:
{ ["network"] = "1.2.3.4/24", ["prefixlen"] = 24, ["family"] = 2 }
+Decoding
+""""""""
+
Decoders are a bit more involved. They do the reverse; a decoder function takes
a ``lua_State *``, pops a value off the Lua stack and converts it back into its
C type.
-However, since Lua programs have the ability to directly modify their inputs
-(i.e. values passed in via ``frrscript_call``), we need two separate decoder
-functions, called ``lua_decode_*`` and ``lua_to*``.
-A ``lua_decode_*`` function takes a ``lua_State*``, an index, and a C type, and
-unmarshalls a Lua value into that C type.
-Again, for ``struct prefix``:
+There are two: ``lua_decode*`` and ``lua_to*``. The former does no memory
+allocation and is needed for ``frrscript_call``.
+The latter performs allocation and is optional.
+
+A ``lua_decode_*`` function takes a ``lua_State*``, an index, and a pointer
+to a C data structure, and directly modifies the structure with values from the
+Lua stack. Note that only non const-qualified pointers may be modified;
+``lua_decode_*`` for other types will be noops.
+
+Again, for ``struct prefix *``:
.. code-block:: c
@@ -240,29 +385,52 @@ Again, for ``struct prefix``:
{
lua_getfield(L, idx, "network");
(void)str2prefix(lua_tostring(L, -1), prefix);
+	/* pop the network string */
lua_pop(L, 1);
- /* pop the table */
+ /* pop the prefix table */
lua_pop(L, 1);
}
+
+Note:
+ - Before ``lua_decode*`` is run, the "prefix" table is already on the top of
+ the stack. ``frrscript_call`` does this for us.
+ - However, at the end of ``lua_decode*``, the "prefix" table should be popped.
+  - The other two fields in the "prefix" table are disregarded, meaning that any
+ modification to them is discarded in C space. In this case, this is desired
+ behavior.
+
.. warning::
- ``lua_decode_prefix`` functions should leave the Lua stack completely empty
- when they return.
- For decoders that unmarshall fields from tables, remember to pop the table
- at the end.
+ ``lua_decode*`` functions should pop all values that ``lua_to*`` pushed onto
+ the Lua stack.
+   For an encoder that pushed a table, its decoder should pop the table at the end.
+ The above is an example.
+
+
+``int`` is not a non const-qualified pointer, so for ``int``:
+
+.. code-block:: c
+
+ void lua_decode_int_noop(lua_State *L, int idx, int i)
+ { //noop
+ }
+
+
+A ``lua_to*`` function provides identical functionality except that it first
+allocates memory for the new C type before decoding the value from the Lua stack,
+then returns a pointer to the newly allocated C type. You only need to implement
+this function to use with ``frrscript_get_result`` to retrieve a result of
+this type.
-A ``lua_to*`` function perform a similar role except that it first allocates
-memory for the new C type before decoding the value from the Lua stack, then
-returns a pointer to the newly allocated C type.
This function can and should be implemented using ``lua_decode_*``:
.. code-block:: c
void *lua_toprefix(lua_State *L, int idx)
{
- struct prefix *p = XCALLOC(MTYPE_TMP, sizeof(struct prefix));
+ struct prefix *p = XCALLOC(MTYPE_SCRIPT_RES, sizeof(struct prefix));
lua_decode_prefix(L, idx, p);
return p;
@@ -270,22 +438,15 @@ This function can and should be implemented using ``lua_decode_*``:
The returned data must always be copied off the stack and the copy must be
-allocated with ``MTYPE_TMP``. This way it is possible to unload the script
+allocated with ``MTYPE_SCRIPT_RES``. This way it is possible to unload the script
(destroy the state) without invalidating any references to values stored in it.
Note that it is the caller's responsibility to free the data.
-For consistency, we should always name functions of the first type
-``lua_decode_*``.
-Functions of the second type should be named ``lua_to*``, as this is the
-naming convention used by the Lua C library for the basic types e.g.
-``lua_tointeger`` and ``lua_tostring``.
-This two-function design allows the compiler to warn if a value passed into
-``frrscript_call`` does not have a encoder and decoder for that type.
-The ``lua_to*`` functions enable us to easily create decoders for nested
-structures.
+Registering encoders and decoders for frrscript_call
+""""""""""""""""""""""""""""""""""""""""""""""""""""
-To register a new type with its corresponding encoding and decoding functions,
+To register a new type with its ``lua_push*`` and ``lua_decode*`` functions,
add the mapping in the following macros in ``frrscript.h``:
.. code-block:: diff
@@ -296,7 +457,7 @@ add the mapping in the following macros in ``frrscript.h``:
- struct peer * : lua_pushpeer \
+ struct peer * : lua_pushpeer, \
+ struct prefix * : lua_pushprefix \
- )(L, value)
+ )((L), (value))
#define DECODE_ARGS_WITH_STATE(L, value) \
_Generic((value), \
@@ -304,7 +465,7 @@ add the mapping in the following macros in ``frrscript.h``:
- struct peer * : lua_decode_peer \
+ struct peer * : lua_decode_peer, \
+ struct prefix * : lua_decode_prefix \
- )(L, -1, value)
+ )((L), -1, (value))
At compile time, the compiler will search for encoders/decoders for the type of
@@ -331,11 +492,12 @@ For that, use ``lua_decode_noop``:
.. note::
- Marshalled types are not restricted to simple values like integers, strings
- and tables. It is possible to marshall a type such that the resultant object
- in Lua is an actual object-oriented object, complete with methods that call
- back into defined C functions. See the Lua manual for how to do this; for a
- code example, look at how zlog is exported into the script environment.
+ Encodable/decodable types are not restricted to simple values like integers,
+ strings and tables.
+ It is possible to encode a type such that the resultant object in Lua
+ is an actual object-oriented object, complete with methods that call
+ back into defined C functions. See the Lua manual for how to do this;
+ for a code example, look at how zlog is exported into the script environment.
Script Environment
@@ -364,10 +526,11 @@ Examples
For a complete code example involving passing custom types, retrieving results,
and doing complex calculations in Lua, look at the implementation of the
``match script SCRIPT`` command for BGP routemaps. This example calls into a
-script with a route prefix and attributes received from a peer and expects the
-script to return a match / no match / match and update result.
+script with a function named ``route_match``,
+provides route prefix and attributes received from a peer and expects the
+function to return a match / no match / match and update result.
-An example script to use with this follows. This script matches, does not match
+An example script to use with this follows. This function matches, does not match
or updates a route depending on how many BGP UPDATE messages the peer has
received when the script is called, simply as a demonstration of what can be
accomplished with scripting.
@@ -378,64 +541,75 @@ accomplished with scripting.
-- Example route map matching
-- author: qlyoung
--
- -- The following variables are available to us:
+ -- The following variables are available in the global environment:
-- log
-- logging library, with the usual functions
- -- prefix
+ --
+ -- route_match arguments:
+ -- table prefix
-- the route under consideration
- -- attributes
+ -- table attributes
-- the route's attributes
- -- peer
+ -- table peer
-- the peer which received this route
- -- RM_FAILURE
+ -- integer RM_FAILURE
-- status code in case of failure
- -- RM_NOMATCH
+ -- integer RM_NOMATCH
-- status code for no match
- -- RM_MATCH
+ -- integer RM_MATCH
-- status code for match
- -- RM_MATCH_AND_CHANGE
+ -- integer RM_MATCH_AND_CHANGE
-- status code for match-and-set
--
- -- We need to set the following out values:
- -- action
- -- Set to the appropriate status code to indicate what we did
- -- attributes
- -- Setting fields on here will propagate them back up to the caller if
- -- 'action' is set to RM_MATCH_AND_CHANGE.
-
-
- log.info("Evaluating route " .. prefix.network .. " from peer " .. peer.remote_id.string)
-
- function on_match (prefix, attrs)
- log.info("Match")
- action = RM_MATCH
- end
-
- function on_nomatch (prefix, attrs)
- log.info("No match")
- action = RM_NOMATCH
- end
-
- function on_match_and_change (prefix, attrs)
- action = RM_MATCH_AND_CHANGE
- log.info("Match and change")
- attrs["metric"] = attrs["metric"] + 7
- end
-
- special_routes = {
- ["172.16.10.4/24"] = on_match,
- ["172.16.13.1/8"] = on_nomatch,
- ["192.168.0.24/8"] = on_match_and_change,
- }
+   -- route_match returns a table with the following keys:
+ -- integer action, required
+ -- resultant status code. Should be one of RM_*
+ -- table attributes, optional
+ -- updated route attributes
+ --
+
+ function route_match(prefix, attributes, peer,
+ RM_FAILURE, RM_NOMATCH, RM_MATCH, RM_MATCH_AND_CHANGE)
+
+ log.info("Evaluating route " .. prefix.network .. " from peer " .. peer.remote_id.string)
+ function on_match (prefix, attributes)
+ log.info("Match")
+ return {
+            action = RM_MATCH
+ }
+ end
- if special_routes[prefix.network] then
- special_routes[prefix.network](prefix, attributes)
- elseif peer.stats.update_in % 3 == 0 then
- on_match(prefix, attributes)
- elseif peer.stats.update_in % 2 == 0 then
- on_nomatch(prefix, attributes)
- else
- on_match_and_change(prefix, attributes)
- end
-
+ function on_nomatch (prefix, attributes)
+ log.info("No match")
+ return {
+ action = RM_NOMATCH
+ }
+ end
+
+ function on_match_and_change (prefix, attributes)
+ log.info("Match and change")
+ attributes["metric"] = attributes["metric"] + 7
+ return {
+ action = RM_MATCH_AND_CHANGE,
+ attributes = attributes
+ }
+ end
+
+ special_routes = {
+ ["172.16.10.4/24"] = on_match,
+ ["172.16.13.1/8"] = on_nomatch,
+ ["192.168.0.24/8"] = on_match_and_change,
+ }
+
+
+ if special_routes[prefix.network] then
+ return special_routes[prefix.network](prefix, attributes)
+ elseif peer.stats.update_in % 3 == 0 then
+ return on_match(prefix, attributes)
+ elseif peer.stats.update_in % 2 == 0 then
+ return on_nomatch(prefix, attributes)
+ else
+ return on_match_and_change(prefix, attributes)
+ end
+ end
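For orientation only (a rough sketch, not the actual ``bgp_routemap.c`` implementation), the C side of such a call could look roughly like the following; ``fs``, ``prefix``, ``attrs``, ``peer`` and the ``RM_*`` values are placeholders.

.. code-block:: c

   struct frrscript *fs = frrscript_new("myscript"); /* placeholder name */

   if (frrscript_load(fs, "route_match", NULL) != 0) {
           /* script file missing, or no route_match function in it */
   }

   /* Arguments reach route_match positionally, in this order; the names
    * are only used afterwards to decode matching keys of the returned
    * table back into the C values (e.g. "attributes").
    */
   int ret = frrscript_call(fs, "route_match",
                            ("prefix", prefix),
                            ("attributes", attrs),
                            ("peer", peer),
                            ("RM_FAILURE", 0),   /* status-code values */
                            ("RM_NOMATCH", 1),   /* are placeholders   */
                            ("RM_MATCH", 2),
                            ("RM_MATCH_AND_CHANGE", 3));

``ret`` is zero only if ``route_match`` ran and returned a table; the ``action`` key of that table is then read back with ``frrscript_get_result()``.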
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 88f0483901..ef72c5030a 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -1896,7 +1896,17 @@ AS path access list is user defined AS path.
This command defines a new AS path access list.
+.. clicmd:: show bgp as-path-access-list [json]
+
+   Display all BGP AS Path access lists.
+
+ If the ``json`` option is specified, output is displayed in JSON format.
+
+.. clicmd:: show bgp as-path-access-list WORD [json]
+
+ Display the specified BGP AS Path access list.
+
+ If the ``json`` option is specified, output is displayed in JSON format.
.. _bgp-bogon-filter-example:
@@ -3539,17 +3549,17 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`.
It helps to identify which prefixes were installed at some point.
Here is an example of how to check what prefixes were installed starting
- with an arbitrary version::
+ with an arbitrary version:
- .. code-block:: frr
+.. code-block:: shell
- ~# vtysh -c 'show bgp ipv4 unicast json' | jq '.tableVersion'
- 9
- ~# vtysh -c 'show ip bgp version 9 json' | jq -r '.routes | keys[]'
- 192.168.3.0/24
- ~# vtysh -c 'show ip bgp version 8 json' | jq -r '.routes | keys[]'
- 192.168.2.0/24
- 192.168.3.0/24
+ # vtysh -c 'show bgp ipv4 unicast json' | jq '.tableVersion'
+ 9
+ # vtysh -c 'show ip bgp version 9 json' | jq -r '.routes | keys[]'
+ 192.168.3.0/24
+ # vtysh -c 'show ip bgp version 8 json' | jq -r '.routes | keys[]'
+ 192.168.2.0/24
+ 192.168.3.0/24
.. clicmd:: show bgp [afi] [safi] statistics
diff --git a/doc/user/filter.rst b/doc/user/filter.rst
index cbbcd47dc3..c1146e50aa 100644
--- a/doc/user/filter.rst
+++ b/doc/user/filter.rst
@@ -35,6 +35,18 @@ IP Access List
access-list filter permit 10.0.0.0/8
access-list filter seq 13 permit 10.0.0.0/7
+.. clicmd:: show <ip|ipv6> access-list [json]
+
+ Display all IPv4 or IPv6 access lists.
+
+ If the ``json`` option is specified, output is displayed in JSON format.
+
+.. clicmd:: show <ip|ipv6> access-list WORD [json]
+
+ Display the specified IPv4 or IPv6 access list.
+
+ If the ``json`` option is specified, output is displayed in JSON format.
+
IP Prefix List
==============
@@ -111,19 +123,25 @@ ip prefix-list description
Showing ip prefix-list
----------------------
-.. clicmd:: show ip prefix-list
+.. clicmd:: show ip prefix-list [json]
Display all IP prefix lists.
-.. clicmd:: show ip prefix-list NAME
+ If the ``json`` option is specified, output is displayed in JSON format.
+
+.. clicmd:: show ip prefix-list NAME [json]
Show IP prefix list can be used with a prefix list name.
-.. clicmd:: show ip prefix-list NAME seq NUM
+ If the ``json`` option is specified, output is displayed in JSON format.
+
+.. clicmd:: show ip prefix-list NAME seq NUM [json]
Show IP prefix list can be used with a prefix list name and sequential
number.
+ If the ``json`` option is specified, output is displayed in JSON format.
+
.. clicmd:: show ip prefix-list NAME A.B.C.D/M
If the command longer is used, all prefix lists with prefix lengths equal to
@@ -132,10 +150,10 @@ Showing ip prefix-list
.. clicmd:: show ip prefix-list NAME A.B.C.D/M longer
.. clicmd:: show ip prefix-list NAME A.B.C.D/M first-match
-.. clicmd:: show ip prefix-list summary
-.. clicmd:: show ip prefix-list summary NAME
-.. clicmd:: show ip prefix-list detail
-.. clicmd:: show ip prefix-list detail NAME
+.. clicmd:: show ip prefix-list summary [json]
+.. clicmd:: show ip prefix-list summary NAME [json]
+.. clicmd:: show ip prefix-list detail [json]
+.. clicmd:: show ip prefix-list detail NAME [json]
.. clicmd:: debug prefix-list NAME match <A.B.C.D/M|X:X::X:X/M> [address-mode]
diff --git a/doc/user/ospf6d.rst b/doc/user/ospf6d.rst
index c4a1bc381e..f8595ef3f5 100644
--- a/doc/user/ospf6d.rst
+++ b/doc/user/ospf6d.rst
@@ -85,6 +85,83 @@ OSPF6 router
change to take effect, user can use this cli instead of restarting the
ospf6d daemon.
+ASBR Summarisation Support in OSPFv3
+====================================
+
+  External routes in OSPFv3 are carried by type 5/7 LSAs (external LSAs).
+  External LSAs are generated by an ASBR (Autonomous System Boundary Router).
+  A large topology database requires a large amount of router memory, which
+ slows down all processes, including SPF calculations.
+ It is necessary to reduce the size of the OSPFv3 topology database,
+ especially in a large network. Summarising routes keeps the routing
+ tables smaller and easier to troubleshoot.
+
+  External route summarisation must be configured on the ASBR.
+  Stub areas do not allow ASBRs because they do not allow type 5 LSAs.
+
+ An ASBR will inject a summary route into the OSPFv3 domain.
+
+  A summary route will only be advertised if there is at least one subnet
+  that falls within the summary range.
+
+  The CLI also provides an option to not advertise a configured range of
+  IPv6 prefixes.
+
+  ASBR summarisation is configured using the following CLI commands.
+
+.. clicmd:: summary-address X:X::X:X/M [tag (1-4294967295)] [{metric (0-16777215) | metric-type (1-2)}]
+
+   This command advertises a single External LSA on behalf of all the
+   prefixes falling within the configured range.
+   The tag, metric and metric-type can also be configured.
+   By default no tag is configured, and the summary is advertised with a
+   metric of 20 and metric-type 2.
+   A summary route is created when one or more specific routes are learned
+   and removed when no more-specific routes exist.
+   The summary route is also installed in the local system with a Null0
+   next-hop to avoid leaking traffic.
+
+.. clicmd:: no summary-address X:X::X:X/M [tag (1-4294967295)] [{metric (0-16777215) | metric-type (1-2)}]
+
+ This command can be used to remove the summarisation configuration.
+   This flushes the single External LSA, if it was originated, and re-advertises
+   the External LSAs for all the existing individual prefixes.
+
+.. clicmd:: summary-address X:X::X:X/M no-advertise
+
+   This command can be used when the user does not want to advertise a
+   certain range of prefixes.
+   When configured, it flushes all the existing external LSAs falling
+   within this range.
+
+.. clicmd:: no summary-address X:X::X:X/M no-advertise
+
+ This command can be used to remove the previous configuration.
+   When configured, it resumes originating external LSAs for all the prefixes
+   falling within the configured range.
+
+.. clicmd:: aggregation timer (5-1800)
+
+   The summarisation command takes effect after the aggregation timer expires.
+   By default the value of this timer is 5 seconds. This command lets the user
+   modify the time after which the external LSAs are originated.
+
+.. clicmd:: no aggregation timer (5-1800)
+
+   This command removes the timer configuration and reverts to the default
+   5 second timer.
+
+.. clicmd:: show ipv6 ospf6 summary-address [detail] [json]
+
+   This command displays all the summary-address related information.
+   When the ``detail`` option is used, it additionally shows all the prefixes
+   falling under each summary configuration.
+
+.. clicmd:: debug ospf6 lsa aggregation
+
+   This command enables debug logging related to the summarisation
+   of these LSAs.
+
.. _ospf6-debugging:
OSPFv3 Debugging
@@ -387,13 +464,3 @@ Larger example with policy and various options set:
ipv6 access-class access6
exec-timeout 0 0
!
-
-
-Configuration Limits
-====================
-
-Ospf6d currently supports 100 interfaces addresses if MTU is set to
-default value, and 200 interface addresses if MTU is set to jumbo
-packet size or larger.
-
-
diff --git a/doc/user/pbr.rst b/doc/user/pbr.rst
index 77134a7704..e59ed10896 100644
--- a/doc/user/pbr.rst
+++ b/doc/user/pbr.rst
@@ -117,6 +117,21 @@ end destination.
both v4 and v6 prefixes. This command is used in conjunction of the
:clicmd:`match src-ip PREFIX` command for matching.
+.. clicmd:: match src-port (1-65535)
+
+   When an incoming packet matches the specified source port, take the
+   packet and forward it according to the nexthops specified.
+
+.. clicmd:: match dst-port (1-65535)
+
+   When an incoming packet matches the specified destination port, take the
+   packet and forward it according to the nexthops specified.
+
+.. clicmd:: match ip-protocol [tcp|udp]
+
+   When an incoming packet matches the specified IP protocol, take the
+   packet and forward it according to the nexthops specified.
+
.. clicmd:: match mark (1-4294967295)
Select the mark to match. This is a linux only command and if attempted
diff --git a/doc/user/pim.rst b/doc/user/pim.rst
index 4b67326b3d..6f9aa289b4 100644
--- a/doc/user/pim.rst
+++ b/doc/user/pim.rst
@@ -93,11 +93,13 @@ Certain signals have special meanings to *pimd*.
down. This command is vrf aware, to configure for a vrf, enter the vrf
submode.
-.. clicmd:: ip pim join-prune-interval (60-600)
+.. clicmd:: ip pim join-prune-interval (5-600)
Modify the join/prune interval that pim uses to the new value. Time is
specified in seconds. This command is vrf aware, to configure for a vrf,
- enter the vrf submode.
+ enter the vrf submode. The default time is 60 seconds. If you enter
+   a value smaller than 60 seconds, be aware that this can and will affect
+ convergence at scale.
.. clicmd:: ip pim keep-alive-timer (31-60000)
diff --git a/doc/user/routemap.rst b/doc/user/routemap.rst
index 3cb83cc652..2714b81dbe 100644
--- a/doc/user/routemap.rst
+++ b/doc/user/routemap.rst
@@ -90,11 +90,13 @@ cont
.. _route-map-show-command:
-.. clicmd:: show route-map [WORD]
+.. clicmd:: show route-map [WORD] [json]
Display data about each daemons knowledge of individual route-maps.
If WORD is supplied narrow choice to that particular route-map.
+ If the ``json`` option is specified, output is displayed in JSON format.
+
.. _route-map-clear-counter-command:
.. clicmd:: clear route-map counter [WORD]
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 3654801100..8b4e2c8bdc 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -908,10 +908,11 @@ IPv6 example for OSPFv3.
.. note::
- For both IPv4 and IPv6, the IP address has to exist at the point the
- route-map is created. Be wary of race conditions if the interface is
- not created at startup. On Debian, FRR might start before ifupdown
- completes. Consider a reboot test.
+   For both IPv4 and IPv6, the IP address has to exist on some interface when
+   the route is being installed into the system. Otherwise, the kernel rejects
+   the route. To solve the problem of disappearing IPv6 addresses when the
+   interface goes down, use the ``net.ipv6.conf.all.keep_addr_on_down``
+   :ref:`sysctl option <zebra-sysctl>`.
.. clicmd:: zebra route-map delay-timer (0-600)
@@ -1139,6 +1140,10 @@ zebra Terminal Mode Commands
Display detailed information about a route. If [nexthop-group] is
included, it will display the nexthop group ID the route is using as well.
+.. clicmd:: show interface [NAME] [{vrf VRF|brief}] [json]
+
+.. clicmd:: show interface [NAME] [{vrf all|brief}] [json]
+
.. clicmd:: show interface [NAME] [{vrf VRF|brief}] [nexthop-group]
.. clicmd:: show interface [NAME] [{vrf all|brief}] [nexthop-group]
@@ -1148,6 +1153,8 @@ zebra Terminal Mode Commands
detailed information about that single interface. If [nexthop-group] is
specified, it will display nexthop groups pointing out that interface.
+ If the ``json`` option is specified, output is displayed in JSON format.
+
.. clicmd:: show ip prefix-list [NAME]
.. clicmd:: show route-map [NAME]
@@ -1232,6 +1239,8 @@ For protocols requiring an IPv6 router-id, the following commands are available:
Display the user configured IPv6 router-id.
+.. _zebra-sysctl:
+
Expected sysctl settings
========================
diff --git a/eigrpd/eigrp_cli.c b/eigrpd/eigrp_cli.c
index 47de929fc3..35536979ea 100644
--- a/eigrpd/eigrp_cli.c
+++ b/eigrpd/eigrp_cli.c
@@ -861,16 +861,6 @@ static int eigrp_config_write(struct vty *vty)
return written;
}
-static int eigrp_write_interface(struct vty *vty);
-static struct cmd_node eigrp_interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = eigrp_write_interface,
-};
-
-
static int eigrp_write_interface(struct vty *vty)
{
struct lyd_node *dnode;
@@ -921,8 +911,7 @@ eigrp_cli_init(void)
vrf_cmd_init(NULL, &eigrpd_privs);
- install_node(&eigrp_interface_node);
- if_cmd_init();
+ if_cmd_init(eigrp_write_interface);
install_element(INTERFACE_NODE, &eigrp_if_delay_cmd);
install_element(INTERFACE_NODE, &no_eigrp_if_delay_cmd);
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index bccb9065f4..a78e4996b4 100644
--- a/isisd/isis_circuit.c
+++ b/isisd/isis_circuit.c
@@ -1430,14 +1430,6 @@ ferr_r isis_circuit_passwd_hmac_md5_set(struct isis_circuit *circuit,
passwd);
}
-struct cmd_node interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = isis_interface_config_write,
-};
-
void isis_circuit_circ_type_set(struct isis_circuit *circuit, int circ_type)
{
if (circuit->circ_type == circ_type)
@@ -1537,8 +1529,7 @@ void isis_circuit_init(void)
hook_register_prio(if_del, 0, isis_if_delete_hook);
/* Install interface node */
- install_node(&interface_node);
- if_cmd_init();
+ if_cmd_init(isis_interface_config_write);
if_zapi_callbacks(isis_ifp_create, isis_ifp_up,
isis_ifp_down, isis_ifp_destroy);
}
diff --git a/lib/command.c b/lib/command.c
index fe17c68a8b..422544b70b 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -2422,28 +2422,30 @@ DEFUN(find,
}
#if defined(DEV_BUILD) && defined(HAVE_SCRIPTING)
-DEFUN(script,
- script_cmd,
- "script SCRIPT",
- "Test command - execute a script\n"
- "Script name (same as filename in /etc/frr/scripts/\n")
+DEFUN(script, script_cmd, "script SCRIPT FUNCTION",
+ "Test command - execute a function in a script\n"
+ "Script name (same as filename in /etc/frr/scripts/)\n"
+ "Function name (in the script)\n")
{
struct prefix p;
(void)str2prefix("1.2.3.4/24", &p);
- struct frrscript *fs = frrscript_load(argv[1]->arg, NULL);
+ struct frrscript *fs = frrscript_new(argv[1]->arg);
- if (fs == NULL) {
- vty_out(vty, "Script '/etc/frr/scripts/%s.lua' not found\n",
- argv[1]->arg);
- } else {
- int ret = frrscript_call(fs, ("p", &p));
- char buf[40];
- prefix2str(&p, buf, sizeof(buf));
- vty_out(vty, "p: %s\n", buf);
- vty_out(vty, "Script result: %d\n", ret);
+ if (frrscript_load(fs, argv[2]->arg, NULL)) {
+ vty_out(vty,
+ "/etc/frr/scripts/%s.lua or function '%s' not found\n",
+ argv[1]->arg, argv[2]->arg);
}
+ int ret = frrscript_call(fs, argv[2]->arg, ("p", &p));
+ char buf[40];
+ prefix2str(&p, buf, sizeof(buf));
+ vty_out(vty, "p: %s\n", buf);
+ vty_out(vty, "Script result: %d\n", ret);
+
+ frrscript_delete(fs);
+
return CMD_SUCCESS;
}
#endif
diff --git a/lib/filter.c b/lib/filter.c
index 72a66d85ad..744ea9c480 100644
--- a/lib/filter.c
+++ b/lib/filter.c
@@ -30,6 +30,7 @@
#include "routemap.h"
#include "libfrr.h"
#include "northbound_cli.h"
+#include "json.h"
DEFINE_MTYPE_STATIC(LIB, ACCESS_LIST, "Access List");
DEFINE_MTYPE_STATIC(LIB, ACCESS_LIST_STR, "Access List Str");
@@ -443,71 +444,158 @@ void access_list_filter_add(struct access_list *access,
host A single host address
*/
-static void config_write_access_zebra(struct vty *, struct filter *);
-static void config_write_access_cisco(struct vty *, struct filter *);
+static void config_write_access_zebra(struct vty *, struct filter *,
+ json_object *);
+static void config_write_access_cisco(struct vty *, struct filter *,
+ json_object *);
+
+static const char *filter_type2str(struct filter *filter)
+{
+ if (filter->cisco) {
+ if (filter->u.cfilter.extended)
+ return "Extended";
+ else
+ return "Standard";
+ } else
+ return "Zebra";
+}
/* show access-list command. */
-static int filter_show(struct vty *vty, const char *name, afi_t afi)
+static int filter_show(struct vty *vty, const char *name, afi_t afi,
+ bool use_json)
{
struct access_list *access;
struct access_master *master;
struct filter *mfilter;
struct filter_cisco *filter;
- int write = 0;
+ bool first;
+ json_object *json = NULL;
+ json_object *json_proto = NULL;
master = access_master_get(afi);
- if (master == NULL)
+ if (master == NULL) {
+ if (use_json)
+ vty_out(vty, "{}\n");
return 0;
+ }
+
+ if (use_json)
+ json = json_object_new_object();
/* Print the name of the protocol */
- vty_out(vty, "%s:\n", frr_protoname);
+ if (json) {
+ json_proto = json_object_new_object();
+ json_object_object_add(json, frr_protoname, json_proto);
+ } else
+ vty_out(vty, "%s:\n", frr_protoname);
for (access = master->str.head; access; access = access->next) {
+ json_object *json_acl = NULL;
+ json_object *json_rules = NULL;
+
if (name && strcmp(access->name, name) != 0)
continue;
- write = 1;
+ first = true;
for (mfilter = access->head; mfilter; mfilter = mfilter->next) {
+ json_object *json_rule = NULL;
+
filter = &mfilter->u.cfilter;
- if (write) {
- vty_out(vty, "%s %s access list %s\n",
- mfilter->cisco ? (filter->extended
- ? "Extended"
- : "Standard")
- : "Zebra",
- (afi == AFI_IP)
- ? ("IP")
- : ((afi == AFI_IP6) ? ("IPv6 ")
- : ("MAC ")),
- access->name);
- write = 0;
+ if (first) {
+ const char *type = filter_type2str(mfilter);
+
+ if (json) {
+ json_acl = json_object_new_object();
+ json_object_object_add(json_proto,
+ access->name,
+ json_acl);
+
+ json_object_string_add(json_acl, "type",
+ type);
+ json_object_string_add(json_acl,
+ "addressFamily",
+ afi2str(afi));
+ json_rules = json_object_new_array();
+ json_object_object_add(
+ json_acl, "rules", json_rules);
+ } else {
+ vty_out(vty, "%s %s access list %s\n",
+ type,
+ (afi == AFI_IP)
+ ? ("IP")
+ : ((afi == AFI_IP6)
+ ? ("IPv6 ")
+ : ("MAC ")),
+ access->name);
+ }
+
+ first = false;
}
- vty_out(vty, " seq %" PRId64, mfilter->seq);
- vty_out(vty, " %s%s", filter_type_str(mfilter),
- mfilter->type == FILTER_DENY ? " " : "");
+ if (json) {
+ json_rule = json_object_new_object();
+ json_object_array_add(json_rules, json_rule);
+
+ json_object_int_add(json_rule, "sequenceNumber",
+ mfilter->seq);
+ json_object_string_add(
+ json_rule, "filterType",
+ filter_type_str(mfilter));
+ } else {
+ vty_out(vty, " seq %" PRId64, mfilter->seq);
+ vty_out(vty, " %s%s", filter_type_str(mfilter),
+ mfilter->type == FILTER_DENY ? " "
+ : "");
+ }
if (!mfilter->cisco)
- config_write_access_zebra(vty, mfilter);
+ config_write_access_zebra(vty, mfilter,
+ json_rule);
else if (filter->extended)
- config_write_access_cisco(vty, mfilter);
+ config_write_access_cisco(vty, mfilter,
+ json_rule);
else {
- if (filter->addr_mask.s_addr == 0xffffffff)
- vty_out(vty, " any\n");
- else {
- vty_out(vty, " %pI4", &filter->addr);
+ if (json) {
+ char buf[BUFSIZ];
+
+ json_object_string_add(
+ json_rule, "address",
+ inet_ntop(AF_INET,
+ &filter->addr, buf,
+ sizeof(buf)));
+ json_object_string_add(
+ json_rule, "mask",
+ inet_ntop(AF_INET,
+ &filter->addr_mask,
+ buf, sizeof(buf)));
+ } else {
if (filter->addr_mask.s_addr
- != INADDR_ANY)
- vty_out(vty,
- ", wildcard bits %pI4",
- &filter->addr_mask);
- vty_out(vty, "\n");
+ == 0xffffffff)
+ vty_out(vty, " any\n");
+ else {
+ vty_out(vty, " %pI4",
+ &filter->addr);
+ if (filter->addr_mask.s_addr
+ != INADDR_ANY)
+ vty_out(vty,
+ ", wildcard bits %pI4",
+ &filter->addr_mask);
+ vty_out(vty, "\n");
+ }
}
}
}
}
+
+ if (json) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+
return CMD_SUCCESS;
}
@@ -519,7 +607,7 @@ DEFUN (show_mac_access_list,
"mac access lists\n"
"List mac access lists\n")
{
- return filter_show(vty, NULL, AFI_L2VPN);
+ return filter_show(vty, NULL, AFI_L2VPN, false);
}
DEFUN (show_mac_access_list_name,
@@ -530,22 +618,24 @@ DEFUN (show_mac_access_list_name,
"List mac access lists\n"
"mac address\n")
{
- return filter_show(vty, argv[3]->arg, AFI_L2VPN);
+ return filter_show(vty, argv[3]->arg, AFI_L2VPN, false);
}
DEFUN (show_ip_access_list,
show_ip_access_list_cmd,
- "show ip access-list",
+ "show ip access-list [json]",
SHOW_STR
IP_STR
- "List IP access lists\n")
+ "List IP access lists\n"
+ JSON_STR)
{
- return filter_show(vty, NULL, AFI_IP);
+ bool uj = use_json(argc, argv);
+ return filter_show(vty, NULL, AFI_IP, uj);
}
DEFUN (show_ip_access_list_name,
show_ip_access_list_name_cmd,
- "show ip access-list <(1-99)|(100-199)|(1300-1999)|(2000-2699)|WORD>",
+ "show ip access-list <(1-99)|(100-199)|(1300-1999)|(2000-2699)|WORD> [json]",
SHOW_STR
IP_STR
"List IP access lists\n"
@@ -553,41 +643,64 @@ DEFUN (show_ip_access_list_name,
"IP extended access list\n"
"IP standard access list (expanded range)\n"
"IP extended access list (expanded range)\n"
- "IP zebra access-list\n")
+ "IP zebra access-list\n"
+ JSON_STR)
{
+ bool uj = use_json(argc, argv);
int idx_acl = 3;
- return filter_show(vty, argv[idx_acl]->arg, AFI_IP);
+ return filter_show(vty, argv[idx_acl]->arg, AFI_IP, uj);
}
DEFUN (show_ipv6_access_list,
show_ipv6_access_list_cmd,
- "show ipv6 access-list",
+ "show ipv6 access-list [json]",
SHOW_STR
IPV6_STR
- "List IPv6 access lists\n")
+ "List IPv6 access lists\n"
+ JSON_STR)
{
- return filter_show(vty, NULL, AFI_IP6);
+ bool uj = use_json(argc, argv);
+ return filter_show(vty, NULL, AFI_IP6, uj);
}
DEFUN (show_ipv6_access_list_name,
show_ipv6_access_list_name_cmd,
- "show ipv6 access-list WORD",
+ "show ipv6 access-list WORD [json]",
SHOW_STR
IPV6_STR
"List IPv6 access lists\n"
- "IPv6 zebra access-list\n")
+ "IPv6 zebra access-list\n"
+ JSON_STR)
{
+ bool uj = use_json(argc, argv);
int idx_word = 3;
- return filter_show(vty, argv[idx_word]->arg, AFI_IP6);
+ return filter_show(vty, argv[idx_word]->arg, AFI_IP6, uj);
}
-static void config_write_access_cisco(struct vty *vty, struct filter *mfilter)
+static void config_write_access_cisco(struct vty *vty, struct filter *mfilter,
+ json_object *json)
{
struct filter_cisco *filter;
filter = &mfilter->u.cfilter;
- if (filter->extended) {
+ if (json) {
+ char buf[BUFSIZ];
+
+ json_object_boolean_add(json, "extended", !!filter->extended);
+ json_object_string_add(
+ json, "sourceAddress",
+ inet_ntop(AF_INET, &filter->addr, buf, sizeof(buf)));
+ json_object_string_add(json, "sourceMask",
+ inet_ntop(AF_INET, &filter->addr_mask,
+ buf, sizeof(buf)));
+ json_object_string_add(
+ json, "destinationAddress",
+ inet_ntop(AF_INET, &filter->mask, buf, sizeof(buf)));
+ json_object_string_add(json, "destinationMask",
+ inet_ntop(AF_INET, &filter->mask_mask,
+ buf, sizeof(buf)));
+ } else {
vty_out(vty, " ip");
if (filter->addr_mask.s_addr == 0xffffffff)
vty_out(vty, " any");
@@ -607,19 +720,11 @@ static void config_write_access_cisco(struct vty *vty, struct filter *mfilter)
vty_out(vty, " %pI4", &filter->mask_mask);
}
vty_out(vty, "\n");
- } else {
- if (filter->addr_mask.s_addr == 0xffffffff)
- vty_out(vty, " any\n");
- else {
- vty_out(vty, " %pI4", &filter->addr);
- if (filter->addr_mask.s_addr != INADDR_ANY)
- vty_out(vty, " %pI4", &filter->addr_mask);
- vty_out(vty, "\n");
- }
}
}
-static void config_write_access_zebra(struct vty *vty, struct filter *mfilter)
+static void config_write_access_zebra(struct vty *vty, struct filter *mfilter,
+ json_object *json)
{
struct filter_zebra *filter;
struct prefix *p;
@@ -628,21 +733,29 @@ static void config_write_access_zebra(struct vty *vty, struct filter *mfilter)
filter = &mfilter->u.zfilter;
p = &filter->prefix;
- if (p->prefixlen == 0 && !filter->exact)
- vty_out(vty, " any");
- else if (p->family == AF_INET6 || p->family == AF_INET)
- vty_out(vty, " %s/%d%s",
- inet_ntop(p->family, &p->u.prefix, buf, BUFSIZ),
- p->prefixlen, filter->exact ? " exact-match" : "");
- else if (p->family == AF_ETHERNET) {
- if (p->prefixlen == 0)
+ if (json) {
+ json_object_string_add(json, "prefix",
+ prefix2str(p, buf, sizeof(buf)));
+ json_object_boolean_add(json, "exact-match", !!filter->exact);
+ } else {
+ if (p->prefixlen == 0 && !filter->exact)
vty_out(vty, " any");
- else
- vty_out(vty, " %s", prefix_mac2str(&(p->u.prefix_eth),
- buf, sizeof(buf)));
- }
+ else if (p->family == AF_INET6 || p->family == AF_INET)
+ vty_out(vty, " %s/%d%s",
+ inet_ntop(p->family, &p->u.prefix, buf, BUFSIZ),
+ p->prefixlen,
+ filter->exact ? " exact-match" : "");
+ else if (p->family == AF_ETHERNET) {
+ if (p->prefixlen == 0)
+ vty_out(vty, " any");
+ else
+ vty_out(vty, " %s",
+ prefix_mac2str(&(p->u.prefix_eth), buf,
+ sizeof(buf)));
+ }
- vty_out(vty, "\n");
+ vty_out(vty, "\n");
+ }
}
static struct cmd_node access_mac_node = {
diff --git a/lib/frr_zmq.c b/lib/frr_zmq.c
index ce52848a25..ea9c828f7c 100644
--- a/lib/frr_zmq.c
+++ b/lib/frr_zmq.c
@@ -17,6 +17,14 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/*
+ * IF YOU MODIFY THIS FILE PLEASE RUN `make check` and ensure that
+ * the test_zmq.c unit test is still working.  There are dependencies
+ * between the two that are extremely fragile. My understanding
+ * is that there is specialized ownership of the cb pointer based
+ * upon what is happening. Those assumptions are supposed to be
+ * tested in test_zmq.c.
+ */
#include <zebra.h>
#include <zmq.h>
@@ -309,8 +317,22 @@ void frrzmq_thread_cancel(struct frrzmq_cb **cb, struct cb_core *core)
core->cancelled = true;
thread_cancel(&core->thread);
+ /*
+ * Looking at this code one would assume that FRR
+	 * would want `!(*cb)->write.thread` here.  This was
+	 * attempted in e08165def1c62beee0e87385 but this
+	 * change caused `make check` to stop working,
+	 * which was not noticed because our CI system
+	 * does not build with zeromq.  Put this back
+	 * to the code as written in 2017; e08165de..
+	 * was introduced in 2021, so someone was ok
+	 * with frrzmq_thread_cancel for 4 years.  This will
+	 * allow those people doing `make check` to continue
+	 * working.  In the meantime, if the people using
+	 * this code see an issue they can fix it.
+ */
if ((*cb)->read.cancelled && !(*cb)->read.thread
- && (*cb)->write.cancelled && !(*cb)->write.thread)
+ && (*cb)->write.cancelled && (*cb)->write.thread)
XFREE(MTYPE_ZEROMQ_CB, *cb);
}
diff --git a/lib/frrlua.c b/lib/frrlua.c
index e97e48121c..96d7269440 100644
--- a/lib/frrlua.c
+++ b/lib/frrlua.c
@@ -29,6 +29,8 @@
#include "log.h"
#include "buffer.h"
+DEFINE_MTYPE(LIB, SCRIPT_RES, "Scripting results");
+
/* Lua stuff */
/*
@@ -81,7 +83,7 @@ void lua_decode_prefix(lua_State *L, int idx, struct prefix *prefix)
void *lua_toprefix(lua_State *L, int idx)
{
- struct prefix *p = XCALLOC(MTYPE_TMP, sizeof(struct prefix));
+ struct prefix *p = XCALLOC(MTYPE_SCRIPT_RES, sizeof(struct prefix));
lua_decode_prefix(L, idx, p);
return p;
}
@@ -153,7 +155,8 @@ void lua_decode_interface(lua_State *L, int idx, struct interface *ifp)
}
void *lua_tointerface(lua_State *L, int idx)
{
- struct interface *ifp = XCALLOC(MTYPE_TMP, sizeof(struct interface));
+ struct interface *ifp =
+ XCALLOC(MTYPE_SCRIPT_RES, sizeof(struct interface));
lua_decode_interface(L, idx, ifp);
return ifp;
@@ -183,7 +186,8 @@ void lua_decode_inaddr(lua_State *L, int idx, struct in_addr *inaddr)
void *lua_toinaddr(lua_State *L, int idx)
{
- struct in_addr *inaddr = XCALLOC(MTYPE_TMP, sizeof(struct in_addr));
+ struct in_addr *inaddr =
+ XCALLOC(MTYPE_SCRIPT_RES, sizeof(struct in_addr));
lua_decode_inaddr(L, idx, inaddr);
return inaddr;
}
@@ -213,7 +217,8 @@ void lua_decode_in6addr(lua_State *L, int idx, struct in6_addr *in6addr)
void *lua_toin6addr(lua_State *L, int idx)
{
- struct in6_addr *in6addr = XCALLOC(MTYPE_TMP, sizeof(struct in6_addr));
+ struct in6_addr *in6addr =
+ XCALLOC(MTYPE_SCRIPT_RES, sizeof(struct in6_addr));
lua_decode_in6addr(L, idx, in6addr);
return in6addr;
}
@@ -243,7 +248,8 @@ void lua_decode_sockunion(lua_State *L, int idx, union sockunion *su)
void *lua_tosockunion(lua_State *L, int idx)
{
- union sockunion *su = XCALLOC(MTYPE_TMP, sizeof(union sockunion));
+ union sockunion *su =
+ XCALLOC(MTYPE_SCRIPT_RES, sizeof(union sockunion));
lua_decode_sockunion(L, idx, su);
return su;
@@ -262,7 +268,7 @@ void lua_decode_timet(lua_State *L, int idx, time_t *t)
void *lua_totimet(lua_State *L, int idx)
{
- time_t *t = XCALLOC(MTYPE_TMP, sizeof(time_t));
+ time_t *t = XCALLOC(MTYPE_SCRIPT_RES, sizeof(time_t));
lua_decode_timet(L, idx, t);
return t;
@@ -283,7 +289,7 @@ void lua_decode_integerp(lua_State *L, int idx, long long *num)
void *lua_tointegerp(lua_State *L, int idx)
{
- long long *num = XCALLOC(MTYPE_TMP, sizeof(long long));
+ long long *num = XCALLOC(MTYPE_SCRIPT_RES, sizeof(long long));
lua_decode_integerp(L, idx, num);
return num;
@@ -297,7 +303,7 @@ void lua_decode_stringp(lua_State *L, int idx, char *str)
void *lua_tostringp(lua_State *L, int idx)
{
- char *string = XSTRDUP(MTYPE_TMP, lua_tostring(L, idx));
+ char *string = XSTRDUP(MTYPE_SCRIPT_RES, lua_tostring(L, idx));
return string;
}
@@ -309,6 +315,14 @@ void lua_decode_noop(lua_State *L, int idx, const void *ptr)
{
}
+
+/*
+ * Noop decoder for int.
+ */
+void lua_decode_integer_noop(lua_State *L, int idx, int i)
+{
+}
+
/*
* Logging.
*
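One practical consequence of moving these allocations from ``MTYPE_TMP`` to ``MTYPE_SCRIPT_RES`` (sketch below, not part of this diff): whoever consumes the pointer returned by one of these ``lua_to*`` decoders is presumably expected to release it against the new MTYPE.

.. code-block:: c

   /* hypothetical caller; L and idx come from the surrounding Lua glue */
   struct prefix *p = lua_toprefix(L, idx);

   /* ... use p ... */

   XFREE(MTYPE_SCRIPT_RES, p); /* matches the XCALLOC in lua_toprefix() */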
diff --git a/lib/frrlua.h b/lib/frrlua.h
index c4de82740c..3e16c82e22 100644
--- a/lib/frrlua.h
+++ b/lib/frrlua.h
@@ -34,6 +34,8 @@
extern "C" {
#endif
+DECLARE_MTYPE(SCRIPT_RES);
+
/*
* gcc-10 is complaining about the wrapper function
* not being compatible with lua_pushstring returning
@@ -162,10 +164,12 @@ void lua_decode_stringp(lua_State *L, int idx, char *str);
void *lua_tostringp(lua_State *L, int idx);
/*
- * No-op decocder
+ * No-op decoders
*/
void lua_decode_noop(lua_State *L, int idx, const void *ptr);
+void lua_decode_integer_noop(lua_State *L, int idx, int i);
+
/*
* Retrieve an integer from table on the top of the stack.
*
diff --git a/lib/frrscript.c b/lib/frrscript.c
index 1a9f3639dd..d00b84ccbb 100644
--- a/lib/frrscript.c
+++ b/lib/frrscript.c
@@ -102,67 +102,136 @@ static void codec_free(struct codec *c)
}
#endif
-/* Generic script APIs */
+/* Lua function hash utils */
-int _frrscript_call(struct frrscript *fs)
+unsigned int lua_function_hash_key(const void *data)
{
+ const struct lua_function_state *lfs = data;
- int ret = lua_pcall(fs->L, 0, 0, 0);
+ return string_hash_make(lfs->name);
+}
+
+bool lua_function_hash_cmp(const void *d1, const void *d2)
+{
+ const struct lua_function_state *lfs1 = d1;
+ const struct lua_function_state *lfs2 = d2;
+
+ return strmatch(lfs1->name, lfs2->name);
+}
+
+void *lua_function_alloc(void *arg)
+{
+ struct lua_function_state *tmp = arg;
+
+ struct lua_function_state *lfs =
+ XCALLOC(MTYPE_SCRIPT, sizeof(struct lua_function_state));
+ lfs->name = tmp->name;
+ lfs->L = tmp->L;
+ return lfs;
+}
+
+static void lua_function_free(struct hash_bucket *b, void *data)
+{
+ struct lua_function_state *lfs = (struct lua_function_state *)b->data;
+ lua_close(lfs->L);
+ XFREE(MTYPE_SCRIPT, lfs);
+}
+
+/* internal frrscript APIs */
+
+int _frrscript_call_lua(struct lua_function_state *lfs, int nargs)
+{
+
+ int ret;
+ ret = lua_pcall(lfs->L, nargs, 1, 0);
switch (ret) {
case LUA_OK:
break;
case LUA_ERRRUN:
- zlog_err("Script '%s' runtime error: %s", fs->name,
- lua_tostring(fs->L, -1));
+ zlog_err("Lua hook call '%s' : runtime error: %s", lfs->name,
+ lua_tostring(lfs->L, -1));
break;
case LUA_ERRMEM:
- zlog_err("Script '%s' memory error: %s", fs->name,
- lua_tostring(fs->L, -1));
+ zlog_err("Lua hook call '%s' : memory error: %s", lfs->name,
+ lua_tostring(lfs->L, -1));
break;
case LUA_ERRERR:
- zlog_err("Script '%s' error handler error: %s", fs->name,
- lua_tostring(fs->L, -1));
+ zlog_err("Lua hook call '%s' : error handler error: %s",
+ lfs->name, lua_tostring(lfs->L, -1));
break;
case LUA_ERRGCMM:
- zlog_err("Script '%s' garbage collector error: %s", fs->name,
- lua_tostring(fs->L, -1));
+ zlog_err("Lua hook call '%s' : garbage collector error: %s",
+ lfs->name, lua_tostring(lfs->L, -1));
break;
default:
- zlog_err("Script '%s' unknown error: %s", fs->name,
- lua_tostring(fs->L, -1));
+ zlog_err("Lua hook call '%s' : unknown error: %s", lfs->name,
+ lua_tostring(lfs->L, -1));
break;
}
if (ret != LUA_OK) {
- lua_pop(fs->L, 1);
+ lua_pop(lfs->L, 1);
goto done;
}
+ if (lua_gettop(lfs->L) != 1) {
+ zlog_err(
+ "Lua hook call '%s': Lua function should return only 1 result",
+ lfs->name);
+ ret = 1;
+ goto done;
+ }
+
+ if (lua_istable(lfs->L, 1) != 1) {
+ zlog_err(
+ "Lua hook call '%s': Lua function should return a Lua table",
+ lfs->name);
+ ret = 1;
+ }
+
done:
/* LUA_OK is 0, so we can just return lua_pcall's result directly */
return ret;
}
-void *frrscript_get_result(struct frrscript *fs,
- const struct frrscript_env *result)
+void *frrscript_get_result(struct frrscript *fs, const char *function_name,
+ const char *name,
+ void *(*lua_to)(lua_State *L, int idx))
{
- void *r;
- struct frrscript_codec c = {.typename = result->typename};
+ void *p;
+ struct lua_function_state *lfs;
+ struct lua_function_state lookup = {.name = function_name};
- struct frrscript_codec *codec = hash_lookup(codec_hash, &c);
- assert(codec && "No encoder for type");
+ lfs = hash_lookup(fs->lua_function_hash, &lookup);
- if (!codec->decoder) {
- zlog_err("No script decoder for type '%s'", result->typename);
+ if (lfs == NULL)
+ return NULL;
+
+ /* At this point, the Lua state should have only the returned table.
+ * We will then search the table for the key/value we're interested in.
+ * Then if the value is present (i.e. non-nil), call the lua_to*
+ * decoder.
+ */
+ assert(lua_gettop(lfs->L) == 1);
+ assert(lua_istable(lfs->L, -1) == 1);
+ lua_getfield(lfs->L, -1, name);
+ if (lua_isnil(lfs->L, -1)) {
+ lua_pop(lfs->L, 1);
+ zlog_warn(
+ "frrscript: '%s.lua': '%s': tried to decode '%s' as result but failed",
+ fs->name, function_name, name);
return NULL;
}
+ p = lua_to(lfs->L, 2);
- lua_getglobal(fs->L, result->name);
- r = codec->decoder(fs->L, -1);
- lua_pop(fs->L, 1);
+	/* At the end, the Lua state should be the same as it was at the start,
+	 * i.e. containing solely the returned table.
+ */
+ assert(lua_gettop(lfs->L) == 1);
+ assert(lua_istable(lfs->L, -1) == 1);
- return r;
+ return p;
}
void frrscript_register_type_codec(struct frrscript_codec *codec)
@@ -183,61 +252,99 @@ void frrscript_register_type_codecs(struct frrscript_codec *codecs)
frrscript_register_type_codec(&codecs[i]);
}
-struct frrscript *frrscript_load(const char *name,
- int (*load_cb)(struct frrscript *))
+struct frrscript *frrscript_new(const char *name)
{
struct frrscript *fs = XCALLOC(MTYPE_SCRIPT, sizeof(struct frrscript));
fs->name = XSTRDUP(MTYPE_SCRIPT, name);
- fs->L = luaL_newstate();
- frrlua_export_logging(fs->L);
+ fs->lua_function_hash =
+ hash_create(lua_function_hash_key, lua_function_hash_cmp,
+ "Lua function state hash");
+ return fs;
+}
+
+int frrscript_load(struct frrscript *fs, const char *function_name,
+ int (*load_cb)(struct frrscript *))
+{
- char fname[MAXPATHLEN * 2];
- snprintf(fname, sizeof(fname), "%s/%s.lua", scriptdir, fs->name);
+ /* Set up the Lua script */
+ lua_State *L = luaL_newstate();
- int ret = luaL_loadfile(fs->L, fname);
+ frrlua_export_logging(L);
+
+ char script_name[MAXPATHLEN];
+
+ if (snprintf(script_name, sizeof(script_name), "%s/%s.lua", scriptdir,
+ fs->name)
+ >= (int)sizeof(script_name)) {
+ zlog_err("frrscript: path to script %s/%s.lua is too long",
+ scriptdir, fs->name);
+ goto fail;
+ }
+ int ret = luaL_dofile(L, script_name);
switch (ret) {
case LUA_OK:
break;
case LUA_ERRSYNTAX:
- zlog_err("Failed loading script '%s': syntax error: %s", fname,
- lua_tostring(fs->L, -1));
+ zlog_err(
+ "frrscript: failed loading script '%s.lua': syntax error: %s",
+ script_name, lua_tostring(L, -1));
break;
case LUA_ERRMEM:
- zlog_err("Failed loading script '%s': out-of-memory error: %s",
- fname, lua_tostring(fs->L, -1));
+ zlog_err(
+ "frrscript: failed loading script '%s.lua': out-of-memory error: %s",
+ script_name, lua_tostring(L, -1));
break;
case LUA_ERRGCMM:
zlog_err(
- "Failed loading script '%s': garbage collector error: %s",
- fname, lua_tostring(fs->L, -1));
+ "frrscript: failed loading script '%s.lua': garbage collector error: %s",
+ script_name, lua_tostring(L, -1));
break;
case LUA_ERRFILE:
- zlog_err("Failed loading script '%s': file read error: %s",
- fname, lua_tostring(fs->L, -1));
+ zlog_err(
+ "frrscript: failed loading script '%s.lua': file read error: %s",
+ script_name, lua_tostring(L, -1));
break;
default:
- zlog_err("Failed loading script '%s': unknown error: %s", fname,
- lua_tostring(fs->L, -1));
+ zlog_err(
+ "frrscript: failed loading script '%s.lua': unknown error: %s",
+ script_name, lua_tostring(L, -1));
break;
}
if (ret != LUA_OK)
goto fail;
- if (load_cb && (*load_cb)(fs) != 0)
+ /* Push the Lua function we want */
+ lua_getglobal(L, function_name);
+ if (lua_isfunction(L, lua_gettop(L)) == 0) {
+ zlog_err("frrscript: loaded script '%s.lua' but %s not found",
+ script_name, function_name);
goto fail;
+ }
- return fs;
+ if (load_cb && (*load_cb)(fs) != 0) {
+ zlog_err(
+ "frrscript: '%s.lua': %s: loaded but callback returned non-zero exit code",
+ script_name, function_name);
+ goto fail;
+ }
+
+ /* Add the Lua function state to frrscript */
+ struct lua_function_state key = {.name = function_name, .L = L};
+
+ hash_get(fs->lua_function_hash, &key, lua_function_alloc);
+
+ return 0;
fail:
- frrscript_unload(fs);
- return NULL;
+ lua_close(L);
+ return 1;
}
-void frrscript_unload(struct frrscript *fs)
+void frrscript_delete(struct frrscript *fs)
{
- lua_close(fs->L);
+ hash_iterate(fs->lua_function_hash, lua_function_free, NULL);
XFREE(MTYPE_SCRIPT, fs->name);
XFREE(MTYPE_SCRIPT, fs);
}
diff --git a/lib/frrscript.h b/lib/frrscript.h
index 8612c602f3..540676c099 100644
--- a/lib/frrscript.h
+++ b/lib/frrscript.h
@@ -25,7 +25,7 @@
#include <lua.h>
#include "frrlua.h"
-#include "../bgpd/bgp_script.h"
+#include "bgpd/bgp_script.h" // for peer and attr encoders/decoders
#ifdef __cplusplus
extern "C" {
@@ -40,14 +40,30 @@ struct frrscript_codec {
decoder_func decoder;
};
+struct lua_function_state {
+ const char *name;
+ lua_State *L;
+};
+
struct frrscript {
/* Script name */
char *name;
- /* Lua state */
- struct lua_State *L;
+ /* Hash of Lua function name to Lua function state */
+ struct hash *lua_function_hash;
};
+
+/*
+ * Hash related functions for lua_function_hash
+ */
+
+void *lua_function_alloc(void *arg);
+
+unsigned int lua_function_hash_key(const void *data);
+
+bool lua_function_hash_cmp(const void *d1, const void *d2);
+
struct frrscript_env {
/* Value type */
const char *typename;
@@ -60,15 +76,24 @@ struct frrscript_env {
};
/*
- * Create new FRR script.
+ * Create new struct frrscript for a Lua script.
+ * This will hold the states for the Lua functions in this script.
+ *
+ * scriptname
+ * Name of the Lua script file, without the .lua
*/
-struct frrscript *frrscript_load(const char *name,
- int (*load_cb)(struct frrscript *));
+struct frrscript *frrscript_new(const char *scriptname);
/*
- * Destroy FRR script.
+ * Load a function into frrscript, run callback if any
*/
-void frrscript_unload(struct frrscript *fs);
+int frrscript_load(struct frrscript *fs, const char *function_name,
+ int (*load_cb)(struct frrscript *));
+
+/*
+ * Delete Lua function states and frrscript
+ */
+void frrscript_delete(struct frrscript *fs);
/*
* Register a Lua codec for a type.
@@ -97,16 +122,31 @@ void frrscript_register_type_codecs(struct frrscript_codec *codecs);
*/
void frrscript_init(const char *scriptdir);
-#define ENCODE_ARGS(name, value) \
- do { \
- ENCODE_ARGS_WITH_STATE(L, value); \
- lua_setglobal(L, name); \
- } while (0)
+/*
+ * This macro is mapped to every (name, value) in frrscript_call,
+ * so this in turn maps them onto their encoders
+ */
+#define ENCODE_ARGS(name, value) ENCODE_ARGS_WITH_STATE(lfs->L, (value))
+/*
+ * This macro is also mapped to every (name, value) in frrscript_call, but
+ * not every value can be mapped to its decoder - only those that appear
+ * in the returned table will. To find out if they appear in the returned
+ * table, first pop the value and check if its nil. Only call the decoder
+ * if non-nil.
+ *
+ * At the end, the only thing left on the stack should be the
+ * returned table.
+ */
#define DECODE_ARGS(name, value) \
do { \
- lua_getglobal(L, name); \
- DECODE_ARGS_WITH_STATE(L, value); \
+ lua_getfield(lfs->L, 1, (name)); \
+ if (lua_isnil(lfs->L, 2)) { \
+ lua_pop(lfs->L, 1); \
+ } else { \
+ DECODE_ARGS_WITH_STATE(lfs->L, (value)); \
+ } \
+ assert(lua_gettop(lfs->L) == 1); \
} while (0)
/*
@@ -120,6 +160,7 @@ void frrscript_init(const char *scriptdir);
*/
#define ENCODE_ARGS_WITH_STATE(L, value) \
_Generic((value), \
+int : lua_pushinteger, \
long long * : lua_pushintegerp, \
struct prefix * : lua_pushprefix, \
struct interface * : lua_pushinterface, \
@@ -131,10 +172,11 @@ char * : lua_pushstring_wrapper, \
struct attr * : lua_pushattr, \
struct peer * : lua_pushpeer, \
const struct prefix * : lua_pushprefix \
-)(L, value)
+)((L), (value))
#define DECODE_ARGS_WITH_STATE(L, value) \
_Generic((value), \
+int : lua_decode_integer_noop, \
long long * : lua_decode_integerp, \
struct prefix * : lua_decode_prefix, \
struct interface * : lua_decode_interface, \
@@ -146,56 +188,84 @@ char * : lua_decode_stringp, \
struct attr * : lua_decode_attr, \
struct peer * : lua_decode_noop, \
const struct prefix * : lua_decode_noop \
-)(L, -1, value)
+)((L), -1, (value))
/*
- * Call script.
+ * Call Lua function state (abstraction for a single Lua function)
*
- * fs
- * The script to call; this is obtained from frrscript_load().
+ * lfs
+ * The Lua function to call; this should have been loaded in by
+ *    frrscript_load().
+ * nargs
+ *    Number of arguments the function accepts.
*
* Returns:
* 0 if the script ran successfully, nonzero otherwise.
*/
-int _frrscript_call(struct frrscript *fs);
+int _frrscript_call_lua(struct lua_function_state *lfs, int nargs);
/*
- * Wrapper for call script. Maps values passed in to their encoder
- * and decoder types.
+ * Wrapper for calling Lua function state. Maps values passed in to their
+ * encoder and decoder types.
*
* fs
- * The script to call; this is obtained from frrscript_load().
+ *    The struct frrscript in which the Lua function was loaded
+ * f
+ * Name of the Lua function.
*
* Returns:
* 0 if the script ran successfully, nonzero otherwise.
*/
-#define frrscript_call(fs, ...) \
- ({ \
- lua_State *L = fs->L; \
- MAP_LISTS(ENCODE_ARGS, ##__VA_ARGS__); \
- int ret = _frrscript_call(fs); \
- if (ret == 0) { \
- MAP_LISTS(DECODE_ARGS, ##__VA_ARGS__); \
- } \
- ret; \
+#define frrscript_call(fs, f, ...) \
+ ({ \
+ struct lua_function_state lookup = {.name = (f)}; \
+ struct lua_function_state *lfs; \
+ lfs = hash_lookup((fs)->lua_function_hash, &lookup); \
+ lfs == NULL ? ({ \
+ zlog_err( \
+ "frrscript: '%s.lua': '%s': tried to call this function but it was not loaded", \
+ (fs)->name, (f)); \
+ 1; \
+ }) \
+ : ({ \
+ MAP_LISTS(ENCODE_ARGS, ##__VA_ARGS__); \
+ _frrscript_call_lua( \
+ lfs, PP_NARG(__VA_ARGS__)); \
+ }) != 0 \
+ ? ({ \
+ zlog_err( \
+				"frrscript: '%s.lua': '%s': this function was called but returned a non-zero exit code. No variables modified.", \
+ (fs)->name, (f)); \
+ 1; \
+ }) \
+ : ({ \
+ MAP_LISTS(DECODE_ARGS, \
+ ##__VA_ARGS__); \
+ 0; \
+ }); \
})
/*
- * Get result from finished script.
+ * Get result from finished function
*
* fs
* The script. This script must have been run already.
- *
- * result
- * The result to extract from the script.
- * This reuses the frrscript_env type, but only the typename and name fields
- * need to be set. The value is returned directly.
+ * function_name
+ * Name of the Lua function.
+ * name
+ * Name of the result.
+ * This will be used as a string key to retrieve from the table that the
+ * Lua function returns.
+ * The name here should *not* appear in frrscript_call.
+ * lua_to
+ * Function pointer to a lua_to decoder function.
+ * This function should allocate and decode a value from the Lua state.
*
* Returns:
- * The script result of the specified name and type, or NULL.
+ * A pointer to the decoded value from the Lua state, or NULL if no such
+ * value.
*/
-void *frrscript_get_result(struct frrscript *fs,
- const struct frrscript_env *result);
+void *frrscript_get_result(struct frrscript *fs, const char *function_name,
+ const char *name,
+ void *(*lua_to)(lua_State *L, int idx));
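A short usage sketch for the reworked getter (not part of this diff), assuming ``fs`` already had ``route_match`` loaded and called as in the documentation example above:

.. code-block:: c

   long long *action;

   action = frrscript_get_result(fs, "route_match", "action", lua_tointegerp);
   if (!action) {
           /* returned table had no "action" key, or the lookup failed */
   } else {
           /* ... act on *action ... */
           XFREE(MTYPE_SCRIPT_RES, action); /* result is heap-allocated */
   }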
#ifdef __cplusplus
}
diff --git a/lib/if.c b/lib/if.c
index e37b4f55b0..6c57855ca1 100644
--- a/lib/if.c
+++ b/lib/if.c
@@ -1349,10 +1349,20 @@ static const struct cmd_variable_handler if_var_handlers[] = {
{.tokenname = "INTERFACE", .completions = if_autocomplete},
{.completions = NULL}};
-void if_cmd_init(void)
+static struct cmd_node interface_node = {
+ .name = "interface",
+ .node = INTERFACE_NODE,
+ .parent_node = CONFIG_NODE,
+ .prompt = "%s(config-if)# ",
+};
+
+void if_cmd_init(int (*config_write)(struct vty *))
{
cmd_variable_handler_register(if_var_handlers);
+ interface_node.config_write = config_write;
+ install_node(&interface_node);
+
install_element(CONFIG_NODE, &interface_cmd);
install_element(CONFIG_NODE, &no_interface_cmd);
diff --git a/lib/if.h b/lib/if.h
index 0d689fe14b..43e2d3cffa 100644
--- a/lib/if.h
+++ b/lib/if.h
@@ -597,7 +597,8 @@ struct if_link_params *if_link_params_get(struct interface *);
void if_link_params_free(struct interface *);
/* Northbound. */
-extern void if_cmd_init(void);
+struct vty;
+extern void if_cmd_init(int (*config_write)(struct vty *));
extern void if_zapi_callbacks(int (*create)(struct interface *ifp),
int (*up)(struct interface *ifp),
int (*down)(struct interface *ifp),
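The shape of the new API for daemons (a sketch with a hypothetical daemon, mirroring the eigrpd and isisd changes above): instead of installing its own ``interface_node``, a daemon now just hands its interface config writer to the library.

.. code-block:: c

   /* hypothetical daemon code */
   static int mydaemon_interface_config_write(struct vty *vty)
   {
           /* emit "interface ..." stanzas */
           return 0;
   }

   void mydaemon_if_init(void)
   {
           /* lib/if.c installs INTERFACE_NODE with this config_write */
           if_cmd_init(mydaemon_interface_config_write);
   }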
diff --git a/lib/pbr.h b/lib/pbr.h
index e365888662..cef1d9d380 100644
--- a/lib/pbr.h
+++ b/lib/pbr.h
@@ -49,7 +49,8 @@ struct pbr_filter {
#define PBR_FILTER_PROTO (1 << 5)
#define PBR_FILTER_SRC_PORT_RANGE (1 << 6)
#define PBR_FILTER_DST_PORT_RANGE (1 << 7)
-#define PBR_FILTER_DSFIELD (1 << 8)
+#define PBR_FILTER_DSFIELD (1 << 8)
+#define PBR_FILTER_IP_PROTOCOL (1 << 9)
#define PBR_DSFIELD_DSCP (0xfc) /* Upper 6 bits of DS field: DSCP */
#define PBR_DSFIELD_ECN (0x03) /* Lower 2 bits of DS field: BCN */
@@ -67,6 +68,9 @@ struct pbr_filter {
/* Filter with fwmark */
uint32_t fwmark;
+
+ /* Filter with the ip protocol */
+ uint8_t ip_proto;
};
/*
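A sketch of how a client such as pbrd might populate the new field (not part of this diff; the flag-word member name ``filter_bm`` is assumed from the existing struct):

.. code-block:: c

   #include <netinet/in.h> /* IPPROTO_TCP */

   /* hypothetical helper */
   static void example_match_tcp(struct pbr_filter *f)
   {
           f->ip_proto = IPPROTO_TCP;
           f->filter_bm |= PBR_FILTER_IP_PROTOCOL; /* filter_bm assumed */
   }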
diff --git a/lib/plist.c b/lib/plist.c
index 2b42c43764..63ae579796 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -932,102 +932,206 @@ enum display_type {
first_match_display
};
-static void vty_show_prefix_entry(struct vty *vty, afi_t afi,
+static void vty_show_prefix_entry(struct vty *vty, json_object *json, afi_t afi,
struct prefix_list *plist,
struct prefix_master *master,
enum display_type dtype, int seqnum)
{
struct prefix_list_entry *pentry;
+ json_object *json_pl = NULL;
/* Print the name of the protocol */
- vty_out(vty, "%s: ", frr_protoname);
+ if (json) {
+ json_pl = json_object_new_object();
+ json_object_object_add(json, plist->name, json_pl);
+ } else
+ vty_out(vty, "%s: ", frr_protoname);
if (dtype == normal_display) {
- vty_out(vty, "ip%s prefix-list %s: %d entries\n",
- afi == AFI_IP ? "" : "v6", plist->name, plist->count);
- if (plist->desc)
- vty_out(vty, " Description: %s\n", plist->desc);
+ if (json) {
+ json_object_string_add(json_pl, "addressFamily",
+ afi2str(afi));
+ json_object_int_add(json_pl, "entries", plist->count);
+ if (plist->desc)
+ json_object_string_add(json_pl, "description",
+ plist->desc);
+ } else {
+ vty_out(vty, "ip%s prefix-list %s: %d entries\n",
+ afi == AFI_IP ? "" : "v6", plist->name,
+ plist->count);
+ if (plist->desc)
+ vty_out(vty, " Description: %s\n",
+ plist->desc);
+ }
} else if (dtype == summary_display || dtype == detail_display) {
- vty_out(vty, "ip%s prefix-list %s:\n",
- afi == AFI_IP ? "" : "v6", plist->name);
-
- if (plist->desc)
- vty_out(vty, " Description: %s\n", plist->desc);
-
- vty_out(vty,
- " count: %d, range entries: %d, sequences: %" PRId64 " - %" PRId64 "\n",
- plist->count, plist->rangecount,
- plist->head ? plist->head->seq : 0,
- plist->tail ? plist->tail->seq : 0);
+ if (json) {
+ json_object_string_add(json_pl, "addressFamily",
+ afi2str(afi));
+ if (plist->desc)
+ json_object_string_add(json_pl, "description",
+ plist->desc);
+ json_object_int_add(json_pl, "count", plist->count);
+ json_object_int_add(json_pl, "rangeEntries",
+ plist->rangecount);
+ json_object_int_add(json_pl, "sequenceStart",
+ plist->head ? plist->head->seq : 0);
+ json_object_int_add(json_pl, "sequenceEnd",
+ plist->tail ? plist->tail->seq : 0);
+ } else {
+ vty_out(vty, "ip%s prefix-list %s:\n",
+ afi == AFI_IP ? "" : "v6", plist->name);
+
+ if (plist->desc)
+ vty_out(vty, " Description: %s\n",
+ plist->desc);
+
+ vty_out(vty,
+ " count: %d, range entries: %d, sequences: %" PRId64
+ " - %" PRId64 "\n",
+ plist->count, plist->rangecount,
+ plist->head ? plist->head->seq : 0,
+ plist->tail ? plist->tail->seq : 0);
+ }
}
if (dtype != summary_display) {
+ json_object *json_entries = NULL;
+
+ if (json) {
+ json_entries = json_object_new_array();
+ json_object_object_add(json_pl, "entries",
+ json_entries);
+ }
+
for (pentry = plist->head; pentry; pentry = pentry->next) {
if (dtype == sequential_display
&& pentry->seq != seqnum)
continue;
- vty_out(vty, " ");
+ if (json) {
+ json_object *json_entry;
+ char buf[BUFSIZ];
- vty_out(vty, "seq %" PRId64 " ", pentry->seq);
+ json_entry = json_object_new_object();
+ json_object_array_add(json_entries, json_entry);
- vty_out(vty, "%s ", prefix_list_type_str(pentry));
-
- if (pentry->any)
- vty_out(vty, "any");
- else {
- struct prefix *p = &pentry->prefix;
-
- vty_out(vty, "%pFX", p);
+ json_object_int_add(json_entry,
+ "sequenceNumber",
+ pentry->seq);
+ json_object_string_add(
+ json_entry, "type",
+ prefix_list_type_str(pentry));
+ json_object_string_add(
+ json_entry, "prefix",
+ prefix2str(&pentry->prefix, buf,
+ sizeof(buf)));
if (pentry->ge)
- vty_out(vty, " ge %d", pentry->ge);
+ json_object_int_add(
+ json_entry,
+ "minimumPrefixLength",
+ pentry->ge);
if (pentry->le)
- vty_out(vty, " le %d", pentry->le);
+ json_object_int_add(
+ json_entry,
+ "maximumPrefixLength",
+ pentry->le);
+
+ if (dtype == detail_display
+ || dtype == sequential_display) {
+ json_object_int_add(json_entry,
+ "hitCount",
+ pentry->hitcnt);
+ json_object_int_add(json_entry,
+ "referenceCount",
+ pentry->refcnt);
+ }
+ } else {
+ vty_out(vty, " ");
+
+ vty_out(vty, "seq %" PRId64 " ", pentry->seq);
+
+ vty_out(vty, "%s ",
+ prefix_list_type_str(pentry));
+
+ if (pentry->any)
+ vty_out(vty, "any");
+ else {
+ struct prefix *p = &pentry->prefix;
+
+ vty_out(vty, "%pFX", p);
+
+ if (pentry->ge)
+ vty_out(vty, " ge %d",
+ pentry->ge);
+ if (pentry->le)
+ vty_out(vty, " le %d",
+ pentry->le);
+ }
+
+ if (dtype == detail_display
+ || dtype == sequential_display)
+ vty_out(vty,
+ " (hit count: %ld, refcount: %ld)",
+ pentry->hitcnt, pentry->refcnt);
+
+ vty_out(vty, "\n");
}
-
- if (dtype == detail_display
- || dtype == sequential_display)
- vty_out(vty, " (hit count: %ld, refcount: %ld)",
- pentry->hitcnt, pentry->refcnt);
-
- vty_out(vty, "\n");
}
}
}
static int vty_show_prefix_list(struct vty *vty, afi_t afi, const char *name,
- const char *seq, enum display_type dtype)
+ const char *seq, enum display_type dtype,
+ bool uj)
{
struct prefix_list *plist;
struct prefix_master *master;
int64_t seqnum = 0;
+ json_object *json = NULL;
+ json_object *json_proto = NULL;
master = prefix_master_get(afi, 0);
if (master == NULL)
return CMD_WARNING;
+ if (uj) {
+ json = json_object_new_object();
+ json_proto = json_object_new_object();
+ json_object_object_add(json, frr_protoname, json_proto);
+ }
+
if (seq)
seqnum = (int64_t)atol(seq);
if (name) {
plist = prefix_list_lookup(afi, name);
if (!plist) {
- vty_out(vty, "%% Can't find specified prefix-list\n");
+ if (!uj)
+ vty_out(vty,
+ "%% Can't find specified prefix-list\n");
return CMD_WARNING;
}
- vty_show_prefix_entry(vty, afi, plist, master, dtype, seqnum);
+ vty_show_prefix_entry(vty, json_proto, afi, plist, master,
+ dtype, seqnum);
} else {
if (dtype == detail_display || dtype == summary_display) {
- if (master->recent)
+ if (master->recent && !uj)
vty_out(vty,
"Prefix-list with the last deletion/insertion: %s\n",
master->recent->name);
}
for (plist = master->str.head; plist; plist = plist->next)
- vty_show_prefix_entry(vty, afi, plist, master, dtype,
- seqnum);
+ vty_show_prefix_entry(vty, json_proto, afi, plist,
+ master, dtype, seqnum);
+ }
+
+ if (uj) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
}
return CMD_SUCCESS;
@@ -1150,19 +1254,21 @@ static int vty_clear_prefix_list(struct vty *vty, afi_t afi, const char *name,
DEFPY (show_ip_prefix_list,
show_ip_prefix_list_cmd,
- "show ip prefix-list [WORD [seq$dseq (1-4294967295)$arg]]",
+ "show ip prefix-list [WORD [seq$dseq (1-4294967295)$arg]] [json$uj]",
SHOW_STR
IP_STR
PREFIX_LIST_STR
"Name of a prefix list\n"
"sequence number of an entry\n"
- "Sequence number\n")
+ "Sequence number\n"
+ JSON_STR)
{
enum display_type dtype = normal_display;
if (dseq)
dtype = sequential_display;
- return vty_show_prefix_list(vty, AFI_IP, prefix_list, arg_str, dtype);
+ return vty_show_prefix_list(vty, AFI_IP, prefix_list, arg_str, dtype,
+ !!uj);
}
DEFPY (show_ip_prefix_list_prefix,
@@ -1188,28 +1294,30 @@ DEFPY (show_ip_prefix_list_prefix,
DEFPY (show_ip_prefix_list_summary,
show_ip_prefix_list_summary_cmd,
- "show ip prefix-list summary [WORD$prefix_list]",
+ "show ip prefix-list summary [WORD$prefix_list] [json$uj]",
SHOW_STR
IP_STR
PREFIX_LIST_STR
"Summary of prefix lists\n"
- "Name of a prefix list\n")
+ "Name of a prefix list\n"
+ JSON_STR)
{
return vty_show_prefix_list(vty, AFI_IP, prefix_list, NULL,
- summary_display);
+ summary_display, !!uj);
}
DEFPY (show_ip_prefix_list_detail,
show_ip_prefix_list_detail_cmd,
- "show ip prefix-list detail [WORD$prefix_list]",
+ "show ip prefix-list detail [WORD$prefix_list] [json$uj]",
SHOW_STR
IP_STR
PREFIX_LIST_STR
"Detail of prefix lists\n"
- "Name of a prefix list\n")
+ "Name of a prefix list\n"
+ JSON_STR)
{
return vty_show_prefix_list(vty, AFI_IP, prefix_list, NULL,
- detail_display);
+ detail_display, !!uj);
}
DEFPY (clear_ip_prefix_list,
@@ -1226,19 +1334,21 @@ DEFPY (clear_ip_prefix_list,
DEFPY (show_ipv6_prefix_list,
show_ipv6_prefix_list_cmd,
- "show ipv6 prefix-list [WORD [seq$dseq (1-4294967295)$arg]]",
+ "show ipv6 prefix-list [WORD [seq$dseq (1-4294967295)$arg]] [json$uj]",
SHOW_STR
IPV6_STR
PREFIX_LIST_STR
"Name of a prefix list\n"
"sequence number of an entry\n"
- "Sequence number\n")
+ "Sequence number\n"
+ JSON_STR)
{
enum display_type dtype = normal_display;
if (dseq)
dtype = sequential_display;
- return vty_show_prefix_list(vty, AFI_IP6, prefix_list, arg_str, dtype);
+ return vty_show_prefix_list(vty, AFI_IP6, prefix_list, arg_str, dtype,
+ !!uj);
}
DEFPY (show_ipv6_prefix_list_prefix,
@@ -1264,28 +1374,30 @@ DEFPY (show_ipv6_prefix_list_prefix,
DEFPY (show_ipv6_prefix_list_summary,
show_ipv6_prefix_list_summary_cmd,
- "show ipv6 prefix-list summary [WORD$prefix-list]",
+ "show ipv6 prefix-list summary [WORD$prefix-list] [json$uj]",
SHOW_STR
IPV6_STR
PREFIX_LIST_STR
"Summary of prefix lists\n"
- "Name of a prefix list\n")
+ "Name of a prefix list\n"
+ JSON_STR)
{
return vty_show_prefix_list(vty, AFI_IP6, prefix_list, NULL,
- summary_display);
+ summary_display, !!uj);
}
DEFPY (show_ipv6_prefix_list_detail,
show_ipv6_prefix_list_detail_cmd,
- "show ipv6 prefix-list detail [WORD$prefix-list]",
+ "show ipv6 prefix-list detail [WORD$prefix-list] [json$uj]",
SHOW_STR
IPV6_STR
PREFIX_LIST_STR
"Detail of prefix lists\n"
- "Name of a prefix list\n")
+ "Name of a prefix list\n"
+ JSON_STR)
{
return vty_show_prefix_list(vty, AFI_IP6, prefix_list, NULL,
- detail_display);
+ detail_display, !!uj);
}
DEFPY (clear_ipv6_prefix_list,
diff --git a/lib/routemap.c b/lib/routemap.c
index 9dc1c7c82d..5d45dc1047 100644
--- a/lib/routemap.c
+++ b/lib/routemap.c
@@ -22,6 +22,7 @@
#include "linklist.h"
#include "memory.h"
+#include "command.h"
#include "vector.h"
#include "prefix.h"
#include "vty.h"
@@ -32,6 +33,7 @@
#include "libfrr.h"
#include "lib_errors.h"
#include "table.h"
+#include "json.h"
DEFINE_MTYPE_STATIC(LIB, ROUTE_MAP, "Route map");
DEFINE_MTYPE(LIB, ROUTE_MAP_NAME, "Route map name");
@@ -840,50 +842,140 @@ static const char *route_map_result_str(route_map_result_t res)
}
/* show route-map */
-static void vty_show_route_map_entry(struct vty *vty, struct route_map *map)
+static void vty_show_route_map_entry(struct vty *vty, struct route_map *map,
+ json_object *json)
{
struct route_map_index *index;
struct route_map_rule *rule;
-
- vty_out(vty, "route-map: %s Invoked: %" PRIu64 " Optimization: %s Processed Change: %s\n",
- map->name, map->applied - map->applied_clear,
- map->optimization_disabled ? "disabled" : "enabled",
- map->to_be_processed ? "true" : "false");
+ json_object *json_rmap = NULL;
+ json_object *json_rules = NULL;
+
+ if (json) {
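+ /* Each route-map becomes an object keyed by its name, with its entries collected in a "rules" array. */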
+ json_rmap = json_object_new_object();
+ json_object_object_add(json, map->name, json_rmap);
+
+ json_rules = json_object_new_array();
+ json_object_int_add(json_rmap, "invoked",
+ map->applied - map->applied_clear);
+ json_object_boolean_add(json_rmap, "disabledOptimization",
+ map->optimization_disabled);
+ json_object_boolean_add(json_rmap, "processedChange",
+ map->to_be_processed);
+ json_object_object_add(json_rmap, "rules", json_rules);
+ } else {
+ vty_out(vty,
+ "route-map: %s Invoked: %" PRIu64
+ " Optimization: %s Processed Change: %s\n",
+ map->name, map->applied - map->applied_clear,
+ map->optimization_disabled ? "disabled" : "enabled",
+ map->to_be_processed ? "true" : "false");
+ }
for (index = map->head; index; index = index->next) {
- vty_out(vty, " %s, sequence %d Invoked %" PRIu64 "\n",
- route_map_type_str(index->type), index->pref,
- index->applied - index->applied_clear);
-
- /* Description */
- if (index->description)
- vty_out(vty, " Description:\n %s\n",
- index->description);
-
- /* Match clauses */
- vty_out(vty, " Match clauses:\n");
- for (rule = index->match_list.head; rule; rule = rule->next)
- vty_out(vty, " %s %s\n", rule->cmd->str,
- rule->rule_str);
-
- vty_out(vty, " Set clauses:\n");
- for (rule = index->set_list.head; rule; rule = rule->next)
- vty_out(vty, " %s %s\n", rule->cmd->str,
- rule->rule_str);
-
- /* Call clause */
- vty_out(vty, " Call clause:\n");
- if (index->nextrm)
- vty_out(vty, " Call %s\n", index->nextrm);
-
- /* Exit Policy */
- vty_out(vty, " Action:\n");
- if (index->exitpolicy == RMAP_GOTO)
- vty_out(vty, " Goto %d\n", index->nextpref);
- else if (index->exitpolicy == RMAP_NEXT)
- vty_out(vty, " Continue to next entry\n");
- else if (index->exitpolicy == RMAP_EXIT)
- vty_out(vty, " Exit routemap\n");
+ if (json) {
+ json_object *json_rule;
+ json_object *json_matches;
+ json_object *json_sets;
+ char action[BUFSIZ] = {};
+
+ json_rule = json_object_new_object();
+ json_object_array_add(json_rules, json_rule);
+
+ json_object_int_add(json_rule, "sequenceNumber",
+ index->pref);
+ json_object_string_add(json_rule, "type",
+ route_map_type_str(index->type));
+ json_object_int_add(json_rule, "invoked",
+ index->applied
+ - index->applied_clear);
+
+ /* Description */
+ if (index->description)
+ json_object_string_add(json_rule, "description",
+ index->description);
+
+ /* Match clauses */
+ json_matches = json_object_new_array();
+ json_object_object_add(json_rule, "matchClauses",
+ json_matches);
+ for (rule = index->match_list.head; rule;
+ rule = rule->next) {
+ char buf[BUFSIZ];
+
+ snprintf(buf, sizeof(buf), "%s %s",
+ rule->cmd->str, rule->rule_str);
+ json_array_string_add(json_matches, buf);
+ }
+
+ /* Set clauses */
+ json_sets = json_object_new_array();
+ json_object_object_add(json_rule, "setClauses",
+ json_sets);
+ for (rule = index->set_list.head; rule;
+ rule = rule->next) {
+ char buf[BUFSIZ];
+
+ snprintf(buf, sizeof(buf), "%s %s",
+ rule->cmd->str, rule->rule_str);
+ json_array_string_add(json_sets, buf);
+ }
+
+ /* Call clause */
+ if (index->nextrm)
+ json_object_string_add(json_rule, "callClause",
+ index->nextrm);
+
+ /* Exit Policy */
+ if (index->exitpolicy == RMAP_GOTO)
+ snprintf(action, sizeof(action), "Goto %d",
+ index->nextpref);
+ else if (index->exitpolicy == RMAP_NEXT)
+ snprintf(action, sizeof(action),
+ "Continue to next entry");
+ else if (index->exitpolicy == RMAP_EXIT)
+ snprintf(action, sizeof(action),
+ "Exit routemap");
+ if (action[0] != '\0')
+ json_object_string_add(json_rule, "action",
+ action);
+ } else {
+ vty_out(vty, " %s, sequence %d Invoked %" PRIu64 "\n",
+ route_map_type_str(index->type), index->pref,
+ index->applied - index->applied_clear);
+
+ /* Description */
+ if (index->description)
+ vty_out(vty, " Description:\n %s\n",
+ index->description);
+
+ /* Match clauses */
+ vty_out(vty, " Match clauses:\n");
+ for (rule = index->match_list.head; rule;
+ rule = rule->next)
+ vty_out(vty, " %s %s\n", rule->cmd->str,
+ rule->rule_str);
+
+ /* Set clauses */
+ vty_out(vty, " Set clauses:\n");
+ for (rule = index->set_list.head; rule;
+ rule = rule->next)
+ vty_out(vty, " %s %s\n", rule->cmd->str,
+ rule->rule_str);
+
+ /* Call clause */
+ vty_out(vty, " Call clause:\n");
+ if (index->nextrm)
+ vty_out(vty, " Call %s\n", index->nextrm);
+
+ /* Exit Policy */
+ vty_out(vty, " Action:\n");
+ if (index->exitpolicy == RMAP_GOTO)
+ vty_out(vty, " Goto %d\n", index->nextpref);
+ else if (index->exitpolicy == RMAP_NEXT)
+ vty_out(vty, " Continue to next entry\n");
+ else if (index->exitpolicy == RMAP_EXIT)
+ vty_out(vty, " Exit routemap\n");
+ }
}
}
@@ -895,22 +987,28 @@ static int sort_route_map(const void **map1, const void **map2)
return strcmp(m1->name, m2->name);
}
-static int vty_show_route_map(struct vty *vty, const char *name)
+static int vty_show_route_map(struct vty *vty, const char *name, bool use_json)
{
struct route_map *map;
+ json_object *json = NULL;
+ json_object *json_proto = NULL;
- vty_out(vty, "%s:\n", frr_protonameinst);
+ if (use_json) {
+ json = json_object_new_object();
+ json_proto = json_object_new_object();
+ json_object_object_add(json, frr_protonameinst, json_proto);
+ } else
+ vty_out(vty, "%s:\n", frr_protonameinst);
if (name) {
map = route_map_lookup_by_name(name);
if (map) {
- vty_show_route_map_entry(vty, map);
+ vty_show_route_map_entry(vty, map, json_proto);
return CMD_SUCCESS;
- } else {
+ } else if (!use_json) {
vty_out(vty, "%s: 'route-map %s' not found\n",
frr_protonameinst, name);
- return CMD_SUCCESS;
}
} else {
@@ -923,10 +1021,18 @@ static int vty_show_route_map(struct vty *vty, const char *name)
list_sort(maplist, sort_route_map);
for (ALL_LIST_ELEMENTS_RO(maplist, ln, map))
- vty_show_route_map_entry(vty, map);
+ vty_show_route_map_entry(vty, map, json_proto);
list_delete(&maplist);
}
+
+ if (use_json) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+
return CMD_SUCCESS;
}
@@ -950,7 +1056,7 @@ static int vty_show_unused_route_map(struct vty *vty)
list_sort(maplist, sort_route_map);
for (ALL_LIST_ELEMENTS_RO(maplist, ln, map))
- vty_show_route_map_entry(vty, map);
+ vty_show_route_map_entry(vty, map, NULL);
} else {
vty_out(vty, "\n%s: None\n", frr_protonameinst);
}
@@ -2957,14 +3063,20 @@ DEFUN (rmap_clear_counters,
DEFUN (rmap_show_name,
rmap_show_name_cmd,
- "show route-map [WORD]",
+ "show route-map [WORD] [json]",
SHOW_STR
"route-map information\n"
- "route-map name\n")
+ "route-map name\n"
+ JSON_STR)
{
- int idx_word = 2;
- const char *name = (argc == 3) ? argv[idx_word]->arg : NULL;
- return vty_show_route_map(vty, name);
+ bool uj = use_json(argc, argv);
+ int idx = 0;
+ const char *name = NULL;
+
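+ /* Both the route-map name and the json keyword are optional, so search the tokens for WORD instead of using a fixed index. */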
+ if (argv_find(argv, argc, "WORD", &idx))
+ name = argv[idx]->arg;
+
+ return vty_show_route_map(vty, name, uj);
}
DEFUN (rmap_show_unused,
diff --git a/lib/vrf.c b/lib/vrf.c
index 03d9a62c0f..815c0fcba2 100644
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -287,6 +287,7 @@ void vrf_delete(struct vrf *vrf)
RB_REMOVE(vrf_id_head, &vrfs_by_id, vrf);
vrf->vrf_id = VRF_UNKNOWN;
}
+ vrf->ns_ctxt = NULL;
return;
}
diff --git a/nhrpd/nhrp_vty.c b/nhrpd/nhrp_vty.c
index 963fa4d995..dcbc61e926 100644
--- a/nhrpd/nhrp_vty.c
+++ b/nhrpd/nhrp_vty.c
@@ -26,15 +26,6 @@ static struct cmd_node zebra_node = {
.config_write = nhrp_config_write,
};
-static int interface_config_write(struct vty *vty);
-static struct cmd_node nhrp_interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = interface_config_write,
-};
-
#define NHRP_DEBUG_FLAGS_CMD "<all|common|event|interface|kernel|route|vici>"
#define NHRP_DEBUG_FLAGS_STR \
@@ -1263,9 +1254,7 @@ void nhrp_config_init(void)
vrf_cmd_init(NULL, &nhrpd_privs);
/* interface specific commands */
- install_node(&nhrp_interface_node);
-
- if_cmd_init();
+ if_cmd_init(interface_config_write);
install_element(INTERFACE_NODE, &tunnel_protection_cmd);
install_element(INTERFACE_NODE, &no_tunnel_protection_cmd);
install_element(INTERFACE_NODE, &tunnel_source_cmd);
diff --git a/ospf6d/ospf6_abr.c b/ospf6d/ospf6_abr.c
index cc99d7c387..69be807c13 100644
--- a/ospf6d/ospf6_abr.c
+++ b/ospf6d/ospf6_abr.c
@@ -477,11 +477,11 @@ int ospf6_abr_originate_summary_to_area(struct ospf6_route *route,
monotime(&summary->changed);
}
+ summary->prefix_options = route->prefix_options;
summary->path.router_bits = route->path.router_bits;
summary->path.options[0] = route->path.options[0];
summary->path.options[1] = route->path.options[1];
summary->path.options[2] = route->path.options[2];
- summary->path.prefix_options = route->path.prefix_options;
summary->path.area_id = area->area_id;
summary->path.type = OSPF6_PATH_TYPE_INTER;
summary->path.subtype = route->path.subtype;
@@ -514,7 +514,7 @@ int ospf6_abr_originate_summary_to_area(struct ospf6_route *route,
/* Fill Inter-Area-Prefix-LSA */
OSPF6_ABR_SUMMARY_METRIC_SET(prefix_lsa, route->path.cost);
prefix_lsa->prefix.prefix_length = route->prefix.prefixlen;
- prefix_lsa->prefix.prefix_options = route->path.prefix_options;
+ prefix_lsa->prefix.prefix_options = route->prefix_options;
/* set Prefix */
memcpy(p, &route->prefix.u.prefix6,
@@ -715,7 +715,7 @@ void ospf6_abr_defaults_to_stub(struct ospf6 *o)
if (!o->backbone)
return;
- def = ospf6_route_create();
+ def = ospf6_route_create(o);
def->type = OSPF6_DEST_TYPE_NETWORK;
def->prefix.family = AF_INET6;
def->prefix.prefixlen = 0;
@@ -1150,10 +1150,11 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
/* (5),(6): the path preference is handled by the sorting
in the routing table. Always install the path by substituting
old route (if any). */
- route = ospf6_route_create();
+ route = ospf6_route_create(oa->ospf6);
route->type = type;
route->prefix = prefix;
+ route->prefix_options = prefix_options;
route->path.origin.type = lsa->header->type;
route->path.origin.id = lsa->header->id;
route->path.origin.adv_router = lsa->header->adv_router;
@@ -1161,7 +1162,6 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
route->path.options[0] = options[0];
route->path.options[1] = options[1];
route->path.options[2] = options[2];
- route->path.prefix_options = prefix_options;
route->path.area_id = oa->area_id;
route->path.type = OSPF6_PATH_TYPE_INTER;
route->path.cost = abr_entry->path.cost + cost;
@@ -1237,7 +1237,9 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
listcount(old_route->nh_list));
}
} else {
- struct ospf6_route *tmp_route = ospf6_route_create();
+ struct ospf6_route *tmp_route;
+
+ tmp_route = ospf6_route_create(oa->ospf6);
ospf6_copy_nexthops(tmp_route->nh_list,
o_path->nh_list);
diff --git a/ospf6d/ospf6_area.c b/ospf6d/ospf6_area.c
index 355b8441bd..f4d9964a57 100644
--- a/ospf6d/ospf6_area.c
+++ b/ospf6d/ospf6_area.c
@@ -519,7 +519,7 @@ DEFUN (area_range,
range = ospf6_route_lookup(&prefix, oa->range_table);
if (range == NULL) {
- range = ospf6_route_create();
+ range = ospf6_route_create(ospf6);
range->type = OSPF6_DEST_TYPE_RANGE;
range->prefix = prefix;
range->path.area_id = oa->area_id;
diff --git a/ospf6d/ospf6_asbr.c b/ospf6d/ospf6_asbr.c
index 3e911a743a..165e409eed 100644
--- a/ospf6d/ospf6_asbr.c
+++ b/ospf6d/ospf6_asbr.c
@@ -57,6 +57,7 @@
DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_EXTERNAL_INFO, "OSPF6 ext. info");
DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_DIST_ARGS, "OSPF6 Distribute arguments");
DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_REDISTRIBUTE, "OSPF6 Redistribute arguments");
+DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_EXTERNAL_RT_AGGR, "OSPF6 ASBR Summarisation");
static void ospf6_asbr_redistribute_set(struct ospf6 *ospf6, int type);
static void ospf6_asbr_redistribute_unset(struct ospf6 *ospf6,
@@ -70,9 +71,28 @@ unsigned char conf_debug_ospf6_asbr = 0;
#define ZROUTE_NAME(x) zebra_route_string(x)
+/* Originate Type-5 and Type-7 LSA */
+static struct ospf6_lsa *ospf6_originate_type5_type7_lsas(
+ struct ospf6_route *route,
+ struct ospf6 *ospf6)
+{
+ struct ospf6_lsa *lsa;
+ struct listnode *lnode;
+ struct ospf6_area *oa = NULL;
+
+ lsa = ospf6_as_external_lsa_originate(route, ospf6);
+
+ for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, lnode, oa)) {
+ if (IS_AREA_NSSA(oa))
+ ospf6_nssa_lsa_originate(route, oa);
+ }
+
+ return lsa;
+}
+
/* AS External LSA origination */
-void ospf6_as_external_lsa_originate(struct ospf6_route *route,
- struct ospf6 *ospf6)
+struct ospf6_lsa *ospf6_as_external_lsa_originate(struct ospf6_route *route,
+ struct ospf6 *ospf6)
{
char buffer[OSPF6_MAX_LSASIZE];
struct ospf6_lsa_header *lsa_header;
@@ -121,7 +141,7 @@ void ospf6_as_external_lsa_originate(struct ospf6_route *route,
as_external_lsa->prefix.prefix_length = route->prefix.prefixlen;
/* PrefixOptions */
- as_external_lsa->prefix.prefix_options = route->path.prefix_options;
+ as_external_lsa->prefix.prefix_options = route->prefix_options;
/* don't use refer LS-type */
as_external_lsa->prefix.prefix_refer_lstype = htons(0);
@@ -164,6 +184,8 @@ void ospf6_as_external_lsa_originate(struct ospf6_route *route,
/* Originate */
ospf6_lsa_originate_process(lsa, ospf6);
+
+ return lsa;
}
int ospf6_orig_as_external_lsa(struct thread *thread)
@@ -583,18 +605,18 @@ void ospf6_asbr_lsa_add(struct ospf6_lsa *lsa)
}
}
- route = ospf6_route_create();
+ route = ospf6_route_create(ospf6);
route->type = OSPF6_DEST_TYPE_NETWORK;
route->prefix.family = AF_INET6;
route->prefix.prefixlen = external->prefix.prefix_length;
ospf6_prefix_in6_addr(&route->prefix.u.prefix6, external,
&external->prefix);
+ route->prefix_options = external->prefix.prefix_options;
route->path.area_id = asbr_entry->path.area_id;
route->path.origin.type = lsa->header->type;
route->path.origin.id = lsa->header->id;
route->path.origin.adv_router = lsa->header->adv_router;
- route->path.prefix_options = external->prefix.prefix_options;
memcpy(&route->path.ls_prefix, &asbr_id, sizeof(struct prefix));
if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_E)) {
@@ -705,7 +727,7 @@ void ospf6_asbr_lsa_remove(struct ospf6_lsa *lsa,
return;
}
- route_to_del = ospf6_route_create();
+ route_to_del = ospf6_route_create(ospf6);
route_to_del->type = OSPF6_DEST_TYPE_NETWORK;
route_to_del->prefix.family = AF_INET6;
route_to_del->prefix.prefixlen = external->prefix.prefix_length;
@@ -1301,6 +1323,28 @@ void ospf6_asbr_remove_externals_from_area(struct ospf6_area *oa)
}
}
+static struct ospf6_external_aggr_rt *
+ospf6_external_aggr_match(struct ospf6 *ospf6, struct prefix *p)
+{
+ struct route_node *node;
+
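+ /* Longest-prefix match of the external route against the configured aggregation table. */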
+ node = route_node_match(ospf6->rt_aggr_tbl, p);
+ if (node == NULL)
+ return NULL;
+
+ if (IS_OSPF6_DEBUG_AGGR) {
+ struct ospf6_external_aggr_rt *ag = node->info;
+ zlog_debug("%s: Matching aggregator found.prefix: %pFX Aggregator %pFX",
+ __func__,
+ p,
+ &ag->p);
+ }
+
+ route_unlock_node(node);
+
+ return node->info;
+}
+
void ospf6_asbr_redistribute_add(int type, ifindex_t ifindex,
struct prefix *prefix,
unsigned int nexthop_num,
@@ -1308,8 +1352,6 @@ void ospf6_asbr_redistribute_add(int type, ifindex_t ifindex,
struct ospf6 *ospf6)
{
route_map_result_t ret;
- struct listnode *lnode;
- struct ospf6_area *oa;
struct ospf6_route troute;
struct ospf6_external_info tinfo;
struct ospf6_route *route, *match;
@@ -1378,6 +1420,7 @@ void ospf6_asbr_redistribute_add(int type, ifindex_t ifindex,
match->path.cost = troute.path.cost;
else
match->path.cost = metric_value(ospf6, type, 0);
+
if (!IN6_IS_ADDR_UNSPECIFIED(&tinfo.forwarding))
memcpy(&info->forwarding, &tinfo.forwarding,
sizeof(struct in6_addr));
@@ -1414,25 +1457,22 @@ void ospf6_asbr_redistribute_add(int type, ifindex_t ifindex,
}
match->path.origin.id = htonl(info->id);
- ospf6_as_external_lsa_originate(match, ospf6);
+ ospf6_handle_external_lsa_origination(ospf6, match, prefix);
+
ospf6_asbr_status_update(ospf6, ospf6->redistribute);
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, lnode, oa)) {
- if (IS_AREA_NSSA(oa))
- ospf6_nssa_lsa_originate(match, oa);
- }
return;
}
/* create new entry */
- route = ospf6_route_create();
+ route = ospf6_route_create(ospf6);
route->type = OSPF6_DEST_TYPE_NETWORK;
prefix_copy(&route->prefix, prefix);
+ route->ospf6 = ospf6;
info = (struct ospf6_external_info *)XCALLOC(
MTYPE_OSPF6_EXTERNAL_INFO, sizeof(struct ospf6_external_info));
route->route_option = info;
- info->id = ospf6->external_id++;
/* copy result of route-map */
if (ROUTEMAP(red)) {
@@ -1463,43 +1503,109 @@ void ospf6_asbr_redistribute_add(int type, ifindex_t ifindex,
else
ospf6_route_add_nexthop(route, ifindex, NULL);
- /* create/update binding in external_id_table */
- prefix_id.family = AF_INET;
- prefix_id.prefixlen = IPV4_MAX_BITLEN;
- prefix_id.u.prefix4.s_addr = htonl(info->id);
- node = route_node_get(ospf6->external_id_table, &prefix_id);
- node->info = route;
-
route = ospf6_route_add(route, ospf6->external_table);
- route->route_option = info;
-
- if (IS_OSPF6_DEBUG_ASBR) {
- inet_ntop(AF_INET, &prefix_id.u.prefix4, ibuf, sizeof(ibuf));
- zlog_debug(
- "Advertise as AS-External Id:%s prefix %pFX metric %u",
- ibuf, prefix, route->path.metric_type);
- }
+ ospf6_handle_external_lsa_origination(ospf6, route, prefix);
- route->path.origin.id = htonl(info->id);
- ospf6_as_external_lsa_originate(route, ospf6);
ospf6_asbr_status_update(ospf6, ospf6->redistribute);
+
+}
+
+static void ospf6_asbr_external_lsa_remove_by_id(struct ospf6 *ospf6,
+ uint32_t id)
+{
+ struct ospf6_lsa *lsa;
+ struct ospf6_area *oa;
+ struct listnode *lnode;
+
+ lsa = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_AS_EXTERNAL),
+ htonl(id), ospf6->router_id, ospf6->lsdb);
+ if (!lsa)
+ return;
+
+ ospf6_external_lsa_purge(ospf6, lsa);
+
+ /* Delete the NSSA LSA */
for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, lnode, oa)) {
- if (IS_AREA_NSSA(oa))
- ospf6_nssa_lsa_originate(route, oa);
+ lsa = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_TYPE_7),
+ htonl(id), ospf6->router_id,
+ oa->lsdb);
+ if (lsa) {
+ if (IS_OSPF6_DEBUG_ASBR)
+ zlog_debug("withdraw type 7 lsa, LS ID: %u",
+ htonl(id));
+
+ ospf6_lsa_purge(lsa);
+ }
+ }
+
+}
+
+static void
+ospf6_link_route_to_aggr(struct ospf6_external_aggr_rt *aggr,
+ struct ospf6_route *rt)
+{
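+ /* Insert the external route into the aggregator's hash and back-link it for later unlinking. */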
+ hash_get(aggr->match_extnl_hash, rt, hash_alloc_intern);
+ rt->aggr_route = aggr;
+}
+
+static void
+ospf6_asbr_summary_remove_lsa_and_route(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr)
+{
+
+ /* Send a Max age LSA if it is already originated.*/
+ if (!CHECK_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_ORIGINATED))
+ return;
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Flushing Aggregate route (%pFX)",
+ __func__,
+ &aggr->p);
+
+ ospf6_asbr_external_lsa_remove_by_id(ospf6, aggr->id);
+
+ if (aggr->route) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug(
+ "%s: Remove the blackhole route",
+ __func__);
+ ospf6_zebra_route_update_remove(aggr->route, ospf6);
+ ospf6_route_delete(aggr->route);
+ aggr->route = NULL;
}
+
+ aggr->id = 0;
+ /* Unset the Origination flag */
+ UNSET_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_ORIGINATED);
+}
+
+static void
+ospf6_unlink_route_from_aggr(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr,
+ struct ospf6_route *rt)
+{
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Unlinking external route(%pFX) from aggregator(%pFX), external route count:%ld",
+ __func__,
+ &rt->prefix,
+ &aggr->p,
+ OSPF6_EXTERNAL_RT_COUNT(aggr));
+
+ hash_release(aggr->match_extnl_hash, rt);
+ rt->aggr_route = NULL;
+
+ /* Flush the aggregate route if matching
+ * external route count becomes zero.
+ */
+ if (!OSPF6_EXTERNAL_RT_COUNT(aggr))
+ ospf6_asbr_summary_remove_lsa_and_route(ospf6, aggr);
}
void ospf6_asbr_redistribute_remove(int type, ifindex_t ifindex,
struct prefix *prefix, struct ospf6 *ospf6)
{
- struct ospf6_area *oa;
struct ospf6_route *match;
struct ospf6_external_info *info = NULL;
- struct listnode *lnode;
- struct route_node *node;
- struct ospf6_lsa *lsa;
- struct prefix prefix_id;
- char ibuf[16];
match = ospf6_route_lookup(prefix, ospf6->external_table);
if (match == NULL) {
@@ -1517,44 +1623,17 @@ void ospf6_asbr_redistribute_remove(int type, ifindex_t ifindex,
return;
}
- if (IS_OSPF6_DEBUG_ASBR) {
- inet_ntop(AF_INET, &prefix_id.u.prefix4, ibuf, sizeof(ibuf));
- zlog_debug("Withdraw %pFX (AS-External Id:%s)", prefix, ibuf);
- }
-
- lsa = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_AS_EXTERNAL),
- htonl(info->id), ospf6->router_id, ospf6->lsdb);
- if (lsa) {
- if (IS_OSPF6_DEBUG_ASBR) {
- zlog_debug("withdraw type 5 LSA for route %pFX",
- prefix);
- }
- ospf6_lsa_purge(lsa);
- }
-
- /* Delete the NSSA LSA */
- for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, lnode, oa)) {
- lsa = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_TYPE_7),
- htonl(info->id), ospf6->router_id,
- oa->lsdb);
- if (lsa) {
- if (IS_OSPF6_DEBUG_ASBR) {
- zlog_debug("withdraw type 7 LSA for route %pFX",
- prefix);
- }
- ospf6_lsa_purge(lsa);
- }
- }
+ /* Aggregation was not done on this route, so remove the LSA,
+ * if any, originated for this prefix.
+ */
+ if (!match->aggr_route)
+ ospf6_asbr_external_lsa_remove_by_id(ospf6, info->id);
+ else
+ ospf6_unlink_route_from_aggr(ospf6, match->aggr_route, match);
- /* remove binding in external_id_table */
- prefix_id.family = AF_INET;
- prefix_id.prefixlen = IPV4_MAX_BITLEN;
- prefix_id.u.prefix4.s_addr = htonl(info->id);
- node = route_node_lookup(ospf6->external_id_table, &prefix_id);
- assert(node);
- node->info = NULL;
- route_unlock_node(node); /* to free the lookup lock */
- route_unlock_node(node); /* to free the original lock */
+ if (IS_OSPF6_DEBUG_ASBR)
+ zlog_debug("Removing route from external table %pFX",
+ prefix);
ospf6_route_remove(match, ospf6->external_table);
XFREE(MTYPE_OSPF6_EXTERNAL_INFO, info);
@@ -1574,6 +1653,7 @@ DEFUN (ospf6_redistribute,
VTY_DECLVAR_CONTEXT(ospf6, ospf6);
char *proto = argv[argc - 1]->text;
+
type = proto_redistnum(AFI_IP6, proto);
if (type < 0)
return CMD_WARNING_CONFIG_FAILED;
@@ -2613,3 +2693,988 @@ void install_element_ospf6_debug_asbr(void)
install_element(CONFIG_NODE, &debug_ospf6_asbr_cmd);
install_element(CONFIG_NODE, &no_debug_ospf6_asbr_cmd);
}
+
+/* ASBR Summarisation */
+void ospf6_fill_aggr_route_details(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr)
+{
+ struct ospf6_route *rt_aggr = aggr->route;
+ struct ospf6_external_info *ei_aggr = rt_aggr->route_option;
+
+ rt_aggr->prefix = aggr->p;
+ ei_aggr->tag = aggr->tag;
+ ei_aggr->type = 0;
+ ei_aggr->id = aggr->id;
+
+ /* When metric is not configured, apply the default metric */
+ rt_aggr->path.cost = ((aggr->metric == -1) ?
+ DEFAULT_DEFAULT_METRIC
+ : (unsigned int)(aggr->metric));
+ rt_aggr->path.metric_type = aggr->mtype;
+
+ rt_aggr->path.origin.id = htonl(aggr->id);
+}
+
+static void ospf6_originate_new_aggr_lsa(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr)
+{
+
+ struct prefix prefix_id;
+ struct route_node *node;
+ struct ospf6_lsa *lsa = NULL;
+ struct ospf6_route *rt_aggr;
+ struct ospf6_external_info *info;
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Originate new aggregate route(%pFX)", __func__,
+ &aggr->p);
+
+ aggr->id = ospf6->external_id++;
+ /* create/update binding in external_id_table */
+ prefix_id.family = AF_INET;
+ prefix_id.prefixlen = 32;
+ prefix_id.u.prefix4.s_addr = htonl(aggr->id);
+ node = route_node_get(ospf6->external_id_table, &prefix_id);
+ node->info = aggr;
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug(
+ "Advertise AS-External Id:%pI4 prefix %pFX metric %u",
+ &prefix_id.u.prefix4, &aggr->p, aggr->metric);
+
+ /* Create summary route and save it. */
+ rt_aggr = ospf6_route_create(ospf6);
+ rt_aggr->type = OSPF6_DEST_TYPE_NETWORK;
+ /* Needed to install route while calling zebra api */
+ SET_FLAG(rt_aggr->flag, OSPF6_ROUTE_BEST);
+
+ info = XCALLOC(MTYPE_OSPF6_EXTERNAL_INFO, sizeof(*info));
+ rt_aggr->route_option = info;
+ aggr->route = rt_aggr;
+
+ /* Prepare the external_info for the aggregator and
+ * fill in all the details which will be advertised.
+ */
+ ospf6_fill_aggr_route_details(ospf6, aggr);
+
+ /* Add next-hop to Null interface. */
+ ospf6_add_route_nexthop_blackhole(rt_aggr);
+
+ ospf6_zebra_route_update_add(rt_aggr, ospf6);
+
+ /* Originate summary LSA */
+ lsa = ospf6_originate_type5_type7_lsas(rt_aggr, ospf6);
+ if (lsa) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Set the origination bit for aggregator",
+ __func__);
+ SET_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_ORIGINATED);
+ }
+}
+
+static void
+ospf6_aggr_handle_advertise_change(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr)
+{
+ /* Check if advertise option modified. */
+ if (CHECK_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE)) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Don't originate the summary address,It is configured to not-advertise.",
+ __func__);
+ ospf6_asbr_summary_remove_lsa_and_route(ospf6, aggr);
+
+ return;
+ }
+
+ /* There are no routes present under this aggregation config, hence
+ * nothing to originate here
+ */
+ if (OSPF6_EXTERNAL_RT_COUNT(aggr) == 0) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: No routes present under this aggregation",
+ __func__);
+ return;
+ }
+
+ if (!CHECK_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_ORIGINATED)) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Now it is advertisable",
+ __func__);
+
+ ospf6_originate_new_aggr_lsa(ospf6, aggr);
+
+ return;
+ }
+}
+
+static void
+ospf6_originate_summary_lsa(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr,
+ struct ospf6_route *rt)
+{
+ struct ospf6_lsa *lsa = NULL, *aggr_lsa = NULL;
+ struct ospf6_external_info *info = NULL;
+ struct ospf6_external_aggr_rt *old_aggr;
+ struct ospf6_as_external_lsa *external;
+ struct ospf6_route *rt_aggr = NULL;
+ route_tag_t tag = 0;
+ unsigned int metric = 0;
+ int mtype;
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Prepare to originate Summary route(%pFX)",
+ __func__, &aggr->p);
+
+ /* This handles the case where an overlapping aggregator address
+ * is available. The best match is considered, so delink the route
+ * from the old aggregator and link it to the new one.
+ */
+ if (rt->aggr_route) {
+ if (rt->aggr_route != aggr) {
+ old_aggr = rt->aggr_route;
+ ospf6_unlink_route_from_aggr(ospf6, old_aggr, rt);
+ }
+ }
+
+ /* Add the external route to hash table */
+ ospf6_link_route_to_aggr(aggr, rt);
+
+ /* The key for the ID field is a running number, not the prefix */
+ info = rt->route_option;
+ assert(info);
+ if (info->id) {
+ lsa = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_AS_EXTERNAL),
+ htonl(info->id), ospf6->router_id,
+ ospf6->lsdb);
+ assert(lsa);
+ }
+
+ aggr_lsa = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_AS_EXTERNAL),
+ htonl(aggr->id), ospf6->router_id, ospf6->lsdb);
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Aggr LSA ID: %d flags %x.",
+ __func__, aggr->id, aggr->aggrflags);
+ /* Don't originate the external LSA
+ * if it is configured not to advertise.
+ */
+ if (CHECK_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE)) {
+ /* If it is already originated as an external LSA
+ * but is configured not to advertise, then
+ * flush the originated external LSA.
+ */
+ if (lsa) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Purge the external LSA %s.",
+ __func__, lsa->name);
+ ospf6_external_lsa_purge(ospf6, lsa);
+ info->id = 0;
+ rt->path.origin.id = 0;
+ }
+
+ if (aggr_lsa) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Purge the aggr external LSA %s.",
+ __func__, aggr_lsa->name);
+ ospf6_asbr_summary_remove_lsa_and_route(ospf6, aggr);
+ }
+
+ UNSET_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_ORIGINATED);
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Don't originate the summary address,It is configured to not-advertise.",
+ __func__);
+ return;
+ }
+
+ /* Summary route already originated,
+ * so do nothing.
+ */
+ if (CHECK_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_ORIGINATED)) {
+ if (!aggr_lsa) {
+ zlog_warn(
+ "%s: Could not refresh/originate %pFX",
+ __func__,
+ &aggr->p);
+ /* Remove the assert later */
+ assert(aggr_lsa);
+ return;
+ }
+
+ external = (struct ospf6_as_external_lsa *)OSPF6_LSA_HEADER_END
+ (aggr_lsa->header);
+ metric = (unsigned long)OSPF6_ASBR_METRIC(external);
+ tag = ospf6_as_external_lsa_get_tag(aggr_lsa);
+ mtype = CHECK_FLAG(external->bits_metric,
+ OSPF6_ASBR_BIT_E) ? 2 : 1;
+
+ /* Prepare the external_info for aggregator */
+ ospf6_fill_aggr_route_details(ospf6, aggr);
+ rt_aggr = aggr->route;
+ /* If tag/metric/metric-type is modified, then re-originate the
+ * route with modified tag/metric/metric-type details.
+ */
+ if ((tag != aggr->tag)
+ || (metric != (unsigned int)rt_aggr->path.cost)
+ || (mtype != aggr->mtype)) {
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug(
+ "%s: Routetag(old:%d new:%d)/Metric(o:%u,n:%u)/mtype(o:%d n:%d) modified,So refresh the summary route.(%pFX)",
+ __func__, tag, aggr->tag,
+ metric,
+ aggr->metric,
+ mtype, aggr->mtype,
+ &aggr->p);
+
+ aggr_lsa = ospf6_originate_type5_type7_lsas(aggr->route,
+ ospf6);
+ if (aggr_lsa)
+ SET_FLAG(aggr->aggrflags,
+ OSPF6_EXTERNAL_AGGRT_ORIGINATED);
+ }
+
+ return;
+ }
+
+ /* If the external route prefix is the same as the aggregate route
+ * and the external route is already originated as a Type-5 LSA,
+ * then it needs to be refreshed and the originated bit should
+ * be set.
+ */
+ if (lsa && prefix_same(&aggr->p, &rt->prefix)) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: External route prefix is same as aggr so refreshing LSA(%pFX)",
+ __PRETTY_FUNCTION__,
+ &aggr->p);
+
+ THREAD_OFF(lsa->refresh);
+ thread_add_event(master, ospf6_lsa_refresh, lsa, 0,
+ &lsa->refresh);
+ aggr->id = info->id;
+ SET_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_ORIGINATED);
+ return;
+ }
+
+ ospf6_originate_new_aggr_lsa(ospf6, aggr);
+}
+
+static void ospf6_aggr_handle_external_info(void *data)
+{
+ struct ospf6_route *rt = (struct ospf6_route *)data;
+ struct ospf6_external_aggr_rt *aggr = NULL;
+ struct ospf6_lsa *lsa = NULL;
+ struct ospf6_external_info *info;
+ struct ospf6 *ospf6 = NULL;
+ struct prefix prefix_id;
+ struct route_node *node;
+
+ rt->aggr_route = NULL;
+
+ rt->to_be_processed = true;
+
+ if (IS_OSPF6_DEBUG_ASBR || IS_OSPF6_DEBUG_ORIGINATE(AS_EXTERNAL))
+ zlog_debug("%s: Handle external route for origination/refresh (%pFX)",
+ __func__,
+ &rt->prefix);
+
+ ospf6 = rt->ospf6;
+ assert(ospf6);
+
+ aggr = ospf6_external_aggr_match(ospf6,
+ &rt->prefix);
+ if (aggr) {
+ ospf6_originate_summary_lsa(ospf6, aggr, rt);
+ return;
+ }
+
+ info = rt->route_option;
+ if (info->id) {
+ lsa = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_AS_EXTERNAL),
+ htonl(info->id), ospf6->router_id,
+ ospf6->lsdb);
+ if (lsa) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: LSA found, refresh it",
+ __func__);
+ THREAD_OFF(lsa->refresh);
+ thread_add_event(master, ospf6_lsa_refresh, lsa, 0,
+ &lsa->refresh);
+ return;
+ }
+ }
+
+ info->id = ospf6->external_id++;
+ rt->path.origin.id = htonl(info->id);
+
+ /* create/update binding in external_id_table */
+ prefix_id.family = AF_INET;
+ prefix_id.prefixlen = 32;
+ prefix_id.u.prefix4.s_addr = htonl(info->id);
+ node = route_node_get(ospf6->external_id_table, &prefix_id);
+ node->info = rt;
+
+ (void)ospf6_originate_type5_type7_lsas(rt, ospf6);
+}
+
+static void
+ospf6_asbr_summary_config_delete(struct ospf6 *ospf6, struct route_node *rn)
+{
+ struct ospf6_external_aggr_rt *aggr = rn->info;
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Deleting Aggregate route (%pFX)",
+ __func__,
+ &aggr->p);
+
+ ospf6_asbr_summary_remove_lsa_and_route(ospf6, aggr);
+
+ rn->info = NULL;
+ route_unlock_node(rn);
+}
+
+static int
+ospf6_handle_external_aggr_modify(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr)
+{
+ struct ospf6_lsa *lsa = NULL;
+ struct ospf6_as_external_lsa *asel = NULL;
+ struct ospf6_route *rt_aggr;
+ unsigned int metric = 0;
+ route_tag_t tag = 0;
+ int mtype;
+
+ lsa = ospf6_lsdb_lookup(
+ htons(OSPF6_LSTYPE_AS_EXTERNAL),
+ htonl(aggr->id), ospf6->router_id,
+ ospf6->lsdb);
+ if (!lsa) {
+ zlog_warn(
+ "%s: Could not refresh/originate %pFX",
+ __func__,
+ &aggr->p);
+
+ return OSPF6_FAILURE;
+ }
+
+ asel = (struct ospf6_as_external_lsa *)
+ OSPF6_LSA_HEADER_END(lsa->header);
+ metric = (unsigned long)OSPF6_ASBR_METRIC(asel);
+ tag = ospf6_as_external_lsa_get_tag(lsa);
+ mtype = CHECK_FLAG(asel->bits_metric,
+ OSPF6_ASBR_BIT_E) ? 2 : 1;
+
+ /* Fill all the details for advertisement */
+ ospf6_fill_aggr_route_details(ospf6, aggr);
+ rt_aggr = aggr->route;
+ /* If tag/metric/metric-type is modified, then
+ * re-originate the route with modified
+ * tag/metric/metric-type details.
+ */
+ if ((tag != aggr->tag)
+ || (metric
+ != (unsigned int)rt_aggr->path.cost)
+ || (mtype
+ != aggr->mtype)) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug(
+ "%s: Changed tag(old:%d new:%d)/metric(o:%u n:%d)/mtype(o:%d n:%d),So refresh the summary route.(%pFX)",
+ __func__, tag,
+ aggr->tag,
+ metric,
+ (unsigned int)rt_aggr->path.cost,
+ mtype, aggr->mtype,
+ &aggr->p);
+
+ (void)ospf6_originate_type5_type7_lsas(
+ aggr->route,
+ ospf6);
+ }
+
+ return OSPF6_SUCCESS;
+}
+
+static void ospf6_handle_external_aggr_update(struct ospf6 *ospf6)
+{
+ struct route_node *rn = NULL;
+ int ret;
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Process modified aggregators.", __func__);
+
+ for (rn = route_top(ospf6->rt_aggr_tbl); rn; rn = route_next(rn)) {
+ struct ospf6_external_aggr_rt *aggr;
+
+ if (!rn->info)
+ continue;
+
+ aggr = rn->info;
+
+ if (aggr->action == OSPF6_ROUTE_AGGR_DEL) {
+ aggr->action = OSPF6_ROUTE_AGGR_NONE;
+ ospf6_asbr_summary_config_delete(ospf6, rn);
+
+ if (OSPF6_EXTERNAL_RT_COUNT(aggr))
+ hash_clean(aggr->match_extnl_hash,
+ ospf6_aggr_handle_external_info);
+
+ hash_free(aggr->match_extnl_hash);
+ XFREE(MTYPE_OSPF6_EXTERNAL_RT_AGGR, aggr);
+
+ } else if (aggr->action == OSPF6_ROUTE_AGGR_MODIFY) {
+
+ aggr->action = OSPF6_ROUTE_AGGR_NONE;
+
+ /* Check if tag/metric/metric-type modified */
+ if (CHECK_FLAG(aggr->aggrflags,
+ OSPF6_EXTERNAL_AGGRT_ORIGINATED)
+ && !CHECK_FLAG(aggr->aggrflags,
+ OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE)) {
+
+ ret = ospf6_handle_external_aggr_modify(ospf6,
+ aggr);
+ if (ret == OSPF6_FAILURE)
+ continue;
+ }
+
+ /* Advertise option modified?
+ * If so, handle it here.
+ */
+ ospf6_aggr_handle_advertise_change(ospf6, aggr);
+ }
+ }
+}
+
+static void ospf6_aggr_unlink_external_info(void *data)
+{
+ struct ospf6_route *rt = (struct ospf6_route *)data;
+
+ rt->aggr_route = NULL;
+
+ rt->to_be_processed = true;
+}
+
+void ospf6_external_aggregator_free(struct ospf6_external_aggr_rt *aggr)
+{
+ if (OSPF6_EXTERNAL_RT_COUNT(aggr))
+ hash_clean(aggr->match_extnl_hash,
+ ospf6_aggr_unlink_external_info);
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Release the aggregator Address(%pFX)",
+ __func__,
+ &aggr->p);
+
+ hash_free(aggr->match_extnl_hash);
+ aggr->match_extnl_hash = NULL;
+
+ XFREE(MTYPE_OSPF6_EXTERNAL_RT_AGGR, aggr);
+}
+
+static void
+ospf6_delete_all_marked_aggregators(struct ospf6 *ospf6)
+{
+ struct route_node *rn = NULL;
+ struct ospf6_external_aggr_rt *aggr;
+
+ /* Loop through all the aggregators. Delete all aggregators
+ * which are marked as DELETE. Set action to NONE for the remaining
+ * aggregators.
+ */
+ for (rn = route_top(ospf6->rt_aggr_tbl); rn; rn = route_next(rn)) {
+ if (!rn->info)
+ continue;
+
+ aggr = rn->info;
+
+ if (aggr->action != OSPF6_ROUTE_AGGR_DEL) {
+ aggr->action = OSPF6_ROUTE_AGGR_NONE;
+ continue;
+ }
+ ospf6_asbr_summary_config_delete(ospf6, rn);
+ ospf6_external_aggregator_free(aggr);
+ }
+}
+
+static void ospf6_handle_exnl_rt_after_aggr_del(struct ospf6 *ospf6,
+ struct ospf6_route *rt)
+{
+ struct ospf6_lsa *lsa;
+
+ /* Process only marked external routes.
+ * These routes were part of a deleted
+ * aggregator. So, originate now.
+ */
+ if (!rt->to_be_processed)
+ return;
+
+ rt->to_be_processed = false;
+
+ lsa = ospf6_find_external_lsa(ospf6, &rt->prefix);
+
+ if (lsa) {
+ THREAD_OFF(lsa->refresh);
+ thread_add_event(master, ospf6_lsa_refresh, lsa, 0,
+ &lsa->refresh);
+ } else {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Originate external route(%pFX)",
+ __func__,
+ &rt->prefix);
+
+ (void)ospf6_originate_type5_type7_lsas(rt, ospf6);
+ }
+}
+
+static void ospf6_handle_aggregated_exnl_rt(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr,
+ struct ospf6_route *rt)
+{
+ struct ospf6_lsa *lsa;
+ struct ospf6_as_external_lsa *ext_lsa;
+ struct ospf6_external_info *info;
+
+ /* Handle the case where the external route prefix
+ * and the aggregate prefix are the same.
+ * If so, don't flush the originated external LSA.
+ */
+ if (prefix_same(&aggr->p, &rt->prefix)) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: External Route prefix same as Aggregator(%pFX), so dont flush.",
+ __func__,
+ &rt->prefix);
+
+ return;
+ }
+
+ info = rt->route_option;
+ assert(info);
+
+ lsa = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_AS_EXTERNAL),
+ htonl(info->id), ospf6->router_id, ospf6->lsdb);
+ if (lsa) {
+ ext_lsa = (struct ospf6_as_external_lsa
+ *)((char *)(lsa->header)
+ + sizeof(struct ospf6_lsa_header));
+
+ if (rt->prefix.prefixlen != ext_lsa->prefix.prefix_length)
+ return;
+
+ ospf6_external_lsa_purge(ospf6, lsa);
+
+ /* Resetting the ID of route */
+ rt->path.origin.id = 0;
+ info->id = 0;
+ }
+}
+
+static void
+ospf6_handle_external_aggr_add(struct ospf6 *ospf6)
+{
+ struct ospf6_route *rt = NULL;
+ struct ospf6_external_info *ei = NULL;
+ struct ospf6_external_aggr_rt *aggr;
+
+ /* Delete all the aggregators which are marked as
+ * OSPF6_ROUTE_AGGR_DEL.
+ */
+ ospf6_delete_all_marked_aggregators(ospf6);
+
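+ /* Walk the full external table and re-evaluate each route against the configured aggregators. */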
+ for (rt = ospf6_route_head(ospf6->external_table); rt;
+ rt = ospf6_route_next(rt)) {
+ ei = rt->route_option;
+ if (ei == NULL)
+ continue;
+
+ if (is_default_prefix(&rt->prefix))
+ continue;
+
+ aggr = ospf6_external_aggr_match(ospf6,
+ &rt->prefix);
+
+ /* If a matching aggregator is found, add
+ * the external route reference to the
+ * aggregator and originate the aggregate
+ * route if it is advertisable.
+ * Flush the external LSA if it is
+ * already originated for this external
+ * prefix.
+ */
+ if (aggr) {
+ ospf6_originate_summary_lsa(ospf6, aggr, rt);
+
+ /* All aggregated external rts
+ * are handled here.
+ */
+ ospf6_handle_aggregated_exnl_rt(
+ ospf6, aggr, rt);
+ continue;
+ }
+
+ /* External routes which fall outside
+ * any aggregation are handled here.
+ */
+ ospf6_handle_exnl_rt_after_aggr_del(
+ ospf6, rt);
+ }
+}
+
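+/* Aggregation delay-timer expiry handler: act on the pending aggregator operation. */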
+static int ospf6_asbr_summary_process(struct thread *thread)
+{
+ struct ospf6 *ospf6 = THREAD_ARG(thread);
+ int operation = 0;
+
+ ospf6->t_external_aggr = NULL;
+ operation = ospf6->aggr_action;
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: operation:%d",
+ __func__,
+ operation);
+
+ switch (operation) {
+ case OSPF6_ROUTE_AGGR_ADD:
+ ospf6_handle_external_aggr_add(ospf6);
+ break;
+ case OSPF6_ROUTE_AGGR_DEL:
+ case OSPF6_ROUTE_AGGR_MODIFY:
+ ospf6_handle_external_aggr_update(ospf6);
+ break;
+ default:
+ break;
+ }
+
+ return OSPF6_SUCCESS;
+}
+
+static void
+ospf6_start_asbr_summary_delay_timer(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr,
+ ospf6_aggr_action_t operation)
+{
+ aggr->action = operation;
+
+ if (ospf6->t_external_aggr) {
+ if (ospf6->aggr_action == OSPF6_ROUTE_AGGR_ADD) {
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Not required to restart timer,set is already added.",
+ __func__);
+ return;
+ }
+
+ if (operation == OSPF6_ROUTE_AGGR_ADD) {
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s, Restarting Aggregator delay timer.",
+ __func__);
+ THREAD_OFF(ospf6->t_external_aggr);
+ }
+ }
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Start Aggregator delay timer %d(in seconds).",
+ __func__, ospf6->aggr_delay_interval);
+
+ ospf6->aggr_action = operation;
+ thread_add_timer(master,
+ ospf6_asbr_summary_process,
+ ospf6, ospf6->aggr_delay_interval,
+ &ospf6->t_external_aggr);
+}
+
+int ospf6_asbr_external_rt_advertise(struct ospf6 *ospf6,
+ struct prefix *p)
+{
+ struct route_node *rn;
+ struct ospf6_external_aggr_rt *aggr;
+
+ rn = route_node_lookup(ospf6->rt_aggr_tbl, p);
+ if (!rn)
+ return OSPF6_INVALID;
+
+ aggr = rn->info;
+
+ route_unlock_node(rn);
+
+ if (!CHECK_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE))
+ return OSPF6_INVALID;
+
+ UNSET_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE);
+
+ if (!OSPF6_EXTERNAL_RT_COUNT(aggr))
+ return OSPF6_SUCCESS;
+
+ ospf6_start_asbr_summary_delay_timer(ospf6, aggr,
+ OSPF6_ROUTE_AGGR_MODIFY);
+
+ return OSPF6_SUCCESS;
+}
+
+int ospf6_external_aggr_delay_timer_set(struct ospf6 *ospf6,
+ unsigned int interval)
+{
+ ospf6->aggr_delay_interval = interval;
+
+ return OSPF6_SUCCESS;
+}
+
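+/* Hash callbacks for the aggregator's matching-route table: routes are keyed and compared by prefix. */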
+static unsigned int ospf6_external_rt_hash_key(const void *data)
+{
+ const struct ospf6_route *rt = data;
+ unsigned int key = 0;
+
+ key = prefix_hash_key(&rt->prefix);
+ return key;
+}
+
+static bool ospf6_external_rt_hash_cmp(const void *d1, const void *d2)
+{
+ const struct ospf6_route *rt1 = d1;
+ const struct ospf6_route *rt2 = d2;
+
+ return prefix_same(&rt1->prefix, &rt2->prefix);
+}
+
+static struct ospf6_external_aggr_rt *
+ospf6_external_aggr_new(struct prefix *p)
+{
+ struct ospf6_external_aggr_rt *aggr;
+
+ aggr = XCALLOC(MTYPE_OSPF6_EXTERNAL_RT_AGGR,
+ sizeof(struct ospf6_external_aggr_rt));
+
+ prefix_copy(&aggr->p, p);
+ aggr->metric = -1;
+ aggr->mtype = DEFAULT_METRIC_TYPE;
+ aggr->match_extnl_hash = hash_create(ospf6_external_rt_hash_key,
+ ospf6_external_rt_hash_cmp,
+ "Ospf6 external route hash");
+ return aggr;
+}
+
+static void ospf6_external_aggr_add(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr)
+{
+ struct route_node *rn;
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Adding Aggregate route to Aggr table (%pFX)",
+ __func__,
+ &aggr->p);
+
+ rn = route_node_get(ospf6->rt_aggr_tbl, &aggr->p);
+ if (rn->info)
+ route_unlock_node(rn);
+ else
+ rn->info = aggr;
+}
+
+int ospf6_asbr_external_rt_no_advertise(struct ospf6 *ospf6,
+ struct prefix *p)
+{
+ struct ospf6_external_aggr_rt *aggr;
+ route_tag_t tag = 0;
+
+ aggr = ospf6_external_aggr_config_lookup(ospf6, p);
+ if (aggr) {
+ if (CHECK_FLAG(aggr->aggrflags,
+ OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE))
+ return OSPF6_SUCCESS;
+
+ SET_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE);
+
+ aggr->tag = tag;
+ aggr->metric = -1;
+
+ if (!OSPF6_EXTERNAL_RT_COUNT(aggr))
+ return OSPF6_SUCCESS;
+
+ ospf6_start_asbr_summary_delay_timer(ospf6, aggr,
+ OSPF6_ROUTE_AGGR_MODIFY);
+ } else {
+ aggr = ospf6_external_aggr_new(p);
+
+ if (!aggr)
+ return OSPF6_FAILURE;
+
+ SET_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE);
+ ospf6_external_aggr_add(ospf6, aggr);
+ ospf6_start_asbr_summary_delay_timer(ospf6, aggr,
+ OSPF6_ROUTE_AGGR_ADD);
+ }
+
+ return OSPF6_SUCCESS;
+}
+
+struct ospf6_external_aggr_rt *
+ospf6_external_aggr_config_lookup(struct ospf6 *ospf6, struct prefix *p)
+{
+ struct route_node *rn;
+
+ rn = route_node_lookup(ospf6->rt_aggr_tbl, p);
+ if (rn) {
+ route_unlock_node(rn);
+ return rn->info;
+ }
+
+ return NULL;
+}
+
+
+int ospf6_external_aggr_config_set(struct ospf6 *ospf6, struct prefix *p,
+ route_tag_t tag, int metric, int mtype)
+{
+ struct ospf6_external_aggr_rt *aggregator;
+
+ aggregator = ospf6_external_aggr_config_lookup(ospf6, p);
+
+ if (aggregator) {
+ if (CHECK_FLAG(aggregator->aggrflags,
+ OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE))
+ UNSET_FLAG(aggregator->aggrflags,
+ OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE);
+ else if ((aggregator->tag == tag)
+ && (aggregator->metric == metric)
+ && (aggregator->mtype == mtype))
+ return OSPF6_SUCCESS;
+
+ aggregator->tag = tag;
+ aggregator->metric = metric;
+ aggregator->mtype = mtype;
+
+ ospf6_start_asbr_summary_delay_timer(ospf6, aggregator,
+ OSPF6_ROUTE_AGGR_MODIFY);
+ } else {
+ aggregator = ospf6_external_aggr_new(p);
+ if (!aggregator)
+ return OSPF6_FAILURE;
+
+ aggregator->tag = tag;
+ aggregator->metric = metric;
+ aggregator->mtype = mtype;
+
+ ospf6_external_aggr_add(ospf6, aggregator);
+ ospf6_start_asbr_summary_delay_timer(ospf6, aggregator,
+ OSPF6_ROUTE_AGGR_ADD);
+ }
+
+ return OSPF6_SUCCESS;
+}
+
+int ospf6_external_aggr_config_unset(struct ospf6 *ospf6,
+ struct prefix *p)
+{
+ struct route_node *rn;
+ struct ospf6_external_aggr_rt *aggr;
+
+ rn = route_node_lookup(ospf6->rt_aggr_tbl, p);
+ if (!rn)
+ return OSPF6_INVALID;
+
+ aggr = rn->info;
+
+ route_unlock_node(rn);
+
+ if (!OSPF6_EXTERNAL_RT_COUNT(aggr)) {
+ ospf6_asbr_summary_config_delete(ospf6, rn);
+ ospf6_external_aggregator_free(aggr);
+ return OSPF6_SUCCESS;
+ }
+
+ ospf6_start_asbr_summary_delay_timer(ospf6, aggr,
+ OSPF6_ROUTE_AGGR_DEL);
+
+ return OSPF6_SUCCESS;
+}
+
+void ospf6_handle_external_lsa_origination(struct ospf6 *ospf6,
+ struct ospf6_route *rt,
+ struct prefix *p)
+{
+
+ struct ospf6_external_aggr_rt *aggr;
+ struct ospf6_external_info *info;
+ struct prefix prefix_id;
+ struct route_node *node;
+
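+ /* The default route is never matched against an aggregator. */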
+ if (!is_default_prefix(p)) {
+ aggr = ospf6_external_aggr_match(ospf6,
+ p);
+
+ if (aggr) {
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("%s: Send Aggregate LSA (%pFX)",
+ __func__,
+ &aggr->p);
+
+ ospf6_originate_summary_lsa(
+ ospf6, aggr, rt);
+
+ /* Handle the case where the
+ * external route prefix and the
+ * aggregate prefix are the same.
+ * If so, don't flush the
+ * originated external LSA.
+ */
+ ospf6_handle_aggregated_exnl_rt(
+ ospf6, aggr, rt);
+ return;
+ }
+ }
+
+ info = rt->route_option;
+
+ /* When info->id is 0, the route is being originated for the
+ * first time.
+ */
+ if (!info->id) {
+ info->id = ospf6->external_id++;
+
+ /* create/update binding in external_id_table */
+ prefix_id.family = AF_INET;
+ prefix_id.prefixlen = 32;
+ prefix_id.u.prefix4.s_addr = htonl(info->id);
+ node = route_node_get(ospf6->external_id_table, &prefix_id);
+ node->info = rt;
+
+ } else {
+ prefix_id.family = AF_INET;
+ prefix_id.prefixlen = 32;
+ prefix_id.u.prefix4.s_addr = htonl(info->id);
+ }
+
+ rt->path.origin.id = htonl(info->id);
+
+ if (IS_OSPF6_DEBUG_ASBR) {
+ zlog_debug("Advertise new AS-External Id:%pI4 prefix %pFX metric %u",
+ &prefix_id.u.prefix4, p, rt->path.metric_type);
+ }
+
+ ospf6_originate_type5_type7_lsas(rt, ospf6);
+
+}
+
+void ospf6_unset_all_aggr_flag(struct ospf6 *ospf6)
+{
+ struct route_node *rn = NULL;
+ struct ospf6_external_aggr_rt *aggr;
+
+ if (IS_OSPF6_DEBUG_AGGR)
+ zlog_debug("Unset the origination bit for all aggregator");
+
+ /* Resetting the running external ID counter so that the origination
+ * of external LSAs starts again from the beginning (0.0.0.1).
+ */
+ ospf6->external_id = OSPF6_EXT_INIT_LS_ID;
+
+ for (rn = route_top(ospf6->rt_aggr_tbl); rn; rn = route_next(rn)) {
+ if (!rn->info)
+ continue;
+
+ aggr = rn->info;
+
+ UNSET_FLAG(aggr->aggrflags, OSPF6_EXTERNAL_AGGRT_ORIGINATED);
+ }
+}
diff --git a/ospf6d/ospf6_asbr.h b/ospf6d/ospf6_asbr.h
index 7ccd1c992b..0aa1374a46 100644
--- a/ospf6d/ospf6_asbr.h
+++ b/ospf6d/ospf6_asbr.h
@@ -46,6 +46,52 @@ struct ospf6_external_info {
route_tag_t tag;
ifindex_t ifindex;
+
+};
+
+/* OSPF6 ASBR Summarisation */
+typedef enum {
+ OSPF6_ROUTE_AGGR_NONE = 0,
+ OSPF6_ROUTE_AGGR_ADD,
+ OSPF6_ROUTE_AGGR_DEL,
+ OSPF6_ROUTE_AGGR_MODIFY
+} ospf6_aggr_action_t;
+
+#define OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE 0x1
+#define OSPF6_EXTERNAL_AGGRT_ORIGINATED 0x2
+
+#define OSPF6_EXTERNAL_RT_COUNT(aggr) \
+ (((struct ospf6_external_aggr_rt *)aggr)->match_extnl_hash->count)
+
+struct ospf6_external_aggr_rt {
+ /* range address and masklen */
+ struct prefix p;
+
+ /* use bits for OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE and
+ * OSPF6_EXTERNAL_AGGRT_ORIGINATED
+ */
+ uint16_t aggrflags;
+
+ /* To store external metric-type */
+ uint8_t mtype;
+
+ /* Route tag for summary address */
+ route_tag_t tag;
+
+ /* To store aggregated metric config */
+ int metric;
+
+ /* To Store the LS ID when LSA is originated */
+ uint32_t id;
+
+ /* Action to be done after delay timer expiry */
+ int action;
+
+ /* OSPFv3 route generated by summary address. */
+ struct ospf6_route *route;
+
+ /* Hash table of matching external routes */
+ struct hash *match_extnl_hash;
};
/* AS-External-LSA */
@@ -110,8 +156,31 @@ extern void ospf6_asbr_distribute_list_update(struct ospf6 *ospf6,
struct ospf6_redist *ospf6_redist_lookup(struct ospf6 *ospf6, int type,
unsigned short instance);
extern void ospf6_asbr_routemap_update(const char *mapname);
-extern void ospf6_as_external_lsa_originate(struct ospf6_route *route,
- struct ospf6 *ospf6);
+extern struct ospf6_lsa *
+ospf6_as_external_lsa_originate(struct ospf6_route *route,
+ struct ospf6 *ospf6);
extern void ospf6_asbr_status_update(struct ospf6 *ospf6, int status);
+int ospf6_asbr_external_rt_advertise(struct ospf6 *ospf6,
+ struct prefix *p);
+int ospf6_external_aggr_delay_timer_set(struct ospf6 *ospf6,
+ unsigned int interval);
+int ospf6_asbr_external_rt_no_advertise(struct ospf6 *ospf6,
+ struct prefix *p);
+
+struct ospf6_external_aggr_rt *
+ospf6_external_aggr_config_lookup(struct ospf6 *ospf6, struct prefix *p);
+
+int ospf6_external_aggr_config_set(struct ospf6 *ospf6, struct prefix *p,
+ route_tag_t tag, int metric, int mtype);
+
+int ospf6_external_aggr_config_unset(struct ospf6 *ospf6,
+ struct prefix *p);
+void ospf6_handle_external_lsa_origination(struct ospf6 *ospf6,
+ struct ospf6_route *rt,
+ struct prefix *p);
+void ospf6_external_aggregator_free(struct ospf6_external_aggr_rt *aggr);
+void ospf6_unset_all_aggr_flag(struct ospf6 *ospf6);
+void ospf6_fill_aggr_route_details(struct ospf6 *ospf6,
+ struct ospf6_external_aggr_rt *aggr);
#endif /* OSPF6_ASBR_H */
diff --git a/ospf6d/ospf6_flood.c b/ospf6d/ospf6_flood.c
index 738c2218fa..3d52597161 100644
--- a/ospf6d/ospf6_flood.c
+++ b/ospf6d/ospf6_flood.c
@@ -89,6 +89,16 @@ void ospf6_lsa_originate(struct ospf6_lsa *lsa)
struct ospf6_lsa *old;
struct ospf6_lsdb *lsdb_self;
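+ /* A zero router ID cannot originate LSAs; refuse and free the LSA instead. */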
+ if (lsa->header->adv_router == INADDR_ANY) {
+ if (IS_OSPF6_DEBUG_ORIGINATE_TYPE(lsa->header->type))
+ zlog_debug(
+ "Refusing to originate LSA (zero router ID): %s",
+ lsa->name);
+
+ ospf6_lsa_delete(lsa);
+ return;
+ }
+
/* find previous LSA */
old = ospf6_lsdb_lookup(lsa->header->type, lsa->header->id,
lsa->header->adv_router, lsa->lsdb);
@@ -106,7 +116,7 @@ void ospf6_lsa_originate(struct ospf6_lsa *lsa)
lsdb_self = ospf6_get_scoped_lsdb_self(lsa);
ospf6_lsdb_add(ospf6_lsa_copy(lsa), lsdb_self);
- lsa->refresh = NULL;
+ THREAD_OFF(lsa->refresh);
thread_add_timer(master, ospf6_lsa_refresh, lsa, OSPF_LS_REFRESH_TIME,
&lsa->refresh);
@@ -139,6 +149,31 @@ void ospf6_lsa_originate_interface(struct ospf6_lsa *lsa,
ospf6_lsa_originate(lsa);
}
+void ospf6_remove_id_from_external_id_table(struct ospf6 *ospf6,
+ uint32_t id)
+{
+ struct prefix prefix_id;
+ struct route_node *node;
+
+ /* remove binding in external_id_table */
+ prefix_id.family = AF_INET;
+ prefix_id.prefixlen = 32;
+ prefix_id.u.prefix4.s_addr = id;
+ node = route_node_lookup(ospf6->external_id_table, &prefix_id);
+ assert(node);
+ node->info = NULL;
+ route_unlock_node(node); /* to free the lookup lock */
+ route_unlock_node(node); /* to free the original lock */
+
+}
+
+void ospf6_external_lsa_purge(struct ospf6 *ospf6, struct ospf6_lsa *lsa)
+{
+ ospf6_lsa_purge(lsa);
+
+ ospf6_remove_id_from_external_id_table(ospf6, lsa->header->id);
+}
+
void ospf6_lsa_purge(struct ospf6_lsa *lsa)
{
struct ospf6_lsa *self;
diff --git a/ospf6d/ospf6_flood.h b/ospf6d/ospf6_flood.h
index 5515a1c3fe..4e4fc55ed4 100644
--- a/ospf6d/ospf6_flood.h
+++ b/ospf6d/ospf6_flood.h
@@ -39,6 +39,9 @@ extern void ospf6_lsa_originate_area(struct ospf6_lsa *lsa,
struct ospf6_area *oa);
extern void ospf6_lsa_originate_interface(struct ospf6_lsa *lsa,
struct ospf6_interface *oi);
+void ospf6_remove_id_from_external_id_table(struct ospf6 *ospf6,
+ uint32_t id);
+void ospf6_external_lsa_purge(struct ospf6 *ospf6, struct ospf6_lsa *lsa);
extern void ospf6_lsa_purge(struct ospf6_lsa *lsa);
extern void ospf6_lsa_purge_multi_ls_id(struct ospf6_area *oa,
diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c
index b52d6af90e..a169b9c60e 100644
--- a/ospf6d/ospf6_interface.c
+++ b/ospf6d/ospf6_interface.c
@@ -385,7 +385,6 @@ void ospf6_interface_connected_route_update(struct interface *ifp)
struct connected *c;
struct listnode *node, *nnode;
struct in6_addr nh_addr;
- int count = 0, max_addr_count;
oi = (struct ospf6_interface *)ifp->info;
if (oi == NULL)
@@ -404,22 +403,10 @@ void ospf6_interface_connected_route_update(struct interface *ifp)
/* update "route to advertise" interface route table */
ospf6_route_remove_all(oi->route_connected);
- if (oi->ifmtu >= OSPF6_JUMBO_MTU)
- max_addr_count = OSPF6_MAX_IF_ADDRS_JUMBO;
- else
- max_addr_count = OSPF6_MAX_IF_ADDRS;
-
for (ALL_LIST_ELEMENTS(oi->interface->connected, node, nnode, c)) {
if (c->address->family != AF_INET6)
continue;
- /* number of interface addresses supported is based on MTU
- * size of OSPFv3 packet
- */
- count++;
- if (count >= max_addr_count)
- break;
-
CONTINUE_IF_ADDRESS_LINKLOCAL(IS_OSPF6_DEBUG_INTERFACE,
c->address);
CONTINUE_IF_ADDRESS_UNSPECIFIED(IS_OSPF6_DEBUG_INTERFACE,
@@ -448,7 +435,7 @@ void ospf6_interface_connected_route_update(struct interface *ifp)
}
}
- route = ospf6_route_create();
+ route = ospf6_route_create(oi->area->ospf6);
memcpy(&route->prefix, c->address, sizeof(struct prefix));
apply_mask(&route->prefix);
route->type = OSPF6_DEST_TYPE_NETWORK;
@@ -821,7 +808,9 @@ int interface_up(struct thread *thread)
}
/* decide next interface state */
- if (oi->type == OSPF_IFTYPE_POINTOPOINT) {
+ if (oi->type == OSPF_IFTYPE_LOOPBACK) {
+ ospf6_interface_state_change(OSPF6_INTERFACE_LOOPBACK, oi);
+ } else if (oi->type == OSPF_IFTYPE_POINTOPOINT) {
ospf6_interface_state_change(OSPF6_INTERFACE_POINTTOPOINT, oi);
} else if (oi->priority == 0)
ospf6_interface_state_change(OSPF6_INTERFACE_DROTHER, oi);
@@ -1728,7 +1717,6 @@ DEFUN (ipv6_ospf6_area,
int idx_ipv4 = 3;
uint32_t area_id;
int format;
- int ipv6_count = 0;
assert(ifp);
@@ -1743,23 +1731,6 @@ DEFUN (ipv6_ospf6_area,
return CMD_SUCCESS;
}
- /* if more than OSPF6_MAX_IF_ADDRS are configured on this interface
- * then don't allow ospfv3 to be configured
- */
- ipv6_count = connected_count_by_family(ifp, AF_INET6);
- if (oi->ifmtu == OSPF6_DEFAULT_MTU && ipv6_count > OSPF6_MAX_IF_ADDRS) {
- vty_out(vty,
- "can not configure OSPFv3 on if %s, must have less than %d interface addresses but has %d addresses\n",
- ifp->name, OSPF6_MAX_IF_ADDRS, ipv6_count);
- return CMD_WARNING_CONFIG_FAILED;
- } else if (oi->ifmtu >= OSPF6_JUMBO_MTU
- && ipv6_count > OSPF6_MAX_IF_ADDRS_JUMBO) {
- vty_out(vty,
- "can not configure OSPFv3 on if %s, must have less than %d interface addresses but has %d addresses\n",
- ifp->name, OSPF6_MAX_IF_ADDRS_JUMBO, ipv6_count);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
if (str2area_id(argv[idx_ipv4]->arg, &area_id, &format)) {
vty_out(vty, "Malformed Area-ID: %s\n", argv[idx_ipv4]->arg);
return CMD_WARNING_CONFIG_FAILED;
@@ -2613,15 +2584,6 @@ static int config_write_interface(struct vty *vty)
return write;
}
-static int config_write_ospf6_interface(struct vty *vty, struct vrf *vrf);
-static struct cmd_node interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = config_write_interface,
-};
-
static int ospf6_ifp_create(struct interface *ifp)
{
if (IS_OSPF6_DEBUG_ZEBRA(RECV))
@@ -2679,8 +2641,7 @@ static int ospf6_ifp_destroy(struct interface *ifp)
void ospf6_interface_init(void)
{
/* Install interface node. */
- install_node(&interface_node);
- if_cmd_init();
+ if_cmd_init(config_write_interface);
if_zapi_callbacks(ospf6_ifp_create, ospf6_ifp_up,
ospf6_ifp_down, ospf6_ifp_destroy);
diff --git a/ospf6d/ospf6_interface.h b/ospf6d/ospf6_interface.h
index c9cd74b691..b5efca743e 100644
--- a/ospf6d/ospf6_interface.h
+++ b/ospf6d/ospf6_interface.h
@@ -201,7 +201,6 @@ extern void ospf6_interface_disable(struct ospf6_interface *);
extern void ospf6_interface_state_update(struct interface *);
extern void ospf6_interface_connected_route_update(struct interface *);
-extern void ospf6_interface_connected_route_add(struct connected *);
extern struct in6_addr *
ospf6_interface_get_global_address(struct interface *ifp);
diff --git a/ospf6d/ospf6_intra.c b/ospf6d/ospf6_intra.c
index c971c6180e..e4db8f3a02 100644
--- a/ospf6d/ospf6_intra.c
+++ b/ospf6d/ospf6_intra.c
@@ -767,7 +767,6 @@ int ospf6_link_lsa_originate(struct thread *thread)
struct ospf6_link_lsa *link_lsa;
struct ospf6_route *route;
struct ospf6_prefix *op;
- int count, max_addr_count;
oi = (struct ospf6_interface *)THREAD_ARG(thread);
oi->thread_link_lsa = NULL;
@@ -811,30 +810,22 @@ int ospf6_link_lsa_originate(struct thread *thread)
memcpy(link_lsa->options, oi->area->options, 3);
memcpy(&link_lsa->linklocal_addr, oi->linklocal_addr,
sizeof(struct in6_addr));
+ link_lsa->prefix_num = htonl(oi->route_connected->count);
op = (struct ospf6_prefix *)((caddr_t)link_lsa
+ sizeof(struct ospf6_link_lsa));
- /* connected prefix to advertise, number of interface addresses
- * supported is based on MTU size of OSPFv3 packets
- */
- if (oi->ifmtu >= OSPF6_JUMBO_MTU)
- max_addr_count = OSPF6_MAX_IF_ADDRS_JUMBO;
- else
- max_addr_count = OSPF6_MAX_IF_ADDRS;
- for (route = ospf6_route_head(oi->route_connected), count = 0;
- route && count < max_addr_count;
- route = ospf6_route_next(route), count++) {
+ /* connected prefix to advertise */
+ for (route = ospf6_route_head(oi->route_connected); route;
+ route = ospf6_route_next(route)) {
op->prefix_length = route->prefix.prefixlen;
- op->prefix_options = route->path.prefix_options;
+ op->prefix_options = route->prefix_options;
op->prefix_metric = htons(0);
memcpy(OSPF6_PREFIX_BODY(op), &route->prefix.u.prefix6,
OSPF6_PREFIX_SPACE(op->prefix_length));
op = OSPF6_PREFIX_NEXT(op);
}
- link_lsa->prefix_num = htonl(count);
-
/* Fill LSA Header */
lsa_header->age = 0;
lsa_header->type = htons(OSPF6_LSTYPE_LINK);
@@ -1014,7 +1005,6 @@ int ospf6_intra_prefix_lsa_originate_stub(struct thread *thread)
unsigned short prefix_num = 0;
struct ospf6_route_table *route_advertise;
int ls_id = 0;
- int count, max_addr_count;
oa = (struct ospf6_area *)THREAD_ARG(thread);
oa->thread_intra_prefix_lsa = NULL;
@@ -1060,8 +1050,6 @@ int ospf6_intra_prefix_lsa_originate_stub(struct thread *thread)
intra_prefix_lsa->ref_adv_router = oa->ospf6->router_id;
route_advertise = ospf6_route_table_create(0, 0);
- route_advertise->hook_add = NULL;
- route_advertise->hook_remove = NULL;
for (ALL_LIST_ELEMENTS_RO(oa->if_list, i, oi)) {
if (oi->state == OSPF6_INTERFACE_DOWN) {
@@ -1090,14 +1078,8 @@ int ospf6_intra_prefix_lsa_originate_stub(struct thread *thread)
zlog_debug(" Interface %s:", oi->interface->name);
/* connected prefix to advertise */
- if (oi->ifmtu >= OSPF6_JUMBO_MTU)
- max_addr_count = OSPF6_MAX_IF_ADDRS_JUMBO;
- else
- max_addr_count = OSPF6_MAX_IF_ADDRS;
-
- for (route = ospf6_route_head(oi->route_connected), count = 0;
- route && count < max_addr_count;
- route = ospf6_route_best_next(route), count++) {
+ for (route = ospf6_route_head(oi->route_connected); route;
+ route = ospf6_route_best_next(route)) {
if (IS_OSPF6_DEBUG_ORIGINATE(INTRA_PREFIX))
zlog_debug(" include %pFX", &route->prefix);
ospf6_route_add(ospf6_route_copy(route),
@@ -1193,7 +1175,7 @@ int ospf6_intra_prefix_lsa_originate_stub(struct thread *thread)
}
op->prefix_length = route->prefix.prefixlen;
- op->prefix_options = route->path.prefix_options;
+ op->prefix_options = route->prefix_options;
op->prefix_metric = htons(route->path.cost);
memcpy(OSPF6_PREFIX_BODY(op), &route->prefix.u.prefix6,
OSPF6_PREFIX_SPACE(op->prefix_length));
@@ -1312,8 +1294,6 @@ int ospf6_intra_prefix_lsa_originate_transit(struct thread *thread)
/* connected prefix to advertise */
route_advertise = ospf6_route_table_create(0, 0);
- route_advertise->hook_add = NULL;
- route_advertise->hook_remove = NULL;
type = ntohs(OSPF6_LSTYPE_LINK);
for (ALL_LSDB_TYPED(oi->lsdb, type, lsa)) {
@@ -1347,7 +1327,7 @@ int ospf6_intra_prefix_lsa_originate_transit(struct thread *thread)
|| current + OSPF6_PREFIX_SIZE(op) > end)
break;
- route = ospf6_route_create();
+ route = ospf6_route_create(oi->area->ospf6);
route->type = OSPF6_DEST_TYPE_NETWORK;
route->prefix.family = AF_INET6;
@@ -1356,6 +1336,7 @@ int ospf6_intra_prefix_lsa_originate_transit(struct thread *thread)
sizeof(struct in6_addr));
memcpy(&route->prefix.u.prefix6, OSPF6_PREFIX_BODY(op),
OSPF6_PREFIX_SPACE(op->prefix_length));
+ route->prefix_options = op->prefix_options;
route->path.origin.type = lsa->header->type;
route->path.origin.id = lsa->header->id;
@@ -1363,7 +1344,6 @@ int ospf6_intra_prefix_lsa_originate_transit(struct thread *thread)
route->path.options[0] = link_lsa->options[0];
route->path.options[1] = link_lsa->options[1];
route->path.options[2] = link_lsa->options[2];
- route->path.prefix_options = op->prefix_options;
route->path.area_id = oi->area->area_id;
route->path.type = OSPF6_PATH_TYPE_INTRA;
@@ -1384,7 +1364,7 @@ int ospf6_intra_prefix_lsa_originate_transit(struct thread *thread)
for (route = ospf6_route_head(route_advertise); route;
route = ospf6_route_best_next(route)) {
op->prefix_length = route->prefix.prefixlen;
- op->prefix_options = route->path.prefix_options;
+ op->prefix_options = route->prefix_options;
op->prefix_metric = htons(0);
memcpy(OSPF6_PREFIX_BODY(op), &route->prefix.u.prefix6,
OSPF6_PREFIX_SPACE(op->prefix_length));
@@ -1810,19 +1790,19 @@ void ospf6_intra_prefix_lsa_add(struct ospf6_lsa *lsa)
continue;
}
- route = ospf6_route_create();
+ route = ospf6_route_create(oa->ospf6);
memset(&route->prefix, 0, sizeof(struct prefix));
route->prefix.family = AF_INET6;
route->prefix.prefixlen = op->prefix_length;
ospf6_prefix_in6_addr(&route->prefix.u.prefix6,
intra_prefix_lsa, op);
+ route->prefix_options = op->prefix_options;
route->type = OSPF6_DEST_TYPE_NETWORK;
route->path.origin.type = lsa->header->type;
route->path.origin.id = lsa->header->id;
route->path.origin.adv_router = lsa->header->adv_router;
- route->path.prefix_options = op->prefix_options;
route->path.area_id = oa->area_id;
route->path.type = OSPF6_PATH_TYPE_INTRA;
route->path.metric_type = 1;
diff --git a/ospf6d/ospf6_lsa.c b/ospf6d/ospf6_lsa.c
index bab5fdaae8..9c03ce21ed 100644
--- a/ospf6d/ospf6_lsa.c
+++ b/ospf6d/ospf6_lsa.c
@@ -45,6 +45,10 @@
#include "ospf6_flood.h"
#include "ospf6d.h"
+#ifndef VTYSH_EXTRACT_PL
+#include "ospf6d/ospf6_lsa_clippy.c"
+#endif
+
DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_LSA, "OSPF6 LSA");
DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_LSA_HEADER, "OSPF6 LSA header");
DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_LSA_SUMMARY, "OSPF6 LSA summary");
@@ -822,6 +826,8 @@ int ospf6_lsa_expire(struct thread *thread)
if (CHECK_FLAG(lsa->flag, OSPF6_LSA_HEADERONLY))
return 0; /* dbexchange will do something ... */
ospf6 = ospf6_get_by_lsdb(lsa);
+ assert(ospf6);
+
/* reinstall lsa */
ospf6_install_lsa(lsa);
@@ -994,6 +1000,30 @@ static char *ospf6_lsa_handler_name(const struct ospf6_lsa_handler *h)
return buf;
}
+DEFPY (debug_ospf6_lsa_aggregation,
+ debug_ospf6_lsa_aggregation_cmd,
+ "[no] debug ospf6 lsa aggregation",
+ NO_STR
+ DEBUG_STR
+ OSPF6_STR
+ "Debug Link State Advertisements (LSAs)\n"
+ "External LSA Aggregation\n")
+{
+
+ struct ospf6_lsa_handler *handler;
+
+ handler = ospf6_get_lsa_handler(OSPF6_LSTYPE_AS_EXTERNAL);
+ if (handler == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ if (no)
+ UNSET_FLAG(handler->lh_debug, OSPF6_LSA_DEBUG_AGGR);
+ else
+ SET_FLAG(handler->lh_debug, OSPF6_LSA_DEBUG_AGGR);
+
+ return CMD_SUCCESS;
+}
+
DEFUN (debug_ospf6_lsa_type,
debug_ospf6_lsa_hex_cmd,
"debug ospf6 lsa <router|network|inter-prefix|inter-router|as-external|link|intra-prefix|unknown> [<originate|examine|flooding>]",
@@ -1105,6 +1135,9 @@ void install_element_ospf6_debug_lsa(void)
install_element(ENABLE_NODE, &no_debug_ospf6_lsa_hex_cmd);
install_element(CONFIG_NODE, &debug_ospf6_lsa_hex_cmd);
install_element(CONFIG_NODE, &no_debug_ospf6_lsa_hex_cmd);
+
+ install_element(ENABLE_NODE, &debug_ospf6_lsa_aggregation_cmd);
+ install_element(CONFIG_NODE, &debug_ospf6_lsa_aggregation_cmd);
}
int config_write_ospf6_debug_lsa(struct vty *vty)
@@ -1128,6 +1161,8 @@ int config_write_ospf6_debug_lsa(struct vty *vty)
if (CHECK_FLAG(handler->lh_debug, OSPF6_LSA_DEBUG_FLOOD))
vty_out(vty, "debug ospf6 lsa %s flooding\n",
ospf6_lsa_handler_name(handler));
+ if (CHECK_FLAG(handler->lh_debug, OSPF6_LSA_DEBUG_AGGR))
+ vty_out(vty, "debug ospf6 lsa aggregation\n");
}
return 0;
diff --git a/ospf6d/ospf6_lsa.h b/ospf6d/ospf6_lsa.h
index 15b0d4ebbc..4c95ee69bd 100644
--- a/ospf6d/ospf6_lsa.h
+++ b/ospf6d/ospf6_lsa.h
@@ -28,6 +28,7 @@
#define OSPF6_LSA_DEBUG_ORIGINATE 0x02
#define OSPF6_LSA_DEBUG_EXAMIN 0x04
#define OSPF6_LSA_DEBUG_FLOOD 0x08
+#define OSPF6_LSA_DEBUG_AGGR 0x10
/* OSPF LSA Default metric values */
#define DEFAULT_DEFAULT_METRIC 20
@@ -51,6 +52,8 @@
(ospf6_lstype_debug(type) & OSPF6_LSA_DEBUG_EXAMIN)
#define IS_OSPF6_DEBUG_FLOOD_TYPE(type) \
(ospf6_lstype_debug(type) & OSPF6_LSA_DEBUG_FLOOD)
+#define IS_OSPF6_DEBUG_AGGR \
+ (ospf6_lstype_debug(OSPF6_LSTYPE_AS_EXTERNAL) & OSPF6_LSA_DEBUG_AGGR)
/* LSA definition */
@@ -263,4 +266,6 @@ extern void install_element_ospf6_debug_lsa(void);
extern void ospf6_lsa_age_set(struct ospf6_lsa *lsa);
extern void ospf6_flush_self_originated_lsas_now(struct ospf6 *ospf6);
extern struct ospf6 *ospf6_get_by_lsdb(struct ospf6_lsa *lsa);
+struct ospf6_lsa *ospf6_find_external_lsa(struct ospf6 *ospf6,
+ struct prefix *p);
#endif /* OSPF6_LSA_H */
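The new OSPF6_LSA_DEBUG_AGGR flag is meant to gate aggregation-specific logging. A minimal sketch of a consumer (the function and message below are hypothetical; only IS_OSPF6_DEBUG_AGGR, zlog_debug() and the %pFX printer come from the tree):

static void example_log_aggr_match(const struct ospf6_route *rt,
				   const struct ospf6_external_aggr_rt *aggr)
{
	/* Only emit aggregation chatter when the new debug flag is set. */
	if (IS_OSPF6_DEBUG_AGGR)
		zlog_debug("external route %pFX matched summary %pFX",
			   &rt->prefix, &aggr->p);
}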
diff --git a/ospf6d/ospf6_lsdb.c b/ospf6d/ospf6_lsdb.c
index 304f03fde8..039c65d739 100644
--- a/ospf6d/ospf6_lsdb.c
+++ b/ospf6d/ospf6_lsdb.c
@@ -30,6 +30,7 @@
#include "ospf6_proto.h"
#include "ospf6_lsa.h"
#include "ospf6_lsdb.h"
+#include "ospf6_asbr.h"
#include "ospf6_route.h"
#include "ospf6d.h"
#include "bitfield.h"
@@ -194,6 +195,28 @@ struct ospf6_lsa *ospf6_lsdb_lookup(uint16_t type, uint32_t id,
return (struct ospf6_lsa *)node->info;
}
+struct ospf6_lsa *ospf6_find_external_lsa(struct ospf6 *ospf6, struct prefix *p)
+{
+ struct ospf6_route *match;
+ struct ospf6_lsa *lsa;
+ struct ospf6_external_info *info;
+
+ match = ospf6_route_lookup(p, ospf6->external_table);
+ if (match == NULL) {
+ if (IS_OSPF6_DEBUG_ASBR)
+ zlog_debug("No such route %pFX to withdraw", p);
+
+ return NULL;
+ }
+
+ info = match->route_option;
+ assert(info);
+
+ lsa = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_AS_EXTERNAL),
+ htonl(info->id), ospf6->router_id, ospf6->lsdb);
+ return lsa;
+}
+
struct ospf6_lsa *ospf6_lsdb_lookup_next(uint16_t type, uint32_t id,
uint32_t adv_router,
struct ospf6_lsdb *lsdb)
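ospf6_find_external_lsa() pairs naturally with ospf6_external_lsa_purge() added earlier in this patch. A hedged sketch of a withdraw path a caller might implement (the wrapper itself is illustrative; both helpers are real):

static void example_withdraw_external(struct ospf6 *ospf6, struct prefix *p)
{
	struct ospf6_lsa *lsa;

	/* Map the prefix to its self-originated AS-external LSA, if any. */
	lsa = ospf6_find_external_lsa(ospf6, p);
	if (lsa == NULL)
		return;

	/* Premature-age the LSA and drop its entry from the external
	 * LS ID table. */
	ospf6_external_lsa_purge(ospf6, lsa);
}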
diff --git a/ospf6d/ospf6_nssa.c b/ospf6d/ospf6_nssa.c
index 9f8cdf8fb7..470a5b1338 100644
--- a/ospf6d/ospf6_nssa.c
+++ b/ospf6d/ospf6_nssa.c
@@ -1159,10 +1159,49 @@ static void ospf6_nssa_flush_area(struct ospf6_area *area)
}
}
-static void ospf6_area_nssa_update(struct ospf6_area *area)
+static void ospf6_check_and_originate_type7_lsa(struct ospf6_area *area)
{
struct ospf6_route *route;
+ struct route_node *rn = NULL;
+ struct ospf6_external_aggr_rt *aggr;
+
+ /* Loop through the external_table to find the LSAs originated
+ * without aggregation and originate type-7 LSAs for them.
+ */
+ for (route = ospf6_route_head(
+ area->ospf6->external_table);
+ route; route = ospf6_route_next(route)) {
+ /* This means the Type-5 LSA was originated for this route */
+ if (route->path.origin.id != 0)
+ ospf6_nssa_lsa_originate(route, area);
+
+ }
+
+ /* Loop through the aggregation table to originate type-7 LSAs
+ * for the aggregated type-5 LSAs
+ */
+ for (rn = route_top(area->ospf6->rt_aggr_tbl); rn;
+ rn = route_next(rn)) {
+ if (!rn->info)
+ continue;
+
+ aggr = rn->info;
+
+ if (CHECK_FLAG(aggr->aggrflags,
+ OSPF6_EXTERNAL_AGGRT_ORIGINATED)) {
+ if (IS_OSPF6_DEBUG_NSSA)
+ zlog_debug(
+ "Originating Type-7 LSAs for area %s",
+ area->name);
+
+ ospf6_nssa_lsa_originate(aggr->route, area);
+ }
+ }
+}
+
+static void ospf6_area_nssa_update(struct ospf6_area *area)
+{
if (IS_AREA_NSSA(area)) {
if (!ospf6_check_and_set_router_abr(area->ospf6))
OSPF6_OPT_CLEAR(area->options, OSPF6_OPT_E);
@@ -1194,10 +1233,7 @@ static void ospf6_area_nssa_update(struct ospf6_area *area)
zlog_debug("NSSA area %s", area->name);
/* Originate NSSA LSA */
- for (route = ospf6_route_head(
- area->ospf6->external_table);
- route; route = ospf6_route_next(route))
- ospf6_nssa_lsa_originate(route, area);
+ ospf6_check_and_originate_type7_lsa(area);
}
} else {
/* Disable NSSA */
@@ -1259,13 +1295,10 @@ void ospf6_nssa_lsa_originate(struct ospf6_route *route,
struct in6_addr *fwd_addr;
struct ospf6_as_external_lsa *as_external_lsa;
- char buf[PREFIX2STR_BUFFER];
caddr_t p;
- if (IS_OSPF6_DEBUG_ASBR || IS_OSPF6_DEBUG_ORIGINATE(AS_EXTERNAL)) {
- prefix2str(&route->prefix, buf, sizeof(buf));
- zlog_debug("Originate AS-External-LSA for %s", buf);
- }
+ if (IS_OSPF6_DEBUG_ASBR || IS_OSPF6_DEBUG_ORIGINATE(AS_EXTERNAL))
+ zlog_debug("Originate NSSA-LSA for %pFX", &route->prefix);
/* prepare buffer */
memset(buffer, 0, sizeof(buffer));
@@ -1296,7 +1329,7 @@ void ospf6_nssa_lsa_originate(struct ospf6_route *route,
as_external_lsa->prefix.prefix_length = route->prefix.prefixlen;
/* PrefixOptions */
- as_external_lsa->prefix.prefix_options = route->path.prefix_options;
+ as_external_lsa->prefix.prefix_options = route->prefix_options;
/* Set the P bit */
as_external_lsa->prefix.prefix_options |= OSPF6_PREFIX_OPTION_P;
@@ -1334,7 +1367,7 @@ void ospf6_nssa_lsa_originate(struct ospf6_route *route,
lsa_header->adv_router = area->ospf6->router_id;
lsa_header->seqnum =
ospf6_new_ls_seqnum(lsa_header->type, lsa_header->id,
- lsa_header->adv_router, area->ospf6->lsdb);
+ lsa_header->adv_router, area->lsdb);
lsa_header->length = htons((caddr_t)p - (caddr_t)lsa_header);
/* LSA checksum */
diff --git a/ospf6d/ospf6_route.c b/ospf6d/ospf6_route.c
index 0a026785f4..cd3139d28a 100644
--- a/ospf6d/ospf6_route.c
+++ b/ospf6d/ospf6_route.c
@@ -284,12 +284,21 @@ void ospf6_add_nexthop(struct list *nh_list, int ifindex, struct in6_addr *addr)
struct ospf6_nexthop nh_match;
if (nh_list) {
- nh_match.ifindex = ifindex;
- if (addr != NULL)
+ if (addr) {
+ if (ifindex)
+ nh_match.type = NEXTHOP_TYPE_IPV6_IFINDEX;
+ else
+ nh_match.type = NEXTHOP_TYPE_IPV6;
+
memcpy(&nh_match.address, addr,
sizeof(struct in6_addr));
- else
+ } else {
+ nh_match.type = NEXTHOP_TYPE_IFINDEX;
+
memset(&nh_match.address, 0, sizeof(struct in6_addr));
+ }
+
+ nh_match.ifindex = ifindex;
if (!ospf6_route_find_nexthop(nh_list, &nh_match)) {
nh = ospf6_nexthop_create();
@@ -299,36 +308,76 @@ void ospf6_add_nexthop(struct list *nh_list, int ifindex, struct in6_addr *addr)
}
}
+void ospf6_add_route_nexthop_blackhole(struct ospf6_route *route)
+{
+ struct ospf6_nexthop *nh;
+ struct ospf6_nexthop nh_match = {};
+
+ /* List not allocated. */
+ if (route->nh_list == NULL)
+ return;
+
+ /* Entry already exists. */
+ nh_match.type = NEXTHOP_TYPE_BLACKHOLE;
+ if (ospf6_route_find_nexthop(route->nh_list, &nh_match))
+ return;
+
+ nh = ospf6_nexthop_create();
+ ospf6_nexthop_copy(nh, &nh_match);
+ listnode_add(route->nh_list, nh);
+}
+
void ospf6_route_zebra_copy_nexthops(struct ospf6_route *route,
struct zapi_nexthop nexthops[],
int entries, vrf_id_t vrf_id)
{
struct ospf6_nexthop *nh;
struct listnode *node;
- char buf[64];
int i;
if (route) {
i = 0;
for (ALL_LIST_ELEMENTS_RO(route->nh_list, node, nh)) {
if (IS_OSPF6_DEBUG_ZEBRA(SEND)) {
- const char *ifname;
- inet_ntop(AF_INET6, &nh->address, buf,
- sizeof(buf));
- ifname = ifindex2ifname(nh->ifindex, vrf_id);
- zlog_debug(" nexthop: %s%%%.*s(%d)", buf,
- IFNAMSIZ, ifname, nh->ifindex);
+ zlog_debug(" nexthop: %s %pI6%%%.*s(%d)",
+ nexthop_type_to_str(nh->type),
+ &nh->address, IFNAMSIZ,
+ ifindex2ifname(nh->ifindex, vrf_id),
+ nh->ifindex);
}
+
if (i >= entries)
return;
nexthops[i].vrf_id = vrf_id;
- nexthops[i].ifindex = nh->ifindex;
- if (!IN6_IS_ADDR_UNSPECIFIED(&nh->address)) {
+ nexthops[i].type = nh->type;
+
+ switch (nh->type) {
+ case NEXTHOP_TYPE_BLACKHOLE:
+ /* NOTHING */
+ break;
+
+ case NEXTHOP_TYPE_IFINDEX:
+ nexthops[i].ifindex = nh->ifindex;
+ break;
+
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ case NEXTHOP_TYPE_IPV4:
+ /*
+ * OSPFv3 with IPv4 routes is not supported
+ * yet. Skip this next hop.
+ */
+ if (IS_OSPF6_DEBUG_ZEBRA(SEND))
+ zlog_debug(" Skipping IPv4 next hop");
+ continue;
+
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ nexthops[i].ifindex = nh->ifindex;
+ /* FALLTHROUGH */
+ case NEXTHOP_TYPE_IPV6:
nexthops[i].gate.ipv6 = nh->address;
- nexthops[i].type = NEXTHOP_TYPE_IPV6_IFINDEX;
- } else
- nexthops[i].type = NEXTHOP_TYPE_IFINDEX;
+ break;
+ }
i++;
}
}
@@ -404,7 +453,7 @@ void ospf6_copy_paths(struct list *dst, struct list *src)
}
}
-struct ospf6_route *ospf6_route_create(void)
+struct ospf6_route *ospf6_route_create(struct ospf6 *ospf6)
{
struct ospf6_route *route;
@@ -415,6 +464,8 @@ struct ospf6_route *ospf6_route_create(void)
route->paths = list_new();
route->paths->cmp = (int (*)(void *, void *))ospf6_path_cmp;
route->paths->del = (void (*)(void *))ospf6_path_free;
+ route->ospf6 = ospf6;
+
return route;
}
@@ -433,9 +484,10 @@ struct ospf6_route *ospf6_route_copy(struct ospf6_route *route)
{
struct ospf6_route *new;
- new = ospf6_route_create();
+ new = ospf6_route_create(route->ospf6);
new->type = route->type;
memcpy(&new->prefix, &route->prefix, sizeof(struct prefix));
+ new->prefix_options = route->prefix_options;
new->installed = route->installed;
new->changed = route->changed;
new->flag = route->flag;
@@ -1137,6 +1189,7 @@ void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route,
{
char destination[PREFIX2STR_BUFFER], nexthop[64];
char area_id[16], id[16], adv_router[16], capa[16], options[16];
+ char pfx_options[16];
struct timeval now, res;
char duration[64];
struct listnode *node;
@@ -1264,10 +1317,13 @@ void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route,
vty_out(vty, "Router Bits: %s\n", capa);
/* Prefix Options */
+ ospf6_prefix_options_printbuf(route->prefix_options, pfx_options,
+ sizeof(pfx_options));
if (use_json)
- json_object_string_add(json_route, "prefixOptions", "xxx");
+ json_object_string_add(json_route, "prefixOptions",
+ pfx_options);
else
- vty_out(vty, "Prefix Options: xxx\n");
+ vty_out(vty, "Prefix Options: %s\n", pfx_options);
/* Metrics */
if (use_json) {
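With ospf6_route_create() now taking the owning instance and blackhole next hops made explicit, an aggregate/summary route could be assembled roughly as below. This is a sketch only: the function name and the choice of ospf6->route_table are assumptions, while the called helpers are the ones introduced or changed here:

static void example_add_blackhole_summary(struct ospf6 *ospf6,
					  const struct prefix *p)
{
	struct ospf6_route *rt;

	rt = ospf6_route_create(ospf6);	/* back pointer is now required */
	prefix_copy(&rt->prefix, p);
	rt->type = OSPF6_DEST_TYPE_NETWORK;

	/* Attach an explicit NEXTHOP_TYPE_BLACKHOLE next hop instead of
	 * leaving the address unspecified. */
	ospf6_add_route_nexthop_blackhole(rt);

	ospf6_route_add(rt, ospf6->route_table);
}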
diff --git a/ospf6d/ospf6_route.h b/ospf6d/ospf6_route.h
index a791a82cd4..991720ec2e 100644
--- a/ospf6d/ospf6_route.h
+++ b/ospf6d/ospf6_route.h
@@ -24,6 +24,7 @@
#include "command.h"
#include "zclient.h"
#include "lib/json.h"
+#include "lib/nexthop.h"
#define OSPF6_MULTI_PATH_LIMIT 4
@@ -44,23 +45,60 @@ struct ospf6_nexthop {
/* IP address, if any */
struct in6_addr address;
+
+ /** Next-hop type information. */
+ enum nexthop_types_t type;
};
-#define ospf6_nexthop_is_set(x) \
- ((x)->ifindex || !IN6_IS_ADDR_UNSPECIFIED(&(x)->address))
-#define ospf6_nexthop_is_same(a, b) \
- ((a)->ifindex == (b)->ifindex \
- && IN6_ARE_ADDR_EQUAL(&(a)->address, &(b)->address))
-#define ospf6_nexthop_clear(x) \
- do { \
- (x)->ifindex = 0; \
- memset(&(x)->address, 0, sizeof(struct in6_addr)); \
- } while (0)
-#define ospf6_nexthop_copy(a, b) \
- do { \
- (a)->ifindex = (b)->ifindex; \
- memcpy(&(a)->address, &(b)->address, sizeof(struct in6_addr)); \
- } while (0)
+static inline bool ospf6_nexthop_is_set(const struct ospf6_nexthop *nh)
+{
+ return nh->type != 0;
+}
+
+static inline bool ospf6_nexthop_is_same(const struct ospf6_nexthop *nha,
+ const struct ospf6_nexthop *nhb)
+{
+ if (nha->type != nhb->type)
+ return false;
+
+ switch (nha->type) {
+ case NEXTHOP_TYPE_BLACKHOLE:
+ /* NOTHING */
+ break;
+
+ case NEXTHOP_TYPE_IFINDEX:
+ if (nha->ifindex != nhb->ifindex)
+ return false;
+ break;
+
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ case NEXTHOP_TYPE_IPV4:
+ /* OSPFv3 does not support IPv4 next hops. */
+ return false;
+
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ if (nha->ifindex != nhb->ifindex)
+ return false;
+ /* FALLTHROUGH */
+ case NEXTHOP_TYPE_IPV6:
+ if (!IN6_ARE_ADDR_EQUAL(&nha->address, &nhb->address))
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static inline void ospf6_nexthop_clear(struct ospf6_nexthop *nh)
+{
+ memset(nh, 0, sizeof(*nh));
+}
+
+static inline void ospf6_nexthop_copy(struct ospf6_nexthop *nha,
+ const struct ospf6_nexthop *nhb)
+{
+ memcpy(nha, nhb, sizeof(*nha));
+}
/* Path */
struct ospf6_ls_origin {
@@ -79,9 +117,6 @@ struct ospf6_path {
/* Optional Capabilities */
uint8_t options[3];
- /* Prefix Options */
- uint8_t prefix_options;
-
/* Associated Area */
in_addr_t area_id;
@@ -127,6 +162,9 @@ struct ospf6_route {
struct ospf6_route *prev;
struct ospf6_route *next;
+ /* Back pointer to ospf6 */
+ struct ospf6 *ospf6;
+
unsigned int lock;
/* Destination Type */
@@ -147,6 +185,9 @@ struct ospf6_route {
/* flag */
uint8_t flag;
+ /* Prefix Options */
+ uint8_t prefix_options;
+
/* route option */
void *route_option;
@@ -161,6 +202,12 @@ struct ospf6_route {
/* nexthop */
struct list *nh_list;
+
+ /* points to the summarised route */
+ struct ospf6_external_aggr_rt *aggr_route;
+
+ /* For Aggr routes */
+ bool to_be_processed;
};
#define OSPF6_DEST_TYPE_NONE 0
@@ -279,6 +326,7 @@ extern void ospf6_copy_nexthops(struct list *dst, struct list *src);
extern void ospf6_merge_nexthops(struct list *dst, struct list *src);
extern void ospf6_add_nexthop(struct list *nh_list, int ifindex,
struct in6_addr *addr);
+extern void ospf6_add_route_nexthop_blackhole(struct ospf6_route *route);
extern int ospf6_num_nexthops(struct list *nh_list);
extern int ospf6_route_cmp_nexthops(struct ospf6_route *a,
struct ospf6_route *b);
@@ -294,7 +342,7 @@ extern int ospf6_route_get_first_nh_index(struct ospf6_route *route);
#define ospf6_route_add_nexthop(route, ifindex, addr) \
ospf6_add_nexthop(route->nh_list, ifindex, addr)
-extern struct ospf6_route *ospf6_route_create(void);
+extern struct ospf6_route *ospf6_route_create(struct ospf6 *ospf6);
extern void ospf6_route_delete(struct ospf6_route *);
extern struct ospf6_route *ospf6_route_copy(struct ospf6_route *route);
extern int ospf6_route_cmp(struct ospf6_route *ra, struct ospf6_route *rb);
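Converting the next-hop macros to inline functions also changes semantics: comparison is now type-aware. A small hypothetical illustration, not part of the patch:

static bool example_nexthop_compare(void)
{
	struct ospf6_nexthop a = { .type = NEXTHOP_TYPE_IFINDEX,
				   .ifindex = 3 };
	struct ospf6_nexthop b = { .type = NEXTHOP_TYPE_IPV6_IFINDEX,
				   .ifindex = 3 };

	/* The old macro compared only ifindex and address and would have
	 * called these equal; the inline helper reports false because the
	 * next-hop types differ. */
	return ospf6_nexthop_is_same(&a, &b);
}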
diff --git a/ospf6d/ospf6_spf.c b/ospf6d/ospf6_spf.c
index 051b3a63ef..4e7a7146eb 100644
--- a/ospf6d/ospf6_spf.c
+++ b/ospf6d/ospf6_spf.c
@@ -374,7 +374,7 @@ static int ospf6_spf_install(struct ospf6_vertex *v,
up to here. */
assert(route == NULL);
- route = ospf6_route_create();
+ route = ospf6_route_create(v->area->ospf6);
memcpy(&route->prefix, &v->vertex_id, sizeof(struct prefix));
route->type = OSPF6_DEST_TYPE_LINKSTATE;
route->path.type = OSPF6_PATH_TYPE_INTRA;
diff --git a/ospf6d/ospf6_top.c b/ospf6d/ospf6_top.c
index 6f40989efd..6105e2c24b 100644
--- a/ospf6d/ospf6_top.c
+++ b/ospf6d/ospf6_top.c
@@ -409,13 +409,31 @@ static struct ospf6 *ospf6_create(const char *name)
o->external_table = OSPF6_ROUTE_TABLE_CREATE(GLOBAL, EXTERNAL_ROUTES);
o->external_table->scope = o;
-
+ /* Start external_id at 1 so that LS ID 0 can be treated as invalid for
+ * self-originated external LSAs. This makes it possible to tell from the
+ * route data whether an LSA was ever originated for a route, because
+ * rt->route_option->id defaults to 0.
+ * Example: an external LSA with LS ID 0.0.0.0 is originated for route
+ * 1::1 (id 0). Another route 2::2 exists for which no LSA was originated
+ * due to some configuration, so its id is also still 0. If 2::2 is then
+ * deleted, the lookup for LS ID 0 would find and delete the LSA that
+ * actually belongs to 1::1. Reserving LS ID 0 avoids this.
+ */
+ o->external_id = OSPF6_EXT_INIT_LS_ID;
o->external_id_table = route_table_init();
o->write_oi_count = OSPF6_WRITE_INTERFACE_COUNT_DEFAULT;
o->ref_bandwidth = OSPF6_REFERENCE_BANDWIDTH;
o->distance_table = route_table_init();
+
+ o->rt_aggr_tbl = route_table_init();
+ o->aggr_delay_interval = OSPF6_EXTL_AGGR_DEFAULT_DELAY;
+ o->t_external_aggr = NULL;
+ o->aggr_action = OSPF6_ROUTE_AGGR_NONE;
+
o->fd = -1;
o->max_multipath = MULTIPATH_NUM;
@@ -461,6 +479,7 @@ struct ospf6 *ospf6_instance_create(const char *name)
void ospf6_delete(struct ospf6 *o)
{
struct listnode *node, *nnode;
+ struct route_node *rn = NULL;
struct ospf6_area *oa;
struct vrf *vrf;
@@ -499,6 +518,11 @@ void ospf6_delete(struct ospf6 *o)
ospf6_vrf_unlink(o, vrf);
}
+ for (rn = route_top(o->rt_aggr_tbl); rn; rn = route_next(rn))
+ if (rn->info)
+ ospf6_external_aggregator_free(rn->info);
+ route_table_finish(o->rt_aggr_tbl);
+
XFREE(MTYPE_OSPF6_TOP, o->name);
XFREE(MTYPE_OSPF6_TOP, o);
}
@@ -527,6 +551,7 @@ static void ospf6_disable(struct ospf6 *o)
THREAD_OFF(o->t_ase_calc);
THREAD_OFF(o->t_distribute_update);
THREAD_OFF(o->t_ospf6_receive);
+ THREAD_OFF(o->t_external_aggr);
}
}
@@ -690,6 +715,7 @@ static void ospf6_process_reset(struct ospf6 *ospf6)
struct interface *ifp;
struct vrf *vrf = vrf_lookup_by_id(ospf6->vrf_id);
+ ospf6_unset_all_aggr_flag(ospf6);
ospf6_flush_self_originated_lsas_now(ospf6);
ospf6->inst_shutdown = 0;
ospf6_db_clear(ospf6);
@@ -989,7 +1015,6 @@ DEFUN_HIDDEN (ospf6_interface_area,
struct ospf6_interface *oi;
struct interface *ifp;
vrf_id_t vrf_id = VRF_DEFAULT;
- int ipv6_count = 0;
uint32_t area_id;
int format;
@@ -1012,23 +1037,6 @@ DEFUN_HIDDEN (ospf6_interface_area,
return CMD_SUCCESS;
}
- /* if more than OSPF6_MAX_IF_ADDRS are configured on this interface
- * then don't allow ospfv3 to be configured
- */
- ipv6_count = connected_count_by_family(ifp, AF_INET6);
- if (oi->ifmtu == OSPF6_DEFAULT_MTU && ipv6_count > OSPF6_MAX_IF_ADDRS) {
- vty_out(vty,
- "can not configure OSPFv3 on if %s, must have less than %d interface addresses but has %d addresses\n",
- ifp->name, OSPF6_MAX_IF_ADDRS, ipv6_count);
- return CMD_WARNING_CONFIG_FAILED;
- } else if (oi->ifmtu >= OSPF6_JUMBO_MTU
- && ipv6_count > OSPF6_MAX_IF_ADDRS_JUMBO) {
- vty_out(vty,
- "can not configure OSPFv3 on if %s, must have less than %d interface addresses but has %d addresses\n",
- ifp->name, OSPF6_MAX_IF_ADDRS_JUMBO, ipv6_count);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
if (str2area_id(argv[idx_ipv4]->arg, &area_id, &format)) {
vty_out(vty, "Malformed Area-ID: %s\n", argv[idx_ipv4]->arg);
return CMD_WARNING_CONFIG_FAILED;
@@ -1672,6 +1680,424 @@ DEFUN(show_ipv6_ospf6_route_type_detail, show_ipv6_ospf6_route_type_detail_cmd,
return CMD_SUCCESS;
}
+bool ospf6_is_valid_summary_addr(struct vty *vty, struct prefix *p)
+{
+ struct in6_addr addr_zero;
+
+ memset(&addr_zero, 0, sizeof(struct in6_addr));
+
+ /* Default prefix validation */
+ if ((is_default_prefix((struct prefix *)p))
+ || (!memcmp(&p->u.prefix6, &addr_zero, sizeof(struct in6_addr)))) {
+ vty_out(vty, "Default address should not be configured as summary address.\n");
+ return false;
+ }
+
+ /* Host route should not be configured as summary address */
+ if (p->prefixlen == IPV6_MAX_BITLEN) {
+ vty_out(vty, "Host route should not be configured as summary address.\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* External Route Aggregation */
+DEFPY (ospf6_external_route_aggregation,
+ ospf6_external_route_aggregation_cmd,
+ "summary-address X:X::X:X/M$prefix [tag (1-4294967295)] [{metric (0-16777215) | metric-type (1-2)$mtype}]",
+ "External summary address\n"
+ "Specify IPv6 prefix\n"
+ "Router tag \n"
+ "Router tag value\n"
+ "Metric \n"
+ "Advertised metric for this route\n"
+ "OSPFv3 exterior metric type for summarised routes\n"
+ "Set OSPFv3 External Type 1/2 metrics\n")
+{
+ VTY_DECLVAR_CONTEXT(ospf6, ospf6);
+
+ struct prefix p;
+ int ret = CMD_SUCCESS;
+
+ p.family = AF_INET6;
+ ret = str2prefix(prefix_str, &p);
+ if (ret == 0) {
+ vty_out(vty, "Malformed prefix\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* Apply mask for given prefix. */
+ apply_mask((struct prefix *)&p);
+
+ if (!ospf6_is_valid_summary_addr(vty, &p))
+ return CMD_WARNING_CONFIG_FAILED;
+
+ if (!tag_str)
+ tag = 0;
+
+ if (!metric_str)
+ metric = -1;
+
+ if (!mtype_str)
+ mtype = DEFAULT_METRIC_TYPE;
+
+ ret = ospf6_external_aggr_config_set(ospf6, &p, tag, metric, mtype);
+ if (ret == OSPF6_FAILURE) {
+ vty_out(vty, "Invalid configuration!!\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(no_ospf6_external_route_aggregation,
+ no_ospf6_external_route_aggregation_cmd,
+ "no summary-address X:X::X:X/M$prefix [tag (1-4294967295)] [{metric (0-16777215) | metric-type (1-2)}]",
+ NO_STR
+ "External summary address\n"
+ "Specify IPv6 prefix\n"
+ "Router tag\n"
+ "Router tag value\n"
+ "Metric \n"
+ "Advertised metric for this route\n"
+ "OSPFv3 exterior metric type for summarised routes\n"
+ "Set OSPFv3 External Type 1/2 metrics\n")
+{
+ VTY_DECLVAR_CONTEXT(ospf6, ospf6);
+
+ struct prefix p;
+ int ret = CMD_SUCCESS;
+
+ ret = str2prefix(prefix_str, &p);
+ if (ret == 0) {
+ vty_out(vty, "Malformed prefix\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* Apply mask for given prefix. */
+ apply_mask((struct prefix *)&p);
+
+ if (!ospf6_is_valid_summary_addr(vty, &p))
+ return CMD_WARNING_CONFIG_FAILED;
+
+ ret = ospf6_external_aggr_config_unset(ospf6, &p);
+ if (ret == OSPF6_INVALID)
+ vty_out(vty, "Invalid configuration!!\n");
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (ospf6_external_route_aggregation_no_advertise,
+ ospf6_external_route_aggregation_no_advertise_cmd,
+ "summary-address X:X::X:X/M$prefix no-advertise",
+ "External summary address\n"
+ "Specify IPv6 prefix\n"
+ "Don't advertise summary route \n")
+{
+ VTY_DECLVAR_CONTEXT(ospf6, ospf6);
+
+ struct prefix p;
+ int ret = CMD_SUCCESS;
+
+ ret = str2prefix(prefix_str, &p);
+ if (ret == 0) {
+ vty_out(vty, "Malformed prefix\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* Apply mask for given prefix. */
+ apply_mask((struct prefix *)&p);
+
+ if (!ospf6_is_valid_summary_addr(vty, &p))
+ return CMD_WARNING_CONFIG_FAILED;
+
+ ret = ospf6_asbr_external_rt_no_advertise(ospf6, &p);
+ if (ret == OSPF6_INVALID)
+ vty_out(vty, "!!Invalid configuration\n");
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (no_ospf6_external_route_aggregation_no_advertise,
+ no_ospf6_external_route_aggregation_no_advertise_cmd,
+ "no summary-address X:X::X:X/M$prefix no-advertise",
+ NO_STR
+ "External summary address\n"
+ "Specify IPv6 prefix\n"
+ "Adverise summary route to the AS \n")
+{
+ VTY_DECLVAR_CONTEXT(ospf6, ospf6);
+
+ struct prefix p;
+ int ret = CMD_SUCCESS;
+
+ ret = str2prefix(prefix_str, &p);
+ if (ret == 0) {
+ vty_out(vty, "Malformed prefix\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ /* Apply mask for given prefix. */
+ apply_mask((struct prefix *)&p);
+
+ if (!ospf6_is_valid_summary_addr(vty, &p))
+ return CMD_WARNING_CONFIG_FAILED;
+
+ ret = ospf6_asbr_external_rt_advertise(ospf6, &p);
+ if (ret == OSPF6_INVALID)
+ vty_out(vty, "!!Invalid configuration\n");
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (ospf6_route_aggregation_timer,
+ ospf6_route_aggregation_timer_cmd,
+ "aggregation timer (5-1800)",
+ "External route aggregation\n"
+ "Delay timer (in seconds)\n"
+ "Timer interval(in seconds)\n")
+{
+ VTY_DECLVAR_CONTEXT(ospf6, ospf6);
+
+ ospf6_external_aggr_delay_timer_set(ospf6, timer);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (no_ospf6_route_aggregation_timer,
+ no_ospf6_route_aggregation_timer_cmd,
+ "no aggregation timer [5-1800]",
+ NO_STR
+ "External route aggregation\n"
+ "Delay timer\n"
+ "Timer interval(in seconds)\n")
+{
+ VTY_DECLVAR_CONTEXT(ospf6, ospf6);
+
+ ospf6_external_aggr_delay_timer_set(ospf6,
+ OSPF6_EXTL_AGGR_DEFAULT_DELAY);
+ return CMD_SUCCESS;
+}
+
+static int
+ospf6_print_vty_external_routes_walkcb(struct hash_bucket *bucket, void *arg)
+{
+ struct ospf6_route *rt = bucket->data;
+ struct vty *vty = (struct vty *)arg;
+ static unsigned int count;
+
+ vty_out(vty, "%pFX ", &rt->prefix);
+
+ count++;
+
+ if (count % 5 == 0)
+ vty_out(vty, "\n");
+
+ if (OSPF6_EXTERNAL_RT_COUNT(rt->aggr_route) == count)
+ count = 0;
+
+ return HASHWALK_CONTINUE;
+}
+
+static int
+ospf6_print_json_external_routes_walkcb(struct hash_bucket *bucket, void *arg)
+{
+ struct ospf6_route *rt = bucket->data;
+ struct json_object *json = (struct json_object *)arg;
+ char buf[PREFIX2STR_BUFFER];
+ char exnalbuf[20];
+ static unsigned int count;
+
+ prefix2str(&rt->prefix, buf, sizeof(buf));
+
+ snprintf(exnalbuf, sizeof(exnalbuf), "Exnl Addr-%d", count);
+
+ json_object_string_add(json, exnalbuf, buf);
+
+ count++;
+
+ if (OSPF6_EXTERNAL_RT_COUNT(rt->aggr_route) == count)
+ count = 0;
+
+ return HASHWALK_CONTINUE;
+}
+
+static void
+ospf6_show_vrf_name(struct vty *vty, struct ospf6 *ospf6,
+ json_object *json)
+{
+ if (json) {
+ if (ospf6->vrf_id == VRF_DEFAULT)
+ json_object_string_add(json, "vrfName",
+ "default");
+ else
+ json_object_string_add(json, "vrfName",
+ ospf6->name);
+ json_object_int_add(json, "vrfId", ospf6->vrf_id);
+ } else {
+ if (ospf6->vrf_id == VRF_DEFAULT)
+ vty_out(vty, "VRF Name: %s\n", "default");
+ else if (ospf6->name)
+ vty_out(vty, "VRF Name: %s\n", ospf6->name);
+ }
+}
+
+static int
+ospf6_show_summary_address(struct vty *vty, struct ospf6 *ospf6,
+ json_object *json,
+ bool uj, const char *detail)
+{
+ struct route_node *rn;
+ static const char header[] = "Summary-address Metric-type Metric Tag External_Rt_count\n";
+ json_object *json_vrf = NULL;
+
+ if (!uj) {
+ ospf6_show_vrf_name(vty, ospf6, json_vrf);
+ vty_out(vty, "aggregation delay interval :%d(in seconds)\n\n",
+ ospf6->aggr_delay_interval);
+ vty_out(vty, "%s\n", header);
+ } else {
+ json_vrf = json_object_new_object();
+
+ ospf6_show_vrf_name(vty, ospf6, json_vrf);
+
+ json_object_int_add(json_vrf, "aggregation delay interval",
+ ospf6->aggr_delay_interval);
+ }
+
+
+ for (rn = route_top(ospf6->rt_aggr_tbl); rn; rn = route_next(rn)) {
+ if (!rn->info)
+ continue;
+
+ struct ospf6_external_aggr_rt *aggr = rn->info;
+ json_object *json_aggr = NULL;
+ char buf[PREFIX2STR_BUFFER];
+
+ prefix2str(&aggr->p, buf, sizeof(buf));
+
+ if (uj) {
+
+ json_aggr = json_object_new_object();
+
+ json_object_object_add(json_vrf,
+ buf,
+ json_aggr);
+
+ json_object_string_add(json_aggr,
+ "Summary address",
+ buf);
+
+ json_object_string_add(
+ json_aggr, "Metric-type",
+ (aggr->mtype == DEFAULT_METRIC_TYPE)
+ ? "E2"
+ : "E1");
+
+ json_object_int_add(json_aggr, "Metric",
+ (aggr->metric != -1)
+ ? aggr->metric
+ : DEFAULT_DEFAULT_METRIC);
+
+ json_object_int_add(json_aggr, "Tag",
+ aggr->tag);
+
+ json_object_int_add(json_aggr,
+ "External route count",
+ OSPF6_EXTERNAL_RT_COUNT(aggr));
+
+ if (OSPF6_EXTERNAL_RT_COUNT(aggr) && detail) {
+ json_object_int_add(json_aggr, "ID",
+ aggr->id);
+ json_object_int_add(json_aggr, "Flags",
+ aggr->aggrflags);
+ hash_walk(aggr->match_extnl_hash,
+ ospf6_print_json_external_routes_walkcb,
+ json_aggr);
+ }
+
+ } else {
+ vty_out(vty, "%-22s", buf);
+
+ (aggr->mtype == DEFAULT_METRIC_TYPE)
+ ? vty_out(vty, "%-16s", "E2")
+ : vty_out(vty, "%-16s", "E1");
+ vty_out(vty, "%-11d", (aggr->metric != -1)
+ ? aggr->metric
+ : DEFAULT_DEFAULT_METRIC);
+
+ vty_out(vty, "%-12u", aggr->tag);
+
+ vty_out(vty, "%-5ld\n",
+ OSPF6_EXTERNAL_RT_COUNT(aggr));
+
+ if (OSPF6_EXTERNAL_RT_COUNT(aggr) && detail) {
+ vty_out(vty,
+ "Matched External routes:\n");
+ hash_walk(aggr->match_extnl_hash,
+ ospf6_print_vty_external_routes_walkcb,
+ vty);
+ vty_out(vty, "\n");
+ }
+
+ vty_out(vty, "\n");
+ }
+ }
+
+ if (uj)
+ json_object_object_add(json, ospf6->name,
+ json_vrf);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (show_ipv6_ospf6_external_aggregator,
+ show_ipv6_ospf6_external_aggregator_cmd,
+ "show ipv6 ospf6 [vrf <NAME|all>] summary-address [detail$detail] [json]",
+ SHOW_STR
+ IP6_STR
+ OSPF6_STR
+ VRF_CMD_HELP_STR
+ "All VRFs\n"
+ "Show external summary addresses\n"
+ "detailed informtion\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ struct ospf6 *ospf6 = NULL;
+ json_object *json = NULL;
+ const char *vrf_name = NULL;
+ struct listnode *node;
+ bool all_vrf = false;
+ int idx_vrf = 0;
+
+ if (uj)
+ json = json_object_new_object();
+
+ OSPF6_CMD_CHECK_RUNNING();
+ OSPF6_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
+
+ for (ALL_LIST_ELEMENTS_RO(om6->ospf6, node, ospf6)) {
+ if (all_vrf || strcmp(ospf6->name, vrf_name) == 0) {
+
+ ospf6_show_summary_address(vty, ospf6, json, uj,
+ detail);
+
+ if (!all_vrf)
+ break;
+ }
+ }
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+
+ return CMD_SUCCESS;
+}
+
static void ospf6_stub_router_config_write(struct vty *vty, struct ospf6 *ospf6)
{
if (CHECK_FLAG(ospf6->flag, OSPF6_STUB_ROUTER)) {
@@ -1711,6 +2137,44 @@ static int ospf6_distance_config_write(struct vty *vty, struct ospf6 *ospf6)
return 0;
}
+static int ospf6_asbr_summary_config_write(struct vty *vty, struct ospf6 *ospf6)
+{
+ struct route_node *rn;
+ struct ospf6_external_aggr_rt *aggr;
+ char buf[PREFIX2STR_BUFFER];
+
+ if (ospf6->aggr_delay_interval != OSPF6_EXTL_AGGR_DEFAULT_DELAY)
+ vty_out(vty, " aggregation timer %u\n",
+ ospf6->aggr_delay_interval);
+
+ /* print 'summary-address A:B::C:D/M' */
+ for (rn = route_top(ospf6->rt_aggr_tbl); rn; rn = route_next(rn)) {
+ if (!rn->info)
+ continue;
+
+ aggr = rn->info;
+
+ prefix2str(&aggr->p, buf, sizeof(buf));
+ vty_out(vty, " summary-address %s", buf);
+ if (aggr->tag)
+ vty_out(vty, " tag %u", aggr->tag);
+
+ if (aggr->metric != -1)
+ vty_out(vty, " metric %d", aggr->metric);
+
+ if (aggr->mtype != DEFAULT_METRIC_TYPE)
+ vty_out(vty, " metric-type %d", aggr->mtype);
+
+ if (CHECK_FLAG(aggr->aggrflags,
+ OSPF6_EXTERNAL_AGGRT_NO_ADVERTISE))
+ vty_out(vty, " no-advertise");
+
+ vty_out(vty, "\n");
+ }
+
+ return 0;
+}
+
/* OSPF configuration write function. */
static int config_write_ospf6(struct vty *vty)
{
@@ -1768,6 +2232,7 @@ static int config_write_ospf6(struct vty *vty)
ospf6_spf_config_write(vty, ospf6);
ospf6_distance_config_write(vty, ospf6);
ospf6_distribute_config_write(vty, ospf6);
+ ospf6_asbr_summary_config_write(vty, ospf6);
vty_out(vty, "!\n");
}
@@ -1826,6 +2291,17 @@ void ospf6_top_init(void)
install_element(OSPF6_NODE, &ospf6_max_multipath_cmd);
install_element(OSPF6_NODE, &no_ospf6_max_multipath_cmd);
+ /* ASBR Summarisation */
+ install_element(OSPF6_NODE, &ospf6_external_route_aggregation_cmd);
+ install_element(OSPF6_NODE, &no_ospf6_external_route_aggregation_cmd);
+ install_element(OSPF6_NODE,
+ &ospf6_external_route_aggregation_no_advertise_cmd);
+ install_element(OSPF6_NODE,
+ &no_ospf6_external_route_aggregation_no_advertise_cmd);
+ install_element(OSPF6_NODE, &ospf6_route_aggregation_timer_cmd);
+ install_element(OSPF6_NODE, &no_ospf6_route_aggregation_timer_cmd);
+ install_element(VIEW_NODE, &show_ipv6_ospf6_external_aggregator_cmd);
+
install_element(OSPF6_NODE, &ospf6_distance_cmd);
install_element(OSPF6_NODE, &no_ospf6_distance_cmd);
install_element(OSPF6_NODE, &ospf6_distance_ospf6_cmd);
diff --git a/ospf6d/ospf6_top.h b/ospf6d/ospf6_top.h
index 3eb423f681..fe02cd3f84 100644
--- a/ospf6d/ospf6_top.h
+++ b/ospf6d/ospf6_top.h
@@ -91,6 +91,7 @@ struct ospf6 {
struct ospf6_route_table *external_table;
struct route_table *external_id_table;
+#define OSPF6_EXT_INIT_LS_ID 1
uint32_t external_id;
/* OSPF6 redistribute configuration */
@@ -130,6 +131,7 @@ struct ospf6 {
struct thread *maxage_remover;
struct thread *t_distribute_update; /* Distirbute update timer. */
struct thread *t_ospf6_receive; /* OSPF6 receive timer */
+ struct thread *t_external_aggr; /* OSPF6 aggregation timer */
#define OSPF6_WRITE_INTERFACE_COUNT_DEFAULT 20
struct thread *t_write;
@@ -158,16 +160,22 @@ struct ospf6 {
struct list *oi_write_q;
uint32_t redist_count;
+
+ /* Action for aggregation of external LSAs */
+ int aggr_action;
+
+#define OSPF6_EXTL_AGGR_DEFAULT_DELAY 5
+ /* For ASBR summary delay timer */
+ int aggr_delay_interval;
+ /* Table of configured Aggregate addresses */
+ struct route_table *rt_aggr_tbl;
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(ospf6);
#define OSPF6_DISABLED 0x01
#define OSPF6_STUB_ROUTER 0x02
-#define OSPF6_MAX_IF_ADDRS 100
-#define OSPF6_MAX_IF_ADDRS_JUMBO 200
-#define OSPF6_DEFAULT_MTU 1500
-#define OSPF6_JUMBO_MTU 9000
/* global pointer for OSPF top data structure */
extern struct ospf6 *ospf6;
@@ -188,4 +196,5 @@ struct ospf6 *ospf6_lookup_by_vrf_id(vrf_id_t vrf_id);
struct ospf6 *ospf6_lookup_by_vrf_name(const char *name);
const char *ospf6_vrf_id_to_name(vrf_id_t vrf_id);
void ospf6_vrf_init(void);
+bool ospf6_is_valid_summary_addr(struct vty *vty, struct prefix *p);
#endif /* OSPF6_TOP_H */
diff --git a/ospf6d/ospf6_zebra.c b/ospf6d/ospf6_zebra.c
index 72bc3a2f3a..5403e643dc 100644
--- a/ospf6d/ospf6_zebra.c
+++ b/ospf6d/ospf6_zebra.c
@@ -131,38 +131,17 @@ void ospf6_zebra_no_redistribute(int type, vrf_id_t vrf_id)
static int ospf6_zebra_if_address_update_add(ZAPI_CALLBACK_ARGS)
{
struct connected *c;
- struct ospf6_interface *oi;
- int ipv6_count = 0;
c = zebra_interface_address_read(ZEBRA_INTERFACE_ADDRESS_ADD,
zclient->ibuf, vrf_id);
if (c == NULL)
return 0;
- oi = (struct ospf6_interface *)c->ifp->info;
- if (oi == NULL)
- oi = ospf6_interface_create(c->ifp);
- assert(oi);
-
if (IS_OSPF6_DEBUG_ZEBRA(RECV))
zlog_debug("Zebra Interface address add: %s %5s %pFX",
c->ifp->name, prefix_family_str(c->address),
c->address);
- ipv6_count = connected_count_by_family(c->ifp, AF_INET6);
- if (oi->ifmtu == OSPF6_DEFAULT_MTU && ipv6_count > OSPF6_MAX_IF_ADDRS) {
- zlog_warn(
- "Zebra Interface : %s has too many interface addresses %d only support %d, increase MTU",
- c->ifp->name, ipv6_count, OSPF6_MAX_IF_ADDRS);
- return 0;
- } else if (oi->ifmtu >= OSPF6_JUMBO_MTU
- && ipv6_count > OSPF6_MAX_IF_ADDRS_JUMBO) {
- zlog_warn(
- "Zebra Interface : %s has too many interface addresses %d only support %d",
- c->ifp->name, ipv6_count, OSPF6_MAX_IF_ADDRS_JUMBO);
- return 0;
- }
-
if (c->address->family == AF_INET6) {
ospf6_interface_state_update(c->ifp);
ospf6_interface_connected_route_update(c->ifp);
diff --git a/ospf6d/ospf6d.h b/ospf6d/ospf6d.h
index e054803df3..5afece9b0a 100644
--- a/ospf6d/ospf6d.h
+++ b/ospf6d/ospf6d.h
@@ -49,6 +49,10 @@ extern struct thread_master *master;
#define MSG_OK 0
#define MSG_NG 1
+#define OSPF6_SUCCESS 1
+#define OSPF6_FAILURE 0
+#define OSPF6_INVALID -1
+
/* cast macro: XXX - these *must* die, ick ick. */
#define OSPF6_PROCESS(x) ((struct ospf6 *) (x))
#define OSPF6_AREA(x) ((struct ospf6_area *) (x))
diff --git a/ospf6d/subdir.am b/ospf6d/subdir.am
index 3f9ff76f3b..78fb26b00e 100644
--- a/ospf6d/subdir.am
+++ b/ospf6d/subdir.am
@@ -90,6 +90,7 @@ ospf6d_ospf6d_snmp_la_LIBADD = lib/libfrrsnmp.la
clippy_scan += \
ospf6d/ospf6_top.c \
ospf6d/ospf6_asbr.c \
+ ospf6d/ospf6_lsa.c \
# end
nodist_ospf6d_ospf6d_SOURCES = \
diff --git a/ospfd/ospf_network.c b/ospfd/ospf_network.c
index 00fbdc21a1..be06afe532 100644
--- a/ospfd/ospf_network.c
+++ b/ospfd/ospf_network.c
@@ -190,7 +190,7 @@ int ospf_sock_init(struct ospf *ospf)
flog_err(EC_LIB_SOCKET,
"ospf_read_sock_init: socket: %s",
safe_strerror(errno));
- exit(1);
+ return -1;
}
#ifdef IP_HDRINCL
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index bf2a8564f0..a7b59e7916 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -286,7 +286,7 @@ DEFPY (ospf_router_id,
for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area))
if (area->full_nbrs) {
vty_out(vty,
- "For this router-id change to take effect, use “clear ip ospf process” command\n");
+ "For this router-id change to take effect, use \"clear ip ospf process\" command\n");
return CMD_SUCCESS;
}
@@ -319,7 +319,7 @@ DEFUN_HIDDEN (ospf_router_id_old,
for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area))
if (area->full_nbrs) {
vty_out(vty,
- "For this router-id change to take effect, use “clear ip ospf process” command\n");
+ "For this router-id change to take effect, use \"clear ip ospf process\" command\n");
return CMD_SUCCESS;
}
@@ -352,7 +352,7 @@ DEFPY (no_ospf_router_id,
for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area))
if (area->full_nbrs) {
vty_out(vty,
- "For this router-id change to take effect, use “clear ip ospf process” command\n");
+ "For this router-id change to take effect, use \"clear ip ospf process\" command\n");
return CMD_SUCCESS;
}
@@ -375,10 +375,27 @@ static void ospf_passive_interface_default_update(struct ospf *ospf,
ospf_if_set_multicast(oi);
}
-static void ospf_passive_interface_update(struct interface *ifp)
+static void ospf_passive_interface_update(struct interface *ifp,
+ struct ospf_if_params *params,
+ struct in_addr addr, uint8_t newval)
{
struct route_node *rn;
+ if (OSPF_IF_PARAM_CONFIGURED(params, passive_interface)) {
+ if (params->passive_interface == newval)
+ return;
+
+ params->passive_interface = newval;
+ UNSET_IF_PARAM(params, passive_interface);
+ if (params != IF_DEF_PARAMS(ifp)) {
+ ospf_free_if_params(ifp, addr);
+ ospf_if_update_params(ifp, addr);
+ }
+ } else {
+ params->passive_interface = newval;
+ SET_IF_PARAM(params, passive_interface);
+ }
+
/*
* XXX We should call ospf_if_set_multicast on exactly those
* interfaces for which the passive property changed. It is too much
@@ -457,10 +474,7 @@ DEFUN_HIDDEN (ospf_passive_interface_addr,
params = IF_DEF_PARAMS(ifp);
}
- params->passive_interface = OSPF_IF_PASSIVE;
- SET_IF_PARAM(params, passive_interface);
-
- ospf_passive_interface_update(ifp);
+ ospf_passive_interface_update(ifp, params, addr, OSPF_IF_PASSIVE);
return CMD_SUCCESS;
}
@@ -521,14 +535,7 @@ DEFUN_HIDDEN (no_ospf_passive_interface,
params = IF_DEF_PARAMS(ifp);
}
- params->passive_interface = OSPF_IF_ACTIVE;
- UNSET_IF_PARAM(params, passive_interface);
- if (params != IF_DEF_PARAMS(ifp)) {
- ospf_free_if_params(ifp, addr);
- ospf_if_update_params(ifp, addr);
- }
-
- ospf_passive_interface_update(ifp);
+ ospf_passive_interface_update(ifp, params, addr, OSPF_IF_ACTIVE);
return CMD_SUCCESS;
}
@@ -9082,7 +9089,7 @@ DEFUN (ip_ospf_passive,
{
VTY_DECLVAR_CONTEXT(interface, ifp);
int idx_ipv4 = 3;
- struct in_addr addr;
+ struct in_addr addr = {.s_addr = INADDR_ANY};
struct ospf_if_params *params;
int ret;
@@ -9099,10 +9106,7 @@ DEFUN (ip_ospf_passive,
params = IF_DEF_PARAMS(ifp);
}
- params->passive_interface = OSPF_IF_PASSIVE;
- SET_IF_PARAM(params, passive_interface);
-
- ospf_passive_interface_update(ifp);
+ ospf_passive_interface_update(ifp, params, addr, OSPF_IF_PASSIVE);
return CMD_SUCCESS;
}
@@ -9118,7 +9122,7 @@ DEFUN (no_ip_ospf_passive,
{
VTY_DECLVAR_CONTEXT(interface, ifp);
int idx_ipv4 = 4;
- struct in_addr addr;
+ struct in_addr addr = {.s_addr = INADDR_ANY};
struct ospf_if_params *params;
int ret;
@@ -9136,14 +9140,7 @@ DEFUN (no_ip_ospf_passive,
params = IF_DEF_PARAMS(ifp);
}
- params->passive_interface = OSPF_IF_ACTIVE;
- UNSET_IF_PARAM(params, passive_interface);
- if (params != IF_DEF_PARAMS(ifp)) {
- ospf_free_if_params(ifp, addr);
- ospf_if_update_params(ifp, addr);
- }
-
- ospf_passive_interface_update(ifp);
+ ospf_passive_interface_update(ifp, params, addr, OSPF_IF_ACTIVE);
return CMD_SUCCESS;
}
@@ -11932,7 +11929,11 @@ static int config_write_interface_one(struct vty *vty, struct vrf *vrf)
if (OSPF_IF_PARAM_CONFIGURED(params,
passive_interface)) {
- vty_out(vty, " ip ospf passive");
+ vty_out(vty, " %sip ospf passive",
+ params->passive_interface
+ == OSPF_IF_ACTIVE
+ ? "no "
+ : "");
if (params != IF_DEF_PARAMS(ifp) && rn)
vty_out(vty, " %pI4", &rn->p.u.prefix4);
vty_out(vty, "\n");
@@ -12622,22 +12623,11 @@ void ospf_vty_show_init(void)
install_element(VIEW_NODE, &show_ip_ospf_external_aggregator_cmd);
}
-static int config_write_interface(struct vty *vty);
-/* ospfd's interface node. */
-static struct cmd_node interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = config_write_interface,
-};
-
/* Initialization of OSPF interface. */
static void ospf_vty_if_init(void)
{
/* Install interface node. */
- install_node(&interface_node);
- if_cmd_init();
+ if_cmd_init(config_write_interface);
/* "ip ospf authentication" commands. */
install_element(INTERFACE_NODE, &ip_ospf_authentication_args_addr_cmd);
diff --git a/pbrd/pbr_map.h b/pbrd/pbr_map.h
index caeadb0644..694b915f48 100644
--- a/pbrd/pbr_map.h
+++ b/pbrd/pbr_map.h
@@ -85,6 +85,17 @@ struct pbr_map_sequence {
uint32_t ruleno;
/*
+ * src and dst ports
+ */
+ uint16_t src_prt;
+ uint16_t dst_prt;
+
+ /*
+ * The ip protocol we want to match on
+ */
+ uint8_t ip_proto;
+
+ /*
* Our policy Catchers
*/
struct prefix *src;
diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c
index 3d56fc3daa..2936d1e346 100644
--- a/pbrd/pbr_vty.c
+++ b/pbrd/pbr_vty.c
@@ -193,6 +193,76 @@ DEFPY(pbr_map_match_dst, pbr_map_match_dst_cmd,
return CMD_SUCCESS;
}
+DEFPY(pbr_map_match_ip_proto, pbr_map_match_ip_proto_cmd,
+ "[no] match ip-protocol [tcp|udp]$ip_proto",
+ NO_STR
+ "Match the rest of the command\n"
+ "Choose an ip-protocol\n"
+ "Match on tcp flows\n"
+ "Match on udp flows\n")
+{
+ struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
+ struct protoent *p;
+
+ if (!no) {
+ p = getprotobyname(ip_proto);
+ if (!p) {
+ vty_out(vty, "Unable to convert %s to proto id\n",
+ ip_proto);
+ return CMD_WARNING;
+ }
+
+ pbrms->ip_proto = p->p_proto;
+ } else
+ pbrms->ip_proto = 0;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(pbr_map_match_src_port, pbr_map_match_src_port_cmd,
+ "[no] match src-port (1-65535)$port",
+ NO_STR
+ "Match the rest of the command\n"
+ "Choose the source port to use\n"
+ "The Source Port\n")
+{
+ struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
+
+ if (!no) {
+ if (pbrms->src_prt == port)
+ return CMD_SUCCESS;
+ else
+ pbrms->src_prt = port;
+ } else
+ pbrms->src_prt = 0;
+
+ pbr_map_check(pbrms, true);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(pbr_map_match_dst_port, pbr_map_match_dst_port_cmd,
+ "[no] match dst-port (1-65535)$port",
+ NO_STR
+ "Match the rest of the command\n"
+ "Choose the destination port to use\n"
+ "The Destination Port\n")
+{
+ struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
+
+ if (!no) {
+ if (pbrms->dst_prt == port)
+ return CMD_SUCCESS;
+ else
+ pbrms->dst_prt = port;
+ } else
+ pbrms->dst_prt = 0;
+
+ pbr_map_check(pbrms, true);
+
+ return CMD_SUCCESS;
+}
+
DEFPY(pbr_map_match_dscp, pbr_map_match_dscp_cmd,
"[no] match dscp DSCP$dscp",
NO_STR
@@ -674,6 +744,13 @@ static void vty_show_pbrms(struct vty *vty,
pbrms->installed ? "yes" : "no",
pbrms->reason ? rbuf : "Valid");
+ if (pbrms->ip_proto) {
+ struct protoent *p;
+
+ p = getprotobynumber(pbrms->ip_proto);
+ vty_out(vty, " IP Protocol Match: %s\n", p->p_name);
+ }
+
if (pbrms->src)
vty_out(vty, " SRC Match: %pFX\n", pbrms->src);
if (pbrms->dst)
@@ -1023,15 +1100,6 @@ DEFUN_NOSH(show_debugging_pbr,
/* ------------------------------------------------------------------------- */
-static int pbr_interface_config_write(struct vty *vty);
-static struct cmd_node interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = pbr_interface_config_write,
-};
-
static int pbr_interface_config_write(struct vty *vty)
{
struct interface *ifp;
@@ -1079,6 +1147,18 @@ static int pbr_vty_map_config_write_sequence(struct vty *vty,
if (pbrms->dst)
vty_out(vty, " match dst-ip %pFX\n", pbrms->dst);
+ if (pbrms->src_prt)
+ vty_out(vty, " match src-port %u\n", pbrms->src_prt);
+ if (pbrms->dst_prt)
+ vty_out(vty, " match dst-port %u\n", pbrms->dst_prt);
+
+ if (pbrms->ip_proto) {
+ struct protoent *p;
+
+ p = getprotobynumber(pbrms->ip_proto);
+ vty_out(vty, " match ip-protocol %s\n", p->p_name);
+ }
+
if (pbrms->dsfield & PBR_DSFIELD_DSCP)
vty_out(vty, " match dscp %u\n",
(pbrms->dsfield & PBR_DSFIELD_DSCP) >> 2);
@@ -1151,8 +1231,7 @@ void pbr_vty_init(void)
vrf_cmd_init(NULL, &pbr_privs);
- install_node(&interface_node);
- if_cmd_init();
+ if_cmd_init(pbr_interface_config_write);
install_node(&pbr_map_node);
@@ -1169,6 +1248,9 @@ void pbr_vty_init(void)
install_element(CONFIG_NODE, &pbr_set_table_range_cmd);
install_element(CONFIG_NODE, &no_pbr_set_table_range_cmd);
install_element(INTERFACE_NODE, &pbr_policy_cmd);
+ install_element(PBRMAP_NODE, &pbr_map_match_ip_proto_cmd);
+ install_element(PBRMAP_NODE, &pbr_map_match_src_port_cmd);
+ install_element(PBRMAP_NODE, &pbr_map_match_dst_port_cmd);
install_element(PBRMAP_NODE, &pbr_map_match_src_cmd);
install_element(PBRMAP_NODE, &pbr_map_match_dst_cmd);
install_element(PBRMAP_NODE, &pbr_map_match_dscp_cmd);
diff --git a/pbrd/pbr_zebra.c b/pbrd/pbr_zebra.c
index fc5303c9d8..28def509d5 100644
--- a/pbrd/pbr_zebra.c
+++ b/pbrd/pbr_zebra.c
@@ -534,10 +534,11 @@ static void pbr_encode_pbr_map_sequence(struct stream *s,
stream_putl(s, pbrms->seqno);
stream_putl(s, pbrms->ruleno);
stream_putl(s, pbrms->unique);
+ stream_putc(s, pbrms->ip_proto); /* The ip_proto */
pbr_encode_pbr_map_sequence_prefix(s, pbrms->src, family);
- stream_putw(s, 0); /* src port */
+ stream_putw(s, pbrms->src_prt);
pbr_encode_pbr_map_sequence_prefix(s, pbrms->dst, family);
- stream_putw(s, 0); /* dst port */
+ stream_putw(s, pbrms->dst_prt);
stream_putc(s, pbrms->dsfield);
stream_putl(s, pbrms->mark);
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index ac9b15fb52..273100c492 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -72,14 +72,6 @@
#include "pimd/pim_cmd_clippy.c"
#endif
-static struct cmd_node interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = pim_interface_config_write,
-};
-
static struct cmd_node debug_node = {
.name = "debug",
.node = DEBUG_NODE,
@@ -7187,7 +7179,7 @@ DEFPY (pim_register_accept_list,
DEFUN (ip_pim_joinprune_time,
ip_pim_joinprune_time_cmd,
- "ip pim join-prune-interval (60-600)",
+ "ip pim join-prune-interval (5-600)",
IP_STR
"pim multicast routing\n"
"Join Prune Send Interval\n"
@@ -7201,7 +7193,7 @@ DEFUN (ip_pim_joinprune_time,
DEFUN (no_ip_pim_joinprune_time,
no_ip_pim_joinprune_time_cmd,
- "no ip pim join-prune-interval (60-600)",
+ "no ip pim join-prune-interval (5-600)",
NO_STR
IP_STR
"pim multicast routing\n"
@@ -11104,8 +11096,7 @@ DEFUN_HIDDEN (ip_pim_mlag,
void pim_cmd_init(void)
{
- install_node(&interface_node); /* INTERFACE_NODE */
- if_cmd_init();
+ if_cmd_init(pim_interface_config_write);
install_node(&debug_node);
diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c
index 3325b6ee34..71b2d9187a 100644
--- a/pimd/pim_igmp.c
+++ b/pimd/pim_igmp.c
@@ -473,15 +473,33 @@ static int igmp_v1_recv_report(struct igmp_sock *igmp, struct in_addr from,
return 0;
}
-bool pim_igmp_verify_header(struct ip *ip_hdr, size_t len, int igmp_msg_len,
- int msg_type)
+bool pim_igmp_verify_header(struct ip *ip_hdr, size_t len, size_t *hlen)
{
+ char *igmp_msg;
+ int igmp_msg_len;
+ int msg_type;
+ size_t ip_hlen; /* ip header length in bytes */
+
if (len < sizeof(*ip_hdr)) {
zlog_warn("IGMP packet size=%zu shorter than minimum=%zu", len,
sizeof(*ip_hdr));
return false;
}
+ ip_hlen = ip_hdr->ip_hl << 2; /* ip_hl gives length in 4-byte words */
+ *hlen = ip_hlen;
+
+ if (ip_hlen > len) {
+ zlog_warn(
+ "IGMP packet header claims size %zu, but we only have %zu bytes",
+ ip_hlen, len);
+ return false;
+ }
+
+ igmp_msg = (char *)ip_hdr + ip_hlen;
+ igmp_msg_len = len - ip_hlen;
+ msg_type = *igmp_msg;
+
if (igmp_msg_len < PIM_IGMP_MIN_LEN) {
zlog_warn("IGMP message size=%d shorter than minimum=%d",
igmp_msg_len, PIM_IGMP_MIN_LEN);
@@ -494,7 +512,7 @@ bool pim_igmp_verify_header(struct ip *ip_hdr, size_t len, int igmp_msg_len,
zlog_warn(
"Recv IGMP packet with invalid ttl=%u, discarding the packet",
ip_hdr->ip_ttl);
- return -1;
+ return false;
}
}
@@ -505,7 +523,7 @@ bool pim_igmp_verify_header(struct ip *ip_hdr, size_t len, int igmp_msg_len,
if (ip_hdr->ip_tos != IPTOS_PREC_INTERNETCONTROL) {
zlog_warn("Received IGMP Packet with invalid TOS %u",
ip_hdr->ip_tos);
- return -1;
+ return false;
}
}
@@ -514,7 +532,7 @@ bool pim_igmp_verify_header(struct ip *ip_hdr, size_t len, int igmp_msg_len,
int pim_igmp_packet(struct igmp_sock *igmp, char *buf, size_t len)
{
- struct ip *ip_hdr;
+ struct ip *ip_hdr = (struct ip *)buf;
size_t ip_hlen; /* ip header length in bytes */
char *igmp_msg;
int igmp_msg_len;
@@ -522,16 +540,8 @@ int pim_igmp_packet(struct igmp_sock *igmp, char *buf, size_t len)
char from_str[INET_ADDRSTRLEN];
char to_str[INET_ADDRSTRLEN];
- ip_hdr = (struct ip *)buf;
-
- ip_hlen = ip_hdr->ip_hl << 2; /* ip_hl gives length in 4-byte words */
-
- if (ip_hlen > len) {
- zlog_warn(
- "IGMP packet header claims size %zu, but we only have %zu bytes",
- ip_hlen, len);
+ if (!pim_igmp_verify_header(ip_hdr, len, &ip_hlen))
return -1;
- }
igmp_msg = buf + ip_hlen;
igmp_msg_len = len - ip_hlen;
@@ -547,14 +557,6 @@ int pim_igmp_packet(struct igmp_sock *igmp, char *buf, size_t len)
msg_type, igmp_msg_len);
}
- if (!pim_igmp_verify_header(ip_hdr, len, igmp_msg_len, msg_type)) {
- zlog_warn(
- "Recv IGMP packet from %s to %s on %s: size=%zu ttl=%u msg_type=%d msg_size=%d",
- from_str, to_str, igmp->interface->name, len,
- ip_hdr->ip_ttl, msg_type, igmp_msg_len);
- return -1;
- }
-
switch (msg_type) {
case PIM_IGMP_MEMBERSHIP_QUERY: {
int max_resp_code = igmp_msg[1];
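
After this refactor pim_igmp_verify_header() locates the IGMP payload itself and reports the IP header length back through *hlen, so a caller validates once and only then slices the buffer. A condensed sketch of the resulting caller pattern; the function name is illustrative, the body mirrors pim_igmp_packet() above:

#include "pimd/pim_igmp.h"

static int handle_igmp_sketch(struct igmp_sock *igmp, char *buf, size_t len)
{
	struct ip *ip_hdr = (struct ip *)buf;
	size_t ip_hlen;
	char *igmp_msg;
	int igmp_msg_len;
	int msg_type;

	/* One call now covers total length, IP header length, TTL/TOS and
	 * minimum IGMP message length checks. */
	if (!pim_igmp_verify_header(ip_hdr, len, &ip_hlen))
		return -1;

	igmp_msg = buf + ip_hlen;
	igmp_msg_len = len - ip_hlen;
	msg_type = *igmp_msg;

	/* ... dispatch on msg_type as before ... */
	(void)igmp;
	(void)igmp_msg_len;
	return msg_type;
}
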
diff --git a/pimd/pim_igmp.h b/pimd/pim_igmp.h
index 01bf02da9f..abb8af836b 100644
--- a/pimd/pim_igmp.h
+++ b/pimd/pim_igmp.h
@@ -116,8 +116,7 @@ void igmp_sock_delete(struct igmp_sock *igmp);
void igmp_sock_free(struct igmp_sock *igmp);
void igmp_sock_delete_all(struct interface *ifp);
int pim_igmp_packet(struct igmp_sock *igmp, char *buf, size_t len);
-bool pim_igmp_verify_header(struct ip *ip_hdr, size_t len, int igmp_msg_len,
- int msg_type);
+bool pim_igmp_verify_header(struct ip *ip_hdr, size_t len, size_t *ip_hlen);
void pim_igmp_general_query_on(struct igmp_sock *igmp);
void pim_igmp_general_query_off(struct igmp_sock *igmp);
void pim_igmp_other_querier_timer_on(struct igmp_sock *igmp);
diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c
index 2a8f0c1216..da8916ddbf 100644
--- a/pimd/pim_msdp.c
+++ b/pimd/pim_msdp.c
@@ -1217,6 +1217,7 @@ void pim_msdp_mg_free(struct pim_instance *pim, struct pim_msdp_mg **mgp)
if ((*mgp)->mbr_list)
list_delete(&(*mgp)->mbr_list);
+ SLIST_REMOVE(&pim->msdp.mglist, (*mgp), pim_msdp_mg, mg_entry);
XFREE(MTYPE_PIM_MSDP_MG, (*mgp));
}
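
pim_msdp_mg_free() used to free the mesh group while it was still linked on pim->msdp.mglist, leaving a dangling SLIST entry; the added SLIST_REMOVE() unlinks it first. The same unlink-before-free pattern in a self-contained form (generic types, not pimd's):

#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int value;
	SLIST_ENTRY(node) entries;
};
SLIST_HEAD(node_list, node);

/* Remove the element from its list before releasing the memory. */
static void node_free(struct node_list *head, struct node *n)
{
	SLIST_REMOVE(head, n, node, entries);
	free(n);
}
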
diff --git a/ripd/rip_interface.c b/ripd/rip_interface.c
index 7a8e10f30b..a2c86e3b22 100644
--- a/ripd/rip_interface.c
+++ b/ripd/rip_interface.c
@@ -1156,15 +1156,6 @@ int rip_show_network_config(struct vty *vty, struct rip *rip)
return 0;
}
-static int rip_interface_config_write(struct vty *vty);
-static struct cmd_node interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = rip_interface_config_write,
-};
-
void rip_interface_sync(struct interface *ifp)
{
struct vrf *vrf;
@@ -1204,8 +1195,7 @@ void rip_if_init(void)
hook_register_prio(if_del, 0, rip_interface_delete_hook);
/* Install interface node. */
- install_node(&interface_node);
- if_cmd_init();
+ if_cmd_init(rip_interface_config_write);
if_zapi_callbacks(rip_ifp_create, rip_ifp_up,
rip_ifp_down, rip_ifp_destroy);
}
diff --git a/ripngd/ripng_interface.c b/ripngd/ripng_interface.c
index 6828398617..f374fcb839 100644
--- a/ripngd/ripng_interface.c
+++ b/ripngd/ripng_interface.c
@@ -951,16 +951,6 @@ static int interface_config_write(struct vty *vty)
return write;
}
-static int interface_config_write(struct vty *vty);
-/* ripngd's interface node. */
-static struct cmd_node interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = interface_config_write,
-};
-
/* Initialization of interface. */
void ripng_if_init(void)
{
@@ -969,8 +959,7 @@ void ripng_if_init(void)
hook_register_prio(if_del, 0, ripng_if_delete_hook);
/* Install interface node. */
- install_node(&interface_node);
- if_cmd_init();
+ if_cmd_init(interface_config_write);
if_zapi_callbacks(ripng_ifp_create, ripng_ifp_up,
ripng_ifp_down, ripng_ifp_destroy);
}
diff --git a/tests/lib/script1.lua b/tests/lib/script1.lua
index e9ebc29bd9..6361c960a7 100644
--- a/tests/lib/script1.lua
+++ b/tests/lib/script1.lua
@@ -1 +1,54 @@
-a = a + b
+
+-- Positive testing
+
+function foo(a, b)
+ a = a + 1
+ b = b + 1
+ return {
+ a = a,
+ b = b,
+ }
+end
+
+function bar(a, b)
+ a = a + 1
+ b = b + 1
+ c = 303
+ return {
+ b = b,
+ c = c,
+ }
+end
+
+function fact(n)
+ -- outer function must return a table
+ -- inner functions can be used to recurse or as helpers
+ function helper(m)
+ if m == 0 then
+ return 1
+ else
+ return m * helper(m - 1)
+ end
+ end
+ return {
+ ans = helper(n)
+ }
+end
+
+-- Negative testing
+
+function bad_return1()
+end
+
+function bad_return2()
+ return 123
+end
+
+function bad_return3()
+ return {}
+end
+
+function bad_return4()
+ error("Something bad!")
+end
+
diff --git a/tests/lib/test_frrscript.c b/tests/lib/test_frrscript.c
index bd75cc5552..7b23045978 100644
--- a/tests/lib/test_frrscript.c
+++ b/tests/lib/test_frrscript.c
@@ -20,18 +20,85 @@
#include <zebra.h>
#include "lib/frrscript.h"
+#include "lib/frrlua.h"
int main(int argc, char **argv)
{
frrscript_init("./lib");
+ struct frrscript *fs = frrscript_new("script1");
+ int result;
+
+ /* Positive testing */
- struct frrscript *fs = frrscript_load("script1", NULL);
long long a = 100, b = 200;
- int result = frrscript_call(fs, ("a", &a), ("b", &b));
+ result = frrscript_load(fs, "foo", NULL);
+ assert(result == 0);
+ result = frrscript_call(fs, "foo", ("a", &a), ("b", &b));
+ assert(result == 0);
+ assert(a == 101);
+ assert(b == 201);
+
+ a = 100, b = 200;
+
+ result = frrscript_load(fs, "bar", NULL);
+ assert(result == 0);
+ result = frrscript_call(fs, "bar", ("a", &a), ("b", &b));
+ assert(result == 0);
+ long long *cptr = frrscript_get_result(fs, "bar", "c", lua_tointegerp);
+
+ /* a should not occur in the returned table in script */
+ assert(a == 100);
+ assert(b == 201);
+ assert(*cptr == 303);
+ XFREE(MTYPE_SCRIPT_RES, cptr);
+
+ long long n = 5;
+
+ result = frrscript_load(fs, "fact", NULL);
assert(result == 0);
- assert(a == 300);
- assert(b == 200);
+ result = frrscript_call(fs, "fact", ("n", &n));
+ assert(result == 0);
+ long long *ansptr =
+ frrscript_get_result(fs, "fact", "ans", lua_tointegerp);
+ assert(*ansptr == 120);
+ XFREE(MTYPE_SCRIPT_RES, ansptr);
+
+ /* Negative testing */
+
+	/* Function does not exist in script file */
+ result = frrscript_load(fs, "does_not_exist", NULL);
+ assert(result == 1);
+
+ /* Function was not (successfully) loaded */
+ result = frrscript_call(fs, "does_not_exist", ("a", &a), ("b", &b));
+ assert(result == 1);
+
+ /* Get result from a function that was not loaded */
+ long long *llptr =
+ frrscript_get_result(fs, "does_not_exist", "c", lua_tointegerp);
+ assert(llptr == NULL);
+
+ /* Function returns void */
+ result = frrscript_call(fs, "bad_return1");
+ assert(result == 1);
+
+ /* Function returns number */
+ result = frrscript_call(fs, "bad_return2");
+ assert(result == 1);
+
+ /* Get non-existent result from a function */
+ result = frrscript_call(fs, "bad_return3");
+ assert(result == 1);
+ long long *cllptr =
+ frrscript_get_result(fs, "bad_return3", "c", lua_tointegerp);
+ assert(cllptr == NULL);
+
+ /* Function throws exception */
+ result = frrscript_call(fs, "bad_return4");
+ assert(result == 1);
+
+ frrscript_delete(fs);
return 0;
}
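
The test now exercises the reworked scripting API: one script handle is created, individual Lua functions are loaded and called by name, and results come back through the table each function returns rather than by mutating the call arguments. A condensed positive-path sketch; every call below appears in the test above, error checks omitted for brevity:

#include <zebra.h>
#include "lib/frrscript.h"
#include "lib/frrlua.h"

static void run_script1_sketch(void)
{
	long long a = 100, b = 200;
	struct frrscript *fs = frrscript_new("script1");

	frrscript_load(fs, "foo", NULL);                  /* bind "foo" */
	frrscript_call(fs, "foo", ("a", &a), ("b", &b));  /* a, b updated in place */

	frrscript_load(fs, "bar", NULL);
	frrscript_call(fs, "bar", ("a", &a), ("b", &b));
	long long *c = frrscript_get_result(fs, "bar", "c", lua_tointegerp);
	/* values from the returned table are fetched by key and must be freed */
	XFREE(MTYPE_SCRIPT_RES, c);

	frrscript_delete(fs);
}
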
diff --git a/tests/topotests/bgp-evpn-overlay-index-gateway/test_bgp_evpn_overlay_index_gateway.py b/tests/topotests/bgp-evpn-overlay-index-gateway/test_bgp_evpn_overlay_index_gateway.py
index fbce2809e0..6728f76004 100755
--- a/tests/topotests/bgp-evpn-overlay-index-gateway/test_bgp_evpn_overlay_index_gateway.py
+++ b/tests/topotests/bgp-evpn-overlay-index-gateway/test_bgp_evpn_overlay_index_gateway.py
@@ -77,6 +77,9 @@ from lib.common_config import (
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
+
#Global variables
PES = ['PE1', 'PE2']
HOSTS = ['host1', 'host2']
diff --git a/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py b/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py
index 089b1acb1c..9f26978259 100644
--- a/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py
+++ b/tests/topotests/bgp_aggregate_address_topo1/test_bgp_aggregate_address_topo1.py
@@ -42,6 +42,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class BgpAggregateAddressTopo1(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py
index 485a76c6b2..4753c49397 100644
--- a/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py
+++ b/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py
@@ -1135,9 +1135,9 @@ def test_bgp_with_loopback_with_same_subnet_p1(request):
protocol = "bgp"
for addr_type in ADDR_TYPES:
result = verify_fib_routes(tgen, addr_type, dut, input_dict_r1)
- assert result is not True, "Testcase {} : Failed \n".format(tc_name)
+ assert result is not True, "Testcase {} : Failed \n"
"Expected behavior: routes should not present in fib \n"
- "Error: {}".format(result)
+ "Error: {}".format(tc_name, result)
step("Verify Ipv4 and Ipv6 network installed in r3 RIB but not in FIB")
input_dict_r3 = {
@@ -1152,9 +1152,9 @@ def test_bgp_with_loopback_with_same_subnet_p1(request):
protocol = "bgp"
for addr_type in ADDR_TYPES:
result = verify_fib_routes(tgen, addr_type, dut, input_dict_r1)
- assert result is not True, "Testcase {} : Failed \n".format(tc_name)
+ assert result is not True, "Testcase {} : Failed \n"
"Expected behavior: routes should not present in fib \n"
- "Error: {}".format(result)
+ "Error: {}".format(tc_name, result)
write_test_footer(tc_name)
diff --git a/tests/topotests/bgp_community_alias/test_bgp-community-alias.py b/tests/topotests/bgp_community_alias/test_bgp-community-alias.py
index c41ba810f1..6aadff1cfa 100644
--- a/tests/topotests/bgp_community_alias/test_bgp-community-alias.py
+++ b/tests/topotests/bgp_community_alias/test_bgp-community-alias.py
@@ -40,6 +40,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py b/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py
index 95e63c617e..9f449d7979 100644
--- a/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py
+++ b/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py
@@ -59,7 +59,7 @@ from mininet.topo import Topo
from lib.common_config import step
from time import sleep
-pytestmark = [pytest.mark.bgpd, pytest.mark.bgpd]
+pytestmark = [pytest.mark.bgpd]
class TemplateTopo(Topo):
diff --git a/tests/topotests/bgp_conditional_advertisement/test_bgp_conditional_advertisement.py b/tests/topotests/bgp_conditional_advertisement/test_bgp_conditional_advertisement.py
index 0e31ab1995..44f54c7b51 100644
--- a/tests/topotests/bgp_conditional_advertisement/test_bgp_conditional_advertisement.py
+++ b/tests/topotests/bgp_conditional_advertisement/test_bgp_conditional_advertisement.py
@@ -139,6 +139,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class BgpConditionalAdvertisementTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py b/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py
index 28117b7fe4..6ed7023044 100644
--- a/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py
+++ b/tests/topotests/bgp_default_afi_safi/test_bgp-default-afi-safi.py
@@ -47,6 +47,8 @@ from lib.topolog import logger
from mininet.topo import Topo
from lib.common_config import step
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_default_route/test_bgp_default-originate.py b/tests/topotests/bgp_default_route/test_bgp_default-originate.py
index 19632162b4..6fbdfbe78a 100644
--- a/tests/topotests/bgp_default_route/test_bgp_default-originate.py
+++ b/tests/topotests/bgp_default_route/test_bgp_default-originate.py
@@ -38,6 +38,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_default_route_route_map_match/test_bgp_default-originate_route-map_match.py b/tests/topotests/bgp_default_route_route_map_match/test_bgp_default-originate_route-map_match.py
index 089c9a964e..e7e3512b17 100644
--- a/tests/topotests/bgp_default_route_route_map_match/test_bgp_default-originate_route-map_match.py
+++ b/tests/topotests/bgp_default_route_route_map_match/test_bgp_default-originate_route-map_match.py
@@ -38,6 +38,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_default_route_route_map_match2/test_bgp_default-originate_route-map_match2.py b/tests/topotests/bgp_default_route_route_map_match2/test_bgp_default-originate_route-map_match2.py
index 42a6b6edf6..5852ac268b 100644
--- a/tests/topotests/bgp_default_route_route_map_match2/test_bgp_default-originate_route-map_match2.py
+++ b/tests/topotests/bgp_default_route_route_map_match2/test_bgp_default-originate_route-map_match2.py
@@ -41,6 +41,8 @@ from lib.topolog import logger
from mininet.topo import Topo
from lib.common_config import step
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_default_route_route_map_match_set/test_bgp_default-originate_route-map_match_set.py b/tests/topotests/bgp_default_route_route_map_match_set/test_bgp_default-originate_route-map_match_set.py
index 12d1d01bfb..e2fa89fccb 100644
--- a/tests/topotests/bgp_default_route_route_map_match_set/test_bgp_default-originate_route-map_match_set.py
+++ b/tests/topotests/bgp_default_route_route_map_match_set/test_bgp_default-originate_route-map_match_set.py
@@ -40,6 +40,9 @@ from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
diff --git a/tests/topotests/bgp_default_route_route_map_set/test_bgp_default-originate_route-map_set.py b/tests/topotests/bgp_default_route_route_map_set/test_bgp_default-originate_route-map_set.py
index 2622c33f5b..be87dc61cf 100644
--- a/tests/topotests/bgp_default_route_route_map_set/test_bgp_default-originate_route-map_set.py
+++ b/tests/topotests/bgp_default_route_route_map_set/test_bgp_default-originate_route-map_set.py
@@ -38,6 +38,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_distance_change/test_bgp_distance_change.py b/tests/topotests/bgp_distance_change/test_bgp_distance_change.py
index f338d52e70..bf26714087 100644
--- a/tests/topotests/bgp_distance_change/test_bgp_distance_change.py
+++ b/tests/topotests/bgp_distance_change/test_bgp_distance_change.py
@@ -49,6 +49,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_dont_capability_negogiate/__init__.py b/tests/topotests/bgp_dont_capability_negogiate/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_dont_capability_negogiate/__init__.py
diff --git a/tests/topotests/bgp_dont_capability_negogiate/r1/bgpd.conf b/tests/topotests/bgp_dont_capability_negogiate/r1/bgpd.conf
new file mode 100644
index 0000000000..b429efe076
--- /dev/null
+++ b/tests/topotests/bgp_dont_capability_negogiate/r1/bgpd.conf
@@ -0,0 +1,6 @@
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 dont-capability-negotiate
+!
diff --git a/tests/topotests/bgp_dont_capability_negogiate/r1/zebra.conf b/tests/topotests/bgp_dont_capability_negogiate/r1/zebra.conf
new file mode 100644
index 0000000000..b29940f46a
--- /dev/null
+++ b/tests/topotests/bgp_dont_capability_negogiate/r1/zebra.conf
@@ -0,0 +1,4 @@
+!
+int r1-eth0
+ ip address 192.168.1.1/24
+!
diff --git a/tests/topotests/bgp_dont_capability_negogiate/r2/bgpd.conf b/tests/topotests/bgp_dont_capability_negogiate/r2/bgpd.conf
new file mode 100644
index 0000000000..4af2cd6a80
--- /dev/null
+++ b/tests/topotests/bgp_dont_capability_negogiate/r2/bgpd.conf
@@ -0,0 +1,7 @@
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_dont_capability_negogiate/r2/zebra.conf b/tests/topotests/bgp_dont_capability_negogiate/r2/zebra.conf
new file mode 100644
index 0000000000..dc15cf756a
--- /dev/null
+++ b/tests/topotests/bgp_dont_capability_negogiate/r2/zebra.conf
@@ -0,0 +1,7 @@
+!
+int lo
+ ip address 172.16.16.1/32
+!
+int r2-eth0
+ ip address 192.168.1.2/24
+!
diff --git a/tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py b/tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py
new file mode 100644
index 0000000000..398fa57ba9
--- /dev/null
+++ b/tests/topotests/bgp_dont_capability_negogiate/test_bgp_dont_capability_negotiate.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2021 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test that a BGP session is established when at least one peer
+sets `dont-capability-negotiate`.
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+import functools
+
+pytestmark = pytest.mark.bgpd
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+
+pytestmark = [pytest.mark.bgpd]
+
+
+class TemplateTopo(Topo):
+ def build(self, *_args, **_opts):
+ tgen = get_topogen(self)
+
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+
+def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_dont_capability_negotiate():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r1"]
+
+ def _bgp_converge(router):
+ output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast summary json"))
+ expected = {
+ "peers": {
+ "192.168.1.2": {
+ "pfxRcd": 2,
+ "pfxSnt": 2,
+ "state": "Established",
+ "peerState": "OK",
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge, router)
+ success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Can't converge with dont-capability-negotiate"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py b/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py
index 3b99065fe0..6db2697e75 100644
--- a/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py
+++ b/tests/topotests/bgp_ebgp_common_subnet_nexthop_unchanged/test_bgp-ebgp-common-subnet-nexthop-unchanged.py
@@ -51,6 +51,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py
index fa155dd5fe..2731d37fb0 100644
--- a/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py
+++ b/tests/topotests/bgp_ebgp_requires_policy/test_bgp_ebgp_requires_policy.py
@@ -57,6 +57,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_evpn_mh/test_evpn_mh.py b/tests/topotests/bgp_evpn_mh/test_evpn_mh.py
index f389632b1e..2dcf70f14a 100644
--- a/tests/topotests/bgp_evpn_mh/test_evpn_mh.py
+++ b/tests/topotests/bgp_evpn_mh/test_evpn_mh.py
@@ -50,6 +50,9 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
+
+
#####################################################
##
## Network Topology Definition
@@ -594,14 +597,23 @@ def test_evpn_ead_update():
def ping_anycast_gw(tgen):
- local_host = tgen.gears["hostd11"]
- remote_host = tgen.gears["hostd21"]
-
# ping the anycast gw from the local and remote hosts to populate
# the mac address on the PEs
- cmd_str = "arping -I torbond -c 1 45.0.0.1"
- local_host.run(cmd_str)
- remote_host.run(cmd_str)
+ script_path = os.path.abspath(os.path.join(CWD, "../lib/scapy_sendpkt.py"))
+ intf = "torbond"
+ ipaddr = "45.0.0.1"
+ ping_cmd = [
+ script_path,
+ "--imports=Ether,ARP",
+ "--interface=" + intf,
+ "'Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=\"{}\")'".format(ipaddr)
+ ]
+ for name in ("hostd11", "hostd21"):
+ host = tgen.net[name]
+ stdout = host.cmd(ping_cmd)
+ stdout = stdout.strip()
+ if stdout:
+ host.logger.debug("%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout)
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
diff --git a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py
index 36605d44f0..59024f7b71 100644
--- a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py
+++ b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py
@@ -46,6 +46,7 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
class BGPEVPNTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py
index ee57b9c479..330ae5e437 100644
--- a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py
+++ b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py
@@ -139,6 +139,9 @@ from lib.common_config import (
required_linux_kernel_version,
)
+pytestmark = [pytest.mark.bgpd]
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/bgp_gr_topojson_topo1.json".format(CWD)
try:
diff --git a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py
index b6f8bf4cd9..e7ce216042 100644
--- a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py
+++ b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py
@@ -138,6 +138,9 @@ from lib.common_config import (
required_linux_kernel_version,
)
+pytestmark = [pytest.mark.bgpd]
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/bgp_gr_topojson_topo2.json".format(CWD)
try:
diff --git a/tests/topotests/bgp_gshut/test_bgp_gshut.py b/tests/topotests/bgp_gshut/test_bgp_gshut.py
index fe945a4565..77f86a0bb8 100644
--- a/tests/topotests/bgp_gshut/test_bgp_gshut.py
+++ b/tests/topotests/bgp_gshut/test_bgp_gshut.py
@@ -75,6 +75,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py b/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py
index 868aec9f3e..fcfeaab613 100644
--- a/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py
+++ b/tests/topotests/bgp_gshut_topo1/test_ebgp_gshut_topo1.py
@@ -75,6 +75,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/ebgp_gshut_topo1.json".format(CWD)
try:
diff --git a/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py b/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py
index 69f4916374..d83e9e25a1 100644
--- a/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py
+++ b/tests/topotests/bgp_gshut_topo1/test_ibgp_gshut_topo1.py
@@ -75,6 +75,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/ibgp_gshut_topo1.json".format(CWD)
try:
diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py
index 31fbdcd4b5..69eba23e0f 100644
--- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py
+++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py
@@ -73,6 +73,9 @@ from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd]
+
+
# Save the Current Working Directory to find configuration files.
CWD = os_path.dirname(os_path.realpath(__file__))
sys.path.append(os_path.join(CWD, "../"))
diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py
index 84d9c48f35..b033c7e5cd 100644
--- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py
+++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py
@@ -97,6 +97,9 @@ from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd]
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/bgp_large_community_topo_2.json".format(CWD)
diff --git a/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py
index f09ff20651..3fcc3bec9a 100644
--- a/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py
+++ b/tests/topotests/bgp_link_bw_ip/test_bgp_linkbw_ip.py
@@ -46,6 +46,9 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
+
"""
This topology is for validating one of the primary use cases for
weighted ECMP (a.k.a. Unequal cost multipath) using BGP link-bandwidth:
diff --git a/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py b/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py
index a3ca1408e2..a7959fe61b 100755
--- a/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py
+++ b/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py
@@ -50,11 +50,14 @@ sys.path.append(os.path.join(CWD, "../"))
from lib.topogen import Topogen, get_topogen
from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import linux_intf_config_from_json
from lib.common_config import start_topology
from lib.topotest import router_json_cmp, run_and_expect
from mininet.topo import Topo
from functools import partial
+pytestmark = [pytest.mark.bgpd]
+
LISTEN_ADDRESSES = {
"r1": ["10.0.0.1"],
@@ -94,6 +97,9 @@ def setup_module(mod):
)
start_topology(tgen)
+
+ linux_intf_config_from_json(tgen, topo)
+
build_config_from_json(tgen, topo)
diff --git a/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py b/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py
index 32e7a4df61..7c5ed87dd0 100644
--- a/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py
+++ b/tests/topotests/bgp_local_as_private_remove/test_bgp_local_as_private_remove.py
@@ -43,6 +43,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py b/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py
index 8494653dfe..0fde32a68b 100644
--- a/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py
+++ b/tests/topotests/bgp_maximum_prefix_invalid_update/test_bgp_maximum_prefix_invalid_update.py
@@ -47,6 +47,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py b/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py
index b99664e700..5c93910788 100644
--- a/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py
+++ b/tests/topotests/bgp_maximum_prefix_out/test_bgp_maximum_prefix_out.py
@@ -43,6 +43,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py b/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py
index 7ea5a24fd7..c9a93bd75f 100644
--- a/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py
+++ b/tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py
@@ -145,6 +145,7 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Reading the data from JSON File for topology creation
jsonFile = "{}/bgp_multi_vrf_topo1.json".format(CWD)
diff --git a/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py b/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py
index d8815a0d39..01e90fb4b8 100644
--- a/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py
+++ b/tests/topotests/bgp_multiview_topo1/test_bgp_multiview_topo1.py
@@ -81,6 +81,10 @@ from functools import partial
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lib import topotest
+
+pytestmark = [pytest.mark.bgpd]
+
+
fatal_error = ""
diff --git a/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py b/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py
index a9541a55c5..a591c2f3f4 100644
--- a/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py
+++ b/tests/topotests/bgp_path_attributes_topo1/test_bgp_path_attributes.py
@@ -94,6 +94,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/bgp_path_attributes.json".format(CWD)
diff --git a/tests/topotests/bgp_peer_type_multipath_relax/test_bgp_peer-type_multipath-relax.py b/tests/topotests/bgp_peer_type_multipath_relax/test_bgp_peer-type_multipath-relax.py
index 39a0beeb11..743fcf7b3a 100755
--- a/tests/topotests/bgp_peer_type_multipath_relax/test_bgp_peer-type_multipath-relax.py
+++ b/tests/topotests/bgp_peer_type_multipath_relax/test_bgp_peer-type_multipath-relax.py
@@ -73,6 +73,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
class PeerTypeRelaxTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py b/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py
index 22952f645c..10dee0f77b 100644
--- a/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py
+++ b/tests/topotests/bgp_prefix_list_topo1/test_prefix_lists.py
@@ -73,6 +73,9 @@ from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/prefix_lists.json".format(CWD)
diff --git a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py
index ceac84709b..fffe135b77 100644
--- a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py
+++ b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py
@@ -41,6 +41,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, **_opts):
diff --git a/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py b/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py
index 25362530d4..703dcd7e2d 100755
--- a/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py
+++ b/tests/topotests/bgp_prefix_sid2/test_bgp_prefix_sid2.py
@@ -41,6 +41,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, **_opts):
diff --git a/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py b/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py
index d514dccd4a..c644d2104f 100644
--- a/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py
+++ b/tests/topotests/bgp_reject_as_sets/test_bgp_reject_as_sets.py
@@ -50,6 +50,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py b/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py
index 0467bf1bfb..ecf1ed521c 100644
--- a/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py
+++ b/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py
@@ -70,6 +70,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/bgp_aggregation.json".format(CWD)
try:
diff --git a/tests/topotests/bgp_route_map/test_route_map_topo1.py b/tests/topotests/bgp_route_map/test_route_map_topo1.py
index 74172501db..7de56849c8 100644
--- a/tests/topotests/bgp_route_map/test_route_map_topo1.py
+++ b/tests/topotests/bgp_route_map/test_route_map_topo1.py
@@ -67,6 +67,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
#################################
# TOPOLOGY
#################################
diff --git a/tests/topotests/bgp_route_map/test_route_map_topo2.py b/tests/topotests/bgp_route_map/test_route_map_topo2.py
index 958eceba62..230a89ace1 100644
--- a/tests/topotests/bgp_route_map/test_route_map_topo2.py
+++ b/tests/topotests/bgp_route_map/test_route_map_topo2.py
@@ -149,6 +149,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/bgp_route_map_topo2.json".format(CWD)
diff --git a/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py b/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py
index 6a604765ca..664c9dc91a 100644
--- a/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py
+++ b/tests/topotests/bgp_rr_ibgp/test_bgp_rr_ibgp_topo1.py
@@ -49,6 +49,9 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
+
#####################################################
##
## Network Topology Definition
diff --git a/tests/topotests/bgp_set_local_preference_add_subtract/test_bgp_set_local-preference_add_subtract.py b/tests/topotests/bgp_set_local_preference_add_subtract/test_bgp_set_local-preference_add_subtract.py
index af64648951..b4af911d91 100644
--- a/tests/topotests/bgp_set_local_preference_add_subtract/test_bgp_set_local-preference_add_subtract.py
+++ b/tests/topotests/bgp_set_local_preference_add_subtract/test_bgp_set_local-preference_add_subtract.py
@@ -44,6 +44,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py
index 2d80c66b0b..3251484514 100755
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf/test_bgp_srv6l3vpn_to_bgp_vrf.py
@@ -39,6 +39,8 @@ from lib.topolog import logger
from lib.common_config import required_linux_kernel_version
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class Topology(Topo):
"""
diff --git a/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py b/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py
index c75055c26f..476f6b556b 100644
--- a/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py
+++ b/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py
@@ -40,6 +40,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_tcp_mss/test_bgp_tcp_mss.py b/tests/topotests/bgp_tcp_mss/test_bgp_tcp_mss.py
index 7500c3b3ad..cb1d28cc06 100644
--- a/tests/topotests/bgp_tcp_mss/test_bgp_tcp_mss.py
+++ b/tests/topotests/bgp_tcp_mss/test_bgp_tcp_mss.py
@@ -51,6 +51,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_update_delay/test_bgp_update_delay.py b/tests/topotests/bgp_update_delay/test_bgp_update_delay.py
index 71bd58bf73..2972a25f38 100644
--- a/tests/topotests/bgp_update_delay/test_bgp_update_delay.py
+++ b/tests/topotests/bgp_update_delay/test_bgp_update_delay.py
@@ -73,6 +73,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py
index 83682fb36d..d6f1058a98 100644
--- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py
+++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo1.py
@@ -81,6 +81,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/bgp_vrf_dynamic_route_leak_topo1.json".format(CWD)
try:
diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py
index 6e7495d929..f701529b52 100644
--- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py
+++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py
@@ -78,6 +78,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/bgp_vrf_dynamic_route_leak_topo2.json".format(CWD)
try:
diff --git a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py
index a17819f747..57ba87e887 100644
--- a/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py
+++ b/tests/topotests/bgp_vrf_lite_ipv6_rtadv/test_bgp_vrf_lite_ipv6_rtadv.py
@@ -47,6 +47,8 @@ from lib.common_config import required_linux_kernel_version
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class BGPIPV6RTADVVRFTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py
index 30bb9595b7..9889e1cdd5 100644
--- a/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py
+++ b/tests/topotests/bgp_vrf_netns/test_bgp_vrf_netns_topo.py
@@ -44,6 +44,9 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
+
total_ebgp_peers = 1
CustomizeVrfWithNetns = True
diff --git a/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py b/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py
index 71f64e9b70..fcec0c23af 100644
--- a/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py
+++ b/tests/topotests/bgp_vrf_route_leak_basic/test_bgp-vrf-route-leak-basic.py
@@ -41,6 +41,8 @@ from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class BGPVRFTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/evpn_pim_1/leaf1/pimd.conf b/tests/topotests/evpn_pim_1/leaf1/pimd.conf
index 293e252086..d85f33d1fc 100644
--- a/tests/topotests/evpn_pim_1/leaf1/pimd.conf
+++ b/tests/topotests/evpn_pim_1/leaf1/pimd.conf
@@ -2,6 +2,7 @@ debug pim events
debug pim nht
debug pim zebra
ip pim rp 192.168.100.1
+ip pim join-prune-interval 5
!
int lo
ip pim
diff --git a/tests/topotests/evpn_pim_1/leaf2/pimd.conf b/tests/topotests/evpn_pim_1/leaf2/pimd.conf
index 08d5a19a2a..d775b800b3 100644
--- a/tests/topotests/evpn_pim_1/leaf2/pimd.conf
+++ b/tests/topotests/evpn_pim_1/leaf2/pimd.conf
@@ -1,4 +1,5 @@
ip pim rp 192.168.100.1
+ip pim join-prune-interval 5
!
int lo
ip pim
diff --git a/tests/topotests/evpn_pim_1/spine/pimd.conf b/tests/topotests/evpn_pim_1/spine/pimd.conf
index 56adda5cc4..12c6d6f85c 100644
--- a/tests/topotests/evpn_pim_1/spine/pimd.conf
+++ b/tests/topotests/evpn_pim_1/spine/pimd.conf
@@ -1,4 +1,5 @@
ip pim rp 192.168.100.1
+ip pim join-prune-interval 5
!
int lo
ip pim
diff --git a/tests/topotests/evpn_pim_1/test_evpn_pim_topo1.py b/tests/topotests/evpn_pim_1/test_evpn_pim_topo1.py
index 260a197aca..b1f5daef1e 100644
--- a/tests/topotests/evpn_pim_1/test_evpn_pim_topo1.py
+++ b/tests/topotests/evpn_pim_1/test_evpn_pim_topo1.py
@@ -49,6 +49,9 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
+
+
#####################################################
##
## Network Topology Definition
diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py
index 46e21857c8..09d66baa79 100644
--- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py
+++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py
@@ -85,6 +85,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/evpn_type5_chaos_topo1.json".format(CWD)
try:
diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
index 1a399ab32e..521f2335b4 100644
--- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
+++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
@@ -91,6 +91,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/evpn_type5_topo1.json".format(CWD)
try:
diff --git a/tests/topotests/isis_lsp_bits_topo1/test_isis_lsp_bits_topo1.py b/tests/topotests/isis_lsp_bits_topo1/test_isis_lsp_bits_topo1.py
index 27dc1073c6..70dcff035f 100755
--- a/tests/topotests/isis_lsp_bits_topo1/test_isis_lsp_bits_topo1.py
+++ b/tests/topotests/isis_lsp_bits_topo1/test_isis_lsp_bits_topo1.py
@@ -84,6 +84,9 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.isisd]
+
+
# Global multi-dimensional dictionary containing all expected outputs
outputs = {}
diff --git a/tests/topotests/isis_rlfa_topo1/test_isis_rlfa_topo1.py b/tests/topotests/isis_rlfa_topo1/test_isis_rlfa_topo1.py
index 9ad41c5934..ded1a4cc22 100755
--- a/tests/topotests/isis_rlfa_topo1/test_isis_rlfa_topo1.py
+++ b/tests/topotests/isis_rlfa_topo1/test_isis_rlfa_topo1.py
@@ -82,7 +82,7 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
-pytestmark = [pytest.mark.isisd]
+pytestmark = [pytest.mark.isisd, pytest.mark.ldpd]
# Global multi-dimensional dictionary containing all expected outputs
outputs = {}
diff --git a/tests/topotests/isis_snmp/test_isis_snmp.py b/tests/topotests/isis_snmp/test_isis_snmp.py
index 04e043847d..2cd07299b0 100755
--- a/tests/topotests/isis_snmp/test_isis_snmp.py
+++ b/tests/topotests/isis_snmp/test_isis_snmp.py
@@ -82,6 +82,8 @@ from lib.snmptest import SnmpTester
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.isisd, pytest.mark.ldpd, pytest.mark.snmp]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py b/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py
index f47d906157..8052316d73 100644
--- a/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py
+++ b/tests/topotests/ldp_snmp/test_ldp_snmp_topo1.py
@@ -81,6 +81,7 @@ from lib.snmptest import SnmpTester
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.ldpd, pytest.mark.isisd, pytest.mark.snmp]
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/ldp_sync_isis_topo1/test_ldp_sync_isis_topo1.py b/tests/topotests/ldp_sync_isis_topo1/test_ldp_sync_isis_topo1.py
index 331e6fafd4..44b34c485f 100644
--- a/tests/topotests/ldp_sync_isis_topo1/test_ldp_sync_isis_topo1.py
+++ b/tests/topotests/ldp_sync_isis_topo1/test_ldp_sync_isis_topo1.py
@@ -80,6 +80,8 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.isisd, pytest.mark.ldpd]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 6a02e50127..07bb5153ab 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -1859,7 +1859,7 @@ def create_interfaces_cfg(tgen, topo, build=False):
)
if "ospf6" in data:
interface_data += _create_interfaces_ospf_cfg(
- "ospf6", c_data, data, ospf_keywords
+ "ospf6", c_data, data, ospf_keywords + ["area"]
)
result = create_common_configuration(
diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py
index dc9fe0fcca..40da7c8fbe 100644
--- a/tests/topotests/lib/ospf.py
+++ b/tests/topotests/lib/ospf.py
@@ -28,6 +28,7 @@ from time import sleep
from lib.topolog import logger
from lib.topotest import frr_unicode
from ipaddress import IPv6Address
+
# Import common_config to use commomnly used APIs
from lib.common_config import (
create_common_configuration,
@@ -89,8 +90,7 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru
logger.debug("Router %s: 'ospf' not present in input_dict", router)
continue
- result = __create_ospf_global(
- tgen, input_dict, router, build, load_config)
+ result = __create_ospf_global(tgen, input_dict, router, build, load_config)
if result is True:
ospf_data = input_dict[router]["ospf"]
@@ -100,7 +100,8 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru
continue
result = __create_ospf_global(
- tgen, input_dict, router, build, load_config, ospf='ospf6')
+ tgen, input_dict, router, build, load_config, ospf="ospf6"
+ )
if result is True:
ospf_data = input_dict[router]["ospf6"]
@@ -172,7 +173,6 @@ def __create_ospf_global(
config_data.append(cmd)
-
# router id
router_id = ospf_data.setdefault("router_id", None)
del_router_id = ospf_data.setdefault("del_router_id", False)
@@ -187,8 +187,7 @@ def __create_ospf_global(
if del_log_adj_changes:
config_data.append("no log-adjacency-changes detail")
if log_adj_changes:
- config_data.append("log-adjacency-changes {}".format(
- log_adj_changes))
+ config_data.append("log-adjacency-changes {}".format(log_adj_changes))
# aggregation timer
aggr_timer = ospf_data.setdefault("aggr_timer", None)
@@ -196,8 +195,7 @@ def __create_ospf_global(
if del_aggr_timer:
config_data.append("no aggregation timer")
if aggr_timer:
- config_data.append("aggregation timer {}".format(
- aggr_timer))
+ config_data.append("aggregation timer {}".format(aggr_timer))
# maximum path information
ecmp_data = ospf_data.setdefault("maximum-paths", {})
@@ -245,12 +243,13 @@ def __create_ospf_global(
cmd = "no {}".format(cmd)
config_data.append(cmd)
- #def route information
+ # def route information
def_rte_data = ospf_data.setdefault("default-information", {})
if def_rte_data:
if "originate" not in def_rte_data:
- logger.debug("Router %s: 'originate key' not present in "
- "input_dict", router)
+ logger.debug(
+ "Router %s: 'originate key' not present in " "input_dict", router
+ )
else:
cmd = "default-information originate"
@@ -261,12 +260,10 @@ def __create_ospf_global(
cmd = cmd + " metric {}".format(def_rte_data["metric"])
if "metric-type" in def_rte_data:
- cmd = cmd + " metric-type {}".format(def_rte_data[
- "metric-type"])
+ cmd = cmd + " metric-type {}".format(def_rte_data["metric-type"])
if "route-map" in def_rte_data:
- cmd = cmd + " route-map {}".format(def_rte_data[
- "route-map"])
+ cmd = cmd + " route-map {}".format(def_rte_data["route-map"])
del_action = def_rte_data.setdefault("delete", False)
if del_action:
@@ -288,19 +285,19 @@ def __create_ospf_global(
config_data.append(cmd)
try:
- if "area" in input_dict[router]['links'][neighbor][
- 'ospf6']:
+ if "area" in input_dict[router]["links"][neighbor]["ospf6"]:
iface = input_dict[router]["links"][neighbor]["interface"]
cmd = "interface {} area {}".format(
- iface, input_dict[router]['links'][neighbor][
- 'ospf6']['area'])
- if input_dict[router]['links'][neighbor].setdefault(
- "delete", False):
+ iface,
+ input_dict[router]["links"][neighbor]["ospf6"]["area"],
+ )
+ if input_dict[router]["links"][neighbor].setdefault(
+ "delete", False
+ ):
cmd = "no {}".format(cmd)
config_data.append(cmd)
except KeyError:
- pass
-
+ pass
# summary information
summary_data = ospf_data.setdefault("summary-address", {})
@@ -420,6 +417,7 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config=
True or False
"""
logger.debug("Enter lib config_ospf_interface")
+ result = False
if not input_dict:
input_dict = deepcopy(topo)
else:
@@ -502,7 +500,7 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config=
# interface ospf mtu
if data_ospf_mtu:
cmd = "ip ospf mtu-ignore"
- if 'del_action' in ospf_data:
+ if "del_action" in ospf_data:
cmd = "no {}".format(cmd)
config_data.append(cmd)
@@ -543,8 +541,7 @@ def clear_ospf(tgen, router, ospf=None):
version = "ip"
cmd = "clear {} ospf interface".format(version)
- logger.info(
- "Clearing ospf process on router %s.. using command '%s'", router, cmd)
+ logger.info("Clearing ospf process on router %s.. using command '%s'", router, cmd)
run_frr_cmd(rnode, cmd)
logger.debug("Exiting lib API: clear_ospf()")
@@ -774,7 +771,7 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False, expec
################################
# Verification procs
################################
-@retry(retry_timeout=20)
+@retry(retry_timeout=50)
def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
"""
This API is to verify ospf neighborship by running
@@ -825,105 +822,133 @@ def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
if input_dict:
for router, rnode in tgen.routers().items():
- if 'ospf6' not in topo['routers'][router]:
+ if "ospf6" not in topo["routers"][router]:
continue
if dut is not None and dut != router:
continue
logger.info("Verifying OSPF neighborship on router %s:", router)
- show_ospf_json = run_frr_cmd(rnode,
- "show ipv6 ospf neighbor json", isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf neighbor json", isjson=True
+ )
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
errormsg = "OSPF6 is not running"
return errormsg
ospf_data_list = input_dict[router]["ospf6"]
- ospf_nbr_list = ospf_data_list['neighbors']
+ ospf_nbr_list = ospf_data_list["neighbors"]
for ospf_nbr, nbr_data in ospf_nbr_list.items():
- data_ip = data_rid = topo['routers'][ospf_nbr]['ospf6']['router_id']
+
+ try:
+ data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
+ except KeyError:
+ data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][
+ "router_id"
+ ]
+
if ospf_nbr in data_ip:
nbr_details = nbr_data[ospf_nbr]
elif lan:
- for switch in topo['switches']:
- if 'ospf6' in topo['switches'][switch]['links'][router]:
+ for switch in topo["switches"]:
+ if "ospf6" in topo["switches"][switch]["links"][router]:
neighbor_ip = data_ip
else:
continue
else:
- neighbor_ip = data_ip[router]['ipv6'].split("/")[0]
+ neighbor_ip = data_ip[router]["ipv6"].split("/")[0]
nh_state = None
neighbor_ip = neighbor_ip.lower()
nbr_rid = data_rid
- get_index_val = dict((d['neighborId'], dict( \
- d, index=index)) for (index, d) in enumerate( \
- show_ospf_json['neighbors']))
+ get_index_val = dict(
+ (d["neighborId"], dict(d, index=index))
+ for (index, d) in enumerate(show_ospf_json["neighbors"])
+ )
try:
- nh_state = get_index_val.get(neighbor_ip)['state']
- intf_state = get_index_val.get(neighbor_ip)['ifState']
+ nh_state = get_index_val.get(neighbor_ip)["state"]
+ intf_state = get_index_val.get(neighbor_ip)["ifState"]
except TypeError:
- errormsg = "[DUT: {}] OSPF peer {} missing,from "\
- "{} ".format(router,
- nbr_rid, ospf_nbr)
+ errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
+ router, nbr_rid, ospf_nbr
+ )
return errormsg
- nbr_state = nbr_data.setdefault("state",None)
- nbr_role = nbr_data.setdefault("role",None)
+ nbr_state = nbr_data.setdefault("state", None)
+ nbr_role = nbr_data.setdefault("role", None)
if nbr_state:
if nbr_state == nh_state:
- logger.info("[DUT: {}] OSPF6 Nbr is {}:{} State {}".format
- (router, ospf_nbr, nbr_rid, nh_state))
+ logger.info(
+ "[DUT: {}] OSPF6 Nbr is {}:{} State {}".format(
+ router, ospf_nbr, nbr_rid, nh_state
+ )
+ )
result = True
else:
- errormsg = ("[DUT: {}] OSPF6 is not Converged, neighbor"
- " state is {} , Expected state is {}".format(router,
- nh_state, nbr_state))
+ errormsg = (
+ "[DUT: {}] OSPF6 is not Converged, neighbor"
+ " state is {} , Expected state is {}".format(
+ router, nh_state, nbr_state
+ )
+ )
return errormsg
if nbr_role:
if nbr_role == intf_state:
- logger.info("[DUT: {}] OSPF6 Nbr is {}: {} Role {}".format(
- router, ospf_nbr, nbr_rid, nbr_role))
+ logger.info(
+ "[DUT: {}] OSPF6 Nbr is {}: {} Role {}".format(
+ router, ospf_nbr, nbr_rid, nbr_role
+ )
+ )
else:
- errormsg = ("[DUT: {}] OSPF6 is not Converged with rid"
- "{}, role is {}, Expected role is {}".format(router,
- nbr_rid, intf_state, nbr_role))
+ errormsg = (
+ "[DUT: {}] OSPF6 is not Converged with rid"
+ "{}, role is {}, Expected role is {}".format(
+ router, nbr_rid, intf_state, nbr_role
+ )
+ )
return errormsg
continue
else:
for router, rnode in tgen.routers().items():
- if 'ospf6' not in topo['routers'][router]:
+ if "ospf6" not in topo["routers"][router]:
continue
if dut is not None and dut != router:
continue
logger.info("Verifying OSPF6 neighborship on router %s:", router)
- show_ospf_json = run_frr_cmd(rnode,
- "show ipv6 ospf neighbor json", isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf neighbor json", isjson=True
+ )
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
errormsg = "OSPF6 is not running"
return errormsg
ospf_data_list = topo["routers"][router]["ospf6"]
- ospf_neighbors = ospf_data_list['neighbors']
+ ospf_neighbors = ospf_data_list["neighbors"]
total_peer = 0
total_peer = len(ospf_neighbors.keys())
no_of_ospf_nbr = 0
- ospf_nbr_list = ospf_data_list['neighbors']
+ ospf_nbr_list = ospf_data_list["neighbors"]
no_of_peer = 0
for ospf_nbr, nbr_data in ospf_nbr_list.items():
- data_ip = data_rid = topo['routers'][ospf_nbr]['ospf6']['router_id']
+ try:
+ data_ip = data_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"]
+ except KeyError:
+ data_ip = data_rid = topo["routers"][nbr_data["nbr"]]["ospf6"][
+ "router_id"
+ ]
+
if ospf_nbr in data_ip:
nbr_details = nbr_data[ospf_nbr]
elif lan:
- for switch in topo['switches']:
- if 'ospf6' in topo['switches'][switch]['links'][router]:
+ for switch in topo["switches"]:
+ if "ospf6" in topo["switches"][switch]["links"][router]:
neighbor_ip = data_ip
else:
continue
@@ -933,26 +958,27 @@ def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
nh_state = None
neighbor_ip = neighbor_ip.lower()
nbr_rid = data_rid
- get_index_val = dict((d['neighborId'], dict( \
- d, index=index)) for (index, d) in enumerate( \
- show_ospf_json['neighbors']))
+ get_index_val = dict(
+ (d["neighborId"], dict(d, index=index))
+ for (index, d) in enumerate(show_ospf_json["neighbors"])
+ )
try:
- nh_state = get_index_val.get(neighbor_ip)['state']
- intf_state = get_index_val.get(neighbor_ip)['ifState']
+ nh_state = get_index_val.get(neighbor_ip)["state"]
+ intf_state = get_index_val.get(neighbor_ip)["ifState"]
except TypeError:
- errormsg = "[DUT: {}] OSPF peer {} missing,from "\
- "{} ".format(router,
- nbr_rid, ospf_nbr)
+ errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format(
+ router, nbr_rid, ospf_nbr
+ )
return errormsg
- if nh_state == 'Full':
+ if nh_state == "Full":
no_of_peer += 1
if no_of_peer == total_peer:
logger.info("[DUT: {}] OSPF6 is Converged".format(router))
result = True
else:
- errormsg = ("[DUT: {}] OSPF6 is not Converged".format(router))
+ errormsg = "[DUT: {}] OSPF6 is not Converged".format(router)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@@ -1491,7 +1517,7 @@ def verify_ospf_database(tgen, topo, dut, input_dict, expected=True):
@retry(retry_timeout=20)
-def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True):
+def verify_ospf_summary(tgen, topo, dut, input_dict, ospf=None, expected=True):
"""
This API is to verify ospf routes by running
show ip ospf interface command.
@@ -1502,7 +1528,6 @@ def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True):
* `topo` : topology descriptions
* `dut`: device under test
* `input_dict` : Input dict data, required when configuring from testcase
- * `expected` : expected results from API, by-default True
Usage
-----
@@ -1522,18 +1547,30 @@ def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True):
True or False (Error Message)
"""
- logger.debug("Entering lib API: verify_ospf_summary()")
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
router = dut
logger.info("Verifying OSPF summary on router %s:", router)
- if "ospf" not in topo["routers"][dut]:
- errormsg = "[DUT: {}] OSPF is not configured on the router.".format(router)
- return errormsg
-
rnode = tgen.routers()[dut]
- show_ospf_json = run_frr_cmd(rnode, "show ip ospf summary detail json", isjson=True)
+
+ if ospf:
+ if 'ospf6' not in topo['routers'][dut]:
+ errormsg = "[DUT: {}] OSPF6 is not configured on the router.".format(
+ router)
+ return errormsg
+
+ show_ospf_json = run_frr_cmd(rnode, "show ipv6 ospf summary detail json",
+ isjson=True)
+ else:
+ if 'ospf' not in topo['routers'][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(
+ router)
+ return errormsg
+
+ show_ospf_json = run_frr_cmd(rnode, "show ip ospf summary detail json",
+ isjson=True)
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
@@ -1542,35 +1579,31 @@ def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True):
# To find neighbor ip type
ospf_summary_data = input_dict
+
+ if ospf:
+ show_ospf_json = show_ospf_json['default']
+
for ospf_summ, summ_data in ospf_summary_data.items():
if ospf_summ not in show_ospf_json:
continue
- summary = ospf_summary_data[ospf_summ]["Summary address"]
+ summary = ospf_summary_data[ospf_summ]['Summary address']
+
if summary in show_ospf_json:
for summ in summ_data:
if summ_data[summ] == show_ospf_json[summary][summ]:
- logger.info(
- "[DUT: %s] OSPF summary %s:%s is %s",
- router,
- summary,
- summ,
- summ_data[summ],
- )
+ logger.info("[DUT: %s] OSPF summary %s:%s is %s",
+ router, summary, summ, summ_data[summ])
result = True
else:
- errormsg = (
- "[DUT: {}] OSPF summary {}:{} is %s, "
- "Expected is {}".format(
- router, summary, summ, show_ospf_json[summary][summ]
- )
- )
+ errormsg = ("[DUT: {}] OSPF summary {} : {} is {}, "
+ "Expected is {}".format(router, summary, summ,show_ospf_json[
+ summary][summ], summ_data[summ] ))
return errormsg
- logger.debug("Exiting API: verify_ospf_summary()")
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
-
@retry(retry_timeout=30)
def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
tag=None, metric=None, fib=None):
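verify_ospf_summary now takes an ospf flag: any truthy value switches the helper to "show ipv6 ospf summary detail json" and indexes into the "default" VRF of that output. A hedged sketch of an OSPFv3 call; the summary prefix and the "Metric-type" attribute are placeholders, and attribute keys must match whatever the detail JSON actually exposes:

    # Sketch only -- values are illustrative, tgen/topo come from the harness.
    input_dict = {
        "2011:0:20::/32": {
            "Summary address": "2011:0:20::/32",   # key used for the JSON lookup
            "Metric-type": "E2",                   # hypothetical attribute check
        }
    }
    result = verify_ospf_summary(tgen, topo, dut="r0", input_dict=input_dict,
                                 ospf="ospf6")
    assert result is True, result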
@@ -1627,31 +1660,34 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
found_routes = []
missing_routes = []
- if "static_routes" in input_dict[routerInput] or \
- "prefix" in input_dict[routerInput]:
+ if (
+ "static_routes" in input_dict[routerInput]
+ or "prefix" in input_dict[routerInput]
+ ):
if "prefix" in input_dict[routerInput]:
static_routes = input_dict[routerInput]["prefix"]
else:
static_routes = input_dict[routerInput]["static_routes"]
-
for static_route in static_routes:
cmd = "{}".format(command)
cmd = "{} json".format(cmd)
- ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True)
+ ospf_rib_json = run_frr_cmd(rnode, cmd, isjson=True)
# Fix for PR 2644182
try:
- ospf_rib_json = ospf_rib_json['routes']
+ ospf_rib_json = ospf_rib_json["routes"]
except KeyError:
pass
# Verifying output dictionary ospf_rib_json is not empty
if bool(ospf_rib_json) is False:
- errormsg = "[DUT: {}] No routes found in OSPF6 route " \
+ errormsg = (
+ "[DUT: {}] No routes found in OSPF6 route "
"table".format(router)
+ )
return errormsg
network = static_route["network"]
@@ -1659,7 +1695,6 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
_tag = static_route.setdefault("tag", None)
_rtype = static_route.setdefault("routeType", None)
-
# Generating IPs for verification
ip_list = generate_ips(network, no_of_ip)
st_found = False
@@ -1668,7 +1703,7 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
st_rt = str(ipaddress.ip_network(frr_unicode(st_rt)))
_addr_type = validate_ip_address(st_rt)
- if _addr_type != 'ipv6':
+ if _addr_type != "ipv6":
continue
if st_rt in ospf_rib_json:
@@ -1681,17 +1716,26 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
next_hop = [next_hop]
for mnh in range(0, len(ospf_rib_json[st_rt])):
- if 'fib' in ospf_rib_json[st_rt][
- mnh]["nextHops"][0]:
- found_hops.append([rib_r[
- "ip"] for rib_r in ospf_rib_json[
- st_rt][mnh]["nextHops"]])
+ if (
+ "fib"
+ in ospf_rib_json[st_rt][mnh]["nextHops"][0]
+ ):
+ found_hops.append(
+ [
+ rib_r["ip"]
+ for rib_r in ospf_rib_json[st_rt][mnh][
+ "nextHops"
+ ]
+ ]
+ )
if found_hops[0]:
- missing_list_of_nexthops = \
- set(found_hops[0]).difference(next_hop)
- additional_nexthops_in_required_nhs = \
- set(next_hop).difference(found_hops[0])
+ missing_list_of_nexthops = set(
+ found_hops[0]
+ ).difference(next_hop)
+ additional_nexthops_in_required_nhs = set(
+ next_hop
+ ).difference(found_hops[0])
if additional_nexthops_in_required_nhs:
logger.info(
@@ -1699,13 +1743,18 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
"%s is not active for route %s in "
"RIB of router %s\n",
additional_nexthops_in_required_nhs,
- st_rt, dut)
+ st_rt,
+ dut,
+ )
errormsg = (
"Nexthop {} is not active"
" for route {} in RIB of router"
" {}\n".format(
- additional_nexthops_in_required_nhs,
- st_rt, dut))
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
return errormsg
else:
nh_found = True
@@ -1713,98 +1762,118 @@ def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
elif next_hop and fib is None:
if type(next_hop) is not list:
next_hop = [next_hop]
- found_hops = [rib_r['nextHop'] for rib_r in
- ospf_rib_json[st_rt][
- "nextHops"]]
+ found_hops = [
+ rib_r["nextHop"]
+ for rib_r in ospf_rib_json[st_rt]["nextHops"]
+ ]
if found_hops:
- missing_list_of_nexthops = \
- set(found_hops).difference(next_hop)
- additional_nexthops_in_required_nhs = \
- set(next_hop).difference(found_hops)
+ missing_list_of_nexthops = set(
+ found_hops
+ ).difference(next_hop)
+ additional_nexthops_in_required_nhs = set(
+ next_hop
+ ).difference(found_hops)
if additional_nexthops_in_required_nhs:
logger.info(
- "Missing nexthop %s for route"\
- " %s in RIB of router %s\n", \
- additional_nexthops_in_required_nhs, \
- st_rt, dut)
- errormsg=("Nexthop {} is Missing for "\
- "route {} in RIB of router {}\n".format(
+ "Missing nexthop %s for route"
+ " %s in RIB of router %s\n",
additional_nexthops_in_required_nhs,
- st_rt, dut))
+ st_rt,
+ dut,
+ )
+ errormsg = (
+ "Nexthop {} is Missing for "
+ "route {} in RIB of router {}\n".format(
+ additional_nexthops_in_required_nhs,
+ st_rt,
+ dut,
+ )
+ )
return errormsg
else:
nh_found = True
if _rtype:
- if "destinationType" not in ospf_rib_json[
- st_rt]:
- errormsg = ("[DUT: {}]: destinationType missing"
- "for route {} in OSPF RIB \n".\
- format(dut, st_rt))
+ if "destinationType" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: destinationType missing"
+ "for route {} in OSPF RIB \n".format(dut, st_rt)
+ )
return errormsg
- elif _rtype != ospf_rib_json[st_rt][
- "destinationType"]:
- errormsg = ("[DUT: {}]: destinationType mismatch"
- "for route {} in OSPF RIB \n".\
- format(dut, st_rt))
+ elif _rtype != ospf_rib_json[st_rt]["destinationType"]:
+ errormsg = (
+ "[DUT: {}]: destinationType mismatch"
+ "for route {} in OSPF RIB \n".format(dut, st_rt)
+ )
return errormsg
else:
- logger.info("DUT: {}]: Found destinationType {}"
- "for route {}".\
- format(dut, _rtype, st_rt))
+ logger.info(
+ "DUT: {}]: Found destinationType {}"
+ "for route {}".format(dut, _rtype, st_rt)
+ )
if tag:
- if "tag" not in ospf_rib_json[
- st_rt]:
- errormsg = ("[DUT: {}]: tag is not"
- " present for"
- " route {} in RIB \n".\
- format(dut, st_rt
- ))
+ if "tag" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: tag is not"
+ " present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
return errormsg
- if _tag != ospf_rib_json[
- st_rt]["tag"]:
- errormsg = ("[DUT: {}]: tag value {}"
- " is not matched for"
- " route {} in RIB \n".\
- format(dut, _tag, st_rt,
- ))
+ if _tag != ospf_rib_json[st_rt]["tag"]:
+ errormsg = (
+ "[DUT: {}]: tag value {}"
+ " is not matched for"
+ " route {} in RIB \n".format(
+ dut,
+ _tag,
+ st_rt,
+ )
+ )
return errormsg
if metric is not None:
- if "type2cost" not in ospf_rib_json[
- st_rt]:
- errormsg = ("[DUT: {}]: metric is"
- " not present for"
- " route {} in RIB \n".\
- format(dut, st_rt))
+ if "type2cost" not in ospf_rib_json[st_rt]:
+ errormsg = (
+ "[DUT: {}]: metric is"
+ " not present for"
+ " route {} in RIB \n".format(dut, st_rt)
+ )
return errormsg
- if metric != ospf_rib_json[
- st_rt]["type2cost"]:
- errormsg = ("[DUT: {}]: metric value "
- "{} is not matched for "
- "route {} in RIB \n".\
- format(dut, metric, st_rt,
- ))
+ if metric != ospf_rib_json[st_rt]["type2cost"]:
+ errormsg = (
+ "[DUT: {}]: metric value "
+ "{} is not matched for "
+ "route {} in RIB \n".format(
+ dut,
+ metric,
+ st_rt,
+ )
+ )
return errormsg
else:
missing_routes.append(st_rt)
if nh_found:
- logger.info("[DUT: {}]: Found next_hop {} for all OSPF"
- " routes in RIB".format(router, next_hop))
+ logger.info(
+ "[DUT: {}]: Found next_hop {} for all OSPF"
+ " routes in RIB".format(router, next_hop)
+ )
if len(missing_routes) > 0:
- errormsg = ("[DUT: {}]: Missing route in RIB, "
- "routes: {}".\
- format(dut, missing_routes))
+ errormsg = "[DUT: {}]: Missing route in RIB, " "routes: {}".format(
+ dut, missing_routes
+ )
return errormsg
if found_routes:
- logger.info("[DUT: %s]: Verified routes in RIB, found"
- " routes are: %s\n", dut, found_routes)
+ logger.info(
+ "[DUT: %s]: Verified routes in RIB, found" " routes are: %s\n",
+ dut,
+ found_routes,
+ )
result = True
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@@ -1855,15 +1924,16 @@ def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None):
result = False
for router, rnode in tgen.routers().iteritems():
- if 'ospf6' not in topo['routers'][router]:
+ if "ospf6" not in topo["routers"][router]:
continue
if dut is not None and dut != router:
continue
logger.info("Verifying OSPF interface on router %s:", router)
- show_ospf_json = run_frr_cmd(rnode, "show ipv6 ospf interface json",
- isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf interface json", isjson=True
+ )
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
@@ -1873,32 +1943,49 @@ def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None):
# To find neighbor ip type
ospf_intf_data = input_dict[router]["links"]
for ospf_intf, intf_data in ospf_intf_data.items():
- intf = topo['routers'][router]['links'][ospf_intf]['interface']
- if intf in show_ospf_json:
- for intf_attribute in intf_data['ospf6']:
- if intf_data['ospf6'][intf_attribute] is not list:
- if intf_data['ospf6'][intf_attribute] == show_ospf_json[
- intf][intf_attribute]:
- logger.info("[DUT: %s] OSPF6 interface %s: %s is %s",
- router, intf, intf_attribute, intf_data['ospf6'][
- intf_attribute])
- elif intf_data['ospf6'][intf_attribute] is list:
+ intf = topo["routers"][router]["links"][ospf_intf]["interface"]
+ if intf in show_ospf_json:
+ for intf_attribute in intf_data["ospf6"]:
+ if intf_data["ospf6"][intf_attribute] is not list:
+ if (
+ intf_data["ospf6"][intf_attribute]
+ == show_ospf_json[intf][intf_attribute]
+ ):
+ logger.info(
+ "[DUT: %s] OSPF6 interface %s: %s is %s",
+ router,
+ intf,
+ intf_attribute,
+ intf_data["ospf6"][intf_attribute],
+ )
+ elif intf_data["ospf6"][intf_attribute] is list:
for addr_list in len(show_ospf_json[intf][intf_attribute]):
- if show_ospf_json[intf][intf_attribute][addr_list][
- 'address'].split('/')[0] == intf_data['ospf6'][
- 'internetAddress'][0]['address']:
- break
+ if (
+ show_ospf_json[intf][intf_attribute][addr_list][
+ "address"
+ ].split("/")[0]
+ == intf_data["ospf6"]["internetAddress"][0]["address"]
+ ):
+ break
else:
- errormsg= "[DUT: {}] OSPF6 interface {}: {} is {}, \
- Expected is {}".format(router, intf, intf_attribute,
- intf_data['ospf6'][intf_attribute], intf_data['ospf6'][
- intf_attribute])
+ errormsg = "[DUT: {}] OSPF6 interface {}: {} is {}, \
+ Expected is {}".format(
+ router,
+ intf,
+ intf_attribute,
+ intf_data["ospf6"][intf_attribute],
+ intf_data["ospf6"][intf_attribute],
+ )
return errormsg
else:
- errormsg= "[DUT: {}] OSPF6 interface {}: {} is {}, \
- Expected is {}".format(router, intf, intf_attribute,
- intf_data['ospf6'][intf_attribute], intf_data['ospf6'][
- intf_attribute])
+ errormsg = "[DUT: {}] OSPF6 interface {}: {} is {}, \
+ Expected is {}".format(
+ router,
+ intf,
+ intf_attribute,
+ intf_data["ospf6"][intf_attribute],
+ intf_data["ospf6"][intf_attribute],
+ )
return errormsg
result = True
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@@ -1956,16 +2043,14 @@ def verify_ospf6_database(tgen, topo, dut, input_dict):
router = dut
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
- if 'ospf' not in topo['routers'][dut]:
- errormsg = "[DUT: {}] OSPF is not configured on the router.".format(
- dut)
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(dut)
return errormsg
rnode = tgen.routers()[dut]
logger.info("Verifying OSPF interface on router %s:", dut)
- show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json",
- isjson=True)
+ show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json", isjson=True)
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
errormsg = "OSPF is not running"
@@ -1973,167 +2058,209 @@ def verify_ospf6_database(tgen, topo, dut, input_dict):
# for inter and inter lsa's
ospf_db_data = input_dict.setdefault("areas", None)
- ospf_external_lsa = input_dict.setdefault(
- 'asExternalLinkStates', None)
+ ospf_external_lsa = input_dict.setdefault("asExternalLinkStates", None)
if ospf_db_data:
- for ospf_area, area_lsa in ospf_db_data.items():
- if ospf_area in show_ospf_json['areas']:
- if 'routerLinkStates' in area_lsa:
- for lsa in area_lsa['routerLinkStates']:
- for rtrlsa in show_ospf_json['areas'][ospf_area][
- 'routerLinkStates']:
- if lsa['lsaId'] == rtrlsa['lsaId'] and \
- lsa['advertisedRouter'] == rtrlsa[
- 'advertisedRouter']:
- result = True
- break
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB area %s:Router "
- "LSA %s", router, ospf_area, lsa)
+ for ospf_area, area_lsa in ospf_db_data.items():
+ if ospf_area in show_ospf_json["areas"]:
+ if "routerLinkStates" in area_lsa:
+ for lsa in area_lsa["routerLinkStates"]:
+ for rtrlsa in show_ospf_json["areas"][ospf_area][
+ "routerLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == rtrlsa["lsaId"]
+ and lsa["advertisedRouter"]
+ == rtrlsa["advertisedRouter"]
+ ):
+ result = True
break
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB area {}: expected" \
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Router " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
" Router LSA is {}".format(router, ospf_area, lsa)
- return errormsg
+ )
+ return errormsg
- if 'networkLinkStates' in area_lsa:
- for lsa in area_lsa['networkLinkStates']:
- for netlsa in show_ospf_json['areas'][ospf_area][
- 'networkLinkStates']:
- if lsa in show_ospf_json['areas'][ospf_area][
- 'networkLinkStates']:
- if lsa['lsaId'] == netlsa['lsaId'] and \
- lsa['advertisedRouter'] == netlsa[
- 'advertisedRouter']:
- result = True
- break
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB area %s:Network "
- "LSA %s", router, ospf_area, lsa)
- break
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB area {}: expected" \
+ if "networkLinkStates" in area_lsa:
+ for lsa in area_lsa["networkLinkStates"]:
+ for netlsa in show_ospf_json["areas"][ospf_area][
+ "networkLinkStates"
+ ]:
+ if (
+ lsa
+ in show_ospf_json["areas"][ospf_area][
+ "networkLinkStates"
+ ]
+ ):
+ if (
+ lsa["lsaId"] == netlsa["lsaId"]
+ and lsa["advertisedRouter"]
+ == netlsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Network " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
" Network LSA is {}".format(router, ospf_area, lsa)
- return errormsg
+ )
+ return errormsg
- if 'summaryLinkStates' in area_lsa:
- for lsa in area_lsa['summaryLinkStates']:
- for t3lsa in show_ospf_json['areas'][ospf_area][
- 'summaryLinkStates']:
- if lsa['lsaId'] == t3lsa['lsaId'] and \
- lsa['advertisedRouter'] == t3lsa[
- 'advertisedRouter']:
- result = True
- break
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB area %s:Summary "
- "LSA %s", router, ospf_area, lsa)
+ if "summaryLinkStates" in area_lsa:
+ for lsa in area_lsa["summaryLinkStates"]:
+ for t3lsa in show_ospf_json["areas"][ospf_area][
+ "summaryLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t3lsa["lsaId"]
+ and lsa["advertisedRouter"] == t3lsa["advertisedRouter"]
+ ):
+ result = True
break
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB area {}: expected" \
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
" Summary LSA is {}".format(router, ospf_area, lsa)
- return errormsg
+ )
+ return errormsg
- if 'nssaExternalLinkStates' in area_lsa:
- for lsa in area_lsa['nssaExternalLinkStates']:
- for t7lsa in show_ospf_json['areas'][ospf_area][
- 'nssaExternalLinkStates']:
- if lsa['lsaId'] == t7lsa['lsaId'] and \
- lsa['advertisedRouter'] == t7lsa[
- 'advertisedRouter']:
- result = True
- break
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB area %s:Type7 "
- "LSA %s", router, ospf_area, lsa)
+ if "nssaExternalLinkStates" in area_lsa:
+ for lsa in area_lsa["nssaExternalLinkStates"]:
+ for t7lsa in show_ospf_json["areas"][ospf_area][
+ "nssaExternalLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t7lsa["lsaId"]
+ and lsa["advertisedRouter"] == t7lsa["advertisedRouter"]
+ ):
+ result = True
break
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB area {}: expected" \
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:Type7 " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ break
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
" Type7 LSA is {}".format(router, ospf_area, lsa)
- return errormsg
+ )
+ return errormsg
- if 'asbrSummaryLinkStates' in area_lsa:
- for lsa in area_lsa['asbrSummaryLinkStates']:
- for t4lsa in show_ospf_json['areas'][ospf_area][
- 'asbrSummaryLinkStates']:
- if lsa['lsaId'] == t4lsa['lsaId'] and \
- lsa['advertisedRouter'] == t4lsa[
- 'advertisedRouter']:
- result = True
- break
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB area %s:ASBR Summary "
- "LSA %s", router, ospf_area, lsa)
+ if "asbrSummaryLinkStates" in area_lsa:
+ for lsa in area_lsa["asbrSummaryLinkStates"]:
+ for t4lsa in show_ospf_json["areas"][ospf_area][
+ "asbrSummaryLinkStates"
+ ]:
+ if (
+ lsa["lsaId"] == t4lsa["lsaId"]
+ and lsa["advertisedRouter"] == t4lsa["advertisedRouter"]
+ ):
result = True
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB area {}: expected" \
- " ASBR Summary LSA is {}".format(
- router, ospf_area, lsa)
- return errormsg
+ break
+ if result:
+ logger.info(
+ "[DUT: %s] OSPF LSDB area %s:ASBR Summary " "LSA %s",
+ router,
+ ospf_area,
+ lsa,
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB area {}: expected"
+ " ASBR Summary LSA is {}".format(router, ospf_area, lsa)
+ )
+ return errormsg
- if 'linkLocalOpaqueLsa' in area_lsa:
- for lsa in area_lsa['linkLocalOpaqueLsa']:
- try:
- for lnklsa in show_ospf_json['areas'][ospf_area][
- 'linkLocalOpaqueLsa']:
- if lsa['lsaId'] in lnklsa['lsaId'] and \
- 'linkLocalOpaqueLsa' in show_ospf_json[
- 'areas'][ospf_area]:
- logger.info((
- "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA"
- "%s", ospf_area, lsa))
- result = True
- else:
- errormsg = ("[DUT: FRR] OSPF LSDB area: {} "
- "expected Opaque-LSA is {}, Found is {}".format(
- ospf_area, lsa, show_ospf_json))
- raise ValueError (errormsg)
- return errormsg
- except KeyError:
- errormsg = ("[DUT: FRR] linkLocalOpaqueLsa Not "
- "present")
- return errormsg
+ if "linkLocalOpaqueLsa" in area_lsa:
+ for lsa in area_lsa["linkLocalOpaqueLsa"]:
+ try:
+ for lnklsa in show_ospf_json["areas"][ospf_area][
+ "linkLocalOpaqueLsa"
+ ]:
+ if (
+ lsa["lsaId"] in lnklsa["lsaId"]
+ and "linkLocalOpaqueLsa"
+ in show_ospf_json["areas"][ospf_area]
+ ):
+ logger.info(
+ (
+ "[DUT: FRR] OSPF LSDB area %s:Opaque-LSA"
+ "%s",
+ ospf_area,
+ lsa,
+ )
+ )
+ result = True
+ else:
+ errormsg = (
+ "[DUT: FRR] OSPF LSDB area: {} "
+ "expected Opaque-LSA is {}, Found is {}".format(
+ ospf_area, lsa, show_ospf_json
+ )
+ )
+ raise ValueError(errormsg)
+ return errormsg
+ except KeyError:
+ errormsg = "[DUT: FRR] linkLocalOpaqueLsa Not " "present"
+ return errormsg
if ospf_external_lsa:
- for lsa in ospf_external_lsa:
- try:
- for t5lsa in show_ospf_json['asExternalLinkStates']:
- if lsa['lsaId'] == t5lsa['lsaId'] and \
- lsa['advertisedRouter'] == t5lsa[
- 'advertisedRouter']:
- result = True
- break
- except KeyError:
- result = False
- if result:
- logger.info(
- "[DUT: %s] OSPF LSDB:External LSA %s",
- router, lsa)
- result = True
- else:
- errormsg = \
- "[DUT: {}] OSPF LSDB : expected" \
- " External LSA is {}".format(router, lsa)
- return errormsg
+ for lsa in ospf_external_lsa:
+ try:
+ for t5lsa in show_ospf_json["asExternalLinkStates"]:
+ if (
+ lsa["lsaId"] == t5lsa["lsaId"]
+ and lsa["advertisedRouter"] == t5lsa["advertisedRouter"]
+ ):
+ result = True
+ break
+ except KeyError:
+ result = False
+ if result:
+ logger.info("[DUT: %s] OSPF LSDB:External LSA %s", router, lsa)
+ result = True
+ else:
+ errormsg = (
+ "[DUT: {}] OSPF LSDB : expected"
+ " External LSA is {}".format(router, lsa)
+ )
+ return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
-
-def config_ospf6_interface (tgen, topo, input_dict=None, build=False,
- load_config=True):
+def config_ospf6_interface(tgen, topo, input_dict=None, build=False, load_config=True):
"""
API to configure ospf on router.
@@ -2180,17 +2307,17 @@ def config_ospf6_interface (tgen, topo, input_dict=None, build=False,
"input_dict, passed input_dict %s", router,
str(input_dict))
continue
- ospf_data = input_dict[router]['links'][lnk]['ospf6']
+ ospf_data = input_dict[router]["links"][lnk]["ospf6"]
data_ospf_area = ospf_data.setdefault("area", None)
- data_ospf_auth = ospf_data.setdefault("authentication", None)
+ data_ospf_auth = ospf_data.setdefault("hash-algo", None)
data_ospf_dr_priority = ospf_data.setdefault("priority", None)
data_ospf_cost = ospf_data.setdefault("cost", None)
data_ospf_mtu = ospf_data.setdefault("mtu_ignore", None)
try:
- intf = topo['routers'][router]['links'][lnk]['interface']
+ intf = topo["routers"][router]["links"][lnk]["interface"]
except KeyError:
- intf = topo['switches'][router]['links'][lnk]['interface']
+ intf = topo["switches"][router]["links"][lnk]["interface"]
# interface
cmd = "interface {}".format(intf)
@@ -2201,34 +2328,50 @@ def config_ospf6_interface (tgen, topo, input_dict=None, build=False,
cmd = "ipv6 ospf area {}".format(data_ospf_area)
config_data.append(cmd)
+ # interface ospf auth
+ if data_ospf_auth:
+ cmd = "ipv6 ospf6 authentication"
+
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+
+ if "hash-algo" in ospf_data:
+ cmd = "{} key-id {} hash-algo {} key {}".format(
+ cmd,
+ ospf_data["key-id"],
+ ospf_data["hash-algo"],
+ ospf_data["key"],
+ )
+ if "del_action" in ospf_data:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
# interface ospf dr priority
if data_ospf_dr_priority:
- cmd = "ipv6 ospf priority {}".format(
- ospf_data["priority"])
- if 'del_action' in ospf_data:
+ cmd = "ipv6 ospf priority {}".format(ospf_data["priority"])
+ if "del_action" in ospf_data:
cmd = "no {}".format(cmd)
config_data.append(cmd)
# interface ospf cost
if data_ospf_cost:
- cmd = "ipv6 ospf cost {}".format(
- ospf_data["cost"])
- if 'del_action' in ospf_data:
+ cmd = "ipv6 ospf cost {}".format(ospf_data["cost"])
+ if "del_action" in ospf_data:
cmd = "no {}".format(cmd)
config_data.append(cmd)
# interface ospf mtu
if data_ospf_mtu:
cmd = "ipv6 ospf mtu-ignore"
- if 'del_action' in ospf_data:
+ if "del_action" in ospf_data:
cmd = "no {}".format(cmd)
config_data.append(cmd)
if build:
return config_data
else:
- result = create_common_configuration(tgen, router, config_data,
- "interface_config",
- build=build)
+ result = create_common_configuration(
+ tgen, router, config_data, "interface_config", build=build
+ )
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
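The interface helper now also reads the per-link authentication keys added above ("hash-algo", "key-id", "key", plus the existing "del_action" toggle) and renders an "ipv6 ospf6 authentication key-id ... hash-algo ... key ..." line. A minimal sketch of the corresponding input dict, with placeholder router names and key material:

    # Hedged sketch -- names and key values are illustrative; the hash-algo
    # string must be one accepted by ospf6d's CLI.
    input_dict = {
        "r1": {
            "links": {
                "r2": {
                    "ospf6": {
                        "hash-algo": "md5",
                        "key-id": "10",
                        "key": "ospf6-key",
                    }
                }
            }
        }
    }
    result = config_ospf6_interface(tgen, topo, input_dict)
    assert result is True, result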
diff --git a/tests/topotests/lib/scapy_sendpkt.py b/tests/topotests/lib/scapy_sendpkt.py
new file mode 100755
index 0000000000..0bb6a72092
--- /dev/null
+++ b/tests/topotests/lib/scapy_sendpkt.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+#
+# July 29 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C. ("LabN")
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+import argparse
+import logging
+import re
+import sys
+
+from scapy.all import conf, srp
+
+conf.verb = 0
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-i", "--interface", help="interface to send packet on.")
+ parser.add_argument("-I", "--imports", help="scapy symbols to import")
+ parser.add_argument(
+ "-t", "--timeout", type=float, default=2.0, help="timeout for reply receipts"
+ )
+ parser.add_argument("pktdef", help="scapy packet definition to send")
+ args = parser.parse_args()
+
+ if args.imports:
+ i = args.imports.replace("\n", "").strip()
+ if not re.match("[a-zA-Z0-9_ \t,]", i):
+ logging.critical('Invalid imports specified: "%s"', i)
+ sys.exit(1)
+ exec("from scapy.all import " + i, globals(), locals())
+
+ ans, unans = srp(eval(args.pktdef), iface=args.interface, timeout=args.timeout)
+ if not ans:
+ sys.exit(2)
+ for pkt in ans:
+ print(pkt.answer.show(dump=True))
+
+
+if __name__ == "__main__":
+ main()
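The new standalone helper sends one scapy-built frame and prints any replies: --interface selects the egress device, --imports is a comma-separated (character-whitelisted) list of scapy names made available to the packet expression, --timeout bounds srp(), and the positional argument is eval()'d as the packet; the script exits 2 if nothing answers. A hedged example invocation, with placeholder interface name and addresses:

    scapy_sendpkt.py --interface r1-eth0 --imports "Ether,IP,ICMP" --timeout 2.0 "Ether()/IP(dst='10.0.0.2')/ICMP()"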
diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py
index fcc6c19868..1ae482a265 100644
--- a/tests/topotests/lib/topojson.py
+++ b/tests/topotests/lib/topojson.py
@@ -293,6 +293,24 @@ def build_topo_from_json(tgen, topo):
)
+def linux_intf_config_from_json(tgen, topo):
+ """Configure interfaces from linux based on topo."""
+ routers = topo["routers"]
+ for rname in routers:
+ router = tgen.gears[rname]
+ links = routers[rname]["links"]
+ for rrname in links:
+ link = links[rrname]
+ if rrname == "lo":
+ lname = "lo"
+ else:
+ lname = link["interface"]
+ if "ipv4" in link:
+ router.run("ip addr add {} dev {}".format(link["ipv4"], lname))
+ if "ipv6" in link:
+ router.run("ip -6 addr add {} dev {}".format(link["ipv6"], lname))
+
+
def build_config_from_json(tgen, topo, save_bkup=True):
"""
Reads initial configuraiton from JSON for each router, builds
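linux_intf_config_from_json complements build_topo_from_json: it walks each router's "links" entries and assigns any "ipv4"/"ipv6" prefixes directly with ip addr add (using "lo" for loopback links, otherwise the stored "interface" name), so a test can address kernel interfaces without pushing zebra configuration. A hedged sketch of the call, assuming the usual tgen and topo objects created during setup_module:

    # Sketch -- tgen and topo come from the normal topotest setup (not shown).
    from lib.topojson import linux_intf_config_from_json

    linux_intf_config_from_json(tgen, topo)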
diff --git a/tests/topotests/msdp_mesh_topo1/r1/pimd.conf b/tests/topotests/msdp_mesh_topo1/r1/pimd.conf
index 30cecee9e1..c2ffed4762 100644
--- a/tests/topotests/msdp_mesh_topo1/r1/pimd.conf
+++ b/tests/topotests/msdp_mesh_topo1/r1/pimd.conf
@@ -10,6 +10,7 @@ interface r1-eth1
ip igmp
!
ip pim rp 10.254.254.1
+ip pim join-prune-interval 5
ip msdp timers 10 20 3
ip msdp mesh-group mg-1 source 10.254.254.1
ip msdp mesh-group mg-1 member 10.254.254.2
diff --git a/tests/topotests/msdp_mesh_topo1/r2/pimd.conf b/tests/topotests/msdp_mesh_topo1/r2/pimd.conf
index a51c6d58c7..1719a17007 100644
--- a/tests/topotests/msdp_mesh_topo1/r2/pimd.conf
+++ b/tests/topotests/msdp_mesh_topo1/r2/pimd.conf
@@ -9,6 +9,7 @@ interface r2-eth1
ip pim
!
ip pim rp 10.254.254.2
+ip pim join-prune-interval 5
ip msdp timers 10 20 3
ip msdp mesh-group mg-1 source 10.254.254.2
ip msdp mesh-group mg-1 member 10.254.254.1
diff --git a/tests/topotests/msdp_mesh_topo1/r3/pimd.conf b/tests/topotests/msdp_mesh_topo1/r3/pimd.conf
index 663f78620e..2748a55d83 100644
--- a/tests/topotests/msdp_mesh_topo1/r3/pimd.conf
+++ b/tests/topotests/msdp_mesh_topo1/r3/pimd.conf
@@ -9,6 +9,7 @@ interface r3-eth1
ip pim
ip igmp
!
+ip pim join-prune-interval 5
ip pim rp 10.254.254.3
ip msdp timers 10 20 3
ip msdp mesh-group mg-1 source 10.254.254.3
diff --git a/tests/topotests/msdp_topo1/r1/pimd.conf b/tests/topotests/msdp_topo1/r1/pimd.conf
index fc289031f4..4274315271 100644
--- a/tests/topotests/msdp_topo1/r1/pimd.conf
+++ b/tests/topotests/msdp_topo1/r1/pimd.conf
@@ -19,3 +19,4 @@ ip msdp timers 10 20 3
ip msdp peer 192.168.0.2 source 192.168.0.1
ip msdp peer 192.168.1.2 source 192.168.1.1
ip pim rp 10.254.254.1
+ip pim join-prune-interval 5
diff --git a/tests/topotests/msdp_topo1/r2/pimd.conf b/tests/topotests/msdp_topo1/r2/pimd.conf
index ffa80b12d3..a4a69bf05c 100644
--- a/tests/topotests/msdp_topo1/r2/pimd.conf
+++ b/tests/topotests/msdp_topo1/r2/pimd.conf
@@ -15,3 +15,4 @@ ip msdp timers 10 20 3
ip msdp peer 192.168.0.1 source 192.168.0.2
ip msdp peer 192.168.2.2 source 192.168.2.1
ip pim rp 10.254.254.2
+ip pim join-prune-interval 5
diff --git a/tests/topotests/msdp_topo1/r3/pimd.conf b/tests/topotests/msdp_topo1/r3/pimd.conf
index ab12f0573a..db94447c76 100644
--- a/tests/topotests/msdp_topo1/r3/pimd.conf
+++ b/tests/topotests/msdp_topo1/r3/pimd.conf
@@ -15,3 +15,4 @@ ip msdp timers 10 20 3
ip msdp peer 192.168.1.1 source 192.168.1.2
ip msdp peer 192.168.3.2 source 192.168.3.1
ip pim rp 10.254.254.3
+ip pim join-prune-interval 5
diff --git a/tests/topotests/msdp_topo1/r4/pimd.conf b/tests/topotests/msdp_topo1/r4/pimd.conf
index b2e05cb3cb..e9bb59054c 100644
--- a/tests/topotests/msdp_topo1/r4/pimd.conf
+++ b/tests/topotests/msdp_topo1/r4/pimd.conf
@@ -19,3 +19,4 @@ ip msdp timers 10 20 3
ip msdp peer 192.168.2.1 source 192.168.2.2
ip msdp peer 192.168.3.1 source 192.168.3.2
ip pim rp 10.254.254.4
+ip pim join-prune-interval 5
diff --git a/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py b/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py
index cd398a5111..827dde69ec 100644
--- a/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py
+++ b/tests/topotests/multicast_pim_bsm_topo1/test_mcast_pim_bsmp_01.py
@@ -113,6 +113,9 @@ from lib.pim import (
from lib.topolog import logger
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/mcast_pim_bsmp_01.json".format(CWD)
try:
diff --git a/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py b/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py
index 60bd6de35d..98af4433ab 100644
--- a/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py
+++ b/tests/topotests/multicast_pim_bsm_topo2/test_mcast_pim_bsmp_02.py
@@ -104,8 +104,7 @@ from lib.pim import (
from lib.topolog import logger
from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.pimd]
+pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
# Reading the data from JSON File for topology creation
diff --git a/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py b/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py
index b880e0e462..99a6e5bacf 100755
--- a/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py
+++ b/tests/topotests/multicast_pim_sm_topo1/test_multicast_pim_sm_topo1.py
@@ -107,6 +107,9 @@ from lib.pim import (
from lib.topolog import logger
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.pimd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/multicast_pim_sm_topo1.json".format(CWD)
try:
diff --git a/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py b/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py
index ad3b77b843..f30902c1b2 100755
--- a/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py
+++ b/tests/topotests/multicast_pim_sm_topo2/test_multicast_pim_sm_topo2.py
@@ -102,6 +102,9 @@ from lib.pim import (
from lib.topolog import logger
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.pimd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/multicast_pim_sm_topo2.json".format(CWD)
try:
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py
index d73e8dc9e8..736cb1659c 100755
--- a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py
+++ b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pim_static_rp.py
@@ -153,6 +153,9 @@ from lib.pim import (
clear_ip_mroute_verify,
)
+pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/multicast_pim_static_rp.json".format(CWD)
try:
diff --git a/tests/topotests/nhrp_topo/test_nhrp_topo.py b/tests/topotests/nhrp_topo/test_nhrp_topo.py
index 1687961f34..f59e3ae1b9 100644
--- a/tests/topotests/nhrp_topo/test_nhrp_topo.py
+++ b/tests/topotests/nhrp_topo/test_nhrp_topo.py
@@ -45,6 +45,8 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.nhrpd]
+
class NHRPTopo(Topo):
"Test topology builder"
@@ -115,7 +117,7 @@ def setup_module(mod):
)
# Initialize all routers.
- logger.info('Launching BGP, NHRP')
+ logger.info('Launching NHRP')
for name in router_list:
router = tgen.gears[name]
router.start()
diff --git a/tests/topotests/ospf6_topo1/test_ospf6_topo1.py b/tests/topotests/ospf6_topo1/test_ospf6_topo1.py
index bbd18a57ff..8a6544734a 100644
--- a/tests/topotests/ospf6_topo1/test_ospf6_topo1.py
+++ b/tests/topotests/ospf6_topo1/test_ospf6_topo1.py
@@ -91,6 +91,9 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
import platform
+pytestmark = [pytest.mark.ospfd]
+
+
#####################################################
##
## Network Topology Definition
diff --git a/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py b/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py
index b158099d9a..61a80cc9ec 100755
--- a/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py
+++ b/tests/topotests/ospf6_topo1_vrf/test_ospf6_topo1_vrf.py
@@ -94,6 +94,9 @@ from lib.topolog import logger
from lib.topotest import iproute2_is_vrf_capable
from lib.common_config import required_linux_kernel_version
+pytestmark = [pytest.mark.ospfd]
+
+
#####################################################
##
## Network Topology Definition
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py
index 41960ac79f..e61a6b5905 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_topo1.py
@@ -70,6 +70,9 @@ from lib.ospf import (
verify_ospf_summary,
)
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
# Reading the data from JSON File for topology creation
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py
index 393eb19a53..db177360b4 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_asbr_summary_type7_lsa.py
@@ -70,6 +70,9 @@ from lib.ospf import (
verify_ospf_summary,
)
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
# Reading the data from JSON File for topology creation
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py
index a7f2893eab..bdba8fd8e4 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py
@@ -56,6 +56,9 @@ from lib.topojson import build_topo_from_json, build_config_from_json
from lib.ospf import verify_ospf_neighbor, config_ospf_interface, clear_ospf
from ipaddress import IPv4Address
+pytestmark = [pytest.mark.ospfd]
+
+
# Global variables
topo = None
# Reading the data from JSON File for topology creation
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py
index 49ecaac9f7..5c57f8be25 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py
@@ -68,6 +68,9 @@ from lib.ospf import (
redistribute_ospf,
)
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
topo = None
# Reading the data from JSON File for topology creation
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py
index 47c6c45e39..96f781c150 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py
@@ -70,6 +70,9 @@ from lib.ospf import (
)
from ipaddress import IPv4Address
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
# Reading the data from JSON File for topology creation
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py
index d9b90a132a..c89a663380 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py
@@ -71,6 +71,9 @@ from lib.ospf import (
)
from ipaddress import IPv4Address
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
# Reading the data from JSON File for topology creation
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py
index 3644bff3dc..0af83548b9 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py
@@ -63,6 +63,9 @@ sys.path.append(os.path.join(CWD, "../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
# Reading the data from JSON File for topology creation
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py
index be18ba5a78..0172f589c5 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py
@@ -68,6 +68,9 @@ from lib.ospf import (
verify_ospf_database,
)
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py
index 0848f6c94a..bc6c248ad2 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py
@@ -65,6 +65,9 @@ from lib.ospf import (
redistribute_ospf,
)
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
# Reading the data from JSON File for topology creation
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py
index f17346d5b1..0e2fef4a22 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py
@@ -66,6 +66,9 @@ from lib.ospf import (
verify_ospf_interface,
)
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
index e94680d974..a595bc0491 100644
--- a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
+++ b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py
@@ -69,6 +69,7 @@ from lib.ospf import (
verify_ospf_database,
)
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
# Global variables
topo = None
diff --git a/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py b/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py
index 5e7802fa04..b5f535cd06 100644
--- a/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py
+++ b/tests/topotests/ospf_dual_stack/test_ospf_dual_stack.py
@@ -42,6 +42,9 @@ from lib.ospf import (
verify_ospf_database,
)
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
diff --git a/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py b/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py
index 76e50beb5c..a22fbf458a 100644
--- a/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py
+++ b/tests/topotests/ospf_suppress_fa/test_ospf_suppress_fa.py
@@ -50,6 +50,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.ospfd]
+
class NetworkTopo(Topo):
"OSPF topology builder"
diff --git a/tests/topotests/ospf_tilfa_topo1/test_ospf_tilfa_topo1.py b/tests/topotests/ospf_tilfa_topo1/test_ospf_tilfa_topo1.py
index 489690471c..b3da6e2a1a 100644
--- a/tests/topotests/ospf_tilfa_topo1/test_ospf_tilfa_topo1.py
+++ b/tests/topotests/ospf_tilfa_topo1/test_ospf_tilfa_topo1.py
@@ -71,6 +71,8 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.ospfd]
+
class TemplateTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/ospf_topo2/test_ospf_topo2.py b/tests/topotests/ospf_topo2/test_ospf_topo2.py
index 6451f5fb32..8b8d5d6e9f 100644
--- a/tests/topotests/ospf_topo2/test_ospf_topo2.py
+++ b/tests/topotests/ospf_topo2/test_ospf_topo2.py
@@ -46,6 +46,8 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.ospfd]
+
class OSPFTopo(Topo):
"Test topology builder"
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_topo1.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_topo1.json
new file mode 100644
index 0000000000..74a0de489f
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_topo1.json
@@ -0,0 +1,198 @@
+{
+ "address_types": [
+ "ipv6"
+ ],
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r3-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR3"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR3"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r0": {},
+ "r3": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r0-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR0"
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR1",
+ "ospf6": {
+ "area": "0.0.0.0"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.3",
+ "neighbors": {
+ "r0": {},
+ "r1": {},
+ "r2": {}
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json
new file mode 100644
index 0000000000..c928093925
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json
@@ -0,0 +1,347 @@
+{
+ "address_types": [
+ "ipv6"
+ ],
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link4": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link5": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link6": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link7": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r1-link1": {
+ "nbr": "r1"
+ },
+ "r1-link2": {
+ "nbr": "r1"
+ },
+ "r1-link3": {
+ "nbr": "r1"
+ },
+ "r1-link4": {
+ "nbr": "r1"
+ },
+ "r1-link5": {
+ "nbr": "r1"
+ },
+ "r1-link6": {
+ "nbr": "r1"
+ },
+ "r1-link7": {
+ "nbr": "r1"
+ },
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r0-link1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r0-link2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r0-link3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r0-link4": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r0-link5": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r0-link6": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r0-link7": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR3"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "neighbors": {
+ "r0": {},
+ "r0-link1": {
+ "nbr": "r0"
+ },
+ "r0-link2": {
+ "nbr": "r0"
+ },
+ "r0-link3": {
+ "nbr": "r0"
+ },
+ "r0-link4": {
+ "nbr": "r0"
+ },
+ "r0-link5": {
+ "nbr": "r0"
+ },
+ "r0-link6": {
+ "nbr": "r0"
+ },
+ "r0-link7": {
+ "nbr": "r0"
+ },
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r0": {},
+ "r3": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR1",
+ "ospf": {
+ "area": "0.0.0.0"
+ },
+ "ospf6": {
+ "area": "0.0.0.0"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.3",
+ "neighbors": {
+ "r0": {},
+ "r1": {},
+ "r2": {}
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json
new file mode 100644
index 0000000000..226f84f320
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json
@@ -0,0 +1,137 @@
+{
+ "address_types": ["ipv6"],
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv6": "2001:db8:f::", "v6mask": 128},
+ "routers": {
+ "r0": {
+ "links": {
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "neighbors": {"r1": {}, "r2": {}, "r3": {}}
+ }
+ },
+ "r1": {
+ "links": {
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "neighbors": {"r0": {}, "r2": {}, "r3": {}}
+ }
+ },
+ "r2": {
+ "links": {
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "neighbors": {"r1": {}, "r0": {}, "r3": {}}
+ }
+ },
+ "r3": {
+ "links": {
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.3",
+ "neighbors": {"r0": {}, "r1": {}, "r2": {}}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
new file mode 100644
index 0000000000..6a4b60fbed
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
@@ -0,0 +1,1928 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""OSPF Summarisation Functionality Automation."""
+import os
+import sys
+import time
+import pytest
+import json
+from copy import deepcopy
+from ipaddress import IPv4Address
+from lib.topotest import frr_unicode
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib.topogen import Topogen, get_topogen
+import ipaddress
+from time import sleep
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ kill_router_daemons,
+ write_test_footer,
+ reset_config_on_routers,
+ stop_router,
+ start_router,
+ verify_rib,
+ create_static_routes,
+ step,
+ start_router_daemons,
+ create_route_maps,
+ shutdown_bringup_interface,
+ create_prefix_lists,
+ create_interfaces_cfg,
+ topo_daemons,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.ospf import (
+ verify_ospf6_neighbor,
+ clear_ospf,
+ verify_ospf6_rib,
+ create_router_ospf,
+ verify_ospf_summary,
+)
+
+
+# Global variables
+topo = None
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/ospfv3_asbr_summary_topo1.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": [
+ "2011:0:20::1/128",
+ "2011:0:20::2/128",
+ "2011:0:20::3/128",
+ "2011:0:20::4/128",
+ "2011:0:20::5/128",
+ ],
+}
+NETWORK_11 = {
+ "ipv4": ["11.0.20.6/32", "11.0.20.7/32"],
+ "ipv6": ["2011:0:20::6/128", "2011:0:20::7/128"],
+}
+
+NETWORK2 = {
+ "ipv4": [
+ "12.0.20.1/32",
+ "12.0.20.2/32",
+ "12.0.20.3/32",
+ "12.0.20.4/32",
+ "12.0.20.5/32",
+ ],
+ "ipv6": [
+ "2012:0:20::1/128",
+ "2012:0:20::2/128",
+ "2012:0:20::3/128",
+ "2012:0:20::4/128",
+ "2012:0:20::5/128",
+ ],
+}
+SUMMARY = {
+ "ipv4": ["11.0.0.0/8", "12.0.0.0/8", "11.0.0.0/24"],
+ "ipv6": ["2011::/32", "2012::/32", "2011::/64", "2011::/24"],
+}
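+# Note: the /128 test routes in NETWORK and NETWORK_11 (2011:0:20::x) fall
+# within SUMMARY["ipv6"][0] (2011::/32), and the NETWORK2 routes (2012:0:20::x)
+# fall within 2012::/32; the testcases below summarise the former to SUMMARY[0].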
+"""
+TOPOLOGY =
+ Please view in a fixed-width font such as Courier.
+ +---+ A0 +---+
+ +R1 +------------+R2 |
+ +-+-+- +--++
+ | -- -- |
+ | -- A0 -- |
+ A0| ---- |
+ | ---- | A0
+ | -- -- |
+ | -- -- |
+ +-+-+- +-+-+
+ +R0 +-------------+R3 |
+ +---+ A0 +---+
+
+TESTCASES =
+1. OSPF summarisation functionality.
+2. OSPF summarisation with advertise and no advertise option.
+3. OSPF summarisation with route map modification of metric type.
+4. OSPF CLI Show: verify OSPF ASBR summary config and show command behaviours.
+5. OSPF summarisation Chaos.
+"""
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # get the list of daemons that need to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ # API call to verify whether OSPF has converged
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+def red_static(dut, config=True):
+ """
+ Local 'def' to redistribute static routes inside OSPF.
+
+ Parameters
+ ----------
+ * `dut` : DUT on which configs have to be made.
+ * `config` : True or False; True (default) to configure, False to
+ unconfigure.
+ """
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "static"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf6": {"redistribute": [{"redist_type": "static", "delete": True}]}
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+
+def red_connected(dut, config=True):
+ """
+ Local 'def' to redistribute connected routes inside OSPF.
+
+ Parameters
+ ----------
+ * `dut` : DUT on which configs have to be made.
+ * `config` : True or False; True (default) to configure, False to
+ unconfigure.
+ """
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "connected"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf6": {
+ "redistribute": [{"redist_type": "connected", "delete": True}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase: Failed \n Error: {}".format(result)
+
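+# Example usage inside a testcase (hypothetical call sites; the tests below
+# invoke these helpers the same way, passing the DUT name):
+#   red_static("r0")                # add "redistribute static" under ospf6 on r0
+#   red_static("r0", config=False)  # remove that redistribution again
+#   red_connected("r0")             # likewise for connected routes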
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+def test_ospfv3_type5_summary_tc42_p0(request):
+ """OSPF summarisation functionality."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ protocol = 'ospf'
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ "5 static routes from different networks and redistribute in R0")
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ },
+ {
+ "network": NETWORK2["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ dut = 'r0'
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_static_rtes, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5"
+ " routes to one route. with aggregate timer as 6 sec")
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32"
+ }],
+ "aggr_timer": 6
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1.")
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Verify that originally advertised routes are withdraw from there"
+ " peer.")
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ step("Delete the configured summary")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32",
+ "del_aggr_timer": True,
+ "delete": True
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Summary Route still present in RIB".format(tc_name)
+
+ step("show ip ospf summary should not have any summary address.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6', expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Summary still present in DB".format(tc_name)
+
+ dut = 'r1'
+ step("All 5 routes are advertised after deletion of configured summary.")
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_static_rtes, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step("configure the summary again and delete static routes .")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole",
+ "delete": True
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ step("Verify that summary route is withdrawn from R1.")
+
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ step("Add back static routes.")
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary"
+ " address on R0 and only one route is sent to R1.")
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_static_rtes,
+ protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show configure summaries.")
+
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Configure new static route which is matching configured summary.")
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK_11["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # step("verify that summary lsa is not refreshed.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ step("Delete one of the static route.")
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK_11["ipv6"],
+ "next_hop": "blackhole",
+ "delete": True
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # step("verify that summary lsa is not refreshed.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ # step("Verify that deleted static route is removed from ospf LSDB.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ step(
+ "Configure redistribute connected and configure ospf external"
+ " summary address to summarise the connected routes.")
+
+ dut = 'r0'
+ red_connected(dut)
+ clear_ospf(tgen, dut, ospf='ospf6')
+
+ ip = topo['routers']['r0']['links']['r3']['ipv6']
+
+ ip_net = str(ipaddress.ip_interface(u'{}'.format(ip)).network)
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": ip_net.split('/')[0],
+ "mask": "8"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured "
+ "summary address on R0 and only one route is sent to R1.")
+
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": "fd00::/64"}]
+ }
+ }
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Shut one of the interface")
+ intf = topo['routers']['r0']['links']['r3-link0']['interface']
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ # step("verify that summary lsa is not refreshed.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ # step("Verify that deleted connected route is removed from ospf LSDB.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ step("Un do shut the interface")
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ # step("verify that summary lsa is not refreshed.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ # step("Verify that deleted connected route is removed from ospf LSDB.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ step("Delete OSPF process.")
+ ospf_del = {
+ "r0": {
+ "ospf6": {
+ "delete": True
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_del)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+ step("Reconfigure ospf process with summary")
+ reset_config_on_routers(tgen)
+
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ },
+ {
+ "network": NETWORK2["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ dut = 'r0'
+ red_static(dut)
+ red_connected(dut)
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 and only one route is sent to R1.")
+
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # step("verify that summary lsa is not refreshed.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ step("Delete the redistribute command in ospf.")
+ dut = 'r0'
+ red_connected(dut, config=False)
+ red_static(dut, config=False)
+
+ step("Verify that summary route is withdrawn from the peer.")
+
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32",
+ "metric": "1234"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_type5_summary_tc46_p0(request):
+ """OSPF summarisation with advertise and no advertise option"""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ step("Configure OSPF on all the routers of the topology.")
+ reset_config_on_routers(tgen)
+
+ protocol = 'ospf'
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ "5 static routes from different networks and redistribute in R0")
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ },
+ {
+ "network": NETWORK2["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ dut = 'r0'
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_static_rtes, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5"
+ " routes to one route with no advertise option.")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32",
+ "advertise": False
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary"
+ " address on R0 and summary route is not advertised to neighbor as"
+ " no advertise is configured..")
+
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary,
+ protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ step(
+ "Verify that show ip ospf summary should show the "
+ "configured summaries.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Delete the configured summary")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32",
+ "delete": True
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Summary has 5 sec delay timer, sleep 5 secs...")
+ sleep(5)
+
+ step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Summary Route still present in RIB".format(tc_name)
+
+ step("show ip ospf summary should not have any summary address.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 1234,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6', expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Summary still present in DB".format(tc_name)
+
+ step("Reconfigure summary with no advertise.")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32",
+ "advertise": False
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary"
+ " address on R0 and summary route is not advertised to neighbor as"
+ " no advertise is configured..")
+
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary,
+ protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ step(
+ "Verify that show ip ospf summary should show the "
+ "configured summaries.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Change summary address from no advertise to advertise "
+ "(summary-address 10.0.0.0 255.255.0.0)")
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32",
+ "advertise": False
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1.")
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Verify that originally advertised routes are withdraw from there"
+ " peer.")
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes is present in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_type5_summary_tc48_p0(request):
+ """OSPF summarisation with route map modification of metric type."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ protocol = 'ospf'
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ "5 static routes from different networks and redistribute in R0")
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ },
+ {
+ "network": NETWORK2["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ dut = 'r0'
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_static_rtes, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5"
+ " routes to one route.")
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1.")
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Verify that originally advertised routes are withdraw from there"
+ " peer.")
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ step(
+ "Configure route map and & rule to permit configured summary address,"
+ " redistribute static & connected routes with the route map.")
+ step("Configure prefixlist to permit the static routes, add to route map.")
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {
+ "seqid": 10,
+ "network": "any",
+ "action": "permit"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [{
+ "action": "permit",
+ "seq_id": '1',
+ "match": {
+ "ipv6": {
+ "prefix_lists":
+ "pf_list_1_ipv6"
+ }
+ }
+ }]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ ospf_red_r1 = {
+ "r0": {
+ "ospf6": {
+ "redistribute": [{
+ "redist_type": "static",
+ "route_map": "rmap_ipv6"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured"
+ "summary address on R0 and only one route is sent to R1. Verify that "
+ "show ip ospf summary should show the configure summaries.")
+
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Configure metric type as 1 in route map.")
+
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [{
+ "seq_id": '1',
+ "action": "permit",
+ "set":{
+ "metric-type": "type-1"
+ }
+ }]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes(static / connected) are summarised"
+ " to configured summary address with metric type 2.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Un configure metric type from route map.")
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [{
+ "action": "permit",
+ "seq_id": '1',
+ "set":{
+ "metric-type": "type-1",
+ "delete": True
+ }
+ }]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes(static / connected) are summarised"
+ " to configured summary address with metric type 2.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Change rule from permit to deny in prefix list.")
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {
+ "seqid": 10,
+ "network": "any",
+ "action": "deny"
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that previously originated summary lsa "
+ "is withdrawn from the neighbor.")
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ step("summary route has delay of 5 secs, wait for 5 secs")
+
+ sleep(5)
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_type5_summary_tc51_p2(request):
+ """OSPF CLI Show.
+
+ Verify OSPF ASBR summary config and show command behaviours.
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step("Configure all the supported OSPF ASBR summary commands on DUT.")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32",
+ "tag": 4294967295
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "16",
+ "advertise": True
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "24",
+ "advertise": False
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "24",
+ "advertise": False
+ },
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+ step("Configure and re configure all the commands 10 times in a loop.")
+
+ for itrate in range(0,10):
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "8",
+ "tag": 4294967295
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "16",
+ "advertise": True
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "24",
+ "advertise": False
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "24",
+ "advertise": False
+ },
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "8",
+ "tag": 4294967295,
+ "delete": True
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "16",
+ "advertise": True,
+ "delete": True
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "24",
+ "advertise": False,
+ "delete": True
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "24",
+ "advertise": False,
+ "delete": True
+ },
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step("Verify the show commands")
+
+ input_dict = {
+ SUMMARY["ipv6"][3]: {
+ "Summary address": SUMMARY["ipv6"][3],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 0
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_type5_summary_tc49_p2(request):
+ """OSPF summarisation Chaos."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ protocol = 'ospf'
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ "5 static routes from different networks and redistribute in R0")
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ },
+ {
+ "network": NETWORK2["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ dut = 'r0'
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_static_rtes, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5"
+ " routes to one route.")
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [{
+ "prefix": SUMMARY["ipv6"][0].split('/')[0],
+ "mask": "32"
+ }]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1.")
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Verify that originally advertised routes are withdraw from there"
+ " peer.")
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ step('Reload the FRR router')
+ # stop/start -> restart FRR router and verify
+ stop_router(tgen, 'r0')
+ start_router(tgen, 'r0')
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1.")
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Verify that originally advertised routes are withdraw from there"
+ " peer.")
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ step("Kill OSPF6d daemon on R0.")
+ kill_router_daemons(tgen, "r0", ["ospf6d"])
+
+ step("Bring up OSPF6d daemon on R0.")
+ start_router_daemons(tgen, "r0", ["ospf6d"])
+
+ step("Verify OSPF neighbors are up after bringing back ospf6d in R0")
+ # API call to verify whether OSPF has converged
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, ("setup_module :Failed \n Error:"
+ " {}".format(ospf_covergence))
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1.")
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Verify that originally advertised routes are withdraw from there"
+ " peer.")
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ step("restart zebrad")
+ kill_router_daemons(tgen, "r0", ["zebra"])
+
+ step("Bring up zebra daemon on R0.")
+ start_router_daemons(tgen, "r0", ["zebra"])
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1.")
+ input_dict_summary = {
+ "r0": {
+ "static_routes": [{"network": SUMMARY["ipv6"][0]}]
+ }
+ }
+ dut = 'r1'
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut,
+ input_dict_summary, protocol=protocol)
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5
+ }
+ }
+ dut = 'r0'
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
+ assert result is True, "Testcase {} : Failed" \
+ "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Verify that originally advertised routes are withdraw from there"
+ " peer.")
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": "blackhole"
+ }
+ ]
+ }
+ }
+ dut = 'r1'
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: "\
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
+ expected=False)
+ assert result is not True, "Testcase {} : Failed" \
+ "Error: Routes still present in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py
new file mode 100644
index 0000000000..50c5144b3f
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp.py
@@ -0,0 +1,523 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""OSPF Basic Functionality Automation."""
+import os
+import sys
+import time
+import pytest
+import json
+from copy import deepcopy
+from ipaddress import IPv4Address
+from lib.topotest import frr_unicode
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib.topogen import Topogen, get_topogen
+import ipaddress
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ create_static_routes,
+ step,
+ create_route_maps,
+ shutdown_bringup_interface,
+ create_interfaces_cfg,
+ topo_daemons,
+ get_frr_ipv6_linklocal,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+from lib.ospf import (
+ verify_ospf6_neighbor,
+ config_ospf_interface,
+ clear_ospf,
+ verify_ospf6_rib,
+ create_router_ospf,
+ verify_ospf6_interface,
+ verify_ospf6_database,
+ config_ospf6_interface,
+)
+
+from ipaddress import IPv6Address
+
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
+# Global variables
+topo = None
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/ospfv3_ecmp.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": ["2::1/128", "2::2/128", "2::3/128", "2::4/128", "2::5/128"],
+}
+"""
+TOPOLOGY :
+ Please view in a fixed-width font such as Courier.
+ +---+ A1 +---+
+ +R1 +------------+R2 |
+ +-+-+- +--++
+ | -- -- |
+ | -- A0 -- |
+ A0| ---- |
+ | ---- | A2
+ | -- -- |
+ | -- -- |
+ +-+-+- +-+-+
+ +R0 +-------------+R3 |
+ +---+ A3 +---+
+
+TESTCASES :
+1. Verify OSPF ECMP with max path configured as 8 (ECMP configured at FRR level)
+2. Verify OSPF ECMP with max path configured as 2 (Edge having 2 uplink ports)
+ """
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+def red_static(dut, config=True):
+ """Local def for Redstribute static routes inside ospf."""
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "static"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf6": {"redistribute": [{"redist_type": "static", "delete": True}]}
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+
+def red_connected(dut, config=True):
+ """Local def for Redstribute connected routes inside ospf."""
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "connected"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf6": {
+ "redistribute": [{"redist_type": "connected", "del_action": True}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase: Failed \n Error: {}".format(result)
+
+
+def get_llip(onrouter, intf):
+ """
+ API to get the link local ipv6 address of a particular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_llip('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) link local ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ tgen = get_topogen()
+ intf = topo["routers"][onrouter]["links"][intf]["interface"]
+ llip = get_frr_ipv6_linklocal(tgen, onrouter, intf)
+ if llip:
+ logger.info("llip ipv6 address to be set as NH is %s", llip)
+ return llip
+ return None
+
+
+def get_glipv6(onrouter, intf):
+ """
+ API to get the global ipv6 address of a particular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which global ipv6 address needs to be returned.
+
+ Usage
+ -----
+ result = get_glipv6('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) global ipv6 address from the interface.
+ 2) errormsg - when global ipv6 address not found.
+ """
+ glipv6 = (topo["routers"][onrouter]["links"][intf]["ipv6"]).split("/")[0]
+ if glipv6:
+ logger.info("Global ipv6 address to be set as NH is %s", glipv6)
+ return glipv6
+ return None
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_ospfv3_ecmp_tc16_p0(request):
+ """
+ Verify OSPF ECMP.
+
+ Verify OSPF ECMP with max path configured as 8 (ECMP
+ configured at FRR level)
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ step("Configure 8 interfaces between R1 and R2 and enable ospf in area 0.")
+
+ reset_config_on_routers(tgen)
+
+ step("Verify that OSPF is up with 8 neighborship sessions.")
+ dut = "r1"
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("Configure a static route in R0 and redistribute in OSPF.")
+
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ red_static(dut)
+
+ llip = get_llip("r0", "r1-link1")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that route in R2 in stalled with 8 next hops.")
+ nh = []
+ for item in range(1, 7):
+ nh.append(llip)
+
+ llip = get_llip("r0", "r1")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ nh2 = llip
+
+ nh.append(nh2)
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("shut no shut all the interfaces on the remote router - R2")
+ dut = "r1"
+ for intfr in range(1, 7):
+ intf = topo["routers"]["r1"]["links"]["r0-link{}".format(intfr)]["interface"]
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route present in OSPF RIB. Error: {}".format(
+ tc_name, result
+ )
+
+ protocol = "ospf"
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route present in RIB. Error: {}".format(tc_name, result)
+
+ for intfr in range(1, 7):
+ intf = topo["routers"]["r1"]["links"]["r0-link{}".format(intfr)]["interface"]
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("shut no shut on all the interfaces on DUT (r1)")
+ for intfr in range(1, 7):
+ intf = topo["routers"]["r1"]["links"]["r0-link{}".format(intfr)]["interface"]
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ for intfr in range(1, 7):
+ intf = topo["routers"]["r1"]["links"]["r0-link{}".format(intfr)]["interface"]
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ step(
+ "Verify that all the neighbours are up and routes are installed"
+ " with 8 next hop in ospf and ip route tables on R1."
+ )
+
+ step("Verify that OSPF is up with 8 neighborship sessions.")
+ dut = "r1"
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_ecmp_tc17_p0(request):
+ """
+ Verify OSPF ECMP.
+
+ Verify OSPF ECMP with max path configured as 2 (Edge having 2 uplink ports)
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ step("Configure 2 interfaces between R1 and R2 & enable ospf in area 0.")
+
+ reset_config_on_routers(tgen)
+
+ step("Verify that OSPF is up with 2 neighborship sessions.")
+ dut = "r1"
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("Configure a static route in R0 and redistribute in OSPF.")
+
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ red_static(dut)
+
+ step("Verify that route in R2 in stalled with 2 next hops.")
+
+ llip = get_llip("r0", "r1-link1")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ nh1 = llip
+
+ llip = get_llip("r0", "r1")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ nh2 = llip
+
+ nh = [nh1, nh2]
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure ECMP value as 1.")
+ max_path = {"r1": {"ospf6": {"maximum-paths": 1}}}
+ result = create_router_ospf(tgen, topo, max_path)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r1"
+ max_path = {"r1": {"ospf6": {"maximum-paths": 2}}}
+ result = create_router_ospf(tgen, topo, max_path)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure cost on R0 as 100")
+ r0_ospf_cost = {"r0": {"links": {"r1": {"ospf6": {"cost": 100}}}}}
+ result = config_ospf6_interface(tgen, topo, r0_ospf_cost)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py
new file mode 100644
index 0000000000..d8cf3bd02d
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py
@@ -0,0 +1,875 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""OSPF Basic Functionality Automation."""
+import os
+import sys
+import time
+import pytest
+import json
+from copy import deepcopy
+from ipaddress import IPv4Address
+from lib.topotest import frr_unicode
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib.topogen import Topogen, get_topogen
+import ipaddress
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ create_prefix_lists,
+ verify_rib,
+ create_static_routes,
+ step,
+ create_route_maps,
+ verify_prefix_lists,
+ get_frr_ipv6_linklocal,
+ topo_daemons,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+from lib.ospf import (
+ verify_ospf6_neighbor,
+ config_ospf_interface,
+ clear_ospf,
+ verify_ospf6_rib,
+ create_router_ospf,
+ verify_ospf6_interface,
+ verify_ospf6_database,
+ config_ospf6_interface,
+)
+
+from ipaddress import IPv6Address
+
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
+# Global variables
+topo = None
+
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/ospfv3_routemaps.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": ["2::1/128", "2::2/128", "2::3/128", "2::4/128", "2::5/128"],
+}
+
+routerids = ["100.1.1.0", "100.1.1.1", "100.1.1.2", "100.1.1.3"]
+
+"""
+TOPOLOGY =
+ Please view in a fixed-width font such as Courier.
+ +---+ A1 +---+
+ +R1 +------------+R2 |
+ +-+-+- +--++
+ | -- -- |
+ | -- A0 -- |
+ A0| ---- |
+ | ---- | A2
+ | -- -- |
+ | -- -- |
+ +-+-+- +-+-+
+ +R0 +-------------+R3 |
+ +---+ A3 +---+
+
+TESTCASES =
+2. Verify OSPF route map support functionality when route map is not
+ configured at system level but configured in OSPF
+4. Verify OSPF route map support functionality
+ when route map actions are toggled.
+5. Verify OSPF route map support functionality with multiple sequence
+ numbers in a single route-map for different match/set clauses.
+6. Verify OSPF route map support functionality when we add/remove route-maps
+ with multiple set clauses and without any match statement.(Set only)
+7. Verify OSPF route map support functionality when we
+ add/remove route-maps with multiple match clauses and without
+ any set statement.(Match only)
+8. Verify OSPF route map applied to ospf redistribution with ipv6 prefix list
+ """
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_ospfv3_routemaps_functionality_tc20_p0(request):
+ """
+ OSPF route map support functionality.
+
+ Verify OSPF route map support functionality when route map is not
+ configured at system level but configured in OSPF
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+
+ reset_config_on_routers(tgen)
+
+ step("Create static routes(10.0.20.1/32 and 10.0.20.2/32) in R0")
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Redistribute to ospf using route map ( non existent route map)")
+ ospf_red_r1 = {
+ "r0": {
+ "ospf6": {
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that routes are not allowed in OSPF even tough no "
+ "matching routing map is configured."
+ )
+
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "configure the route map with the same name that is used "
+ "in the ospf with deny rule."
+ )
+
+ # Create route map
+ routemaps = {"r0": {"route_maps": {"rmap_ipv6": [{"action": "deny"}]}}}
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that now route map is activated & routes are denied in OSPF.")
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ # Create route map
+ routemaps = {"r0": {"route_maps": {"rmap_ipv6": [{"action": "deny"}]}}}
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that now route map is activated & routes are denied in OSPF.")
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ step("Delete the route map.")
+ # Create route map
+ routemaps = {
+ "r0": {"route_maps": {"rmap_ipv6": [{"action": "deny", "delete": True}]}}
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that routes are allowed in OSPF even tough "
+ "no matching routing map is configured."
+ )
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_routemaps_functionality_tc25_p0(request):
+ """
+ OSPF route map support functionality.
+
+ Verify OSPF route map support functionality
+ when route map actions are toggled.
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+
+ reset_config_on_routers(tgen)
+
+ step(
+ "Create static routes(10.0.20.1/32) in R1 and redistribute "
+ "to OSPF using route map."
+ )
+
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_red_r0 = {
+ "r0": {
+ "ospf6": {
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step("Configure route map with permit rule")
+ # Create route map
+ routemaps = {"r0": {"route_maps": {"rmap_ipv6": [{"action": "permit"}]}}}
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that route is advertised to R1.")
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step("Configure route map with deny rule")
+ # Create route map
+ routemaps = {
+ "r0": {"route_maps": {"rmap_ipv6": [{"seq_id": 10, "action": "deny"}]}}
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # API call to verify whether OSPF is converged
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("Verify that route is not advertised to R1.")
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_routemaps_functionality_tc22_p0(request):
+ """
+ OSPF Route map - Multiple sequence numbers.
+
+ Verify OSPF route map support functionality with multiple sequence
+ numbers in a single route-map for different match/set clauses.
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+
+ reset_config_on_routers(tgen)
+
+ step(
+ "Configure route map with seq number 10 to with ip prefix"
+ " permitting route 10.0.20.1/32 in R1"
+ )
+ step(
+ "Configure route map with seq number 20 to with ip prefix"
+ " permitting route 10.0.20.2/32 in R1"
+ )
+
+ # Create route map
+ input_dict_3 = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv6": {"prefix_lists": "pf_list_1_ipv6"}},
+ },
+ {
+ "action": "permit",
+ "seq_id": "20",
+ "match": {"ipv6": {"prefix_lists": "pf_list_2_ipv4"}},
+ },
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv6": [
+ {"seqid": 10, "network": NETWORK["ipv6"][0], "action": "permit"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_2_ipv4": [
+ {"seqid": 10, "network": NETWORK["ipv6"][1], "action": "permit"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure static routes 10.0.20.1/32 and 10.0.20.2 in R1")
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure redistribute static route with route map.")
+ ospf_red_r0 = {
+ "r0": {
+ "ospf6": {
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 2,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that both routes are learned in R1 and R2")
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r2"
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Change route map with seq number 20 to deny.")
+ # Create route map
+ input_dict_3 = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
+ "action": "deny",
+ "seq_id": "20",
+ "match": {"ipv6": {"prefix_lists": "pf_list_2_ipv4"}},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify the route 10.0.20.2/32 is withdrawn and not present "
+ "in the routing table of R0 and R1."
+ )
+
+ input_dict = {
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"][1], "next_hop": "Null0"}]}
+ }
+
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ dut = "r2"
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_routemaps_functionality_tc24_p0(request):
+ """
+ OSPF Route map - Multiple match clauses.
+
+ Verify OSPF route map support functionality when we
+ add/remove route-maps with multiple match clauses and without
+ any set statement.(Match only)
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+
+ reset_config_on_routers(tgen)
+
+ step(
+ "Create static routes(10.0.20.1/32) in R1 and redistribute to "
+ "OSPF using route map."
+ )
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 1,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_red_r0 = {
+ "r0": {
+ "ospf6": {
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that prefix-list is created in R0.")
+ result = verify_prefix_lists(tgen, pfx_list)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Prefix list not " "present. Error: {}".format(
+ tc_name, result
+ )
+
+ # Create route map
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
+ "action": "permit",
+ "match": {"ipv6": {"prefix_lists": "pf_list_1_ipv6"}},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that metric falls back to original metric for ospf routes.")
+ dut = "r1"
+ protocol = "ospf"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Create static routes(10.0.20.1/32) in R1 and redistribute to "
+ "OSPF using route map."
+ )
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][1],
+ "no_of_ip": 1,
+ "next_hop": "Null0",
+ "tag": 1000,
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that prefix-list is created in R0.")
+ result = verify_prefix_lists(tgen, pfx_list)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Prefix list not " "present. Error: {}".format(
+ tc_name, result
+ )
+
+ # Create route map
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [{"action": "permit", "match": {"ipv6": {"tag": "1000"}}}]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that metric falls back to original metric for ospf routes.")
+ dut = "r1"
+ protocol = "ospf"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Delete the match clause with tag in route map")
+ # Create route map
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
+ "action": "permit",
+ "match": {"ipv6": {"tag": "1000", "delete": True}},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that metric falls back to original metric for ospf routes.")
+ dut = "r1"
+ protocol = "ospf"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Delete the match clause with metric in route map.")
+
+ # Create route map
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
+ "action": "permit",
+ "match": {"ipv6": {"prefix_lists": "pf_list_1_ipv6"}},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
index 4aa71bfb16..860f17ba67 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
@@ -74,6 +74,9 @@ from lib.ospf import (
from ipaddress import IPv6Address
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
@@ -281,6 +284,233 @@ def red_connected(dut, config=True):
# ##################################
# Test cases start here.
# ##################################
+def test_ospfv3_redistribution_tc5_p0(request):
+ """Test OSPF intra area route calculations."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config.")
+ reset_config_on_routers(tgen)
+
+ step("Verify that OSPF neighbors are FULL.")
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("verify intra area route is calculated for r0-r3 interface ip in R1")
+ ip = topo["routers"]["r0"]["links"]["r3"]["ipv6"]
+ ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+
+ llip = get_llip("r0", "r1")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, llip)
+
+ nh = llip
+ input_dict = {
+ "r1": {"static_routes": [{"network": ip_net, "no_of_ip": 1, "routeType": "N"}]}
+ }
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Delete the ip address on newly configured loopback of R0")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r3": {
+ "ipv6": topo["routers"]["r0"]["links"]["r3"]["ipv6"],
+ "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route present in RIB. Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route present in RIB. Error: {}".format(tc_name, result)
+
+ step("Add back the deleted ip address on newly configured interface of R0")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r3": {
+ "ipv6": topo["routers"]["r0"]["links"]["r3"]["ipv6"],
+ "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Shut no shut interface on R0")
+ dut = "r0"
+ intf = topo["routers"]["r0"]["links"]["r3"]["interface"]
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("un shut the OSPF interface on R0")
+ dut = "r0"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_redistribution_tc6_p0(request):
+ """Test OSPF inter area route calculations."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config.")
+ reset_config_on_routers(tgen)
+
+ step("Verify that OSPF neighbors are FULL.")
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("verify intra area route is calculated for r0-r3 interface ip in R1")
+ ip = topo["routers"]["r0"]["links"]["r3"]["ipv6"]
+ ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+ llip = get_llip("r0", "r1")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, llip)
+ nh = llip
+ input_dict = {
+ "r1": {"static_routes": [{"network": ip_net, "no_of_ip": 1, "routeType": "N"}]}
+ }
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Delete the ip address on newly configured loopback of R0")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r3": {
+ "ipv6": topo["routers"]["r0"]["links"]["r3"]["ipv6"],
+ "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route present in RIB. Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route present in RIB. Error: {}".format(tc_name, result)
+
+ step("Add back the deleted ip address on newly configured interface of R0")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r3": {
+ "ipv6": topo["routers"]["r0"]["links"]["r3"]["ipv6"],
+ "interface": topo["routers"]["r0"]["links"]["r3"]["interface"],
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Shut no shut interface on R0")
+ dut = "r0"
+ intf = topo["routers"]["r0"]["links"]["r3"]["interface"]
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ step("Verify that intraroute calculated for R1 intf on R0 is deleted.")
+ dut = "r1"
+
+ step("un shut the OSPF interface on R0")
+ dut = "r0"
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
def test_ospfv3_cost_tc52_p0(request):
"""OSPF Cost - verifying ospf interface cost functionality"""
tc_name = request.node.name
@@ -368,7 +598,6 @@ def test_ospfv3_cost_tc52_p0(request):
write_test_footer(tc_name)
-
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
index a84f1a1eb6..0c1c51c78a 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
@@ -54,7 +54,7 @@ from lib.common_config import (
create_route_maps,
shutdown_bringup_interface,
create_interfaces_cfg,
- topo_daemons,
+ topo_daemons
)
from lib.topolog import logger
from lib.topojson import build_topo_from_json, build_config_from_json
@@ -72,6 +72,9 @@ from lib.ospf import (
from ipaddress import IPv6Address
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
# Global variables
topo = None
diff --git a/tests/topotests/pim_acl/r1/pimd.conf b/tests/topotests/pim_acl/r1/pimd.conf
index 72d28c9b02..a148c73146 100644
--- a/tests/topotests/pim_acl/r1/pimd.conf
+++ b/tests/topotests/pim_acl/r1/pimd.conf
@@ -13,6 +13,7 @@ ip pim rp 192.168.0.12 prefix-list rp-pl-2
ip pim rp 192.168.0.13 prefix-list rp-pl-3
ip pim rp 192.168.0.14 prefix-list rp-pl-4
ip pim rp 192.168.0.15 prefix-list rp-pl-5
+ip pim join-prune-interval 5
!
interface r1-eth0
ip igmp
diff --git a/tests/topotests/pim_acl/r11/pimd.conf b/tests/topotests/pim_acl/r11/pimd.conf
index 05cd5ac911..b1d45205da 100644
--- a/tests/topotests/pim_acl/r11/pimd.conf
+++ b/tests/topotests/pim_acl/r11/pimd.conf
@@ -7,6 +7,7 @@ debug pim zebra
debug pim bsm
!
ip pim rp 192.168.0.11 239.100.0.0/28
+ip pim join-prune-interval 5
!
interface lo
ip pim
diff --git a/tests/topotests/pim_acl/r12/pimd.conf b/tests/topotests/pim_acl/r12/pimd.conf
index cedde73c59..ba9e7d902f 100644
--- a/tests/topotests/pim_acl/r12/pimd.conf
+++ b/tests/topotests/pim_acl/r12/pimd.conf
@@ -7,6 +7,7 @@ debug pim zebra
debug pim bsm
!
ip pim rp 192.168.0.12 239.100.0.17/32
+ip pim join-prune-interval 5
!
interface lo
ip pim
diff --git a/tests/topotests/pim_acl/r13/pimd.conf b/tests/topotests/pim_acl/r13/pimd.conf
index 2dab0cabec..2ff1743574 100644
--- a/tests/topotests/pim_acl/r13/pimd.conf
+++ b/tests/topotests/pim_acl/r13/pimd.conf
@@ -7,6 +7,7 @@ debug pim zebra
debug pim bsm
!
ip pim rp 192.168.0.13 239.100.0.32/27
+ip pim join-prune-interval 5
!
interface lo
ip pim
diff --git a/tests/topotests/pim_acl/r14/pimd.conf b/tests/topotests/pim_acl/r14/pimd.conf
index c6b949af16..1324a9e40b 100644
--- a/tests/topotests/pim_acl/r14/pimd.conf
+++ b/tests/topotests/pim_acl/r14/pimd.conf
@@ -8,6 +8,7 @@ debug pim bsm
!
ip pim rp 192.168.0.14 239.100.0.96/28
ip pim rp 192.168.0.14 239.100.0.128/25
+ip pim join-prune-interval 5
!
interface lo
ip pim
diff --git a/tests/topotests/pim_acl/r15/pimd.conf b/tests/topotests/pim_acl/r15/pimd.conf
index 85c9c51e1e..f47e78c221 100644
--- a/tests/topotests/pim_acl/r15/pimd.conf
+++ b/tests/topotests/pim_acl/r15/pimd.conf
@@ -7,6 +7,7 @@ debug pim zebra
debug pim bsm
!
ip pim rp 192.168.0.15 239.100.0.64/28
+ip pim join-prune-interval 5
!
interface lo
ip pim
diff --git a/tests/topotests/pim_acl/test_pim_acl.py b/tests/topotests/pim_acl/test_pim_acl.py
index 848f7fa8ed..77917a0239 100755
--- a/tests/topotests/pim_acl/test_pim_acl.py
+++ b/tests/topotests/pim_acl/test_pim_acl.py
@@ -121,7 +121,7 @@ from lib.topolog import logger
# Required to instantiate the topology builder class.
from mininet.topo import Topo
-pytestmark = [pytest.mark.pimd]
+pytestmark = [pytest.mark.pimd, pytest.mark.ospfd]
#
diff --git a/tests/topotests/pim_basic/r1/pimd.conf b/tests/topotests/pim_basic/r1/pimd.conf
index f64a46deb3..737019fa51 100644
--- a/tests/topotests/pim_basic/r1/pimd.conf
+++ b/tests/topotests/pim_basic/r1/pimd.conf
@@ -15,3 +15,4 @@ interface lo
ip pim
!
ip pim rp 10.254.0.3
+ip pim join-prune-interval 5
diff --git a/tests/topotests/pim_basic/rp/pimd.conf b/tests/topotests/pim_basic/rp/pimd.conf
index 6e35c97971..fd26bc4d71 100644
--- a/tests/topotests/pim_basic/rp/pimd.conf
+++ b/tests/topotests/pim_basic/rp/pimd.conf
@@ -6,6 +6,7 @@ interface rp-eth0
interface lo
ip pim
!
+ip pim join-prune-interval 5
ip pim rp 10.254.0.3
ip pim register-accept-list ACCEPT
diff --git a/tests/topotests/pim_basic_topo2/r2/pimd.conf b/tests/topotests/pim_basic_topo2/r2/pimd.conf
index 0b32ded19a..9f389deb11 100644
--- a/tests/topotests/pim_basic_topo2/r2/pimd.conf
+++ b/tests/topotests/pim_basic_topo2/r2/pimd.conf
@@ -10,3 +10,4 @@ interface r2-eth2
ip pim
ip pim bfd
!
+ip pim join-prune-interval 5
diff --git a/tests/topotests/pim_igmp_vrf/r1/pimd.conf b/tests/topotests/pim_igmp_vrf/r1/pimd.conf
index 6ee264d3d0..f04c255de9 100644
--- a/tests/topotests/pim_igmp_vrf/r1/pimd.conf
+++ b/tests/topotests/pim_igmp_vrf/r1/pimd.conf
@@ -24,3 +24,4 @@ interface r1-eth2
interface r1-eth3
ip pim
!
+ip pim join-prune-interval 5
diff --git a/tests/topotests/pim_igmp_vrf/r11/pimd.conf b/tests/topotests/pim_igmp_vrf/r11/pimd.conf
index 05cd5ac911..b1d45205da 100644
--- a/tests/topotests/pim_igmp_vrf/r11/pimd.conf
+++ b/tests/topotests/pim_igmp_vrf/r11/pimd.conf
@@ -7,6 +7,7 @@ debug pim zebra
debug pim bsm
!
ip pim rp 192.168.0.11 239.100.0.0/28
+ip pim join-prune-interval 5
!
interface lo
ip pim
diff --git a/tests/topotests/pim_igmp_vrf/r12/pimd.conf b/tests/topotests/pim_igmp_vrf/r12/pimd.conf
index 531aec61ed..5cb76efa22 100644
--- a/tests/topotests/pim_igmp_vrf/r12/pimd.conf
+++ b/tests/topotests/pim_igmp_vrf/r12/pimd.conf
@@ -7,6 +7,7 @@ debug pim zebra
debug pim bsm
!
ip pim rp 192.168.0.12 239.100.0.0/28
+ip pim join-prune-interval 5
!
interface lo
ip pim
diff --git a/tests/topotests/pim_igmp_vrf/test_pim_vrf.py b/tests/topotests/pim_igmp_vrf/test_pim_vrf.py
index 298adef9c6..cb207cb810 100755
--- a/tests/topotests/pim_igmp_vrf/test_pim_vrf.py
+++ b/tests/topotests/pim_igmp_vrf/test_pim_vrf.py
@@ -111,7 +111,7 @@ from lib.common_config import (
# Required to instantiate the topology builder class.
from mininet.topo import Topo
-pytestmark = [pytest.mark.pimd]
+pytestmark = [pytest.mark.ospfd, pytest.mark.pimd]
#
diff --git a/tests/topotests/route_scale/test_route_scale.py b/tests/topotests/route_scale/test_route_scale.py
index bbd6ef8d60..469ad42d64 100644
--- a/tests/topotests/route_scale/test_route_scale.py
+++ b/tests/topotests/route_scale/test_route_scale.py
@@ -48,6 +48,9 @@ from lib.common_config import shutdown_bringup_interface
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.sharpd]
+
+
#####################################################
##
## Network Topology Definition
diff --git a/tests/topotests/simple_snmp_test/test_simple_snmp.py b/tests/topotests/simple_snmp_test/test_simple_snmp.py
index 5647e2b663..bdb44816b6 100755
--- a/tests/topotests/simple_snmp_test/test_simple_snmp.py
+++ b/tests/topotests/simple_snmp_test/test_simple_snmp.py
@@ -90,6 +90,16 @@ def setup_module(mod):
r1 = tgen.gears["r1"]
+ r1.run("ip addr add 192.168.12.12/24 dev r1-eth0")
+ r1.run("ip -6 addr add 2000:1:1:12::12/64 dev r1-eth0")
+ r1.run("ip addr add 192.168.13.13/24 dev r1-eth1")
+ r1.run("ip -6 addr add 2000:1:1:13::13/64 dev r1-eth1")
+ r1.run("ip addr add 192.168.14.14/24 dev r1-eth2")
+ r1.run("ip -6 addr add 2000:1:1:14::14/64 dev r1-eth2")
+ r1.run("ip addr add 1.1.1.1/32 dev lo")
+ r1.run("ip -6 addr add 2000:1:1:1::1/128 dev lo")
+ r1.run("ip addr show")
+
router_list = tgen.routers()
# For all registred routers, load the zebra configuration file
diff --git a/tests/topotests/srv6_locator/test_srv6_locator.py b/tests/topotests/srv6_locator/test_srv6_locator.py
index a7416ce085..04b0d8db97 100755
--- a/tests/topotests/srv6_locator/test_srv6_locator.py
+++ b/tests/topotests/srv6_locator/test_srv6_locator.py
@@ -43,6 +43,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd, pytest.mark.sharpd]
+
def open_json_file(filename):
try:
diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py
index 812b39797f..626de6b422 100644
--- a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py
+++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py
@@ -72,6 +72,9 @@ from lib.bgp import (
)
from lib.topojson import build_topo_from_json, build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
# Reading the data from JSON File for topology creation
jsonFile = "{}/static_routes_topo4_ebgp.json".format(CWD)
try:
diff --git a/tests/topotests/zebra_netlink/test_zebra_netlink.py b/tests/topotests/zebra_netlink/test_zebra_netlink.py
index 94baf8438f..cf08ee9639 100644
--- a/tests/topotests/zebra_netlink/test_zebra_netlink.py
+++ b/tests/topotests/zebra_netlink/test_zebra_netlink.py
@@ -47,6 +47,9 @@ from lib.common_config import shutdown_bringup_interface
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.sharpd]
+
+
#####################################################
##
## Network Topology Definition
diff --git a/tests/topotests/zebra_opaque/test_zebra_opaque.py b/tests/topotests/zebra_opaque/test_zebra_opaque.py
index cc52fbd1a7..2339b0f5b0 100644
--- a/tests/topotests/zebra_opaque/test_zebra_opaque.py
+++ b/tests/topotests/zebra_opaque/test_zebra_opaque.py
@@ -38,6 +38,8 @@ from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from mininet.topo import Topo
+pytestmark = [pytest.mark.bgpd]
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
diff --git a/tests/topotests/zebra_rib/test_zebra_rib.py b/tests/topotests/zebra_rib/test_zebra_rib.py
index 9fcf7b6820..56d112b7c3 100644
--- a/tests/topotests/zebra_rib/test_zebra_rib.py
+++ b/tests/topotests/zebra_rib/test_zebra_rib.py
@@ -46,6 +46,8 @@ from time import sleep
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+pytestmark = [pytest.mark.sharpd]
+
class ZebraTopo(Topo):
"Test topology builder"
@@ -124,7 +126,7 @@ def test_zebra_kernel_admin_distance():
"show ip route 4.5.{}.0 json".format(i),
expected,
)
- _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
assertmsg = '"r1" JSON output mismatches'
assert result is None, assertmsg
# tgen.mininet_cli()
@@ -145,7 +147,7 @@ def test_zebra_kernel_override():
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 4.5.1.0 json", expected
)
- _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
assert result is None, '"r1" JSON output mismatches'
logger.info(
@@ -158,7 +160,7 @@ def test_zebra_kernel_override():
test_func = partial(
topotest.router_json_cmp, r1, "show ip route 4.5.1.0 json", expected
)
- _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
assert result is None, '"r1" JSON output mismatches'
diff --git a/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py b/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py
index e83b2c1007..a83c6d6ec0 100755
--- a/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py
+++ b/tests/topotests/zebra_seg6_route/test_zebra_seg6_route.py
@@ -43,6 +43,8 @@ from lib.topolog import logger
from lib.common_config import shutdown_bringup_interface
from mininet.topo import Topo
+pytestmark = [pytest.mark.sharpd]
+
def open_json_file(filename):
try:
diff --git a/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py b/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py
index 1c9d208fef..6cdb77b94b 100755
--- a/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py
+++ b/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py
@@ -43,6 +43,8 @@ from lib.topolog import logger
from lib.common_config import shutdown_bringup_interface
from mininet.topo import Topo
+pytestmark = [pytest.mark.sharpd]
+
def open_json_file(filename):
try:
diff --git a/vrrpd/vrrp_vty.c b/vrrpd/vrrp_vty.c
index 7af9148a8e..1904e936cc 100644
--- a/vrrpd/vrrp_vty.c
+++ b/vrrpd/vrrp_vty.c
@@ -744,14 +744,6 @@ static int vrrp_config_write_interface(struct vty *vty)
return write;
}
-static struct cmd_node interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = vrrp_config_write_interface,
-};
-
static struct cmd_node debug_node = {
.name = "debug",
.node = DEBUG_NODE,
@@ -769,10 +761,9 @@ static struct cmd_node vrrp_node = {
void vrrp_vty_init(void)
{
install_node(&debug_node);
- install_node(&interface_node);
install_node(&vrrp_node);
vrf_cmd_init(NULL, &vrrp_privs);
- if_cmd_init();
+ if_cmd_init(vrrp_config_write_interface);
install_element(VIEW_NODE, &vrrp_vrid_show_cmd);
install_element(VIEW_NODE, &vrrp_vrid_show_summary_cmd);
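
The vrrpd hunk above (and the matching change in zebra/interface.c further down) replaces each daemon's private interface cmd_node with a call to the shared helper, passing the daemon's config-write callback to if_cmd_init(). A minimal sketch of the new registration pattern, assuming FRR's lib/if.h and lib/vty.h; the daemon and callback names here are illustrative only:

/* sketch: daemon-side interface CLI setup after this change */
static int mydaemon_if_config_write(struct vty *vty)
{
        /* walk interfaces and emit "interface IFNAME" stanzas */
        return 0;
}

void mydaemon_vty_init(void)
{
        /* lib/if.c now installs INTERFACE_NODE and wires the write callback */
        if_cmd_init(mydaemon_if_config_write);
}
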
diff --git a/yang/frr-pim.yang b/yang/frr-pim.yang
index f73776c1af..e846ffa1f8 100644
--- a/yang/frr-pim.yang
+++ b/yang/frr-pim.yang
@@ -529,7 +529,7 @@ module frr-pim {
}
leaf join-prune-interval {
type uint16 {
- range "60..600";
+ range "5..600";
}
default "60";
description
diff --git a/zebra/interface.c b/zebra/interface.c
index 408c016494..21eeb20543 100644
--- a/zebra/interface.c
+++ b/zebra/interface.c
@@ -1205,59 +1205,77 @@ void zebra_if_set_protodown(struct interface *ifp, bool down)
#endif
}
-/* Output prefix string to vty. */
-static int prefix_vty_out(struct vty *vty, struct prefix *p)
-{
- char str[INET6_ADDRSTRLEN];
-
- inet_ntop(p->family, &p->u.prefix, str, sizeof(str));
- vty_out(vty, "%s", str);
- return strlen(str);
-}
-
/* Dump if address information to vty. */
-static void connected_dump_vty(struct vty *vty, struct connected *connected)
+static void connected_dump_vty(struct vty *vty, json_object *json,
+ struct connected *connected)
{
struct prefix *p;
+ json_object *json_addr = NULL;
+ char buf[PREFIX2STR_BUFFER];
/* Print interface address. */
p = connected->address;
- vty_out(vty, " %s ", prefix_family_str(p));
- prefix_vty_out(vty, p);
- vty_out(vty, "/%d", p->prefixlen);
+
+ if (json) {
+ json_addr = json_object_new_object();
+ json_object_array_add(json, json_addr);
+ json_object_string_add(json_addr, "address",
+ prefix2str(p, buf, sizeof(buf)));
+ } else {
+ vty_out(vty, " %s %pFX", prefix_family_str(p), p);
+ }
/* If there is destination address, print it. */
if (CONNECTED_PEER(connected) && connected->destination) {
- vty_out(vty, " peer ");
- prefix_vty_out(vty, connected->destination);
- vty_out(vty, "/%d", connected->destination->prefixlen);
+ if (json) {
+ json_object_string_add(
+ json_addr, "peer",
+ prefix2str(connected->destination, buf,
+ sizeof(buf)));
+ } else {
+ vty_out(vty, " peer %pFX", connected->destination);
+ }
}
- if (CHECK_FLAG(connected->flags, ZEBRA_IFA_SECONDARY))
+ if (json)
+ json_object_boolean_add(
+ json_addr, "secondary",
+ CHECK_FLAG(connected->flags, ZEBRA_IFA_SECONDARY));
+ else if (CHECK_FLAG(connected->flags, ZEBRA_IFA_SECONDARY))
vty_out(vty, " secondary");
- if (CHECK_FLAG(connected->flags, ZEBRA_IFA_UNNUMBERED))
+ if (json)
+ json_object_boolean_add(
+ json_addr, "unnumbered",
+ CHECK_FLAG(connected->flags, ZEBRA_IFA_UNNUMBERED));
+ else if (CHECK_FLAG(connected->flags, ZEBRA_IFA_UNNUMBERED))
vty_out(vty, " unnumbered");
- if (connected->label)
- vty_out(vty, " %s", connected->label);
+ if (connected->label) {
+ if (json)
+ json_object_string_add(json_addr, "label",
+ connected->label);
+ else
+ vty_out(vty, " %s", connected->label);
+ }
- vty_out(vty, "\n");
+ if (!json)
+ vty_out(vty, "\n");
}
/* Dump interface neighbor address information to vty. */
-static void nbr_connected_dump_vty(struct vty *vty,
+static void nbr_connected_dump_vty(struct vty *vty, json_object *json,
struct nbr_connected *connected)
{
struct prefix *p;
+ char buf[PREFIX2STR_BUFFER];
/* Print interface address. */
p = connected->address;
- vty_out(vty, " %s ", prefix_family_str(p));
- prefix_vty_out(vty, p);
- vty_out(vty, "/%d", p->prefixlen);
-
- vty_out(vty, "\n");
+ if (json)
+ json_array_string_add(json, prefix2str(p, buf, sizeof(buf)));
+ else
+ vty_out(vty, " %s %pFX\n", prefix_family_str(p), p);
}
static const char *zebra_zifslavetype_2str(zebra_slave_iftype_t zif_slave_type)
@@ -1413,6 +1431,43 @@ static void ifs_dump_brief_vty(struct vty *vty, struct vrf *vrf)
vty_out(vty, "\n");
}
+static void ifs_dump_brief_vty_json(json_object *json, struct vrf *vrf)
+{
+ struct connected *connected;
+ struct listnode *node;
+ struct interface *ifp;
+
+ FOR_ALL_INTERFACES (vrf, ifp) {
+ json_object *json_if;
+ json_object *json_addrs;
+
+ json_if = json_object_new_object();
+ json_object_object_add(json, ifp->name, json_if);
+
+ json_object_string_add(json_if, "status",
+ if_is_up(ifp) ? "up" : "down");
+ json_object_string_add(json_if, "vrfName", vrf->name);
+
+ json_addrs = json_object_new_array();
+ json_object_object_add(json_if, "addresses", json_addrs);
+ for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, connected)) {
+ if (CHECK_FLAG(connected->conf, ZEBRA_IFC_REAL)
+ && !CHECK_FLAG(connected->flags,
+ ZEBRA_IFA_SECONDARY)
+ && !(connected->address->family == AF_INET6
+ && IN6_IS_ADDR_LINKLOCAL(
+ &connected->address->u.prefix6))) {
+ char buf[PREFIX2STR_BUFFER];
+
+ json_array_string_add(
+ json_addrs,
+ prefix2str(connected->address, buf,
+ sizeof(buf)));
+ }
+ }
+ }
+}
+
const char *zebra_protodown_rc_str(enum protodown_reasons protodown_rc,
char *pd_buf, uint32_t pd_buf_len)
{
@@ -1483,7 +1538,7 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp)
vty_out(vty, " Link downs: %5u last: %s\n", zebra_if->down_count,
zebra_if->down_last[0] ? zebra_if->down_last : "(never)");
- zebra_ptm_show_status(vty, ifp);
+ zebra_ptm_show_status(vty, NULL, ifp);
vrf = vrf_lookup_by_id(ifp->vrf_id);
vty_out(vty, " vrf: %s\n", vrf->name);
@@ -1531,13 +1586,13 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp)
for (ALL_LIST_ELEMENTS_RO((struct list *)rn->info, node,
connected))
- connected_dump_vty(vty, connected);
+ connected_dump_vty(vty, NULL, connected);
}
for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, connected)) {
if (CHECK_FLAG(connected->conf, ZEBRA_IFC_REAL)
&& (connected->address->family == AF_INET6))
- connected_dump_vty(vty, connected);
+ connected_dump_vty(vty, NULL, connected);
}
vty_out(vty, " Interface Type %s\n",
@@ -1637,7 +1692,7 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp)
if (zebra_if->flags & ZIF_FLAG_LACP_BYPASS)
vty_out(vty, " LACP bypass: on\n");
- zebra_evpn_if_es_print(vty, zebra_if);
+ zebra_evpn_if_es_print(vty, NULL, zebra_if);
vty_out(vty, " protodown: %s %s\n",
(zebra_if->flags & ZIF_FLAG_PROTODOWN) ? "on" : "off",
if_is_protodown_applicable(ifp) ? "" : "(n/a)");
@@ -1716,7 +1771,7 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp)
if (listhead(ifp->nbr_connected))
vty_out(vty, " Neighbor address(s):\n");
for (ALL_LIST_ELEMENTS_RO(ifp->nbr_connected, node, nbr_connected))
- nbr_connected_dump_vty(vty, nbr_connected);
+ nbr_connected_dump_vty(vty, NULL, nbr_connected);
#ifdef HAVE_PROC_NET_DEV
/* Statistics print out using proc file system. */
@@ -1774,6 +1829,382 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp)
#endif /* HAVE_NET_RT_IFLIST */
}
+static void if_dump_vty_json(struct vty *vty, struct interface *ifp,
+ json_object *json)
+{
+ struct connected *connected;
+ struct nbr_connected *nbr_connected;
+ struct listnode *node;
+ struct route_node *rn;
+ struct zebra_if *zebra_if;
+ struct vrf *vrf;
+ char pd_buf[ZEBRA_PROTODOWN_RC_STR_LEN];
+ char buf[BUFSIZ];
+ json_object *json_if;
+ json_object *json_addrs;
+
+ json_if = json_object_new_object();
+ json_object_object_add(json, ifp->name, json_if);
+
+ if (if_is_up(ifp)) {
+ json_object_string_add(json_if, "administrativeStatus", "up");
+
+ if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_LINKDETECTION)) {
+ json_object_string_add(json_if, "operationalStatus",
+ if_is_running(ifp) ? "up"
+ : "down");
+ json_object_boolean_add(json_if, "linkDetection", true);
+ } else {
+ json_object_boolean_add(json_if, "linkDetection",
+ false);
+ }
+ } else {
+ json_object_string_add(json_if, "administrativeStatus", "down");
+ }
+
+ zebra_if = ifp->info;
+
+ json_object_int_add(json_if, "linkUps", zebra_if->up_count);
+ json_object_int_add(json_if, "linkDowns", zebra_if->down_count);
+ if (zebra_if->up_last[0])
+ json_object_string_add(json_if, "lastLinkUp",
+ zebra_if->up_last);
+ if (zebra_if->down_last[0])
+ json_object_string_add(json_if, "lastLinkDown",
+ zebra_if->down_last);
+
+ zebra_ptm_show_status(vty, json, ifp);
+
+ vrf = vrf_lookup_by_id(ifp->vrf_id);
+ json_object_string_add(json_if, "vrfName", vrf->name);
+
+ if (ifp->desc)
+ json_object_string_add(json_if, "description", ifp->desc);
+ if (zebra_if->desc)
+ json_object_string_add(json_if, "OsDescription",
+ zebra_if->desc);
+
+ if (ifp->ifindex == IFINDEX_INTERNAL) {
+ json_object_boolean_add(json_if, "pseudoInterface", true);
+ return;
+ } else if (!CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)) {
+ json_object_int_add(json_if, "index", ifp->ifindex);
+ return;
+ }
+
+ json_object_boolean_add(json_if, "pseudoInterface", false);
+ json_object_int_add(json_if, "index", ifp->ifindex);
+ json_object_int_add(json_if, "metric", ifp->metric);
+ json_object_int_add(json_if, "mtu", ifp->mtu);
+ if (ifp->mtu6 != ifp->mtu)
+ json_object_int_add(json_if, "mtu6", ifp->mtu6);
+ json_object_int_add(json_if, "speed", ifp->speed);
+ json_object_string_add(json_if, "flags", if_flag_dump(ifp->flags));
+
+ /* Hardware address. */
+ json_object_string_add(json_if, "type", if_link_type_str(ifp->ll_type));
+ if (ifp->hw_addr_len != 0) {
+ char hwbuf[BUFSIZ];
+
+ hwbuf[0] = '\0';
+ for (int i = 0; i < ifp->hw_addr_len; i++) {
+ snprintf(buf, sizeof(buf), "%s%02x", i == 0 ? "" : ":",
+ ifp->hw_addr[i]);
+ strlcat(hwbuf, buf, sizeof(hwbuf));
+ }
+ json_object_string_add(json_if, "hardwareAddress", hwbuf);
+ }
+
+ /* Bandwidth in Mbps */
+ if (ifp->bandwidth != 0)
+ json_object_int_add(json_if, "bandwidth", ifp->bandwidth);
+
+
+ /* IP addresses. */
+ json_addrs = json_object_new_array();
+ json_object_object_add(json_if, "ipAddresses", json_addrs);
+
+ for (rn = route_top(zebra_if->ipv4_subnets); rn; rn = route_next(rn)) {
+ if (!rn->info)
+ continue;
+
+ for (ALL_LIST_ELEMENTS_RO((struct list *)rn->info, node,
+ connected))
+ connected_dump_vty(vty, json_addrs, connected);
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, connected)) {
+ if (CHECK_FLAG(connected->conf, ZEBRA_IFC_REAL)
+ && (connected->address->family == AF_INET6))
+ connected_dump_vty(vty, json_addrs, connected);
+ }
+
+ json_object_string_add(json_if, "interfaceType",
+ zebra_ziftype_2str(zebra_if->zif_type));
+ json_object_string_add(
+ json_if, "interfaceSlaveType",
+ zebra_zifslavetype_2str(zebra_if->zif_slave_type));
+
+ if (IS_ZEBRA_IF_BRIDGE(ifp)) {
+ struct zebra_l2info_bridge *bridge_info;
+
+ bridge_info = &zebra_if->l2info.br;
+ json_object_boolean_add(json_if, "bridgeVlanAware",
+ bridge_info->vlan_aware);
+ } else if (IS_ZEBRA_IF_VLAN(ifp)) {
+ struct zebra_l2info_vlan *vlan_info;
+
+ vlan_info = &zebra_if->l2info.vl;
+ json_object_int_add(json_if, "vlanId", vlan_info->vid);
+ } else if (IS_ZEBRA_IF_VXLAN(ifp)) {
+ struct zebra_l2info_vxlan *vxlan_info;
+
+ vxlan_info = &zebra_if->l2info.vxl;
+ json_object_int_add(json_if, "vxlanId", vxlan_info->vni);
+ if (vxlan_info->vtep_ip.s_addr != INADDR_ANY)
+ json_object_string_add(json_if, "vtepIp",
+ inet_ntop(AF_INET,
+ &vxlan_info->vtep_ip,
+ buf, sizeof(buf)));
+ if (vxlan_info->access_vlan)
+ json_object_int_add(json_if, "accessVlanId",
+ vxlan_info->access_vlan);
+ if (vxlan_info->mcast_grp.s_addr != INADDR_ANY)
+ json_object_string_add(json_if, "mcastGroup",
+ inet_ntop(AF_INET,
+ &vxlan_info->mcast_grp,
+ buf, sizeof(buf)));
+ if (vxlan_info->ifindex_link
+ && (vxlan_info->link_nsid != NS_UNKNOWN)) {
+ struct interface *ifp;
+
+ ifp = if_lookup_by_index_per_ns(
+ zebra_ns_lookup(vxlan_info->link_nsid),
+ vxlan_info->ifindex_link);
+ json_object_string_add(json_if, "linkInterface",
+ ifp == NULL ? "Unknown"
+ : ifp->name);
+ }
+ } else if (IS_ZEBRA_IF_GRE(ifp)) {
+ struct zebra_l2info_gre *gre_info;
+
+ gre_info = &zebra_if->l2info.gre;
+ if (gre_info->vtep_ip.s_addr != INADDR_ANY) {
+ json_object_string_add(json_if, "vtepIp",
+ inet_ntop(AF_INET,
+ &gre_info->vtep_ip,
+ buf, sizeof(buf)));
+ if (gre_info->vtep_ip_remote.s_addr != INADDR_ANY)
+ json_object_string_add(
+ json_if, "vtepRemoteIp",
+ inet_ntop(AF_INET,
+ &gre_info->vtep_ip_remote,
+ buf, sizeof(buf)));
+ }
+ if (gre_info->ifindex_link
+ && (gre_info->link_nsid != NS_UNKNOWN)) {
+ struct interface *ifp;
+
+ ifp = if_lookup_by_index_per_ns(
+ zebra_ns_lookup(gre_info->link_nsid),
+ gre_info->ifindex_link);
+ json_object_string_add(json_if, "linkInterface",
+ ifp == NULL ? "Unknown"
+ : ifp->name);
+ }
+ }
+
+ if (IS_ZEBRA_IF_BRIDGE_SLAVE(ifp)) {
+ struct zebra_l2info_brslave *br_slave;
+
+ br_slave = &zebra_if->brslave_info;
+ if (br_slave->bridge_ifindex != IFINDEX_INTERNAL) {
+ if (br_slave->br_if)
+ json_object_string_add(json_if,
+ "masterInterface",
+ br_slave->br_if->name);
+ else
+ json_object_int_add(json_if, "masterIfindex",
+ br_slave->bridge_ifindex);
+ }
+ }
+
+ if (IS_ZEBRA_IF_BOND_SLAVE(ifp)) {
+ struct zebra_l2info_bondslave *bond_slave;
+
+ bond_slave = &zebra_if->bondslave_info;
+ if (bond_slave->bond_ifindex != IFINDEX_INTERNAL) {
+ if (bond_slave->bond_if)
+ json_object_string_add(
+ json_if, "masterInterface",
+ bond_slave->bond_if->name);
+ else
+ json_object_int_add(json_if, "masterIfindex",
+ bond_slave->bond_ifindex);
+ }
+ }
+
+ json_object_boolean_add(
+ json_if, "lacpBypass",
+ CHECK_FLAG(zebra_if->flags, ZIF_FLAG_LACP_BYPASS));
+
+ zebra_evpn_if_es_print(vty, json_if, zebra_if);
+
+ if (if_is_protodown_applicable(ifp)) {
+ json_object_string_add(
+ json_if, "protodown",
+ (zebra_if->flags & ZIF_FLAG_PROTODOWN) ? "on" : "off");
+ if (zebra_if->protodown_rc)
+ json_object_string_add(
+ json_if, "protodownReason",
+ zebra_protodown_rc_str(zebra_if->protodown_rc,
+ pd_buf, sizeof(pd_buf)));
+ }
+
+ if (zebra_if->link_ifindex != IFINDEX_INTERNAL) {
+ if (zebra_if->link)
+ json_object_string_add(json_if, "parentInterface",
+ zebra_if->link->name);
+ else
+ json_object_int_add(json_if, "parentIfindex",
+ zebra_if->link_ifindex);
+ }
+
+ if (HAS_LINK_PARAMS(ifp)) {
+ struct if_link_params *iflp = ifp->link_params;
+ json_object *json_te;
+
+ json_te = json_object_new_object();
+ json_object_object_add(
+ json_if, "trafficEngineeringLinkParameters", json_te);
+
+ if (IS_PARAM_SET(iflp, LP_TE_METRIC))
+ json_object_int_add(json_te, "teMetric",
+ iflp->te_metric);
+ if (IS_PARAM_SET(iflp, LP_MAX_BW))
+ json_object_double_add(json_te, "maximumBandwidth",
+ iflp->max_bw);
+ if (IS_PARAM_SET(iflp, LP_MAX_RSV_BW))
+ json_object_double_add(json_te,
+ "maximumReservableBandwidth",
+ iflp->max_rsv_bw);
+ if (IS_PARAM_SET(iflp, LP_UNRSV_BW)) {
+ json_object *json_bws;
+
+ json_bws = json_object_new_object();
+ json_object_object_add(json_te, "unreservedBandwidth",
+ json_bws);
+ for (unsigned int i = 0; i < MAX_CLASS_TYPE; ++i) {
+ char buf_ct[64];
+
+ snprintf(buf_ct, sizeof(buf_ct), "classType%u",
+ i);
+ json_object_double_add(json_bws, buf_ct,
+ iflp->unrsv_bw[i]);
+ }
+ }
+
+ if (IS_PARAM_SET(iflp, LP_ADM_GRP))
+ json_object_int_add(json_te, "administrativeGroup",
+ iflp->admin_grp);
+ if (IS_PARAM_SET(iflp, LP_DELAY)) {
+ json_object_int_add(json_te, "linkDelayAverage",
+ iflp->av_delay);
+ if (IS_PARAM_SET(iflp, LP_MM_DELAY)) {
+ json_object_int_add(json_te, "linkDelayMinimum",
+ iflp->min_delay);
+ json_object_int_add(json_te, "linkDelayMaximum",
+ iflp->max_delay);
+ }
+ }
+ if (IS_PARAM_SET(iflp, LP_DELAY_VAR))
+ json_object_int_add(json_te, "linkDelayVariation",
+ iflp->delay_var);
+ if (IS_PARAM_SET(iflp, LP_PKT_LOSS))
+ json_object_double_add(json_te, "linkPacketLoss",
+ iflp->pkt_loss);
+ if (IS_PARAM_SET(iflp, LP_AVA_BW))
+ json_object_double_add(json_te, "availableBandwidth",
+ iflp->ava_bw);
+ if (IS_PARAM_SET(iflp, LP_RES_BW))
+ json_object_double_add(json_te, "residualBandwidth",
+ iflp->res_bw);
+ if (IS_PARAM_SET(iflp, LP_USE_BW))
+ json_object_double_add(json_te, "utilizedBandwidth",
+ iflp->use_bw);
+ if (IS_PARAM_SET(iflp, LP_RMT_AS))
+ json_object_string_add(json_te, "neighborAsbrIp",
+ inet_ntop(AF_INET, &iflp->rmt_ip,
+ buf, sizeof(buf)));
+ json_object_int_add(json_te, "neighborAsbrAs", iflp->rmt_as);
+ }
+
+ if (listhead(ifp->nbr_connected)) {
+ json_object *json_nbr_addrs;
+
+ json_nbr_addrs = json_object_new_array();
+ json_object_object_add(json_if, "neighborIpAddresses",
+ json_nbr_addrs);
+
+ for (ALL_LIST_ELEMENTS_RO(ifp->nbr_connected, node,
+ nbr_connected))
+ nbr_connected_dump_vty(vty, json_nbr_addrs,
+ nbr_connected);
+ }
+
+#ifdef HAVE_PROC_NET_DEV
+ json_object_int_add(json_if, "inputPackets", stats.rx_packets);
+ json_object_int_add(json_if, "inputBytes", ifp->stats.rx_bytes);
+ json_object_int_add(json_if, "inputDropped", ifp->stats.rx_dropped);
+ json_object_int_add(json_if, "inputMulticastPackets",
+ ifp->stats.rx_multicast);
+ json_object_int_add(json_if, "inputErrors", ifp->stats.rx_errors);
+ json_object_int_add(json_if, "inputLengthErrors",
+ ifp->stats.rx_length_errors);
+ json_object_int_add(json_if, "inputOverrunErrors",
+ ifp->stats.rx_over_errors);
+ json_object_int_add(json_if, "inputCrcErrors",
+ ifp->stats.rx_crc_errors);
+ json_object_int_add(json_if, "inputFrameErrors",
+ ifp->stats.rx_frame_errors);
+ json_object_int_add(json_if, "inputFifoErrors",
+ ifp->stats.rx_fifo_errors);
+ json_object_int_add(json_if, "inputMissedErrors",
+ ifp->stats.rx_missed_errors);
+ json_object_int_add(json_if, "outputPackets", ifp->stats.tx_packets);
+ json_object_int_add(json_if, "outputBytes", ifp->stats.tx_bytes);
+ json_object_int_add(json_if, "outputDroppedPackets",
+ ifp->stats.tx_dropped);
+ json_object_int_add(json_if, "outputErrors", ifp->stats.tx_errors);
+ json_object_int_add(json_if, "outputAbortedErrors",
+ ifp->stats.tx_aborted_errors);
+ json_object_int_add(json_if, "outputCarrierErrors",
+ ifp->stats.tx_carrier_errors);
+ json_object_int_add(json_if, "outputFifoErrors",
+ ifp->stats.tx_fifo_errors);
+ json_object_int_add(json_if, "outputHeartbeatErrors",
+ ifp->stats.tx_heartbeat_errors);
+ json_object_int_add(json_if, "outputWindowErrors",
+ ifp->stats.tx_window_errors);
+ json_object_int_add(json_if, "collisions", ifp->stats.collisions);
+#endif /* HAVE_PROC_NET_DEV */
+
+#ifdef HAVE_NET_RT_IFLIST
+ json_object_int_add(json_if, "inputPackets", ifp->stats.ifi_ipackets);
+ json_object_int_add(json_if, "inputBytes", ifp->stats.ifi_ibytes);
+ json_object_int_add(json_if, "inputDropd", ifp->stats.ifi_iqdrops);
+ json_object_int_add(json_if, "inputMulticastPackets",
+ ifp->stats.ifi_imcasts);
+ json_object_int_add(json_if, "inputErrors", ifp->stats.ifi_ierrors);
+ json_object_int_add(json_if, "outputPackets", ifp->stats.ifi_opackets);
+ json_object_int_add(json_if, "outputBytes", ifp->stats.ifi_obytes);
+ json_object_int_add(json_if, "outputMulticastPackets",
+ ifp->stats.ifi_omcasts);
+ json_object_int_add(json_if, "outputErrors", ifp->stats.ifi_oerrors);
+ json_object_int_add(json_if, "collisions", ifp->stats.ifi_collisions);
+#endif /* HAVE_NET_RT_IFLIST */
+}
+
static void interface_update_stats(void)
{
#ifdef HAVE_PROC_NET_DEV
@@ -1786,45 +2217,57 @@ static void interface_update_stats(void)
#endif /* HAVE_NET_RT_IFLIST */
}
-static int if_config_write(struct vty *vty);
-struct cmd_node interface_node = {
- .name = "interface",
- .node = INTERFACE_NODE,
- .parent_node = CONFIG_NODE,
- .prompt = "%s(config-if)# ",
- .config_write = if_config_write,
-};
-
#ifndef VTYSH_EXTRACT_PL
#include "zebra/interface_clippy.c"
#endif
/* Show all interfaces to vty. */
DEFPY(show_interface, show_interface_cmd,
- "show interface vrf NAME$vrf_name [brief$brief]",
+ "show interface vrf NAME$vrf_name [brief$brief] [json$uj]",
SHOW_STR
"Interface status and configuration\n"
VRF_CMD_HELP_STR
- "Interface status and configuration summary\n")
+ "Interface status and configuration summary\n"
+ JSON_STR)
{
struct vrf *vrf;
struct interface *ifp;
+ json_object *json = NULL;
interface_update_stats();
vrf = vrf_lookup_by_name(vrf_name);
if (!vrf) {
- vty_out(vty, "%% VRF %s not found\n", vrf_name);
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty, "%% VRF %s not found\n", vrf_name);
return CMD_WARNING;
}
+ if (uj)
+ json = json_object_new_object();
+
if (brief) {
- ifs_dump_brief_vty(vty, vrf);
+ if (json)
+ ifs_dump_brief_vty_json(json, vrf);
+ else
+ ifs_dump_brief_vty(vty, vrf);
} else {
FOR_ALL_INTERFACES (vrf, ifp) {
- if_dump_vty(vty, ifp);
+ if (json)
+ if_dump_vty_json(vty, ifp, json);
+ else
+ if_dump_vty(vty, ifp);
}
}
+ if (json) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+
return CMD_SUCCESS;
}
@@ -1832,85 +2275,140 @@ DEFPY(show_interface, show_interface_cmd,
/* Show all interfaces to vty. */
DEFPY (show_interface_vrf_all,
show_interface_vrf_all_cmd,
- "show interface [vrf all] [brief$brief]",
+ "show interface [vrf all] [brief$brief] [json$uj]",
SHOW_STR
"Interface status and configuration\n"
VRF_ALL_CMD_HELP_STR
- "Interface status and configuration summary\n")
+ "Interface status and configuration summary\n"
+ JSON_STR)
{
struct vrf *vrf;
struct interface *ifp;
+ json_object *json = NULL;
interface_update_stats();
+ if (uj)
+ json = json_object_new_object();
+
/* All interface print. */
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
if (brief) {
- ifs_dump_brief_vty(vty, vrf);
+ if (json)
+ ifs_dump_brief_vty_json(json, vrf);
+ else
+ ifs_dump_brief_vty(vty, vrf);
} else {
- FOR_ALL_INTERFACES (vrf, ifp)
- if_dump_vty(vty, ifp);
+ FOR_ALL_INTERFACES (vrf, ifp) {
+ if (json)
+ if_dump_vty_json(vty, ifp, json);
+ else
+ if_dump_vty(vty, ifp);
+ }
}
}
+ if (json) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+
return CMD_SUCCESS;
}
/* Show specified interface to vty. */
-DEFUN (show_interface_name_vrf,
+DEFPY (show_interface_name_vrf,
show_interface_name_vrf_cmd,
- "show interface IFNAME vrf NAME",
+ "show interface IFNAME$ifname vrf NAME$vrf_name [json$uj]",
SHOW_STR
"Interface status and configuration\n"
"Interface name\n"
- VRF_CMD_HELP_STR)
+ VRF_CMD_HELP_STR
+ JSON_STR)
{
- int idx_ifname = 2;
- int idx_name = 4;
struct interface *ifp;
struct vrf *vrf;
+ json_object *json = NULL;
interface_update_stats();
- vrf = vrf_lookup_by_name(argv[idx_name]->arg);
+ vrf = vrf_lookup_by_name(vrf_name);
if (!vrf) {
- vty_out(vty, "%% VRF %s not found\n", argv[idx_name]->arg);
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty, "%% VRF %s not found\n", vrf_name);
return CMD_WARNING;
}
- ifp = if_lookup_by_name_vrf(argv[idx_ifname]->arg, vrf);
+ ifp = if_lookup_by_name_vrf(ifname, vrf);
if (ifp == NULL) {
- vty_out(vty, "%% Can't find interface %s\n",
- argv[idx_ifname]->arg);
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty, "%% Can't find interface %s\n", ifname);
return CMD_WARNING;
}
- if_dump_vty(vty, ifp);
+
+ if (uj)
+ json = json_object_new_object();
+
+ if (json)
+ if_dump_vty_json(vty, ifp, json);
+ else
+ if_dump_vty(vty, ifp);
+
+ if (json) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
return CMD_SUCCESS;
}
/* Show specified interface to vty. */
-DEFUN (show_interface_name_vrf_all,
+DEFPY (show_interface_name_vrf_all,
show_interface_name_vrf_all_cmd,
- "show interface IFNAME [vrf all]",
+ "show interface IFNAME$ifname [vrf all] [json$uj]",
SHOW_STR
"Interface status and configuration\n"
"Interface name\n"
- VRF_ALL_CMD_HELP_STR)
+ VRF_ALL_CMD_HELP_STR
+ JSON_STR)
{
- int idx_ifname = 2;
struct interface *ifp;
+ json_object *json = NULL;
interface_update_stats();
- ifp = if_lookup_by_name_all_vrf(argv[idx_ifname]->arg);
+ ifp = if_lookup_by_name_all_vrf(ifname);
if (ifp == NULL) {
- vty_out(vty, "%% Can't find interface %s\n",
- argv[idx_ifname]->arg);
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty, "%% Can't find interface %s\n", ifname);
return CMD_WARNING;
}
- if_dump_vty(vty, ifp);
+
+ if (uj)
+ json = json_object_new_object();
+
+ if (json)
+ if_dump_vty_json(vty, ifp, json);
+ else
+ if_dump_vty(vty, ifp);
+
+ if (json) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
return CMD_SUCCESS;
}
@@ -3703,9 +4201,8 @@ void zebra_if_init(void)
hook_register_prio(if_del, 0, if_zebra_delete_hook);
/* Install configuration write function. */
- install_node(&interface_node);
+ if_cmd_init(if_config_write);
install_node(&link_params_node);
- if_cmd_init();
/*
* This is *intentionally* setting this to NULL, signaling
* that interface creation for zebra acts differently
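
The new [json$uj] paths added to zebra/interface.c above all follow the same json-c lifecycle: allocate a root object, hang per-interface sub-objects off it, serialize once with JSON_C_TO_STRING_PRETTY, then free the root. A standalone sketch of that lifecycle using plain json-c (printf stands in for vty_out; the interface name and key values are illustrative):

#include <stdio.h>
#include <json-c/json.h>

int main(void)
{
        json_object *json = json_object_new_object();
        json_object *json_if = json_object_new_object();

        /* root object keyed by interface name, as in if_dump_vty_json() */
        json_object_object_add(json, "eth0", json_if);
        json_object_object_add(json_if, "administrativeStatus",
                               json_object_new_string("up"));
        json_object_object_add(json_if, "mtu", json_object_new_int(1500));

        /* serialize once, then drop the whole tree (sub-objects included) */
        printf("%s\n", json_object_to_json_string_ext(
                               json, JSON_C_TO_STRING_PRETTY));
        json_object_put(json);
        return 0;
}
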
diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c
index 8b631a3726..011883649d 100644
--- a/zebra/kernel_netlink.c
+++ b/zebra/kernel_netlink.c
@@ -550,6 +550,12 @@ bool nl_attr_put(struct nlmsghdr *n, unsigned int maxlen, int type,
return true;
}
+bool nl_attr_put8(struct nlmsghdr *n, unsigned int maxlen, int type,
+ uint8_t data)
+{
+ return nl_attr_put(n, maxlen, type, &data, sizeof(uint8_t));
+}
+
bool nl_attr_put16(struct nlmsghdr *n, unsigned int maxlen, int type,
uint16_t data)
{
diff --git a/zebra/kernel_netlink.h b/zebra/kernel_netlink.h
index a7b152b31b..d8e5671b72 100644
--- a/zebra/kernel_netlink.h
+++ b/zebra/kernel_netlink.h
@@ -38,6 +38,8 @@ extern "C" {
*/
extern bool nl_attr_put(struct nlmsghdr *n, unsigned int maxlen, int type,
const void *data, unsigned int alen);
+extern bool nl_attr_put8(struct nlmsghdr *n, unsigned int maxlen, int type,
+ uint8_t data);
extern bool nl_attr_put16(struct nlmsghdr *n, unsigned int maxlen, int type,
uint16_t data);
extern bool nl_attr_put32(struct nlmsghdr *n, unsigned int maxlen, int type,
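
nl_attr_put8() introduced above is a thin wrapper over nl_attr_put(), which appends a routing attribute (struct rtattr) to the tail of a netlink message. A minimal, standalone sketch of that append using the Linux netlink macros; FRR's real helper additionally verifies the result fits within maxlen before writing, which this sketch omits:

#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* sketch: append one attribute of alen bytes to message n */
void attr_append(struct nlmsghdr *n, int type,
                 const void *data, unsigned int alen)
{
        struct rtattr *rta;

        rta = (struct rtattr *)((char *)n + NLMSG_ALIGN(n->nlmsg_len));
        rta->rta_type = type;
        rta->rta_len = RTA_LENGTH(alen);
        memcpy(RTA_DATA(rta), data, alen);
        n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

With this in place, the uint8_t convenience wrapper reduces to a call with &data and sizeof(uint8_t), which is how FRA_IP_PROTO is attached in rule_netlink.c below.
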
diff --git a/zebra/redistribute.c b/zebra/redistribute.c
index 89f46f9c97..26f6d404e9 100644
--- a/zebra/redistribute.c
+++ b/zebra/redistribute.c
@@ -153,10 +153,16 @@ static bool zebra_redistribute_check(const struct route_entry *re,
struct zserv *client,
const struct prefix *p, int afi)
{
+ struct zebra_vrf *zvrf;
+
/* Process only if there is valid re */
if (!re)
return false;
+ zvrf = vrf_info_lookup(re->vrf_id);
+ if (re->vrf_id == VRF_DEFAULT && zvrf->table_id != re->table)
+ return false;
+
/* If default route and redistributed */
if (is_default_prefix(p)
&& vrf_bitmap_check(client->redist_default[afi], re->vrf_id))
diff --git a/zebra/rule_netlink.c b/zebra/rule_netlink.c
index 08a675ef3a..b651edd8f9 100644
--- a/zebra/rule_netlink.c
+++ b/zebra/rule_netlink.c
@@ -58,12 +58,11 @@
* Returns -1 on failure, 0 when the msg doesn't fit entirely in the buffer
* or the number of bytes written to buf.
*/
-static ssize_t
-netlink_rule_msg_encode(int cmd, const struct zebra_dplane_ctx *ctx,
- uint32_t filter_bm, uint32_t priority, uint32_t table,
- const struct prefix *src_ip,
- const struct prefix *dst_ip, uint32_t fwmark,
- uint8_t dsfield, void *buf, size_t buflen)
+static ssize_t netlink_rule_msg_encode(
+ int cmd, const struct zebra_dplane_ctx *ctx, uint32_t filter_bm,
+ uint32_t priority, uint32_t table, const struct prefix *src_ip,
+ const struct prefix *dst_ip, uint32_t fwmark, uint8_t dsfield,
+ uint8_t ip_protocol, void *buf, size_t buflen)
{
uint8_t protocol = RTPROT_ZEBRA;
int family;
@@ -136,6 +135,10 @@ netlink_rule_msg_encode(int cmd, const struct zebra_dplane_ctx *ctx,
if (filter_bm & PBR_FILTER_DSFIELD)
req->frh.tos = dsfield;
+ /* protocol to match on */
+ if (filter_bm & PBR_FILTER_IP_PROTOCOL)
+ nl_attr_put8(&req->n, buflen, FRA_IP_PROTO, ip_protocol);
+
/* Route table to use to forward, if filter criteria matches. */
if (table < 256)
req->frh.table = table;
@@ -168,7 +171,8 @@ static ssize_t netlink_rule_msg_encoder(struct zebra_dplane_ctx *ctx, void *buf,
dplane_ctx_rule_get_table(ctx), dplane_ctx_rule_get_src_ip(ctx),
dplane_ctx_rule_get_dst_ip(ctx),
dplane_ctx_rule_get_fwmark(ctx),
- dplane_ctx_rule_get_dsfield(ctx), buf, buflen);
+ dplane_ctx_rule_get_dsfield(ctx),
+ dplane_ctx_rule_get_ipproto(ctx), buf, buflen);
}
static ssize_t netlink_oldrule_msg_encoder(struct zebra_dplane_ctx *ctx,
@@ -181,7 +185,8 @@ static ssize_t netlink_oldrule_msg_encoder(struct zebra_dplane_ctx *ctx,
dplane_ctx_rule_get_old_src_ip(ctx),
dplane_ctx_rule_get_old_dst_ip(ctx),
dplane_ctx_rule_get_old_fwmark(ctx),
- dplane_ctx_rule_get_old_dsfield(ctx), buf, buflen);
+ dplane_ctx_rule_get_old_dsfield(ctx),
+ dplane_ctx_rule_get_old_ipproto(ctx), buf, buflen);
}
/* Public functions */
@@ -236,6 +241,7 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
char *ifname;
struct zebra_pbr_rule rule = {};
uint8_t proto = 0;
+ uint8_t ip_proto = 0;
/* Basic validation followed by extracting attributes. */
if (h->nlmsg_type != RTM_NEWRULE && h->nlmsg_type != RTM_DELRULE)
@@ -312,6 +318,9 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
if (tb[FRA_PROTOCOL])
proto = *(uint8_t *)RTA_DATA(tb[FRA_PROTOCOL]);
+ if (tb[FRA_IP_PROTO])
+ ip_proto = *(uint8_t *)RTA_DATA(tb[FRA_IP_PROTO]);
+
ifname = (char *)RTA_DATA(tb[FRA_IFNAME]);
strlcpy(rule.ifname, ifname, sizeof(rule.ifname));
@@ -326,7 +335,7 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
ret = dplane_pbr_rule_delete(&rule);
zlog_debug(
- "%s: %s leftover rule: family %s IF %s Pref %u Src %pFX Dst %pFX Table %u",
+ "%s: %s leftover rule: family %s IF %s Pref %u Src %pFX Dst %pFX Table %u ip-proto: %u",
__func__,
((ret == ZEBRA_DPLANE_REQUEST_FAILURE)
? "Failed to remove"
@@ -334,7 +343,7 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
nl_family_to_str(frh->family), rule.ifname,
rule.rule.priority, &rule.rule.filter.src_ip,
&rule.rule.filter.dst_ip,
- rule.rule.action.table);
+ rule.rule.action.table, ip_proto);
}
/* TBD */
@@ -349,11 +358,12 @@ int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug(
- "Rx %s family %s IF %s Pref %u Src %pFX Dst %pFX Table %u",
+ "Rx %s family %s IF %s Pref %u Src %pFX Dst %pFX Table %u ip-proto: %u",
nl_msg_type_to_str(h->nlmsg_type),
nl_family_to_str(frh->family), rule.ifname,
rule.rule.priority, &rule.rule.filter.src_ip,
- &rule.rule.filter.dst_ip, rule.rule.action.table);
+ &rule.rule.filter.dst_ip, rule.rule.action.table,
+ ip_proto);
return kernel_pbr_rule_del(&rule);
}
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index a53e388062..27fb5d7c22 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -3107,6 +3107,8 @@ static void zread_vrf_label(ZAPI_HANDLER_ARGS)
}
zvrf->label[afi] = nlabel;
+ zvrf->label_proto[afi] = client->proto;
+
stream_failure:
return;
}
@@ -3129,6 +3131,7 @@ static inline void zread_rule(ZAPI_HANDLER_ARGS)
STREAM_GETL(s, zpr.rule.seq);
STREAM_GETL(s, zpr.rule.priority);
STREAM_GETL(s, zpr.rule.unique);
+ STREAM_GETC(s, zpr.rule.filter.ip_proto);
STREAM_GETC(s, zpr.rule.filter.src_ip.family);
STREAM_GETC(s, zpr.rule.filter.src_ip.prefixlen);
STREAM_GET(&zpr.rule.filter.src_ip.u.prefix, s,
@@ -3162,6 +3165,9 @@ static inline void zread_rule(ZAPI_HANDLER_ARGS)
if (zpr.rule.filter.dsfield)
zpr.rule.filter.filter_bm |= PBR_FILTER_DSFIELD;
+ if (zpr.rule.filter.ip_proto)
+ zpr.rule.filter.filter_bm |= PBR_FILTER_IP_PROTOCOL;
+
if (zpr.rule.filter.fwmark)
zpr.rule.filter.filter_bm |= PBR_FILTER_FWMARK;
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 1217ed915a..0760b2ebb3 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -259,6 +259,7 @@ struct dplane_ctx_rule {
uint8_t dsfield;
struct prefix src_ip;
struct prefix dst_ip;
+ uint8_t ip_proto;
char ifname[INTERFACE_NAMSIZ + 1];
};
@@ -1929,6 +1930,20 @@ uint32_t dplane_ctx_rule_get_old_fwmark(const struct zebra_dplane_ctx *ctx)
return ctx->u.rule.old.fwmark;
}
+uint8_t dplane_ctx_rule_get_ipproto(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.rule.new.ip_proto;
+}
+
+uint8_t dplane_ctx_rule_get_old_ipproto(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.rule.old.ip_proto;
+}
+
uint8_t dplane_ctx_rule_get_dsfield(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -2636,6 +2651,7 @@ static void dplane_ctx_rule_init_single(struct dplane_ctx_rule *dplane_rule,
dplane_rule->filter_bm = rule->rule.filter.filter_bm;
dplane_rule->fwmark = rule->rule.filter.fwmark;
dplane_rule->dsfield = rule->rule.filter.dsfield;
+ dplane_rule->ip_proto = rule->rule.filter.ip_proto;
prefix_copy(&(dplane_rule->dst_ip), &rule->rule.filter.dst_ip);
prefix_copy(&(dplane_rule->src_ip), &rule->rule.filter.src_ip);
strlcpy(dplane_rule->ifname, rule->ifname, INTERFACE_NAMSIZ);
@@ -2672,7 +2688,7 @@ static int dplane_ctx_rule_init(struct zebra_dplane_ctx *ctx,
ctx->zd_is_update = (op == DPLANE_OP_RULE_UPDATE);
ctx->zd_vrf_id = new_rule->vrf_id;
- memcpy(ctx->zd_ifname, new_rule->ifname, sizeof(new_rule->ifname));
+ strlcpy(ctx->zd_ifname, new_rule->ifname, sizeof(ctx->zd_ifname));
ctx->u.rule.sock = new_rule->sock;
ctx->u.rule.unique = new_rule->rule.unique;
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index e091655a48..5ec1bd5807 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -493,6 +493,8 @@ uint32_t dplane_ctx_rule_get_fwmark(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_rule_get_old_fwmark(const struct zebra_dplane_ctx *ctx);
uint8_t dplane_ctx_rule_get_dsfield(const struct zebra_dplane_ctx *ctx);
uint8_t dplane_ctx_rule_get_old_dsfield(const struct zebra_dplane_ctx *ctx);
+uint8_t dplane_ctx_rule_get_ipproto(const struct zebra_dplane_ctx *ctx);
+uint8_t dplane_ctx_rule_get_old_ipproto(const struct zebra_dplane_ctx *ctx);
const struct prefix *
dplane_ctx_rule_get_src_ip(const struct zebra_dplane_ctx *ctx);
const struct prefix *
diff --git a/zebra/zebra_evpn_mh.c b/zebra/zebra_evpn_mh.c
index 05947faf4f..f44b19b781 100644
--- a/zebra/zebra_evpn_mh.c
+++ b/zebra/zebra_evpn_mh.c
@@ -2781,41 +2781,76 @@ bool zebra_evpn_is_if_es_capable(struct zebra_if *zif)
return false;
}
-void zebra_evpn_if_es_print(struct vty *vty, struct zebra_if *zif)
+void zebra_evpn_if_es_print(struct vty *vty, json_object *json,
+ struct zebra_if *zif)
{
char buf[ETHER_ADDR_STRLEN];
- char mh_buf[80];
- bool vty_print = false;
char esi_buf[ESI_STR_LEN];
- mh_buf[0] = '\0';
- strlcat(mh_buf, " EVPN-MH:", sizeof(mh_buf));
- if (zif->es_info.lid || !is_zero_mac(&zif->es_info.sysmac)) {
- vty_print = true;
- snprintf(
- mh_buf + strlen(mh_buf),
- sizeof(mh_buf) - strlen(mh_buf),
- " ES id %u ES sysmac %s", zif->es_info.lid,
- prefix_mac2str(&zif->es_info.sysmac, buf, sizeof(buf)));
- } else if (memcmp(&zif->es_info.esi, zero_esi, sizeof(*zero_esi))) {
- vty_print = true;
- snprintf(mh_buf + strnlen(mh_buf, sizeof(mh_buf)),
- sizeof(mh_buf) - strnlen(mh_buf, sizeof(mh_buf)),
- " ES id %s",
- esi_to_str(&zif->es_info.esi, esi_buf,
- sizeof(esi_buf)));
- }
-
- if (zif->flags & ZIF_FLAG_EVPN_MH_UPLINK) {
- vty_print = true;
- if (zif->flags & ZIF_FLAG_EVPN_MH_UPLINK_OPER_UP)
- strlcat(mh_buf, " uplink (up)", sizeof(mh_buf));
- else
- strlcat(mh_buf, " uplink (down)", sizeof(mh_buf));
- }
+ if (json) {
+ json_object *json_evpn;
+
+ json_evpn = json_object_new_object();
+ json_object_object_add(json, "evpnMh", json_evpn);
- if (vty_print)
- vty_out(vty, "%s\n", mh_buf);
+ if (zif->es_info.lid || !is_zero_mac(&zif->es_info.sysmac)) {
+ json_object_int_add(json_evpn, "esId",
+ zif->es_info.lid);
+ json_object_string_add(
+ json_evpn, "esSysmac",
+ prefix_mac2str(&zif->es_info.sysmac, buf,
+ sizeof(buf)));
+ } else if (memcmp(&zif->es_info.esi, zero_esi,
+ sizeof(*zero_esi))) {
+ json_object_string_add(json_evpn, "esId",
+ esi_to_str(&zif->es_info.esi,
+ esi_buf,
+ sizeof(esi_buf)));
+ }
+
+ if (zif->flags & ZIF_FLAG_EVPN_MH_UPLINK)
+ json_object_string_add(
+ json_evpn, "uplink",
+ CHECK_FLAG(zif->flags,
+ ZIF_FLAG_EVPN_MH_UPLINK_OPER_UP)
+ ? "up"
+ : "down");
+ } else {
+ char mh_buf[80];
+ bool vty_print = false;
+
+ mh_buf[0] = '\0';
+ strlcat(mh_buf, " EVPN-MH:", sizeof(mh_buf));
+ if (zif->es_info.lid || !is_zero_mac(&zif->es_info.sysmac)) {
+ vty_print = true;
+ snprintf(mh_buf + strlen(mh_buf),
+ sizeof(mh_buf) - strlen(mh_buf),
+ " ES id %u ES sysmac %s", zif->es_info.lid,
+ prefix_mac2str(&zif->es_info.sysmac, buf,
+ sizeof(buf)));
+ } else if (memcmp(&zif->es_info.esi, zero_esi,
+ sizeof(*zero_esi))) {
+ vty_print = true;
+ snprintf(mh_buf + strnlen(mh_buf, sizeof(mh_buf)),
+ sizeof(mh_buf)
+ - strnlen(mh_buf, sizeof(mh_buf)),
+ " ES id %s",
+ esi_to_str(&zif->es_info.esi, esi_buf,
+ sizeof(esi_buf)));
+ }
+
+ if (zif->flags & ZIF_FLAG_EVPN_MH_UPLINK) {
+ vty_print = true;
+ if (zif->flags & ZIF_FLAG_EVPN_MH_UPLINK_OPER_UP)
+ strlcat(mh_buf, " uplink (up)", sizeof(mh_buf));
+ else
+ strlcat(mh_buf, " uplink (down)",
+ sizeof(mh_buf));
+ }
+
+ if (vty_print)
+ vty_out(vty, "%s\n", mh_buf);
+ }
}
static void zebra_evpn_local_mac_oper_state_change(struct zebra_evpn_es *es)
diff --git a/zebra/zebra_evpn_mh.h b/zebra/zebra_evpn_mh.h
index a828056f1f..cba536ea89 100644
--- a/zebra/zebra_evpn_mh.h
+++ b/zebra/zebra_evpn_mh.h
@@ -346,7 +346,8 @@ extern int zebra_evpn_mh_if_write(struct vty *vty, struct interface *ifp);
extern void zebra_evpn_acc_vl_show(struct vty *vty, bool uj);
extern void zebra_evpn_acc_vl_show_detail(struct vty *vty, bool uj);
extern void zebra_evpn_acc_vl_show_vid(struct vty *vty, bool uj, vlanid_t vid);
-extern void zebra_evpn_if_es_print(struct vty *vty, struct zebra_if *zif);
+extern void zebra_evpn_if_es_print(struct vty *vty, json_object *json,
+ struct zebra_if *zif);
extern void zebra_evpn_es_cleanup(void);
extern int zebra_evpn_mh_mac_holdtime_update(struct vty *vty,
uint32_t duration, bool set_default);
diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c
index 07a8288605..855e19dc45 100644
--- a/zebra/zebra_fpm.c
+++ b/zebra/zebra_fpm.c
@@ -1002,7 +1002,6 @@ static int zfpm_build_route_updates(void)
data_len = zfpm_encode_route(dest, re, (char *)data,
buf_end - data, &msg_type);
- assert(data_len);
if (data_len) {
hdr->msg_type = msg_type;
msg_len = fpm_data_len_to_msg_len(data_len);
@@ -1013,6 +1012,9 @@ static int zfpm_build_route_updates(void)
zfpm_g->stats.route_adds++;
else
zfpm_g->stats.route_dels++;
+ } else {
+ zlog_err("%s: Encoding Prefix: %pRN No valid nexthops",
+ __func__, dest->rnode);
}
}
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index a2d1513ce4..66d2d6b4ba 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -3932,6 +3932,40 @@ void zebra_mpls_cleanup_tables(struct zebra_vrf *zvrf)
}
/*
+ * When a vrf label is assigned and the client goes away
+ * we should clean up the vrf labels associated with
+ * that zclient.
+ */
+void zebra_mpls_client_cleanup_vrf_label(uint8_t proto)
+{
+ struct vrf *vrf;
+ struct zebra_vrf *def_zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
+
+ if (def_zvrf == NULL)
+ return;
+
+ RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
+ struct zebra_vrf *zvrf = vrf->info;
+ afi_t afi;
+
+ if (!zvrf)
+ continue;
+
+ for (afi = AFI_IP; afi < AFI_MAX; afi++) {
+ if (zvrf->label_proto[afi] == proto
+ && zvrf->label[afi] != MPLS_LABEL_NONE)
+ lsp_uninstall(def_zvrf, zvrf->label[afi]);
+
+ /*
+ * Cleanup data structures by fiat
+ */
+ zvrf->label_proto[afi] = 0;
+ zvrf->label[afi] = MPLS_LABEL_NONE;
+ }
+ }
+}
+
+/*
* Called upon process exiting, need to delete LSP forwarding
* entries from the kernel.
* NOTE: Currently supported only for default VRF.
diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h
index 7059d393ed..5195b2f14f 100644
--- a/zebra/zebra_mpls.h
+++ b/zebra/zebra_mpls.h
@@ -416,6 +416,12 @@ void zebra_mpls_init(void);
*/
void zebra_mpls_vty_init(void);
+/*
+ * When cleaning up a client connection ensure that there are no
+ * vrf labels that need cleaning up too
+ */
+void zebra_mpls_client_cleanup_vrf_label(uint8_t proto);
+
/* Inline functions. */
/*
diff --git a/zebra/zebra_netns_notify.c b/zebra/zebra_netns_notify.c
index 3e89df68fd..054015846f 100644
--- a/zebra/zebra_netns_notify.c
+++ b/zebra/zebra_netns_notify.c
@@ -149,8 +149,6 @@ static int zebra_ns_delete(char *name)
"NS notify : no VRF found using NS %s", name);
return 0;
}
- /* Clear configured flag and invoke delete. */
- UNSET_FLAG(vrf->status, VRF_CONFIGURED);
ns = (struct ns *)vrf->ns_ctxt;
/* the deletion order is the same
* as the one used when siging signal is received
diff --git a/zebra/zebra_pbr.c b/zebra/zebra_pbr.c
index 7bcd097371..3607110aa2 100644
--- a/zebra/zebra_pbr.c
+++ b/zebra/zebra_pbr.c
@@ -166,10 +166,8 @@ uint32_t zebra_pbr_rules_hash_key(const void *arg)
rule->rule.action.table,
prefix_hash_key(&rule->rule.filter.src_ip));
- if (rule->rule.filter.fwmark)
- key = jhash_2words(rule->rule.filter.fwmark, rule->vrf_id, key);
- else
- key = jhash_1word(rule->vrf_id, key);
+ key = jhash_3words(rule->rule.filter.fwmark, rule->vrf_id,
+ rule->rule.filter.ip_proto, key);
key = jhash(rule->ifname, strlen(rule->ifname), key);
@@ -207,6 +205,9 @@ bool zebra_pbr_rules_hash_equal(const void *arg1, const void *arg2)
if (r1->rule.filter.fwmark != r2->rule.filter.fwmark)
return false;
+ if (r1->rule.filter.ip_proto != r2->rule.filter.ip_proto)
+ return false;
+
if (!prefix_same(&r1->rule.filter.src_ip, &r2->rule.filter.src_ip))
return false;
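
The zebra_pbr.c hunk above folds fwmark, vrf_id and the new ip_proto into the hash key via jhash_3words() and adds the matching field to the equality test; any field compared in *_hash_equal must also feed *_hash_key, otherwise two rules the hash table should treat as equal can land in different buckets. A small self-contained illustration of that invariant with an ordinary hash combiner (not FRR's jhash; the struct and field names are shortened for the example):

#include <stdbool.h>
#include <stdint.h>

struct pbr_key {
        uint32_t fwmark;
        uint32_t vrf_id;
        uint8_t ip_proto;
};

/* every field tested in key_equal() below also feeds the hash */
uint32_t key_hash(const struct pbr_key *k)
{
        uint32_t h = 2166136261u;           /* FNV-1a style combiner */

        h = (h ^ k->fwmark) * 16777619u;
        h = (h ^ k->vrf_id) * 16777619u;
        h = (h ^ k->ip_proto) * 16777619u;
        return h;
}

bool key_equal(const struct pbr_key *a, const struct pbr_key *b)
{
        return a->fwmark == b->fwmark && a->vrf_id == b->vrf_id
               && a->ip_proto == b->ip_proto;
}
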
diff --git a/zebra/zebra_ptm.c b/zebra/zebra_ptm.c
index 37d9399054..7e9382518f 100644
--- a/zebra/zebra_ptm.c
+++ b/zebra/zebra_ptm.c
@@ -1092,14 +1092,20 @@ static const char *zebra_ptm_get_status_str(int status)
}
}
-void zebra_ptm_show_status(struct vty *vty, struct interface *ifp)
+void zebra_ptm_show_status(struct vty *vty, json_object *json,
+ struct interface *ifp)
{
- vty_out(vty, " PTM status: ");
- if (ifp->ptm_enable) {
- vty_out(vty, "%s\n", zebra_ptm_get_status_str(ifp->ptm_status));
- } else {
- vty_out(vty, "disabled\n");
- }
+ const char *status;
+
+ if (ifp->ptm_enable)
+ status = zebra_ptm_get_status_str(ifp->ptm_status);
+ else
+ status = "disabled";
+
+ if (json)
+ json_object_string_add(json, "ptmStatus", status);
+ else
+ vty_out(vty, " PTM status: %s\n", status);
}
void zebra_ptm_send_status_req(void)
@@ -1537,6 +1543,7 @@ int zebra_ptm_get_enable_state(void)
}
void zebra_ptm_show_status(struct vty *vty __attribute__((__unused__)),
+ json_object *json __attribute__((__unused__)),
struct interface *ifp __attribute__((__unused__)))
{
/* NOTHING */
diff --git a/zebra/zebra_ptm.h b/zebra/zebra_ptm.h
index e578a02a94..88c9bccb44 100644
--- a/zebra/zebra_ptm.h
+++ b/zebra/zebra_ptm.h
@@ -86,7 +86,8 @@ void zebra_ptm_bfd_client_register(ZAPI_HANDLER_ARGS);
void zebra_ptm_bfd_dst_replay(ZAPI_HANDLER_ARGS);
#endif /* HAVE_BFDD */
-void zebra_ptm_show_status(struct vty *vty, struct interface *ifp);
+void zebra_ptm_show_status(struct vty *vty, json_object *json,
+ struct interface *ifp);
void zebra_ptm_if_init(struct zebra_if *zebra_ifp);
void zebra_ptm_if_set_ptm_state(struct interface *ifp,
struct zebra_if *zebra_ifp);
diff --git a/zebra/zebra_routemap_nb_config.c b/zebra/zebra_routemap_nb_config.c
index 8f5660610f..5bcfb720e1 100644
--- a/zebra/zebra_routemap_nb_config.c
+++ b/zebra/zebra_routemap_nb_config.c
@@ -247,9 +247,7 @@ lib_route_map_entry_set_action_rmap_set_action_ipv4_src_address_modify(
struct nb_cb_modify_args *args)
{
struct routemap_hook_context *rhc;
- struct interface *pif = NULL;
const char *source;
- struct vrf *vrf;
struct prefix p;
int rv;
@@ -262,18 +260,6 @@ lib_route_map_entry_set_action_rmap_set_action_ipv4_src_address_modify(
yang_dnode_get_string(args->dnode, NULL));
return NB_ERR_VALIDATION;
}
-
- RB_FOREACH(vrf, vrf_id_head, &vrfs_by_id) {
- pif = if_lookup_exact_address(&p.u.prefix4, AF_INET,
- vrf->vrf_id);
- if (pif != NULL)
- break;
- }
- if (pif == NULL) {
- zlog_warn("%s: is not a local address: %s", __func__,
- yang_dnode_get_string(args->dnode, NULL));
- return NB_ERR_VALIDATION;
- }
return NB_OK;
case NB_EV_PREPARE:
case NB_EV_ABORT:
@@ -325,9 +311,7 @@ lib_route_map_entry_set_action_rmap_set_action_ipv6_src_address_modify(
struct nb_cb_modify_args *args)
{
struct routemap_hook_context *rhc;
- struct interface *pif = NULL;
const char *source;
- struct vrf *vrf;
struct prefix p;
int rv;
@@ -340,18 +324,6 @@ lib_route_map_entry_set_action_rmap_set_action_ipv6_src_address_modify(
yang_dnode_get_string(args->dnode, NULL));
return NB_ERR_VALIDATION;
}
-
- RB_FOREACH(vrf, vrf_id_head, &vrfs_by_id) {
- pif = if_lookup_exact_address(&p.u.prefix6, AF_INET6,
- vrf->vrf_id);
- if (pif != NULL)
- break;
- }
- if (pif == NULL) {
- zlog_warn("%s: is not a local address: %s", __func__,
- yang_dnode_get_string(args->dnode, NULL));
- return NB_ERR_VALIDATION;
- }
return NB_OK;
case NB_EV_PREPARE:
case NB_EV_ABORT:
diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h
index 57dd0c20ad..f32f09850b 100644
--- a/zebra/zebra_vrf.h
+++ b/zebra/zebra_vrf.h
@@ -105,6 +105,7 @@ struct zebra_vrf {
/* MPLS Label to handle L3VPN <-> vrf popping */
mpls_label_t label[AFI_MAX];
+ uint8_t label_proto[AFI_MAX];
/* MPLS static LSP config table */
struct hash *slsp_table;
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 1d94fcae6b..e4a48093f7 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -595,6 +595,8 @@ static void zserv_client_free(struct zserv *client)
close(client->sock);
if (DYNAMIC_CLIENT_GR_DISABLED(client)) {
+ zebra_mpls_client_cleanup_vrf_label(client->proto);
+
nroutes = rib_score_proto(client->proto,
client->instance);
zlog_notice(