-rw-r--r-- .clang-format | 3
-rw-r--r-- README.md | 2
-rw-r--r-- babeld/babel_interface.c | 18
-rw-r--r-- bgpd/bgp_attr.c | 10
-rw-r--r-- bgpd/bgp_bmp.c | 27
-rw-r--r-- bgpd/bgp_errors.c | 6
-rw-r--r-- bgpd/bgp_errors.h | 1
-rw-r--r-- bgpd/bgp_evpn.c | 54
-rw-r--r-- bgpd/bgp_evpn_mh.c | 550
-rw-r--r-- bgpd/bgp_evpn_mh.h | 48
-rw-r--r-- bgpd/bgp_evpn_private.h | 11
-rw-r--r-- bgpd/bgp_evpn_vty.c | 109
-rw-r--r-- bgpd/bgp_labelpool.c | 10
-rw-r--r-- bgpd/bgp_memory.c | 1
-rw-r--r-- bgpd/bgp_memory.h | 1
-rw-r--r-- bgpd/bgp_nht.c | 35
-rw-r--r-- bgpd/bgp_packet.c | 3
-rw-r--r-- bgpd/bgp_route.c | 211
-rw-r--r-- bgpd/bgp_rpki.c | 2
-rw-r--r-- bgpd/bgp_vty.c | 2
-rw-r--r-- bgpd/bgp_zebra.c | 6
-rw-r--r-- bgpd/bgpd.c | 2
-rw-r--r-- debian/.gitignore | 1
-rwxr-xr-x debian/rules | 2
-rw-r--r-- doc/developer/frr-release-procedure.rst | 15
-rw-r--r-- doc/developer/lists.rst | 186
-rw-r--r-- doc/developer/logging.rst | 4
-rw-r--r-- doc/developer/topotests.rst | 3
-rw-r--r-- doc/user/bgp.rst | 51
-rw-r--r-- doc/user/index.rst | 1
-rw-r--r-- doc/user/isisd.rst | 8
-rw-r--r-- doc/user/pim.rst | 22
-rw-r--r-- doc/user/pimv6.rst | 288
-rw-r--r-- doc/user/sharp.rst | 4
-rw-r--r-- doc/user/subdir.am | 1
-rw-r--r-- doc/user/zebra.rst | 13
-rw-r--r-- include/linux/if_link.h | 13
-rw-r--r-- include/linux/mroute.h | 193
-rw-r--r-- include/linux/mroute6.h | 163
-rw-r--r-- include/subdir.am | 2
-rw-r--r-- isisd/isis_adjacency.c | 214
-rw-r--r-- isisd/isis_adjacency.h | 2
-rw-r--r-- isisd/isis_circuit.c | 165
-rw-r--r-- isisd/isis_circuit.h | 2
-rw-r--r-- isisd/isis_lsp.c | 76
-rw-r--r-- isisd/isis_lsp.h | 17
-rw-r--r-- isisd/isis_pdu.c | 4
-rw-r--r-- isisd/isis_pfpacket.c | 12
-rw-r--r-- isisd/isis_spf.c | 92
-rw-r--r-- isisd/isis_spf.h | 2
-rw-r--r-- isisd/isis_tlvs.c | 1288
-rw-r--r-- isisd/isis_tlvs.h | 2
-rw-r--r-- isisd/isisd.c | 633
-rw-r--r-- isisd/isisd.h | 12
-rw-r--r-- ldpd/ldp_vty_exec.c | 4
-rw-r--r-- lib/json.c | 13
-rw-r--r-- lib/json.h | 22
-rw-r--r-- lib/libfrr.c | 17
-rw-r--r-- lib/libfrr.h | 11
-rw-r--r-- lib/log.c | 2
-rw-r--r-- lib/log_vty.c | 17
-rw-r--r-- lib/monotime.h | 6
-rw-r--r-- lib/northbound_grpc.cpp | 939
-rw-r--r-- lib/northbound_sysrepo.c | 28
-rw-r--r-- lib/plist.c | 113
-rw-r--r-- lib/plist_int.h | 7
-rw-r--r-- lib/prefix.c | 44
-rw-r--r-- lib/prefix.h | 1
-rw-r--r-- lib/routemap.c | 45
-rw-r--r-- lib/sockunion.c | 15
-rw-r--r-- lib/sockunion.h | 1
-rw-r--r-- lib/subdir.am | 5
-rw-r--r-- lib/typerb.c | 35
-rw-r--r-- lib/typerb.h | 22
-rw-r--r-- lib/typesafe.h | 48
-rw-r--r-- lib/vty.c | 11
-rw-r--r-- lib/wheel.c | 3
-rw-r--r-- lib/zclient.c | 33
-rw-r--r-- lib/zclient.h | 15
-rw-r--r-- lib/zlog.c | 4
-rw-r--r-- lib/zlog_live.c | 54
-rw-r--r-- lib/zlog_live.h | 32
-rw-r--r-- ospf6d/ospf6_flood.c | 19
-rw-r--r-- ospf6d/ospf6_gr.c | 2
-rw-r--r-- ospf6d/ospf6_gr_helper.c | 19
-rw-r--r-- ospf6d/ospf6_lsa.c | 8
-rw-r--r-- ospf6d/ospf6_lsdb.c | 2
-rw-r--r-- ospf6d/ospf6_nssa.c | 51
-rw-r--r-- ospf6d/ospf6_top.c | 15
-rw-r--r-- ospf6d/ospf6_zebra.c | 11
-rw-r--r-- ospfd/ospf_apiserver.c | 6
-rw-r--r-- ospfd/ospf_apiserver.h | 4
-rw-r--r-- ospfd/ospf_gr.c | 2
-rw-r--r-- ospfd/ospf_ldp_sync.c | 7
-rw-r--r-- ospfd/ospf_vty.c | 137
-rw-r--r-- pathd/path_cli.c | 2
-rw-r--r-- pbrd/pbr_zebra.c | 12
-rw-r--r-- pimd/pim6_cmd.c | 446
-rw-r--r-- pimd/pim6_cmd.h | 5
-rw-r--r-- pimd/pim6_main.c | 8
-rw-r--r-- pimd/pim6_mroute_msg.c | 196
-rw-r--r-- pimd/pim6_stubs.c | 87
-rw-r--r-- pimd/pim_addr.h | 2
-rw-r--r-- pimd/pim_assert.c | 5
-rw-r--r-- pimd/pim_br.c | 10
-rw-r--r-- pimd/pim_br.h | 6
-rw-r--r-- pimd/pim_bsm.c | 43
-rw-r--r-- pimd/pim_bsm.h | 9
-rw-r--r-- pimd/pim_cmd.c | 652
-rw-r--r-- pimd/pim_cmd_common.c | 327
-rw-r--r-- pimd/pim_cmd_common.h | 23
-rw-r--r-- pimd/pim_iface.c | 141
-rw-r--r-- pimd/pim_iface.h | 11
-rw-r--r-- pimd/pim_ifchannel.c | 2
-rw-r--r-- pimd/pim_igmp.c | 272
-rw-r--r-- pimd/pim_igmp.h | 32
-rw-r--r-- pimd/pim_igmp_mtrace.c | 4
-rw-r--r-- pimd/pim_igmp_stats.c | 10
-rw-r--r-- pimd/pim_igmp_stats.h | 28
-rw-r--r-- pimd/pim_igmpv2.c | 28
-rw-r--r-- pimd/pim_igmpv3.c | 103
-rw-r--r-- pimd/pim_instance.h | 15
-rw-r--r-- pimd/pim_join.c | 62
-rw-r--r-- pimd/pim_mroute.c | 385
-rw-r--r-- pimd/pim_mroute.h | 187
-rw-r--r-- pimd/pim_mroute_msg.c | 239
-rw-r--r-- pimd/pim_msdp_packet.c | 4
-rw-r--r-- pimd/pim_msg.c | 46
-rw-r--r-- pimd/pim_msg.h | 40
-rw-r--r-- pimd/pim_nb.h | 6
-rw-r--r-- pimd/pim_nb_config.c | 156
-rw-r--r-- pimd/pim_nht.c | 63
-rw-r--r-- pimd/pim_nht.h | 2
-rw-r--r-- pimd/pim_oil.c | 27
-rw-r--r-- pimd/pim_oil.h | 18
-rw-r--r-- pimd/pim_pim.c | 272
-rw-r--r-- pimd/pim_pim.h | 3
-rw-r--r-- pimd/pim_register.c | 171
-rw-r--r-- pimd/pim_register.h | 11
-rw-r--r-- pimd/pim_rp.c | 456
-rw-r--r-- pimd/pim_rp.h | 20
-rw-r--r-- pimd/pim_rpf.c | 50
-rw-r--r-- pimd/pim_sock.c | 440
-rw-r--r-- pimd/pim_sock.h | 12
-rw-r--r-- pimd/pim_ssm.c | 2
-rw-r--r-- pimd/pim_ssm.h | 2
-rw-r--r-- pimd/pim_ssmpingd.c | 278
-rw-r--r-- pimd/pim_ssmpingd.h | 6
-rw-r--r-- pimd/pim_tib.c | 178
-rw-r--r-- pimd/pim_tib.h | 33
-rw-r--r-- pimd/pim_tlv.c | 32
-rw-r--r-- pimd/pim_upstream.c | 13
-rw-r--r-- pimd/pim_util.c | 14
-rw-r--r-- pimd/pim_util.h | 1
-rw-r--r-- pimd/pim_vty.c | 11
-rw-r--r-- pimd/pim_zebra.c | 350
-rw-r--r-- pimd/pim_zebra.h | 10
-rw-r--r-- pimd/pim_zlookup.c | 2
-rw-r--r-- pimd/pimd.h | 6
-rw-r--r-- pimd/subdir.am | 10
-rw-r--r-- sharpd/sharp_vty.c | 64
-rw-r--r-- sharpd/sharp_zebra.c | 24
-rw-r--r-- sharpd/sharp_zebra.h | 2
-rw-r--r-- staticd/static_nht.c | 29
-rw-r--r-- staticd/static_nht.h | 8
-rw-r--r-- staticd/static_zebra.c | 185
-rw-r--r-- tests/.gitignore | 2
-rw-r--r-- tests/isisd/test_fuzz_isis_tlv.c | 8
-rw-r--r-- tests/isisd/test_isis_spf.c | 2
-rw-r--r-- tests/lib/test_grpc.py | 10
-rw-r--r-- tests/lib/test_printfrr.c | 18
-rw-r--r-- tests/lib/test_typelist.c | 8
-rw-r--r-- tests/lib/test_typelist.h | 64
-rwxr-xr-x tests/topotests/analyze.py | 7
-rw-r--r-- tests/topotests/bgp_vrf_dynamic_route_leak_topo3/bgp_vrf_dynamic_route_leak_topo3.json | 1088
-rw-r--r-- tests/topotests/bgp_vrf_dynamic_route_leak_topo3/test_bgp_vrf_dynamic_route_leak_topo3.py | 1803
-rw-r--r-- tests/topotests/bgp_vrf_dynamic_route_leak_topo4/bgp_vrf_dynamic_route_leak_topo4.json | 1088
-rw-r--r-- tests/topotests/bgp_vrf_dynamic_route_leak_topo4/test_bgp_vrf_dynamic_route_leak_topo4.py | 1909
-rw-r--r-- tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py | 217
l--------- tests/topotests/grpc_basic/lib | 1
-rw-r--r-- tests/topotests/grpc_basic/r1/zebra.conf | 8
-rw-r--r-- tests/topotests/grpc_basic/r2/zebra.conf | 8
-rw-r--r-- tests/topotests/grpc_basic/test_basic_grpc.py | 179
-rw-r--r-- tests/topotests/isis_topo1/test_isis_topo1.py | 88
-rw-r--r-- tests/topotests/lib/bgp.py | 10
-rw-r--r-- tests/topotests/lib/common_config.py | 75
-rwxr-xr-x tests/topotests/lib/grpc-query.py | 155
-rw-r--r-- tests/topotests/lib/micronet_cli.py | 4
-rw-r--r-- tests/topotests/lib/ospf.py | 8
-rw-r--r-- tests/topotests/lib/topogen.py | 8
-rw-r--r-- tests/topotests/lib/topotest.py | 3
-rw-r--r-- tests/topotests/ospf6_topo2/test_ospf6_topo2.py | 27
-rwxr-xr-x tests/topotests/ospf_multi_vrf_bgp_route_leak/__init__.py | 0
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/frr.conf | 57
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/ospf-vrf-default.txt | 19
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/ospf-vrf-neno.txt | 12
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt | 9
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt | 6
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/frr.conf | 62
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/ospf-vrf-default.txt | 20
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/ospf-vrf-ray.txt | 15
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt | 10
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt | 9
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/frr.conf | 22
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/ospf-vrf-default.txt | 17
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt | 8
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/frr.conf | 22
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/ospf-vrf-default.txt | 17
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt | 7
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/test_ospf_multi_vrf_bgp_route_leak.py | 243
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_type7_lsa.json | 202
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_dual_stack.json | 312
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json | 36
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp_lan.json | 264
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_lan.json | 140
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_nssa2.json | 197
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json | 59
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json | 17
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py | 1208
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py | 400
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py | 482
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py | 416
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py | 375
-rw-r--r-- tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py | 957
-rw-r--r-- tests/topotests/zebra_rib/test_zebra_rib.py | 57
-rw-r--r-- tools/coccinelle/json_object_add_camel_case.cocci | 19
-rw-r--r-- tools/frrcommon.sh.in | 6
-rw-r--r-- tools/frrinit.sh.in | 2
-rw-r--r-- vrrpd/Makefile | 4
-rwxr-xr-x vtysh/extract.pl.in | 11
-rw-r--r-- vtysh/vtysh.c | 28
-rw-r--r-- yang/frr-route-types.yang | 4
-rw-r--r-- zebra/debug_nl.c | 110
-rw-r--r-- zebra/dplane_fpm_nl.c | 6
-rw-r--r-- zebra/if_netlink.c | 300
-rw-r--r-- zebra/if_netlink.h | 25
-rw-r--r-- zebra/if_socket.c | 41
-rw-r--r-- zebra/interface.c | 373
-rw-r--r-- zebra/interface.h | 34
-rw-r--r-- zebra/ioctl.c | 8
-rw-r--r-- zebra/kernel_netlink.c | 22
-rw-r--r-- zebra/kernel_socket.c | 41
-rw-r--r-- zebra/rib.h | 3
-rw-r--r-- zebra/rt.h | 3
-rw-r--r-- zebra/rt_netlink.c | 2
-rw-r--r-- zebra/rt_netlink.h | 2
-rw-r--r-- zebra/rt_socket.c | 6
-rw-r--r-- zebra/subdir.am | 1
-rw-r--r-- zebra/zapi_msg.c | 24
-rw-r--r-- zebra/zebra_dplane.c | 204
-rw-r--r-- zebra/zebra_dplane.h | 19
-rw-r--r-- zebra/zebra_errors.c | 9
-rw-r--r-- zebra/zebra_errors.h | 1
-rw-r--r-- zebra/zebra_evpn.c | 8
-rw-r--r-- zebra/zebra_evpn_mac.c | 58
-rw-r--r-- zebra/zebra_evpn_mac.h | 15
-rw-r--r-- zebra/zebra_evpn_mh.c | 62
-rw-r--r-- zebra/zebra_evpn_mh.h | 2
-rw-r--r-- zebra/zebra_evpn_neigh.c | 29
-rw-r--r-- zebra/zebra_netns_id.c | 2
-rw-r--r-- zebra/zebra_netns_notify.c | 2
-rw-r--r-- zebra/zebra_nhg.c | 38
-rw-r--r-- zebra/zebra_ptm.c | 6
-rw-r--r-- zebra/zebra_rib.c | 8
-rw-r--r-- zebra/zebra_rnh.c | 53
-rw-r--r-- zebra/zebra_rnh.h | 8
-rw-r--r-- zebra/zebra_router.h | 16
-rw-r--r-- zebra/zebra_script.c | 3
-rw-r--r-- zebra/zebra_srte.c | 12
-rw-r--r-- zebra/zebra_vty.c | 41
-rw-r--r-- zebra/zebra_vxlan.c | 198
271 files changed, 22298 insertions, 4888 deletions
diff --git a/.clang-format b/.clang-format
index a620b5c2c0..b01157b051 100644
--- a/.clang-format
+++ b/.clang-format
@@ -28,6 +28,9 @@ ForEachMacros:
- frr_each
- frr_each_safe
- frr_each_from
+ - frr_rev_each
+ - frr_rev_each_safe
+ - frr_rev_each_from
- frr_with_mutex
- frr_with_privs
- LIST_FOREACH
diff --git a/README.md b/README.md
index 0724dae335..600a91e386 100644
--- a/README.md
+++ b/README.md
@@ -59,7 +59,7 @@ lists:
For chat, we currently use [Slack](https://frrouting.slack.com). You can join
by clicking the "Slack" link under the
-[Participate](https://frrouting.org/#participate) section of our website.
+[Participate](https://frrouting.org/community) section of our website.
Contributing
diff --git a/babeld/babel_interface.c b/babeld/babel_interface.c
index 98c5d21196..00fb58e576 100644
--- a/babeld/babel_interface.c
+++ b/babeld/babel_interface.c
@@ -60,21 +60,10 @@ static void babel_interface_free (babel_interface_nfo *bi);
static vector babel_enable_if; /* enable interfaces (by cmd). */
-int
-babel_interface_up (ZAPI_CALLBACK_ARGS)
+int babel_ifp_up(struct interface *ifp)
{
- struct stream *s = NULL;
- struct interface *ifp = NULL;
-
debugf(BABEL_DEBUG_IF, "receive a 'interface up'");
- s = zclient->ibuf;
- ifp = zebra_interface_state_read(s, vrf_id); /* it updates iflist */
-
- if (ifp == NULL) {
- return 0;
- }
-
interface_recalculate(ifp);
return 0;
}
@@ -1235,11 +1224,6 @@ DEFUN (show_babel_parameters,
return CMD_SUCCESS;
}
-int babel_ifp_up(struct interface *ifp)
-{
- return 0;
-}
-
void
babel_if_init(void)
{
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index a96b63cac6..2f246e61d8 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -2572,6 +2572,16 @@ bgp_attr_srv6_service_data(struct bgp_attr_parser_args *args)
args->total);
}
+ if (length < BGP_PREFIX_SID_SRV6_L3_SERVICE_SID_STRUCTURE_LENGTH) {
+ flog_err(
+ EC_BGP_ATTR_LEN,
+ "Malformed SRv6 Service Data Sub-Sub-TLV attribute - insufficient data (need %u, have %hu remaining in UPDATE)",
+ BGP_PREFIX_SID_SRV6_L3_SERVICE_SID_STRUCTURE_LENGTH,
+ length);
+ return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
+ args->total);
+ }
+
if (type == BGP_PREFIX_SID_SRV6_L3_SERVICE_SID_STRUCTURE) {
loc_block_len = stream_getc(peer->curr);
loc_node_len = stream_getc(peer->curr);
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
index 96c34f9196..48d3706ec5 100644
--- a/bgpd/bgp_bmp.c
+++ b/bgpd/bgp_bmp.c
@@ -1619,6 +1619,8 @@ static void bmp_targets_put(struct bmp_targets *bt)
struct bmp *bmp;
struct bmp_active *ba;
+ THREAD_OFF(bt->t_stats);
+
frr_each_safe (bmp_actives, &bt->actives, ba)
bmp_active_put(ba);
@@ -1929,6 +1931,28 @@ static struct cmd_node bmp_node = {
.prompt = "%s(config-bgp-bmp)# "
};
+static void bmp_targets_autocomplete(vector comps, struct cmd_token *token)
+{
+ struct bgp *bgp;
+ struct bmp_targets *target;
+ struct listnode *node;
+
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp);
+
+ if (!bmpbgp)
+ continue;
+
+ frr_each_safe (bmp_targets, &bmpbgp->targets, target)
+ vector_set(comps,
+ XSTRDUP(MTYPE_COMPLETION, target->name));
+ }
+}
+
+static const struct cmd_variable_handler bmp_targets_var_handlers[] = {
+ {.tokenname = "BMPTARGETS", .completions = bmp_targets_autocomplete},
+ {.completions = NULL}};
+
#define BMP_STR "BGP Monitoring Protocol\n"
#ifndef VTYSH_EXTRACT_PL
@@ -2422,6 +2446,9 @@ static int bgp_bmp_init(struct thread_master *tm)
{
install_node(&bmp_node);
install_default(BMP_NODE);
+
+ cmd_variable_handler_register(bmp_targets_var_handlers);
+
install_element(BGP_NODE, &bmp_targets_cmd);
install_element(BGP_NODE, &no_bmp_targets_cmd);
diff --git a/bgpd/bgp_errors.c b/bgpd/bgp_errors.c
index f11717b41f..193c96a169 100644
--- a/bgpd/bgp_errors.c
+++ b/bgpd/bgp_errors.c
@@ -475,6 +475,12 @@ static struct log_ref ferr_bgp_err[] = {
.suggestion = "Get log files from router and open an issue",
},
{
+ .code = EC_BGP_NO_LL_ADDRESS_AVAILABLE,
+ .title = "BGP v6 peer with no LL address on outgoing interface",
+ .description = "BGP when using a v6 peer requires a v6 LL address to be configured on the outgoing interface as per RFC 4291 section 2.1",
+ .suggestion = "Add a v6 LL address to the outgoing interfaces as per RFC",
+ },
+ {
.code = END_FERR,
}
};
diff --git a/bgpd/bgp_errors.h b/bgpd/bgp_errors.h
index 20056d382a..0b71af3fc6 100644
--- a/bgpd/bgp_errors.h
+++ b/bgpd/bgp_errors.h
@@ -101,6 +101,7 @@ enum bgp_log_refs {
EC_BGP_ROUTER_ID_SAME,
EC_BGP_INVALID_BGP_INSTANCE,
EC_BGP_INVALID_ROUTE,
+ EC_BGP_NO_LL_ADDRESS_AVAILABLE,
};
extern void bgp_error_init(void);
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index a9c006ca2b..9f3f8389ad 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -325,8 +325,8 @@ static int is_vni_present_in_irt_vnis(struct list *vnis, struct bgpevpn *vpn)
/*
* Compare Route Targets.
*/
-static int evpn_route_target_cmp(struct ecommunity *ecom1,
- struct ecommunity *ecom2)
+int bgp_evpn_route_target_cmp(struct ecommunity *ecom1,
+ struct ecommunity *ecom2)
{
if (ecom1 && !ecom2)
return -1;
@@ -349,7 +349,7 @@ static int evpn_route_target_cmp(struct ecommunity *ecom1,
return strcmp(ecom1->str, ecom2->str);
}
-static void evpn_xxport_delete_ecomm(void *val)
+void bgp_evpn_xxport_delete_ecomm(void *val)
{
struct ecommunity *ecomm = val;
ecommunity_free(&ecomm);
@@ -3036,9 +3036,11 @@ int bgp_evpn_route_entry_install_if_vrf_match(struct bgp *bgp_vrf,
return 0;
/* don't import hosts that are locally attached */
- if (install
- && !bgp_evpn_skip_vrf_import_of_local_es(bgp_vrf, evp, pi,
- install))
+ if (install && bgp_evpn_skip_vrf_import_of_local_es(
+ bgp_vrf, evp, pi, install))
+ return 0;
+
+ if (install)
ret = install_evpn_route_entry_in_vrf(bgp_vrf, evp, pi);
else
ret = uninstall_evpn_route_entry_in_vrf(bgp_vrf, evp,
@@ -3291,9 +3293,11 @@ static int install_uninstall_route_in_vrfs(struct bgp *bgp_def, afi_t afi,
int ret;
/* don't import hosts that are locally attached */
- if (install
- && !bgp_evpn_skip_vrf_import_of_local_es(bgp_vrf, evp, pi,
- install))
+ if (install && bgp_evpn_skip_vrf_import_of_local_es(
+ bgp_vrf, evp, pi, install))
+ return 0;
+
+ if (install)
ret = install_evpn_route_entry_in_vrf(bgp_vrf, evp, pi);
else
ret = uninstall_evpn_route_entry_in_vrf(bgp_vrf, evp,
@@ -3506,20 +3510,6 @@ void bgp_evpn_import_type2_route(struct bgp_path_info *pi, int import)
&pi->net->p, pi, import);
}
-/* Import the pi into vrf routing tables */
-void bgp_evpn_import_route_in_vrfs(struct bgp_path_info *pi, int import)
-{
- struct bgp *bgp_evpn;
-
- bgp_evpn = bgp_get_evpn();
- if (!bgp_evpn)
- return;
-
- bgp_evpn_install_uninstall_table(bgp_evpn, AFI_L2VPN, SAFI_EVPN,
- &pi->net->p, pi, import, false /*vpn*/,
- true /*vrf*/);
-}
-
/*
* delete and withdraw all ipv4 and ipv6 routes in the vrf table as type-5
* routes
@@ -5332,11 +5322,13 @@ struct bgpevpn *bgp_evpn_new(struct bgp *bgp, vni_t vni,
/* Initialize route-target import and export lists */
vpn->import_rtl = list_new();
- vpn->import_rtl->cmp = (int (*)(void *, void *))evpn_route_target_cmp;
- vpn->import_rtl->del = evpn_xxport_delete_ecomm;
+ vpn->import_rtl->cmp =
+ (int (*)(void *, void *))bgp_evpn_route_target_cmp;
+ vpn->import_rtl->del = bgp_evpn_xxport_delete_ecomm;
vpn->export_rtl = list_new();
- vpn->export_rtl->cmp = (int (*)(void *, void *))evpn_route_target_cmp;
- vpn->export_rtl->del = evpn_xxport_delete_ecomm;
+ vpn->export_rtl->cmp =
+ (int (*)(void *, void *))bgp_evpn_route_target_cmp;
+ vpn->export_rtl->del = bgp_evpn_xxport_delete_ecomm;
bf_assign_index(bm->rd_idspace, vpn->rd_id);
derive_rd_rt_for_vni(bgp, vpn);
@@ -6037,12 +6029,12 @@ void bgp_evpn_init(struct bgp *bgp)
"BGP VRF Import RT Hash");
bgp->vrf_import_rtl = list_new();
bgp->vrf_import_rtl->cmp =
- (int (*)(void *, void *))evpn_route_target_cmp;
- bgp->vrf_import_rtl->del = evpn_xxport_delete_ecomm;
+ (int (*)(void *, void *))bgp_evpn_route_target_cmp;
+ bgp->vrf_import_rtl->del = bgp_evpn_xxport_delete_ecomm;
bgp->vrf_export_rtl = list_new();
bgp->vrf_export_rtl->cmp =
- (int (*)(void *, void *))evpn_route_target_cmp;
- bgp->vrf_export_rtl->del = evpn_xxport_delete_ecomm;
+ (int (*)(void *, void *))bgp_evpn_route_target_cmp;
+ bgp->vrf_export_rtl->del = bgp_evpn_xxport_delete_ecomm;
bgp->l2vnis = list_new();
bgp->l2vnis->cmp = vni_list_cmp;
/* By default Duplicate Address Dection is enabled.
diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c
index ea179ec2b4..ed3716f601 100644
--- a/bgpd/bgp_evpn_mh.c
+++ b/bgpd/bgp_evpn_mh.c
@@ -462,7 +462,9 @@ int bgp_evpn_mh_route_update(struct bgp *bgp, struct bgp_evpn_es *es,
* ESR).
*/
static int bgp_evpn_mh_route_delete(struct bgp *bgp, struct bgp_evpn_es *es,
- struct bgpevpn *vpn, struct prefix_evpn *p)
+ struct bgpevpn *vpn,
+ struct bgp_evpn_es_frag *es_frag,
+ struct prefix_evpn *p)
{
afi_t afi = AFI_L2VPN;
safi_t safi = SAFI_EVPN;
@@ -477,7 +479,7 @@ static int bgp_evpn_mh_route_delete(struct bgp *bgp, struct bgp_evpn_es *es,
prd = &vpn->prd;
} else {
rt_table = es->route_table;
- prd = &es->prd;
+ prd = &es_frag->prd;
}
/* First, locate the route node within the ESI or VNI.
@@ -680,7 +682,7 @@ static int bgp_evpn_type4_route_update(struct bgp *bgp,
struct bgp_path_info *global_pi;
dest = bgp_global_evpn_node_get(bgp->rib[afi][safi], afi, safi,
- p, &es->prd);
+ p, &es->es_base_frag->prd);
bgp_evpn_mh_route_update(bgp, es, NULL, afi, safi, dest,
attr_new, 1, &global_pi,
&route_changed);
@@ -699,7 +701,11 @@ static int bgp_evpn_type4_route_update(struct bgp *bgp,
static int bgp_evpn_type4_route_delete(struct bgp *bgp,
struct bgp_evpn_es *es, struct prefix_evpn *p)
{
- return bgp_evpn_mh_route_delete(bgp, es, NULL /* l2vni */, p);
+ if (!es->es_base_frag)
+ return -1;
+
+ return bgp_evpn_mh_route_delete(bgp, es, NULL /* l2vni */,
+ es->es_base_frag, p);
}
/* Process remote/received EVPN type-4 route (advertise or withdraw) */
@@ -845,8 +851,9 @@ static int bgp_evpn_type4_remote_routes_import(struct bgp *bgp,
*/
/* Extended communities associated with EAD-per-ES */
-static void bgp_evpn_type1_es_route_extcomm_build(struct bgp_evpn_es *es,
- struct attr *attr)
+static void
+bgp_evpn_type1_es_route_extcomm_build(struct bgp_evpn_es_frag *es_frag,
+ struct attr *attr)
{
struct ecommunity ecom_encap;
struct ecommunity ecom_esi_label;
@@ -880,16 +887,22 @@ static void bgp_evpn_type1_es_route_extcomm_build(struct bgp_evpn_es *es,
/* XXX - suppress EAD-ES advertisment if there are no EVIs associated
* with it.
*/
- for (ALL_LIST_ELEMENTS_RO(es->es_evi_list,
- evi_node, es_evi)) {
- if (!CHECK_FLAG(es_evi->flags, BGP_EVPNES_EVI_LOCAL))
- continue;
- for (ALL_LIST_ELEMENTS_RO(es_evi->vpn->export_rtl,
- rt_node, ecom))
+ if (listcount(bgp_mh_info->ead_es_export_rtl)) {
+ for (ALL_LIST_ELEMENTS_RO(bgp_mh_info->ead_es_export_rtl,
+ rt_node, ecom))
bgp_attr_set_ecommunity(
- attr,
- ecommunity_merge(bgp_attr_get_ecommunity(attr),
- ecom));
+ attr, ecommunity_merge(attr->ecommunity, ecom));
+ } else {
+ for (ALL_LIST_ELEMENTS_RO(es_frag->es_evi_frag_list, evi_node,
+ es_evi)) {
+ if (!CHECK_FLAG(es_evi->flags, BGP_EVPNES_EVI_LOCAL))
+ continue;
+ for (ALL_LIST_ELEMENTS_RO(es_evi->vpn->export_rtl,
+ rt_node, ecom))
+ bgp_attr_set_ecommunity(
+ attr, ecommunity_merge(attr->ecommunity,
+ ecom));
+ }
}
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);
@@ -926,9 +939,10 @@ static void bgp_evpn_type1_evi_route_extcomm_build(struct bgp_evpn_es *es,
/* Update EVPN EAD (type-1) route -
* vpn - valid for EAD-EVI routes and NULL for EAD-ES routes
*/
-static int bgp_evpn_type1_route_update(struct bgp *bgp,
- struct bgp_evpn_es *es, struct bgpevpn *vpn,
- struct prefix_evpn *p)
+static int bgp_evpn_type1_route_update(struct bgp *bgp, struct bgp_evpn_es *es,
+ struct bgpevpn *vpn,
+ struct bgp_evpn_es_frag *es_frag,
+ struct prefix_evpn *p)
{
int ret = 0;
afi_t afi = AFI_L2VPN;
@@ -974,7 +988,7 @@ static int bgp_evpn_type1_route_update(struct bgp *bgp,
/* MPLS label is 0 for EAD-ES route */
/* Set up extended community */
- bgp_evpn_type1_es_route_extcomm_build(es, &attr);
+ bgp_evpn_type1_es_route_extcomm_build(es_frag, &attr);
/* First, create (or fetch) route node within the ES. */
/* NOTE: There is no RD here. */
@@ -990,7 +1004,7 @@ static int bgp_evpn_type1_route_update(struct bgp *bgp,
"%u ERROR: Failed to updated EAD-EVI route ESI: %s VTEP %pI4",
bgp->vrf_id, es->esi_str, &es->originator_ip);
}
- global_rd = &es->prd;
+ global_rd = &es_frag->prd;
}
@@ -1034,54 +1048,86 @@ static int bgp_evpn_type1_route_update(struct bgp *bgp,
* table and advertise these routes to peers.
*/
+static void bgp_evpn_ead_es_route_update(struct bgp *bgp,
+ struct bgp_evpn_es *es)
+{
+ struct listnode *node;
+ struct bgp_evpn_es_frag *es_frag;
+ struct prefix_evpn p;
+
+ build_evpn_type1_prefix(&p, BGP_EVPN_AD_ES_ETH_TAG, &es->esi,
+ es->originator_ip);
+ for (ALL_LIST_ELEMENTS_RO(es->es_frag_list, node, es_frag)) {
+ if (!listcount(es_frag->es_evi_frag_list))
+ continue;
+
+ p.prefix.ead_addr.frag_id = es_frag->rd_id;
+ if (bgp_evpn_type1_route_update(bgp, es, NULL, es_frag, &p))
+ flog_err(
+ EC_BGP_EVPN_ROUTE_CREATE,
+ "EAD-ES route creation failure for ESI %s frag %u",
+ es->esi_str, es_frag->rd_id);
+ }
+}
+
+static void bgp_evpn_ead_evi_route_update(struct bgp *bgp,
+ struct bgp_evpn_es *es,
+ struct bgpevpn *vpn,
+ struct prefix_evpn *p)
+{
+ if (bgp_evpn_type1_route_update(bgp, es, vpn, NULL, p))
+ flog_err(EC_BGP_EVPN_ROUTE_CREATE,
+ "EAD-EVI route creation failure for ESI %s VNI %u",
+ es->esi_str, vpn->vni);
+}
+
void update_type1_routes_for_evi(struct bgp *bgp, struct bgpevpn *vpn)
{
struct prefix_evpn p;
struct bgp_evpn_es *es;
struct bgp_evpn_es_evi *es_evi;
- struct bgp_evpn_es_evi *es_evi_next;
- RB_FOREACH_SAFE(es_evi, bgp_es_evi_rb_head,
- &vpn->es_evi_rb_tree, es_evi_next) {
+
+ RB_FOREACH (es_evi, bgp_es_evi_rb_head, &vpn->es_evi_rb_tree) {
es = es_evi->es;
+ if (es_evi->vpn != vpn)
+ continue;
+
/* Update EAD-ES */
- if (CHECK_FLAG(es->flags, BGP_EVPNES_OPER_UP)) {
- build_evpn_type1_prefix(&p, BGP_EVPN_AD_ES_ETH_TAG,
- &es->esi, es->originator_ip);
- if (bgp_evpn_type1_route_update(bgp, es, NULL, &p))
- flog_err(EC_BGP_EVPN_ROUTE_CREATE,
- "%u: EAD-ES route update failure for ESI %s VNI %u",
- bgp->vrf_id, es->esi_str,
- es_evi->vpn->vni);
- }
+ bgp_evpn_ead_es_route_update(bgp, es);
/* Update EAD-EVI */
if (CHECK_FLAG(es->flags, BGP_EVPNES_ADV_EVI)) {
build_evpn_type1_prefix(&p, BGP_EVPN_AD_EVI_ETH_TAG,
&es->esi, es->originator_ip);
- if (bgp_evpn_type1_route_update(bgp, es, es_evi->vpn,
- &p))
- flog_err(EC_BGP_EVPN_ROUTE_DELETE,
- "%u: EAD-EVI route update failure for ESI %s VNI %u",
- bgp->vrf_id, es->esi_str,
- es_evi->vpn->vni);
+ bgp_evpn_ead_evi_route_update(bgp, es, vpn, &p);
}
}
}
/* Delete local Type-1 route */
-static int bgp_evpn_type1_es_route_delete(struct bgp *bgp,
- struct bgp_evpn_es *es, struct prefix_evpn *p)
+static void bgp_evpn_ead_es_route_delete(struct bgp *bgp,
+ struct bgp_evpn_es *es)
{
- return bgp_evpn_mh_route_delete(bgp, es, NULL /* l2vni */, p);
+ struct listnode *node;
+ struct bgp_evpn_es_frag *es_frag;
+ struct prefix_evpn p;
+
+ build_evpn_type1_prefix(&p, BGP_EVPN_AD_ES_ETH_TAG, &es->esi,
+ es->originator_ip);
+ for (ALL_LIST_ELEMENTS_RO(es->es_frag_list, node, es_frag)) {
+ p.prefix.ead_addr.frag_id = es_frag->rd_id;
+ bgp_evpn_mh_route_delete(bgp, es, NULL, es_frag, &p);
+ }
}
-static int bgp_evpn_type1_evi_route_delete(struct bgp *bgp,
- struct bgp_evpn_es *es, struct bgpevpn *vpn,
- struct prefix_evpn *p)
+static int bgp_evpn_ead_evi_route_delete(struct bgp *bgp,
+ struct bgp_evpn_es *es,
+ struct bgpevpn *vpn,
+ struct prefix_evpn *p)
{
- return bgp_evpn_mh_route_delete(bgp, es, vpn, p);
+ return bgp_evpn_mh_route_delete(bgp, es, vpn, NULL, p);
}
/* Generate EAD-EVI for all VNIs */
@@ -1107,10 +1153,7 @@ static void bgp_evpn_local_type1_evi_route_add(struct bgp *bgp,
for (ALL_LIST_ELEMENTS_RO(es->es_evi_list, evi_node, es_evi)) {
if (!CHECK_FLAG(es_evi->flags, BGP_EVPNES_EVI_LOCAL))
continue;
- if (bgp_evpn_type1_route_update(bgp, es, es_evi->vpn, &p))
- flog_err(EC_BGP_EVPN_ROUTE_CREATE,
- "%u: Type4 route creation failure for ESI %s",
- bgp->vrf_id, es->esi_str);
+ bgp_evpn_ead_evi_route_update(bgp, es, es_evi->vpn, &p);
}
}
@@ -1135,7 +1178,7 @@ static void bgp_evpn_local_type1_evi_route_del(struct bgp *bgp,
for (ALL_LIST_ELEMENTS_RO(es->es_evi_list, evi_node, es_evi)) {
if (!CHECK_FLAG(es_evi->flags, BGP_EVPNES_EVI_LOCAL))
continue;
- if (bgp_evpn_mh_route_delete(bgp, es, es_evi->vpn, &p))
+ if (bgp_evpn_mh_route_delete(bgp, es, es_evi->vpn, NULL, &p))
flog_err(EC_BGP_EVPN_ROUTE_CREATE,
"%u: Type4 route creation failure for ESI %s",
bgp->vrf_id, es->esi_str);
@@ -1199,6 +1242,72 @@ int bgp_evpn_type1_route_process(struct peer *peer, afi_t afi, safi_t safi,
return ret;
}
+void bgp_evpn_mh_config_ead_export_rt(struct bgp *bgp,
+ struct ecommunity *ecomcfg, bool del)
+{
+ struct listnode *node, *nnode, *node_to_del;
+ struct ecommunity *ecom;
+ struct bgp_evpn_es *es;
+
+ if (del) {
+ if (ecomcfg == NULL) {
+ /* Reset to default and process all routes. */
+ for (ALL_LIST_ELEMENTS(bgp_mh_info->ead_es_export_rtl,
+ node, nnode, ecom)) {
+ ecommunity_free(&ecom);
+ list_delete_node(bgp_mh_info->ead_es_export_rtl,
+ node);
+ }
+ }
+
+ /* Delete a specific export RT */
+ else {
+ node_to_del = NULL;
+
+ for (ALL_LIST_ELEMENTS(bgp_mh_info->ead_es_export_rtl,
+ node, nnode, ecom)) {
+ if (ecommunity_match(ecom, ecomcfg)) {
+ ecommunity_free(&ecom);
+ node_to_del = node;
+ break;
+ }
+ }
+
+ if (node_to_del)
+ list_delete_node(bgp_mh_info->ead_es_export_rtl,
+ node_to_del);
+ }
+ } else {
+ listnode_add_sort(bgp_mh_info->ead_es_export_rtl, ecomcfg);
+ }
+
+ if (BGP_DEBUG(evpn_mh, EVPN_MH_RT))
+ zlog_debug("local ES del/re-add EAD route on export RT change");
+ /*
+ * walk through all active ESs, withdraw the old EAD and
+ * generate a new one
+ */
+ RB_FOREACH (es, bgp_es_rb_head, &bgp_mh_info->es_rb_tree) {
+ if (!bgp_evpn_is_es_local(es) ||
+ !bgp_evpn_local_es_is_active(es))
+ continue;
+
+ if (BGP_DEBUG(evpn_mh, EVPN_MH_RT))
+ zlog_debug(
+ "local ES %s del/re-add EAD route on export RT change",
+ es->esi_str);
+
+ /*
+ * withdraw EAD-ES. XXX - this should technically not be
+ * needed; can be removed after testing
+ */
+ bgp_evpn_ead_es_route_delete(bgp, es);
+
+ /* generate EAD-ES */
+ bgp_evpn_ead_es_route_update(bgp, es);
+ }
+}
+
/*****************************************************************************/
/* Ethernet Segment Management
* 1. Ethernet Segment is a collection of links attached to the same
@@ -1612,6 +1721,167 @@ bgp_evpn_es_path_update_on_es_vrf_chg(struct bgp_evpn_es_vrf *es_vrf,
}
}
+static void bgp_evpn_es_frag_free(struct bgp_evpn_es_frag *es_frag)
+{
+ struct bgp_evpn_es *es = es_frag->es;
+
+ if (es->es_base_frag == es_frag)
+ es->es_base_frag = NULL;
+
+ if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))
+ zlog_debug("es %s frag %u free", es->esi_str, es_frag->rd_id);
+ list_delete_node(es->es_frag_list, &es_frag->es_listnode);
+
+ /* EVIs that are advertised using the info in this fragment */
+ list_delete(&es_frag->es_evi_frag_list);
+
+ bf_release_index(bm->rd_idspace, es_frag->rd_id);
+
+
+ XFREE(MTYPE_BGP_EVPN_ES_FRAG, es_frag);
+}
+
+static void bgp_evpn_es_frag_free_unused(struct bgp_evpn_es_frag *es_frag)
+{
+ if ((es_frag->es->es_base_frag == es_frag) ||
+ listcount(es_frag->es_evi_frag_list))
+ return;
+
+ bgp_evpn_es_frag_free(es_frag);
+}
+
+static void bgp_evpn_es_frag_free_all(struct bgp_evpn_es *es)
+{
+ struct listnode *node;
+ struct listnode *nnode;
+ struct bgp_evpn_es_frag *es_frag;
+
+ for (ALL_LIST_ELEMENTS(es->es_frag_list, node, nnode, es_frag))
+ bgp_evpn_es_frag_free(es_frag);
+}
+
+static struct bgp_evpn_es_frag *bgp_evpn_es_frag_new(struct bgp_evpn_es *es)
+{
+ struct bgp_evpn_es_frag *es_frag;
+ char buf[BGP_EVPN_PREFIX_RD_LEN];
+ struct bgp *bgp;
+
+ es_frag = XCALLOC(MTYPE_BGP_EVPN_ES_FRAG, sizeof(*es_frag));
+ bf_assign_index(bm->rd_idspace, es_frag->rd_id);
+ es_frag->prd.family = AF_UNSPEC;
+ es_frag->prd.prefixlen = 64;
+ bgp = bgp_get_evpn();
+ snprintfrr(buf, sizeof(buf), "%pI4:%hu", &bgp->router_id,
+ es_frag->rd_id);
+ (void)str2prefix_rd(buf, &es_frag->prd);
+
+ /* EVIs that are advertised using the info in this fragment */
+ es_frag->es_evi_frag_list = list_new();
+ listset_app_node_mem(es_frag->es_evi_frag_list);
+
+ /* Link the fragment to the parent ES */
+ es_frag->es = es;
+ listnode_init(&es_frag->es_listnode, es_frag);
+ listnode_add(es->es_frag_list, &es_frag->es_listnode);
+
+ if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))
+ zlog_debug("es %s frag %u new", es->esi_str, es_frag->rd_id);
+ return es_frag;
+}
+
+static struct bgp_evpn_es_frag *
+bgp_evpn_es_find_frag_with_space(struct bgp_evpn_es *es)
+{
+ struct listnode *node;
+ struct bgp_evpn_es_frag *es_frag;
+
+ for (ALL_LIST_ELEMENTS_RO(es->es_frag_list, node, es_frag)) {
+ if (listcount(es_frag->es_evi_frag_list) <
+ bgp_mh_info->evi_per_es_frag)
+ return es_frag;
+ }
+
+ /* No frags were found with space; allocate a new one */
+ return bgp_evpn_es_frag_new(es);
+}
+
+/* Link the ES-EVI to one of the ES fragments */
+static void bgp_evpn_es_frag_evi_add(struct bgp_evpn_es_evi *es_evi)
+{
+ struct bgp_evpn_es_frag *es_frag;
+ struct bgp_evpn_es *es = es_evi->es;
+
+ if (es_evi->es_frag ||
+ !(CHECK_FLAG(es_evi->flags, BGP_EVPNES_EVI_LOCAL)))
+ return;
+
+ es_frag = bgp_evpn_es_find_frag_with_space(es);
+
+ es_evi->es_frag = es_frag;
+ listnode_init(&es_evi->es_frag_listnode, es_evi);
+ listnode_add(es_frag->es_evi_frag_list, &es_evi->es_frag_listnode);
+
+ if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))
+ zlog_debug("es %s vni %d linked to frag %u", es->esi_str,
+ es_evi->vpn->vni, es_frag->rd_id);
+}
+
+/* UnLink the ES-EVI from the ES fragment */
+static void bgp_evpn_es_frag_evi_del(struct bgp_evpn_es_evi *es_evi,
+ bool send_ead_del_if_empty)
+{
+ struct bgp_evpn_es_frag *es_frag = es_evi->es_frag;
+ struct prefix_evpn p;
+ struct bgp_evpn_es *es;
+ struct bgp *bgp;
+
+ if (!es_frag)
+ return;
+
+ es = es_frag->es;
+ es_evi->es_frag = NULL;
+ if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))
+ zlog_debug("es %s vni %d unlinked from frag %u", es->esi_str,
+ es_evi->vpn->vni, es_frag->rd_id);
+
+ list_delete_node(es_frag->es_evi_frag_list, &es_evi->es_frag_listnode);
+
+ /*
+ * if there are no other EVIs on the fragment, delete the EAD-ES for
+ * the fragment
+ */
+ if (send_ead_del_if_empty && !listcount(es_frag->es_evi_frag_list)) {
+ bgp = bgp_get_evpn();
+
+ if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))
+ zlog_debug("es %s frag %u ead-es route delete",
+ es->esi_str, es_frag->rd_id);
+ build_evpn_type1_prefix(&p, BGP_EVPN_AD_ES_ETH_TAG, &es->esi,
+ es->originator_ip);
+ p.prefix.ead_addr.frag_id = es_frag->rd_id;
+ bgp_evpn_mh_route_delete(bgp, es, NULL, es_frag, &p);
+ }
+
+ /* We don't attempt to coalesce frags that may not be full. Instead we
+ * only free up the frag when it is completely empty.
+ */
+ bgp_evpn_es_frag_free_unused(es_frag);
+}
+
+/* Link the ES-EVIs to one of the ES fragments */
+static void bgp_evpn_es_frag_evi_update_all(struct bgp_evpn_es *es, bool add)
+{
+ struct listnode *node;
+ struct bgp_evpn_es_evi *es_evi;
+
+ for (ALL_LIST_ELEMENTS_RO(es->es_evi_list, node, es_evi)) {
+ if (add)
+ bgp_evpn_es_frag_evi_add(es_evi);
+ else
+ bgp_evpn_es_frag_evi_del(es_evi, false);
+ }
+}
+
/* compare ES-IDs for the global ES RB tree */
static int bgp_es_rb_cmp(const struct bgp_evpn_es *es1,
const struct bgp_evpn_es *es2)
@@ -1651,10 +1921,7 @@ static struct bgp_evpn_es *bgp_evpn_es_new(struct bgp *bgp, const esi_t *esi)
es->route_table = bgp_table_init(bgp, AFI_L2VPN, SAFI_EVPN);
/* Add to rb_tree */
- if (RB_INSERT(bgp_es_rb_head, &bgp_mh_info->es_rb_tree, es)) {
- XFREE(MTYPE_BGP_EVPN_ES, es);
- return NULL;
- }
+ RB_INSERT(bgp_es_rb_head, &bgp_mh_info->es_rb_tree, es);
/* Initialise the ES-EVI list */
es->es_evi_list = list_new();
@@ -1669,6 +1936,8 @@ static struct bgp_evpn_es *bgp_evpn_es_new(struct bgp *bgp, const esi_t *esi)
listset_app_node_mem(es->macip_evi_path_list);
es->macip_global_path_list = list_new();
listset_app_node_mem(es->macip_global_path_list);
+ es->es_frag_list = list_new();
+ listset_app_node_mem(es->es_frag_list);
QOBJ_REG(es, bgp_evpn_es);
@@ -1695,6 +1964,7 @@ static void bgp_evpn_es_free(struct bgp_evpn_es *es, const char *caller)
list_delete(&es->es_vtep_list);
list_delete(&es->macip_evi_path_list);
list_delete(&es->macip_global_path_list);
+ list_delete(&es->es_frag_list);
bgp_table_unlock(es->route_table);
/* remove the entry from various databases */
@@ -1714,7 +1984,6 @@ static inline bool bgp_evpn_is_es_local_and_non_bypass(struct bgp_evpn_es *es)
/* init local info associated with the ES */
static void bgp_evpn_es_local_info_set(struct bgp *bgp, struct bgp_evpn_es *es)
{
- char buf[BGP_EVPN_PREFIX_RD_LEN];
bool old_is_local;
bool is_local;
@@ -1727,12 +1996,12 @@ static void bgp_evpn_es_local_info_set(struct bgp *bgp, struct bgp_evpn_es *es)
listnode_init(&es->es_listnode, es);
listnode_add(bgp_mh_info->local_es_list, &es->es_listnode);
- /* auto derive RD for this es */
- bf_assign_index(bm->rd_idspace, es->rd_id);
- es->prd.family = AF_UNSPEC;
- es->prd.prefixlen = 64;
- snprintfrr(buf, sizeof(buf), "%pI4:%hu", &bgp->router_id, es->rd_id);
- (void)str2prefix_rd(buf, &es->prd);
+ /* setup the first ES fragment; more fragments may be allocated based
+ * on the number of EVI entries
+ */
+ es->es_base_frag = bgp_evpn_es_frag_new(es);
+ /* distribute ES-EVIs to one or more ES fragments */
+ bgp_evpn_es_frag_evi_update_all(es, true);
is_local = bgp_evpn_is_es_local_and_non_bypass(es);
if (old_is_local != is_local)
@@ -1748,6 +2017,11 @@ static void bgp_evpn_es_local_info_clear(struct bgp_evpn_es *es, bool finish)
if (!CHECK_FLAG(es->flags, BGP_EVPNES_LOCAL))
return;
+ /* clear the es frag references and free them up */
+ bgp_evpn_es_frag_evi_update_all(es, false);
+ es->es_base_frag = NULL;
+ bgp_evpn_es_frag_free_all(es);
+
old_is_local = bgp_evpn_is_es_local_and_non_bypass(es);
UNSET_FLAG(es->flags, BGP_EVPNES_LOCAL);
@@ -1758,8 +2032,6 @@ static void bgp_evpn_es_local_info_clear(struct bgp_evpn_es *es, bool finish)
/* remove from the ES local list */
list_delete_node(bgp_mh_info->local_es_list, &es->es_listnode);
- bf_release_index(bm->rd_idspace, es->rd_id);
-
bgp_evpn_es_free(es, __func__);
}
@@ -1920,14 +2192,7 @@ static void bgp_evpn_local_es_deactivate(struct bgp *bgp,
bgp_evpn_local_type1_evi_route_del(bgp, es);
/* withdraw EAD-ES */
- build_evpn_type1_prefix(&p, BGP_EVPN_AD_ES_ETH_TAG,
- &es->esi, es->originator_ip);
- ret = bgp_evpn_type1_es_route_delete(bgp, es, &p);
- if (ret) {
- flog_err(EC_BGP_EVPN_ROUTE_DELETE,
- "%u failed to delete type-1 route for ESI %s",
- bgp->vrf_id, es->esi_str);
- }
+ bgp_evpn_ead_es_route_delete(bgp, es);
bgp_evpn_mac_update_on_es_oper_chg(es);
}
@@ -1973,9 +2238,7 @@ static void bgp_evpn_local_es_activate(struct bgp *bgp, struct bgp_evpn_es *es,
bgp_evpn_local_type1_evi_route_add(bgp, es);
/* generate EAD-ES */
- build_evpn_type1_prefix(&p, BGP_EVPN_AD_ES_ETH_TAG, &es->esi,
- es->originator_ip);
- (void)bgp_evpn_type1_route_update(bgp, es, NULL, &p);
+ bgp_evpn_ead_es_route_update(bgp, es);
}
bgp_evpn_mac_update_on_es_oper_chg(es);
@@ -2158,6 +2421,41 @@ int bgp_evpn_local_es_add(struct bgp *bgp, esi_t *esi,
return 0;
}
+static void bgp_evpn_es_json_frag_fill(json_object *json_frags,
+ struct bgp_evpn_es *es)
+{
+ json_object *json_frag;
+ char buf1[RD_ADDRSTRLEN];
+ struct listnode *node;
+ struct bgp_evpn_es_frag *es_frag;
+
+ for (ALL_LIST_ELEMENTS_RO(es->es_frag_list, node, es_frag)) {
+ json_frag = json_object_new_object();
+
+ json_object_string_add(
+ json_frag, "rd",
+ prefix_rd2str(&es_frag->prd, buf1, sizeof(buf1)));
+ json_object_int_add(json_frag, "eviCount",
+ listcount(es_frag->es_evi_frag_list));
+
+ json_object_array_add(json_frags, json_frag);
+ }
+}
+
+static void bgp_evpn_es_frag_show_detail(struct vty *vty,
+ struct bgp_evpn_es *es)
+{
+ struct listnode *node;
+ char buf1[RD_ADDRSTRLEN];
+ struct bgp_evpn_es_frag *es_frag;
+
+ for (ALL_LIST_ELEMENTS_RO(es->es_frag_list, node, es_frag)) {
+ vty_out(vty, " %s EVIs: %d\n",
+ prefix_rd2str(&es_frag->prd, buf1, sizeof(buf1)),
+ listcount(es_frag->es_evi_frag_list));
+ }
+}
+
static char *bgp_evpn_es_vteps_str(char *vtep_str, struct bgp_evpn_es *es,
uint8_t vtep_str_size)
{
@@ -2267,9 +2565,11 @@ static void bgp_evpn_es_show_entry(struct vty *vty,
json_object *json_types;
json_object_string_add(json, "esi", es->esi_str);
- json_object_string_add(json, "rd",
- prefix_rd2str(&es->prd, buf1,
- sizeof(buf1)));
+ if (es->es_base_frag)
+ json_object_string_add(
+ json, "rd",
+ prefix_rd2str(&es->es_base_frag->prd, buf1,
+ sizeof(buf1)));
if (es->flags & (BGP_EVPNES_LOCAL | BGP_EVPNES_REMOTE)) {
json_types = json_object_new_array();
@@ -2306,8 +2606,9 @@ static void bgp_evpn_es_show_entry(struct vty *vty,
bgp_evpn_es_vteps_str(vtep_str, es, sizeof(vtep_str));
- if (es->flags & BGP_EVPNES_LOCAL)
- prefix_rd2str(&es->prd, buf1, sizeof(buf1));
+ if (es->es_base_frag)
+ prefix_rd2str(&es->es_base_frag->prd, buf1,
+ sizeof(buf1));
else
strlcpy(buf1, "-", sizeof(buf1));
@@ -2324,6 +2625,7 @@ static void bgp_evpn_es_show_entry_detail(struct vty *vty,
json_object *json_flags;
json_object *json_incons;
json_object *json_vteps;
+ json_object *json_frags;
struct listnode *node;
struct bgp_evpn_es_vtep *es_vtep;
@@ -2362,6 +2664,11 @@ static void bgp_evpn_es_show_entry_detail(struct vty *vty,
}
json_object_object_add(json, "vteps", json_vteps);
}
+ if (listcount(es->es_frag_list)) {
+ json_frags = json_object_new_array();
+ bgp_evpn_es_json_frag_fill(json_frags, es);
+ json_object_object_add(json, "fragments", json_frags);
+ }
if (es->inconsistencies) {
json_incons = json_object_new_array();
if (es->inconsistencies & BGP_EVPNES_INCONS_VTEP_LIST)
@@ -2381,8 +2688,9 @@ static void bgp_evpn_es_show_entry_detail(struct vty *vty,
if (es->flags & BGP_EVPNES_REMOTE)
strlcat(type_str, "R", sizeof(type_str));
- if (es->flags & BGP_EVPNES_LOCAL)
- prefix_rd2str(&es->prd, buf1, sizeof(buf1));
+ if (es->es_base_frag)
+ prefix_rd2str(&es->es_base_frag->prd, buf1,
+ sizeof(buf1));
else
strlcpy(buf1, "-", sizeof(buf1));
@@ -2415,6 +2723,10 @@ static void bgp_evpn_es_show_entry_detail(struct vty *vty,
}
vty_out(vty, " Inconsistencies: %s\n",
incons_str);
+ if (listcount(es->es_frag_list)) {
+ vty_out(vty, " Fragments:\n");
+ bgp_evpn_es_frag_show_detail(vty, es);
+ }
if (listcount(es->es_vtep_list)) {
vty_out(vty, " VTEPs:\n");
bgp_evpn_es_vteps_show_detail(vty, es);
@@ -2716,10 +3028,7 @@ static struct bgp_evpn_es_vrf *bgp_evpn_es_vrf_create(struct bgp_evpn_es *es,
es_vrf->bgp_vrf = bgp_vrf;
/* insert into the VRF-ESI rb tree */
- if (RB_INSERT(bgp_es_vrf_rb_head, &bgp_vrf->es_vrf_rb_tree, es_vrf)) {
- XFREE(MTYPE_BGP_EVPN_ES_VRF, es_vrf);
- return NULL;
- }
+ RB_INSERT(bgp_es_vrf_rb_head, &bgp_vrf->es_vrf_rb_tree, es_vrf);
/* add to the ES's VRF list */
listnode_init(&es_vrf->es_listnode, es_vrf);
@@ -2823,8 +3132,6 @@ void bgp_evpn_es_vrf_ref(struct bgp_evpn_es_evi *es_evi, struct bgp *bgp_vrf)
es_vrf = bgp_evpn_es_vrf_find(es, bgp_vrf);
if (!es_vrf)
es_vrf = bgp_evpn_es_vrf_create(es, bgp_vrf);
- if (!es_vrf)
- return;
es_evi->es_vrf = es_vrf;
++es_vrf->ref_cnt;
@@ -3259,10 +3566,7 @@ static struct bgp_evpn_es_evi *bgp_evpn_es_evi_new(struct bgp_evpn_es *es,
es_evi->es_evi_vtep_list->cmp = bgp_evpn_es_evi_vtep_cmp;
/* insert into the VNI-ESI rb tree */
- if (RB_INSERT(bgp_es_evi_rb_head, &vpn->es_evi_rb_tree, es_evi)) {
- XFREE(MTYPE_BGP_EVPN_ES_EVI, es_evi);
- return NULL;
- }
+ RB_INSERT(bgp_es_evi_rb_head, &vpn->es_evi_rb_tree, es_evi);
/* add to the ES's VNI list */
listnode_init(&es_evi->es_listnode, es_evi);
@@ -3287,7 +3591,7 @@ bgp_evpn_es_evi_free(struct bgp_evpn_es_evi *es_evi)
*/
if (es_evi->flags & (BGP_EVPNES_EVI_LOCAL | BGP_EVPNES_EVI_REMOTE))
return es_evi;
-
+ bgp_evpn_es_frag_evi_del(es_evi, false);
bgp_evpn_es_vrf_deref(es_evi);
/* remove from the ES's VNI list */
@@ -3316,6 +3620,7 @@ static void bgp_evpn_es_evi_local_info_set(struct bgp_evpn_es_evi *es_evi)
SET_FLAG(es_evi->flags, BGP_EVPNES_EVI_LOCAL);
listnode_init(&es_evi->l2vni_listnode, es_evi);
listnode_add(vpn->local_es_evi_list, &es_evi->l2vni_listnode);
+ bgp_evpn_es_frag_evi_add(es_evi);
}
/* clear any local info associated with the ES-EVI */
@@ -3374,24 +3679,19 @@ bgp_evpn_local_es_evi_do_del(struct bgp_evpn_es_evi *es_evi)
bgp = bgp_get_evpn();
+ /* remove the es_evi from the es_frag before sending the update */
+ bgp_evpn_es_frag_evi_del(es_evi, true);
if (bgp) {
/* update EAD-ES with new list of VNIs */
- if (bgp_evpn_local_es_is_active(es)) {
- build_evpn_type1_prefix(&p, BGP_EVPN_AD_ES_ETH_TAG,
- &es->esi, es->originator_ip);
- if (bgp_evpn_type1_route_update(bgp, es, NULL, &p))
- flog_err(EC_BGP_EVPN_ROUTE_CREATE,
- "%u: EAD-ES route update failure for ESI %s VNI %u",
- bgp->vrf_id, es->esi_str,
- es_evi->vpn->vni);
- }
+ if (bgp_evpn_local_es_is_active(es))
+ bgp_evpn_ead_es_route_update(bgp, es);
/* withdraw and delete EAD-EVI */
if (CHECK_FLAG(es->flags, BGP_EVPNES_ADV_EVI)) {
build_evpn_type1_prefix(&p, BGP_EVPN_AD_EVI_ETH_TAG,
&es->esi, es->originator_ip);
- if (bgp_evpn_type1_evi_route_delete(bgp,
- es, es_evi->vpn, &p))
+ if (bgp_evpn_ead_evi_route_delete(bgp, es, es_evi->vpn,
+ &p))
flog_err(EC_BGP_EVPN_ROUTE_DELETE,
"%u: EAD-EVI route deletion failure for ESI %s VNI %u",
bgp->vrf_id, es->esi_str,
@@ -3479,11 +3779,8 @@ int bgp_evpn_local_es_evi_add(struct bgp *bgp, esi_t *esi, vni_t vni)
if (CHECK_FLAG(es_evi->flags, BGP_EVPNES_EVI_LOCAL))
/* dup */
return 0;
- } else {
+ } else
es_evi = bgp_evpn_es_evi_new(es, vpn);
- if (!es_evi)
- return -1;
- }
bgp_evpn_es_evi_local_info_set(es_evi);
@@ -3491,21 +3788,12 @@ int bgp_evpn_local_es_evi_add(struct bgp *bgp, esi_t *esi, vni_t vni)
if (CHECK_FLAG(es->flags, BGP_EVPNES_ADV_EVI)) {
build_evpn_type1_prefix(&p, BGP_EVPN_AD_EVI_ETH_TAG, &es->esi,
es->originator_ip);
- if (bgp_evpn_type1_route_update(bgp, es, vpn, &p))
- flog_err(EC_BGP_EVPN_ROUTE_CREATE,
- "%u: EAD-EVI route creation failure for ESI %s VNI %u",
- bgp->vrf_id, es->esi_str, vni);
+ bgp_evpn_ead_evi_route_update(bgp, es, vpn, &p);
}
/* update EAD-ES */
- build_evpn_type1_prefix(&p, BGP_EVPN_AD_ES_ETH_TAG,
- &es->esi, es->originator_ip);
- if (bgp_evpn_local_es_is_active(es)) {
- if (bgp_evpn_type1_route_update(bgp, es, NULL, &p))
- flog_err(EC_BGP_EVPN_ROUTE_CREATE,
- "%u: EAD-ES route creation failure for ESI %s VNI %u",
- bgp->vrf_id, es->esi_str, vni);
- }
+ if (bgp_evpn_local_es_is_active(es))
+ bgp_evpn_ead_es_route_update(bgp, es);
return 0;
}
@@ -3544,13 +3832,8 @@ int bgp_evpn_remote_es_evi_add(struct bgp *bgp, struct bgpevpn *vpn,
}
es_evi = bgp_evpn_es_evi_find(es, vpn);
- if (!es_evi) {
+ if (!es_evi)
es_evi = bgp_evpn_es_evi_new(es, vpn);
- if (!es_evi) {
- bgp_evpn_es_free(es, __func__);
- return -1;
- }
- }
ead_es = !!p->prefix.ead_addr.eth_tag;
bgp_evpn_es_evi_vtep_add(bgp, es_evi, p->prefix.ead_addr.ip.ipaddr_v4,
@@ -3782,11 +4065,18 @@ static void bgp_evpn_es_evi_show_entry(struct vty *vty,
static void bgp_evpn_es_evi_show_entry_detail(struct vty *vty,
struct bgp_evpn_es_evi *es_evi, json_object *json)
{
+ char buf1[RD_ADDRSTRLEN];
+
if (json) {
json_object *json_flags;
/* Add the "brief" info first */
bgp_evpn_es_evi_show_entry(vty, es_evi, json);
+ if (es_evi->es_frag)
+ json_object_string_add(
+ json, "esFragmentRd",
+ prefix_rd2str(&es_evi->es_frag->prd, buf1,
+ sizeof(buf1)));
if (es_evi->flags & BGP_EVPNES_EVI_INCONS_VTEP_LIST) {
json_flags = json_object_new_array();
json_array_string_add(json_flags, "es-vtep-mismatch");
@@ -3809,6 +4099,10 @@ static void bgp_evpn_es_evi_show_entry_detail(struct vty *vty,
vty_out(vty, "VNI: %d ESI: %s\n",
es_evi->vpn->vni, es_evi->es->esi_str);
vty_out(vty, " Type: %s\n", type_str);
+ if (es_evi->es_frag)
+ vty_out(vty, " ES fragment RD: %s\n",
+ prefix_rd2str(&es_evi->es_frag->prd, buf1,
+ sizeof(buf1)));
vty_out(vty, " Inconsistencies: %s\n",
(es_evi->flags & BGP_EVPNES_EVI_INCONS_VTEP_LIST) ?
"es-vtep-mismatch":"-");
@@ -4664,6 +4958,10 @@ void bgp_evpn_mh_init(void)
bgp_mh_info->ead_evi_rx = BGP_EVPN_MH_EAD_EVI_RX_DEF;
bgp_mh_info->ead_evi_tx = BGP_EVPN_MH_EAD_EVI_TX_DEF;
+ bgp_mh_info->ead_es_export_rtl = list_new();
+ bgp_mh_info->ead_es_export_rtl->cmp =
+ (int (*)(void *, void *))bgp_evpn_route_target_cmp;
+ bgp_mh_info->ead_es_export_rtl->del = bgp_evpn_xxport_delete_ecomm;
/* config knobs - XXX add cli to control it */
bgp_mh_info->ead_evi_adv_for_down_links = true;
@@ -4672,6 +4970,7 @@ void bgp_evpn_mh_init(void)
bgp_mh_info->host_routes_use_l3nhg = BGP_EVPN_MH_USE_ES_L3NHG_DEF;
bgp_mh_info->suppress_l3_ecomm_on_inactive_es = true;
bgp_mh_info->bgp_evpn_nh_setup = true;
+ bgp_mh_info->evi_per_es_frag = BGP_EVPN_MAX_EVI_PER_ES_FRAG;
memset(&zero_esi_buf, 0, sizeof(esi_t));
}
@@ -4692,6 +4991,7 @@ void bgp_evpn_mh_finish(void)
thread_cancel(&bgp_mh_info->t_cons_check);
list_delete(&bgp_mh_info->local_es_list);
list_delete(&bgp_mh_info->pend_es_list);
+ list_delete(&bgp_mh_info->ead_es_export_rtl);
XFREE(MTYPE_BGP_EVPN_MH_INFO, bgp_mh_info);
}
diff --git a/bgpd/bgp_evpn_mh.h b/bgpd/bgp_evpn_mh.h
index 37a46c2f0e..d9e2e72e4f 100644
--- a/bgpd/bgp_evpn_mh.h
+++ b/bgpd/bgp_evpn_mh.h
@@ -35,6 +35,28 @@
#define BGP_EVPN_MH_USE_ES_L3NHG_DEF true
+/* XXX - tune this */
+#define BGP_EVPN_MAX_EVI_PER_ES_FRAG 128
+
+/* An ES can result in multiple EAD-per-ES routes. Each EAD fragment is
+ * associated with a unique RD
+ */
+struct bgp_evpn_es_frag {
+ /* frag is associated with a parent ES */
+ struct bgp_evpn_es *es;
+
+ /* Id for deriving the RD automatically for this ES fragment */
+ uint16_t rd_id;
+ /* RD for this ES fragment */
+ struct prefix_rd prd;
+
+ /* Memory used for linking bgp_evpn_es_rd to bgp_evpn_es->rd_list */
+ struct listnode es_listnode;
+
+ /* List of ES-EVIs associated with this fragment */
+ struct list *es_evi_frag_list;
+};
+
/* Ethernet Segment entry -
* - Local and remote ESs are maintained in a global RB tree,
* bgp_mh_info->es_rb_tree using ESI as key
@@ -79,11 +101,9 @@ struct bgp_evpn_es {
*/
struct listnode pend_es_listnode;
- /* [EVPNES_LOCAL] Id for deriving the RD automatically for this ESI */
- uint16_t rd_id;
-
- /* [EVPNES_LOCAL] RD for this ES */
- struct prefix_rd prd;
+ /* [EVPNES_LOCAL] List of RDs for this ES (bgp_evpn_es_rd) */
+ struct list *es_frag_list;
+ struct bgp_evpn_es_frag *es_base_frag;
/* [EVPNES_LOCAL] originator ip address */
struct in_addr originator_ip;
@@ -203,6 +223,8 @@ struct bgp_evpn_es_vrf {
*/
struct bgp_evpn_es_evi {
struct bgp_evpn_es *es;
+ /* Only applicable if EVI_LOCAL */
+ struct bgp_evpn_es_frag *es_frag;
struct bgpevpn *vpn;
/* ES-EVI flags */
@@ -224,6 +246,10 @@ struct bgp_evpn_es_evi {
*/
struct listnode es_listnode;
+ /* memory used for linking the es_evi to
+ * es_evi->es_frag->es_evi_frag_list
+ */
+ struct listnode es_frag_listnode;
/* list of PEs (bgp_evpn_es_evi_vtep) attached to the ES for this VNI */
struct list *es_evi_vtep_list;
@@ -310,6 +336,16 @@ struct bgp_evpn_mh_info {
bool suppress_l3_ecomm_on_inactive_es;
/* Setup EVPN PE nexthops and their RMAC in bgpd */
bool bgp_evpn_nh_setup;
+
+ /* If global export-rts are configured, they are used when sending
+ * the ead-per-es route instead of the L2-VNI(s) RTs
+ */
+ struct list *ead_es_export_rtl;
+
+ /* Number of EVIs in an ES fragment - used of EAD-per-ES route
+ * construction
+ */
+ uint32_t evi_per_es_frag;
};
/****************************************************************************/
@@ -434,5 +470,7 @@ extern void bgp_evpn_nh_finish(struct bgp *bgp_vrf);
extern void bgp_evpn_nh_show(struct vty *vty, bool uj);
extern void bgp_evpn_path_nh_add(struct bgp *bgp_vrf, struct bgp_path_info *pi);
extern void bgp_evpn_path_nh_del(struct bgp *bgp_vrf, struct bgp_path_info *pi);
+extern void bgp_evpn_mh_config_ead_export_rt(struct bgp *bgp,
+ struct ecommunity *ecom, bool del);
#endif /* _FRR_BGP_EVPN_MH_H */
diff --git a/bgpd/bgp_evpn_private.h b/bgpd/bgp_evpn_private.h
index c46f34bf74..763408782f 100644
--- a/bgpd/bgp_evpn_private.h
+++ b/bgpd/bgp_evpn_private.h
@@ -316,12 +316,6 @@ static inline int is_export_rt_configured(struct bgpevpn *vpn)
return (CHECK_FLAG(vpn->flags, VNI_FLAG_EXPRT_CFGD));
}
-static inline int is_vni_param_configured(struct bgpevpn *vpn)
-{
- return (is_rd_configured(vpn) || is_import_rt_configured(vpn)
- || is_export_rt_configured(vpn));
-}
-
static inline void encode_es_rt_extcomm(struct ecommunity_val *eval,
struct ethaddr *mac)
{
@@ -532,6 +526,7 @@ static inline void evpn_type1_prefix_global_copy(struct prefix_evpn *global_p,
memcpy(global_p, vni_p, sizeof(*global_p));
global_p->prefix.ead_addr.ip.ipa_type = 0;
global_p->prefix.ead_addr.ip.ipaddr_v4.s_addr = INADDR_ANY;
+ global_p->prefix.ead_addr.frag_id = 0;
}
/* EAD prefix in the global table doesn't include the VTEP-IP so
@@ -647,7 +642,6 @@ extern struct bgp_dest *
bgp_global_evpn_node_lookup(struct bgp_table *table, afi_t afi, safi_t safi,
const struct prefix_evpn *evp,
struct prefix_rd *prd);
-extern void bgp_evpn_import_route_in_vrfs(struct bgp_path_info *pi, int import);
extern void bgp_evpn_update_type2_route_entry(struct bgp *bgp,
struct bgpevpn *vpn,
struct bgp_node *rn,
@@ -657,4 +651,7 @@ extern int bgp_evpn_route_entry_install_if_vrf_match(struct bgp *bgp_vrf,
struct bgp_path_info *pi,
int install);
extern void bgp_evpn_import_type2_route(struct bgp_path_info *pi, int import);
+extern void bgp_evpn_xxport_delete_ecomm(void *val);
+extern int bgp_evpn_route_target_cmp(struct ecommunity *ecom1,
+ struct ecommunity *ecom2);
#endif /* _BGP_EVPN_PRIVATE_H */
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index 7ddf159844..4da3fa8f3b 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -358,7 +358,7 @@ static void bgp_evpn_show_route_header(struct vty *vty, struct bgp *bgp,
"Status codes: s suppressed, d damped, h history, * valid, > best, i - internal\n");
vty_out(vty, "Origin codes: i - IGP, e - EGP, ? - incomplete\n");
vty_out(vty,
- "EVPN type-1 prefix: [1]:[EthTag]:[ESI]:[IPlen]:[VTEP-IP]\n");
+ "EVPN type-1 prefix: [1]:[EthTag]:[ESI]:[IPlen]:[VTEP-IP]:[Frag-id]\n");
vty_out(vty,
"EVPN type-2 prefix: [2]:[EthTag]:[MAClen]:[MAC]:[IPlen]:[IP]\n");
vty_out(vty, "EVPN type-3 prefix: [3]:[EthTag]:[IPlen]:[OrigIP]\n");
@@ -2712,7 +2712,7 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp,
/* RD header and legend - once overall. */
if (rd_header && !json) {
vty_out(vty,
- "EVPN type-1 prefix: [1]:[EthTag]:[ESI]:[IPlen]:[VTEP-IP]\n");
+ "EVPN type-1 prefix: [1]:[EthTag]:[ESI]:[IPlen]:[VTEP-IP]:[Frag-id]\n");
vty_out(vty,
"EVPN type-2 prefix: [2]:[EthTag]:[MAClen]:[MAC]\n");
vty_out(vty,
@@ -5992,6 +5992,87 @@ DEFUN (no_bgp_evpn_vrf_rt,
return CMD_SUCCESS;
}
+DEFPY(bgp_evpn_ead_ess_frag_evi_limit, bgp_evpn_ead_es_frag_evi_limit_cmd,
+ "[no$no] ead-es-frag evi-limit (1-1000)$limit",
+ NO_STR
+ "EAD ES fragment config\n"
+ "EVIs per-fragment\n"
+ "limit\n")
+{
+ bgp_mh_info->evi_per_es_frag =
+ no ? BGP_EVPN_MAX_EVI_PER_ES_FRAG : limit;
+
+ return CMD_SUCCESS;
+}
+
+DEFUN(bgp_evpn_ead_es_rt, bgp_evpn_ead_es_rt_cmd,
+ "ead-es-route-target export RT",
+ "EAD ES Route Target\n"
+ "export\n"
+ "Route target (A.B.C.D:MN|EF:OPQR|GHJK:MN)\n")
+{
+ struct bgp *bgp = VTY_GET_CONTEXT(bgp);
+ struct ecommunity *ecomadd = NULL;
+
+ if (!bgp)
+ return CMD_WARNING;
+
+ if (!EVPN_ENABLED(bgp)) {
+ vty_out(vty, "This command is only supported under EVPN VRF\n");
+ return CMD_WARNING;
+ }
+
+ /* Add/update the export route-target */
+ ecomadd = ecommunity_str2com(argv[2]->arg, ECOMMUNITY_ROUTE_TARGET, 0);
+ if (!ecomadd) {
+ vty_out(vty, "%% Malformed Route Target list\n");
+ return CMD_WARNING;
+ }
+ ecommunity_str(ecomadd);
+
+ /* Do nothing if we already have this export route-target */
+ if (!bgp_evpn_rt_matches_existing(bgp_mh_info->ead_es_export_rtl,
+ ecomadd))
+ bgp_evpn_mh_config_ead_export_rt(bgp, ecomadd, false);
+
+ return CMD_SUCCESS;
+}
+
+DEFUN(no_bgp_evpn_ead_es_rt, no_bgp_evpn_ead_es_rt_cmd,
+ "no ead-es-route-target export RT",
+ NO_STR
+ "EAD ES Route Target\n"
+ "export\n" EVPN_ASN_IP_HELP_STR)
+{
+ struct bgp *bgp = VTY_GET_CONTEXT(bgp);
+ struct ecommunity *ecomdel = NULL;
+
+ if (!bgp)
+ return CMD_WARNING;
+
+ if (!EVPN_ENABLED(bgp)) {
+ vty_out(vty, "This command is only supported under EVPN VRF\n");
+ return CMD_WARNING;
+ }
+
+ ecomdel = ecommunity_str2com(argv[3]->arg, ECOMMUNITY_ROUTE_TARGET, 0);
+ if (!ecomdel) {
+ vty_out(vty, "%% Malformed Route Target list\n");
+ return CMD_WARNING;
+ }
+ ecommunity_str(ecomdel);
+
+ if (!bgp_evpn_rt_matches_existing(bgp_mh_info->ead_es_export_rtl,
+ ecomdel)) {
+ vty_out(vty,
+ "%% RT specified does not match EAD-ES RT configuration\n");
+ return CMD_WARNING;
+ }
+ bgp_evpn_mh_config_ead_export_rt(bgp, ecomdel, true);
+
+ return CMD_SUCCESS;
+}
+
DEFUN (bgp_evpn_vni_rt,
bgp_evpn_vni_rt_cmd,
"route-target <both|import|export> RT",
@@ -6258,6 +6339,10 @@ void bgp_config_write_evpn_info(struct vty *vty, struct bgp *bgp, afi_t afi,
if (bgp->resolve_overlay_index)
vty_out(vty, " enable-resolve-overlay-index\n");
+ if (bgp_mh_info->evi_per_es_frag != BGP_EVPN_MAX_EVI_PER_ES_FRAG)
+ vty_out(vty, " ead-es-frag evi-limit %u\n",
+ bgp_mh_info->evi_per_es_frag);
+
if (bgp_mh_info->host_routes_use_l3nhg !=
BGP_EVPN_MH_USE_ES_L3NHG_DEF) {
if (bgp_mh_info->host_routes_use_l3nhg)
@@ -6321,6 +6406,23 @@ void bgp_config_write_evpn_info(struct vty *vty, struct bgp *bgp, afi_t afi,
vty_out(vty, " advertise ipv4 unicast gateway-ip\n");
}
+ /* EAD ES export route-target */
+ if (listcount(bgp_mh_info->ead_es_export_rtl)) {
+ struct ecommunity *ecom;
+ char *ecom_str;
+ struct listnode *node;
+
+ for (ALL_LIST_ELEMENTS_RO(bgp_mh_info->ead_es_export_rtl, node,
+ ecom)) {
+
+ ecom_str = ecommunity_ecom2str(
+ ecom, ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
+ vty_out(vty, " ead-es-route-target export %s\n",
+ ecom_str);
+ XFREE(MTYPE_ECOMMUNITY_STR, ecom_str);
+ }
+ }
+
if (CHECK_FLAG(bgp->af_flags[AFI_L2VPN][SAFI_EVPN],
BGP_L2VPN_EVPN_ADV_IPV6_UNICAST)) {
if (bgp->adv_cmd_rmap[AFI_IP6][SAFI_UNICAST].name)
@@ -6506,6 +6608,9 @@ void bgp_ethernetvpn_init(void)
install_element(BGP_NODE, &no_bgp_evpn_vrf_rd_without_val_cmd);
install_element(BGP_EVPN_NODE, &bgp_evpn_vrf_rt_cmd);
install_element(BGP_EVPN_NODE, &no_bgp_evpn_vrf_rt_cmd);
+ install_element(BGP_EVPN_NODE, &bgp_evpn_ead_es_rt_cmd);
+ install_element(BGP_EVPN_NODE, &no_bgp_evpn_ead_es_rt_cmd);
+ install_element(BGP_EVPN_NODE, &bgp_evpn_ead_es_frag_evi_limit_cmd);
install_element(BGP_EVPN_VNI_NODE, &bgp_evpn_advertise_svi_ip_vni_cmd);
install_element(BGP_EVPN_VNI_NODE,
&bgp_evpn_advertise_default_gw_vni_cmd);
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index 1bc7b62304..8772afd736 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -631,13 +631,23 @@ DEFUN(show_bgp_labelpool_summary, show_bgp_labelpool_summary_cmd,
if (uj) {
json = json_object_new_object();
+#if CONFDATE > 20230131
+CPP_NOTICE("Remove JSON object commands with keys starting with capital")
+#endif
json_object_int_add(json, "Ledger", skiplist_count(lp->ledger));
+ json_object_int_add(json, "ledger", skiplist_count(lp->ledger));
json_object_int_add(json, "InUse", skiplist_count(lp->inuse));
+ json_object_int_add(json, "inUse", skiplist_count(lp->inuse));
json_object_int_add(json, "Requests",
lp_fifo_count(&lp->requests));
+ json_object_int_add(json, "requests",
+ lp_fifo_count(&lp->requests));
json_object_int_add(json, "LabelChunks", listcount(lp->chunks));
+ json_object_int_add(json, "labelChunks", listcount(lp->chunks));
json_object_int_add(json, "Pending", lp->pending_count);
+ json_object_int_add(json, "pending", lp->pending_count);
json_object_int_add(json, "Reconnects", lp->reconnect_count);
+ json_object_int_add(json, "reconnects", lp->reconnect_count);
vty_json(vty, json);
} else {
vty_out(vty, "Labelpool Summary\n");
diff --git a/bgpd/bgp_memory.c b/bgpd/bgp_memory.c
index ffb1ec162b..a994b536c4 100644
--- a/bgpd/bgp_memory.c
+++ b/bgpd/bgp_memory.c
@@ -126,6 +126,7 @@ DEFINE_MTYPE(BGPD, BGP_EVPN_PATH_NH_INFO, "BGP EVPN PATH NH Information");
DEFINE_MTYPE(BGPD, BGP_EVPN_NH, "BGP EVPN Nexthop");
DEFINE_MTYPE(BGPD, BGP_EVPN_ES_EVI_VTEP, "BGP EVPN ES-EVI VTEP");
DEFINE_MTYPE(BGPD, BGP_EVPN_ES, "BGP EVPN ESI Information");
+DEFINE_MTYPE(BGPD, BGP_EVPN_ES_FRAG, "BGP EVPN ES Fragment Information");
DEFINE_MTYPE(BGPD, BGP_EVPN_ES_EVI, "BGP EVPN ES-per-EVI Information");
DEFINE_MTYPE(BGPD, BGP_EVPN_ES_VRF, "BGP EVPN ES-per-VRF Information");
DEFINE_MTYPE(BGPD, BGP_EVPN_IMPORT_RT, "BGP EVPN Import RT");
diff --git a/bgpd/bgp_memory.h b/bgpd/bgp_memory.h
index 63e7b40ef7..76b2f9f56a 100644
--- a/bgpd/bgp_memory.h
+++ b/bgpd/bgp_memory.h
@@ -115,6 +115,7 @@ DECLARE_MTYPE(LCOMMUNITY_VAL);
DECLARE_MTYPE(BGP_EVPN_MH_INFO);
DECLARE_MTYPE(BGP_EVPN_ES);
+DECLARE_MTYPE(BGP_EVPN_ES_FRAG);
DECLARE_MTYPE(BGP_EVPN_ES_EVI);
DECLARE_MTYPE(BGP_EVPN_ES_VRF);
DECLARE_MTYPE(BGP_EVPN_ES_VTEP);
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 8313c12e61..d3ebc0e6a2 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -49,10 +49,8 @@
extern struct zclient *zclient;
-static void register_zebra_rnh(struct bgp_nexthop_cache *bnc,
- int is_bgp_static_route);
-static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc,
- int is_bgp_static_route);
+static void register_zebra_rnh(struct bgp_nexthop_cache *bnc);
+static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc);
static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p);
static void bgp_nht_ifp_initial(struct thread *thread);
@@ -92,8 +90,7 @@ static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)
}
/* only unregister if this is the last nh for this prefix*/
if (!bnc_existing_for_prefix(bnc))
- unregister_zebra_rnh(
- bnc, CHECK_FLAG(bnc->flags, BGP_STATIC_ROUTE));
+ unregister_zebra_rnh(bnc);
bnc_free(bnc);
}
}
@@ -308,7 +305,7 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
SET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
} else if (!CHECK_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED)
&& !is_default_host_route(&bnc->prefix))
- register_zebra_rnh(bnc, is_bgp_static_route);
+ register_zebra_rnh(bnc);
if (pi && pi->nexthop != bnc) {
/* Unlink from existing nexthop cache, if any. This will also
@@ -387,7 +384,7 @@ void bgp_delete_connected_nexthop(afi_t afi, struct peer *peer)
zlog_debug(
"Freeing connected NHT node %p for peer %s(%s)",
bnc, peer->host, bnc->bgp->name_pretty);
- unregister_zebra_rnh(bnc, 0);
+ unregister_zebra_rnh(bnc);
bnc_free(bnc);
}
}
@@ -665,6 +662,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
struct bgp_nexthop_cache_head *tree = NULL;
struct bgp_nexthop_cache *bnc_nhc, *bnc_import;
struct bgp *bgp;
+ struct prefix match;
struct zapi_route nhr;
afi_t afi;
@@ -677,16 +675,16 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
return;
}
- if (!zapi_nexthop_update_decode(zclient->ibuf, &nhr)) {
+ if (!zapi_nexthop_update_decode(zclient->ibuf, &match, &nhr)) {
zlog_err("%s[%s]: Failure to decode nexthop update", __func__,
bgp->name_pretty);
return;
}
- afi = family2afi(nhr.prefix.family);
+ afi = family2afi(match.family);
tree = &bgp->nexthop_cache_table[afi];
- bnc_nhc = bnc_find(tree, &nhr.prefix, nhr.srte_color);
+ bnc_nhc = bnc_find(tree, &match, nhr.srte_color);
if (!bnc_nhc) {
if (BGP_DEBUG(nht, NHT))
zlog_debug(
@@ -697,7 +695,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
tree = &bgp->import_check_table[afi];
- bnc_import = bnc_find(tree, &nhr.prefix, nhr.srte_color);
+ bnc_import = bnc_find(tree, &match, nhr.srte_color);
if (!bnc_import) {
if (BGP_DEBUG(nht, NHT))
zlog_debug(
@@ -891,8 +889,9 @@ static void sendmsg_zebra_rnh(struct bgp_nexthop_cache *bnc, int command)
zserv_command_string(command), &bnc->prefix,
bnc->bgp->name_pretty);
- ret = zclient_send_rnh(zclient, command, &bnc->prefix, exact_match,
- resolve_via_default, bnc->bgp->vrf_id);
+ ret = zclient_send_rnh(zclient, command, &bnc->prefix, SAFI_UNICAST,
+ exact_match, resolve_via_default,
+ bnc->bgp->vrf_id);
if (ret == ZCLIENT_SEND_FAILURE) {
flog_warn(EC_BGP_ZEBRA_SEND,
"sendmsg_nexthop: zclient_send_message() failed");
@@ -914,8 +913,7 @@ static void sendmsg_zebra_rnh(struct bgp_nexthop_cache *bnc, int command)
* RETURNS:
* void.
*/
-static void register_zebra_rnh(struct bgp_nexthop_cache *bnc,
- int is_bgp_import_route)
+static void register_zebra_rnh(struct bgp_nexthop_cache *bnc)
{
/* Check if we have already registered */
if (bnc->flags & BGP_NEXTHOP_REGISTERED)
@@ -936,8 +934,7 @@ static void register_zebra_rnh(struct bgp_nexthop_cache *bnc,
* RETURNS:
* void.
*/
-static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc,
- int is_bgp_import_route)
+static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc)
{
/* Check if we have already registered */
if (!CHECK_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED))
@@ -1172,7 +1169,7 @@ void bgp_nht_register_nexthops(struct bgp *bgp)
frr_each (bgp_nexthop_cache, &bgp->nexthop_cache_table[afi],
bnc) {
- register_zebra_rnh(bnc, 0);
+ register_zebra_rnh(bnc);
}
}
}
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index 8fac36cf60..09db041780 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -665,9 +665,6 @@ static void bgp_write_notify(struct peer *peer)
assert(stream_get_endp(s) >= BGP_HEADER_SIZE);
- /* Stop collecting data within the socket */
- sockopt_cork(peer->fd, 0);
-
/*
* socket is in nonblocking mode, if we can't deliver the NOTIFY, well,
* we only care about getting a clean shutdown at this point.
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index ef8537f039..5255eb5800 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -5376,10 +5376,10 @@ void bgp_clear_stale_route(struct peer *peer, afi_t afi, safi_t safi)
bgp_attr_get_community(
pi->attr),
COMMUNITY_NO_LLGR))
- break;
+ continue;
if (!CHECK_FLAG(pi->flags,
BGP_PATH_STALE))
- break;
+ continue;
/*
* If this is VRF leaked route
@@ -5409,9 +5409,9 @@ void bgp_clear_stale_route(struct peer *peer, afi_t afi, safi_t safi)
!community_include(
bgp_attr_get_community(pi->attr),
COMMUNITY_NO_LLGR))
- break;
+ continue;
if (!CHECK_FLAG(pi->flags, BGP_PATH_STALE))
- break;
+ continue;
if (safi == SAFI_UNICAST &&
(peer->bgp->inst_type ==
BGP_INSTANCE_TYPE_VRF ||
@@ -9529,6 +9529,8 @@ void route_vty_out_overlay(struct vty *vty, const struct prefix *p,
} else {
json_object_string_add(json_nexthop, "Error",
"Unsupported address-family");
+ json_object_string_add(json_nexthop, "error",
+ "Unsupported address-family");
}
}
@@ -9898,9 +9900,12 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
if (tag_buf[0] != '\0')
vty_out(vty, " VNI %s", tag_buf);
} else {
- if (tag_buf[0])
+ if (tag_buf[0]) {
json_object_string_add(json_path, "VNI",
tag_buf);
+ json_object_string_add(json_path, "vni",
+ tag_buf);
+ }
}
}
@@ -12458,6 +12463,8 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
output_arg, show_flags,
rpki_target_state);
} else {
+ struct listnode *node;
+ struct bgp *abgp;
/* show <ip> bgp ipv4 all: AFI_IP, show <ip> bgp ipv6 all:
* AFI_IP6 */
@@ -12469,66 +12476,80 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
afi = CHECK_FLAG(show_flags, BGP_SHOW_OPT_AFI_IP)
? AFI_IP
: AFI_IP6;
- FOREACH_SAFI (safi) {
- if (!bgp_afi_safi_peer_exists(bgp, afi, safi))
- continue;
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, abgp)) {
+ FOREACH_SAFI (safi) {
+ if (!bgp_afi_safi_peer_exists(abgp, afi,
+ safi))
+ continue;
- if (uj) {
- if (first)
- first = false;
+ if (uj) {
+ if (first)
+ first = false;
+ else
+ vty_out(vty, ",\n");
+ vty_out(vty, "\"%s\":{\n",
+ get_afi_safi_str(afi,
+ safi,
+ true));
+ } else
+ vty_out(vty,
+ "\nFor address family: %s\n",
+ get_afi_safi_str(
+ afi, safi,
+ false));
+
+ if (community)
+ bgp_show_community(
+ vty, abgp, community,
+ exact_match, afi, safi,
+ show_flags);
else
- vty_out(vty, ",\n");
- vty_out(vty, "\"%s\":{\n",
- get_afi_safi_str(afi, safi,
- true));
- } else
- vty_out(vty,
- "\nFor address family: %s\n",
- get_afi_safi_str(afi, safi,
- false));
-
- if (community)
- bgp_show_community(vty, bgp, community,
- exact_match, afi,
- safi, show_flags);
- else
- bgp_show(vty, bgp, afi, safi, sh_type,
- output_arg, show_flags,
- rpki_target_state);
- if (uj)
- vty_out(vty, "}\n");
+ bgp_show(vty, abgp, afi, safi,
+ sh_type, output_arg,
+ show_flags,
+ rpki_target_state);
+ if (uj)
+ vty_out(vty, "}\n");
+ }
}
} else {
/* show <ip> bgp all: for each AFI and SAFI*/
- FOREACH_AFI_SAFI (afi, safi) {
- if (!bgp_afi_safi_peer_exists(bgp, afi, safi))
- continue;
-
- if (uj) {
- if (first)
- first = false;
- else
- vty_out(vty, ",\n");
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, abgp)) {
+ FOREACH_AFI_SAFI (afi, safi) {
+ if (!bgp_afi_safi_peer_exists(abgp, afi,
+ safi))
+ continue;
- vty_out(vty, "\"%s\":{\n",
- get_afi_safi_str(afi, safi,
- true));
- } else
- vty_out(vty,
- "\nFor address family: %s\n",
- get_afi_safi_str(afi, safi,
- false));
+ if (uj) {
+ if (first)
+ first = false;
+ else
+ vty_out(vty, ",\n");
- if (community)
- bgp_show_community(vty, bgp, community,
- exact_match, afi,
- safi, show_flags);
- else
- bgp_show(vty, bgp, afi, safi, sh_type,
- output_arg, show_flags,
- rpki_target_state);
- if (uj)
- vty_out(vty, "}\n");
+ vty_out(vty, "\"%s\":{\n",
+ get_afi_safi_str(afi,
+ safi,
+ true));
+ } else
+ vty_out(vty,
+ "\nFor address family: %s\n",
+ get_afi_safi_str(
+ afi, safi,
+ false));
+
+ if (community)
+ bgp_show_community(
+ vty, abgp, community,
+ exact_match, afi, safi,
+ show_flags);
+ else
+ bgp_show(vty, abgp, afi, safi,
+ sh_type, output_arg,
+ show_flags,
+ rpki_target_state);
+ if (uj)
+ vty_out(vty, "}\n");
+ }
}
}
if (uj)
@@ -13956,6 +13977,8 @@ DEFPY (show_ip_bgp_instance_neighbor_advertised_route,
int idx = 0;
bool first = true;
uint16_t show_flags = 0;
+ struct listnode *node;
+ struct bgp *abgp;
if (uj) {
argc--;
@@ -14007,42 +14030,52 @@ DEFPY (show_ip_bgp_instance_neighbor_advertised_route,
|| CHECK_FLAG(show_flags, BGP_SHOW_OPT_AFI_IP6)) {
afi = CHECK_FLAG(show_flags, BGP_SHOW_OPT_AFI_IP) ? AFI_IP
: AFI_IP6;
- FOREACH_SAFI (safi) {
- if (!bgp_afi_safi_peer_exists(bgp, afi, safi))
- continue;
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, abgp)) {
+ FOREACH_SAFI (safi) {
+ if (!bgp_afi_safi_peer_exists(abgp, afi, safi))
+ continue;
- if (uj) {
- if (first)
- first = false;
- else
- vty_out(vty, ",\n");
- vty_out(vty, "\"%s\":",
- get_afi_safi_str(afi, safi, true));
- } else
- vty_out(vty, "\nFor address family: %s\n",
- get_afi_safi_str(afi, safi, false));
+ if (uj) {
+ if (first)
+ first = false;
+ else
+ vty_out(vty, ",\n");
+ vty_out(vty, "\"%s\":",
+ get_afi_safi_str(afi, safi,
+ true));
+ } else
+ vty_out(vty,
+ "\nFor address family: %s\n",
+ get_afi_safi_str(afi, safi,
+ false));
- peer_adj_routes(vty, peer, afi, safi, type, rmap_name,
- show_flags);
+ peer_adj_routes(vty, peer, afi, safi, type,
+ rmap_name, show_flags);
+ }
}
} else {
- FOREACH_AFI_SAFI (afi, safi) {
- if (!bgp_afi_safi_peer_exists(bgp, afi, safi))
- continue;
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, abgp)) {
+ FOREACH_AFI_SAFI (afi, safi) {
+ if (!bgp_afi_safi_peer_exists(abgp, afi, safi))
+ continue;
- if (uj) {
- if (first)
- first = false;
- else
- vty_out(vty, ",\n");
- vty_out(vty, "\"%s\":",
- get_afi_safi_str(afi, safi, true));
- } else
- vty_out(vty, "\nFor address family: %s\n",
- get_afi_safi_str(afi, safi, false));
+ if (uj) {
+ if (first)
+ first = false;
+ else
+ vty_out(vty, ",\n");
+ vty_out(vty, "\"%s\":",
+ get_afi_safi_str(afi, safi,
+ true));
+ } else
+ vty_out(vty,
+ "\nFor address family: %s\n",
+ get_afi_safi_str(afi, safi,
+ false));
- peer_adj_routes(vty, peer, afi, safi, type, rmap_name,
- show_flags);
+ peer_adj_routes(vty, peer, afi, safi, type,
+ rmap_name, show_flags);
+ }
}
}
if (uj)
@@ -14161,7 +14194,7 @@ DEFUN (show_ip_bgp_flowspec_routes_detailed,
struct bgp *bgp = NULL;
int idx = 0;
bool uj = use_json(argc, argv);
- uint16_t show_flags = 0;
+ uint16_t show_flags = BGP_SHOW_OPT_DETAIL;
if (uj) {
argc--;
diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c
index 857462a601..c724b938d1 100644
--- a/bgpd/bgp_rpki.c
+++ b/bgpd/bgp_rpki.c
@@ -369,7 +369,7 @@ static void bgpd_sync_callback(struct thread *thread)
thread_add_read(bm->master, bgpd_sync_callback, NULL, socket, &t_rpki);
if (atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst)) {
- while (read(socket, &rec, sizeof(struct pfx_record) != -1))
+ while (read(socket, &rec, sizeof(struct pfx_record)) != -1)
;
atomic_store_explicit(&rtr_update_overflow, 0,
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index d21e257cb5..dea1433f6d 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -1486,7 +1486,7 @@ DEFUN (no_router_bgp,
}
if (bgp->l3vni) {
- vty_out(vty, "%% Please unconfigure l3vni %u",
+ vty_out(vty, "%% Please unconfigure l3vni %u\n",
bgp->l3vni);
return CMD_WARNING_CONFIG_FAILED;
}
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index c0a9a38773..78eaac7806 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -882,6 +882,12 @@ bool bgp_zebra_nexthop_set(union sockunion *local, union sockunion *remote,
*/
if (!v6_ll_avail && if_is_loopback(ifp))
v6_ll_avail = true;
+ else {
+ flog_warn(
+ EC_BGP_NO_LL_ADDRESS_AVAILABLE,
+ "Interface: %s does not have a v6 LL address associated with it, waiting until one is created for it",
+ ifp->name);
+ }
} else
/* Link-local address. */
{
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 16488eb4a4..38a106359e 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -1223,7 +1223,7 @@ int bgp_global_gr_init(struct bgp *bgp)
{
/*Event -> */
/*GLOBAL_GR_cmd*/ /*no_Global_GR_cmd*/
- GLOBAL_INVALID, GLOBAL_HELPER,
+ GLOBAL_GR, GLOBAL_HELPER,
/*GLOBAL_DISABLE_cmd*/ /*no_Global_Disable_cmd*/
GLOBAL_DISABLE, GLOBAL_INVALID
},
diff --git a/debian/.gitignore b/debian/.gitignore
index 0b267c6f5c..d95d33a610 100644
--- a/debian/.gitignore
+++ b/debian/.gitignore
@@ -11,3 +11,4 @@
/files
/frr.init
/frr.service
+/frr@.service
diff --git a/debian/rules b/debian/rules
index 0fa9c3a3b0..7a719b7c60 100755
--- a/debian/rules
+++ b/debian/rules
@@ -72,6 +72,7 @@ override_dh_auto_install:
# let dh_systemd_* and dh_installinit do their thing automatically
cp tools/frr.service debian/frr.service
+ cp tools/frr@.service debian/frr@.service
cp tools/frrinit.sh debian/frr.init
-rm -f debian/tmp/usr/lib/frr/frr
@@ -112,3 +113,4 @@ override_dh_auto_clean:
if test -f Makefile; then make redistclean; fi
-rm -f debian/frr.init
-rm -f debian/frr.service
+ -rm -f debian/frr@.service
diff --git a/doc/developer/frr-release-procedure.rst b/doc/developer/frr-release-procedure.rst
index 6a7f9c4ca9..4ef0ca8416 100644
--- a/doc/developer/frr-release-procedure.rst
+++ b/doc/developer/frr-release-procedure.rst
@@ -204,7 +204,7 @@ Stage 3 - Publish
.. code-block:: console
- cp <old-version>.md <version>.md
+ cp content/release/<old-version>.md content/release/<new-version>.md
Paste the GitHub release announcement text into this document, and **remove
line breaks**. In other words, this::
@@ -220,10 +220,17 @@ Stage 3 - Publish
This is very important otherwise the announcement will be unreadable on the
website.
- Make sure to add a link to the GitHub releases page at the top.
+ To get the number of committers and commits, here are a couple of handy commands:
+
+ .. code-block:: console
- Once finished, manually add a new entry into ``index.html`` to link to this
- new announcement. Look at past commits to see how to do this.
+ # The number of commits
+ % git log --oneline --no-merges base_8.2...base_8.1 | wc -l
+
+ # The number of committers
+ % git shortlog --summary --no-merges base_8.2...base_8.1 | wc -l
+
+ Make sure to add a link to the GitHub releases page at the top.
#. Deploy the updated ``frr-www`` on the frrouting.org web server and verify
that the announcement text is visible.
diff --git a/doc/developer/lists.rst b/doc/developer/lists.rst
index dc8f236927..4eaa85115e 100644
--- a/doc/developer/lists.rst
+++ b/doc/developer/lists.rst
@@ -1,23 +1,23 @@
.. _lists:
-List implementations
+Type-safe containers
====================
.. note::
- The term *list* is used generically for lists, skiplists, trees and hash
- tables in this document.
+ This section previously used the term *list*; it was changed to *container*
+ to be more clear.
-Common list interface
----------------------
+Common container interface
+--------------------------
-FRR includes a set of list-like data structure implementations with abstracted
+FRR includes a set of container implementations with abstracted
common APIs. The purpose of this is easily allow swapping out one
data structure for another while also making the code easier to read and write.
-There is one API for unsorted lists and a similar but not identical API for
-sorted lists - and heaps use a middle ground of both.
+There is one API for unsorted containers and a similar but not identical API
+for sorted containers - and heaps use a middle ground of both.
-For unsorted lists, the following implementations exist:
+For unsorted containers, the following implementations exist:
- single-linked list with tail pointer (e.g. STAILQ in BSD)
@@ -31,7 +31,7 @@ Being partially sorted, the oddball structure:
- an 8-ary heap
-For sorted lists, these data structures are implemented:
+For sorted containers, these data structures are implemented:
- single-linked list
@@ -44,7 +44,7 @@ For sorted lists, these data structures are implemented:
- hash table (note below)
Except for hash tables, each of the sorted data structures has a variant with
-unique and non-unique list items. Hash tables always require unique items
+unique and non-unique items. Hash tables always require unique items
and mostly follow the "sorted" API but use the hash value as sorting
key. Also, iterating while modifying does not work with hash tables.
Conversely, the heap always has non-unique items, but iterating while modifying
@@ -60,7 +60,7 @@ in the future:
The APIs are all designed to be as type-safe as possible. This means that
-there will be a compiler warning when an item doesn't match the list, or
+there will be a compiler warning when an item doesn't match the container, or
the return value has a different type, or other similar situations. **You
should never use casts with these APIs.** If a cast is neccessary in relation
to these APIs, there is probably something wrong with the overall design.
@@ -100,35 +100,39 @@ Available types:
Functions provided:
-+------------------------------------+------+------+------+---------+------------+
-| Function | LIST | HEAP | HASH | \*_UNIQ | \*_NONUNIQ |
-+====================================+======+======+======+=========+============+
-| _init, _fini | yes | yes | yes | yes | yes |
-+------------------------------------+------+------+------+---------+------------+
-| _first, _next, _next_safe, | yes | yes | yes | yes | yes |
-| | | | | | |
-| _const_first, _const_next | | | | | |
-+------------------------------------+------+------+------+---------+------------+
-| _swap_all | yes | yes | yes | yes | yes |
-+------------------------------------+------+------+------+---------+------------+
-| _anywhere | yes | -- | -- | -- | -- |
-+------------------------------------+------+------+------+---------+------------+
-| _add_head, _add_tail, _add_after | yes | -- | -- | -- | -- |
-+------------------------------------+------+------+------+---------+------------+
-| _add | -- | yes | yes | yes | yes |
-+------------------------------------+------+------+------+---------+------------+
-| _member | yes | yes | yes | yes | yes |
-+------------------------------------+------+------+------+---------+------------+
-| _del, _pop | yes | yes | yes | yes | yes |
-+------------------------------------+------+------+------+---------+------------+
-| _find, _const_find | -- | -- | yes | yes | -- |
-+------------------------------------+------+------+------+---------+------------+
-| _find_lt, _find_gteq, | -- | -- | -- | yes | yes |
-| | | | | | |
-| _const_find_lt, _const_find_gteq | | | | | |
-+------------------------------------+------+------+------+---------+------------+
-| use with frr_each() macros | yes | yes | yes | yes | yes |
-+------------------------------------+------+------+------+---------+------------+
++------------------------------------+-------+------+------+---------+------------+
+| Function | LIST | HEAP | HASH | \*_UNIQ | \*_NONUNIQ |
++====================================+=======+======+======+=========+============+
+| _init, _fini | yes | yes | yes | yes | yes |
++------------------------------------+-------+------+------+---------+------------+
+| _first, _next, _next_safe, | yes | yes | yes | yes | yes |
+| | | | | | |
+| _const_first, _const_next | | | | | |
++------------------------------------+-------+------+------+---------+------------+
+| _last, _prev, _prev_safe, | DLIST | -- | -- | RB only | RB only |
+| | only | | | | |
+| _const_last, _const_prev | | | | | |
++------------------------------------+-------+------+------+---------+------------+
+| _swap_all | yes | yes | yes | yes | yes |
++------------------------------------+-------+------+------+---------+------------+
+| _anywhere | yes | -- | -- | -- | -- |
++------------------------------------+-------+------+------+---------+------------+
+| _add_head, _add_tail, _add_after | yes | -- | -- | -- | -- |
++------------------------------------+-------+------+------+---------+------------+
+| _add | -- | yes | yes | yes | yes |
++------------------------------------+-------+------+------+---------+------------+
+| _member | yes | yes | yes | yes | yes |
++------------------------------------+-------+------+------+---------+------------+
+| _del, _pop | yes | yes | yes | yes | yes |
++------------------------------------+-------+------+------+---------+------------+
+| _find, _const_find | -- | -- | yes | yes | -- |
++------------------------------------+-------+------+------+---------+------------+
+| _find_lt, _find_gteq, | -- | -- | -- | yes | yes |
+| | | | | | |
+| _const_find_lt, _const_find_gteq | | | | | |
++------------------------------------+-------+------+------+---------+------------+
+| use with frr_each() macros | yes | yes | yes | yes | yes |
++------------------------------------+-------+------+------+---------+------------+
@@ -136,7 +140,7 @@ Datastructure type setup
------------------------
Each of the data structures has a ``PREDECL_*`` and a ``DECLARE_*`` macro to
-set up an "instantiation" of the list. This works somewhat similar to C++
+set up an "instantiation" of the container. This works somewhat similar to C++
templating, though much simpler.
**In all following text, the Z prefix is replaced with a name choosen
@@ -174,8 +178,8 @@ The common setup pattern will look like this:
``XXX`` is replaced with the name of the data structure, e.g. ``SKIPLIST``
or ``ATOMLIST``. The ``DECLARE_XXX`` invocation can either occur in a `.h`
-file (if the list needs to be accessed from several C files) or it can be
-placed in a `.c` file (if the list is only accessed from that file.) The
+file (if the container needs to be accessed from several C files) or it can be
+placed in a `.c` file (if the container is only accessed from that file.) The
``PREDECL_XXX`` invocation defines the ``struct Z_item`` and ``struct
Z_head`` types and must therefore occur before these are used.
@@ -196,7 +200,7 @@ The following iteration macros work across all data structures:
for (item = Z_first(&head); item; item = Z_next(&head, item))
- Note that this will fail if the list is modified while being iterated
+ Note that this will fail if the container is modified while being iterated
over.
.. c:macro:: frr_each_safe(Z, head, item)
@@ -220,8 +224,8 @@ The following iteration macros work across all data structures:
.. c:macro:: frr_each_from(Z, head, item, from)
- Iterates over the list, starting at item ``from``. This variant is "safe"
- as in the previous macro. Equivalent to:
+ Iterates over the container, starting at item ``from``. This variant is
+ "safe" as in the previous macro. Equivalent to:
.. code-block:: c
@@ -236,6 +240,13 @@ The following iteration macros work across all data structures:
resume iteration after breaking out of the loop by keeping the ``from``
value persistent and reusing it for the next loop.
+.. c:macro:: frr_rev_each(Z, head, item)
+.. c:macro:: frr_rev_each_safe(Z, head, item)
+.. c:macro:: frr_rev_each_from(Z, head, item, from)
+
+ Reverse direction variants of the above. Only supported on containers that
+ implement ``_last`` and ``_prev`` (i.e. ``RBTREE`` and ``DLIST``).
+
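+ As a quick illustration (a minimal sketch assuming a ``DLIST`` instantiated
+ as ``mylist`` holding ``struct item`` entries), the reverse macros are used
+ exactly like their forward counterparts:
+
+ .. code-block:: c
+
+    struct item *i;
+
+    /* visit items from tail to head; mylist, head and do_something()
+     * are placeholder names for this example */
+    frr_rev_each (mylist, &head, i)
+        do_something(i);
+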
To iterate over ``const`` pointers, add ``_const`` to the name of the
datastructure (``Z`` above), e.g. ``frr_each (mylist, head, item)`` becomes
``frr_each (mylist_const, head, item)``.
@@ -243,24 +254,24 @@ datastructure (``Z`` above), e.g. ``frr_each (mylist, head, item)`` becomes
Common API
----------
-The following documentation assumes that a list has been defined using
-``Z`` as the name, and ``itemtype`` being the type of the list items (e.g.
+The following documentation assumes that a container has been defined using
+``Z`` as the name, and ``itemtype`` being the type of the items (e.g.
``struct item``.)
.. c:function:: void Z_init(struct Z_head *)
- Initializes the list for use. For most implementations, this just sets
+ Initializes the container for use. For most implementations, this just sets
some values. Hash tables are the only implementation that allocates
memory in this call.
.. c:function:: void Z_fini(struct Z_head *)
- Reverse the effects of :c:func:`Z_init()`. The list must be empty
+ Reverse the effects of :c:func:`Z_init()`. The container must be empty
when this function is called.
.. warning::
- This function may ``assert()`` if the list is not empty.
+ This function may ``assert()`` if the container is not empty.
.. c:function:: size_t Z_count(const struct Z_head *)
@@ -270,7 +281,7 @@ The following documentation assumes that a list has been defined using
.. note::
- For atomic lists with concurrent access, the value will already be
+ For atomic containers with concurrent access, the value will already be
outdated by the time this function returns and can therefore only be
used as an estimate.
@@ -291,6 +302,12 @@ The following documentation assumes that a list has been defined using
empty. This is O(1) for all data structures except red-black trees
where it is O(log n).
+.. c:function:: const itemtype *Z_const_last(const struct Z_head *)
+.. c:function:: itemtype *Z_last(struct Z_head *)
+
+ Last item in the structure, or ``NULL``. Only available on containers
+ that support reverse iteration (i.e. ``RBTREE`` and ``DLIST``).
+
.. c:function:: itemtype *Z_pop(struct Z_head *)
Remove and return the first item in the structure, or ``NULL`` if the
@@ -300,7 +317,7 @@ The following documentation assumes that a list has been defined using
This function can be used to build queues (with unsorted structures) or
priority queues (with sorted structures.)
- Another common pattern is deleting all list items:
+ Another common pattern is deleting all container items:
.. code-block:: c
@@ -329,16 +346,23 @@ The following documentation assumes that a list has been defined using
Same as :c:func:`Z_next()`, except that ``NULL`` is returned if
``prev`` is ``NULL``.
+.. c:function:: const itemtype *Z_const_prev(const struct Z_head *, const itemtype *next)
+.. c:function:: itemtype *Z_prev(struct Z_head *, itemtype *next)
+.. c:function:: itemtype *Z_prev_safe(struct Z_head *, itemtype *next)
+
+ As above, but preceding item. Only available on structures that support
+ reverse iteration (i.e. ``RBTREE`` and ``DLIST``).
+
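+ For reference, a manual backwards walk (again assuming a ``DLIST`` named
+ ``mylist``; this is roughly what ``frr_rev_each`` expands to):
+
+ .. code-block:: c
+
+    struct item *i;
+
+    /* start at the tail and step towards the head */
+    for (i = mylist_last(&head); i; i = mylist_prev(&head, i))
+        do_something(i);
+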
.. c:function:: itemtype *Z_del(struct Z_head *, itemtype *item)
- Remove ``item`` from the list and return it.
+ Remove ``item`` from the container and return it.
.. note::
This function's behaviour is undefined if ``item`` is not actually
- on the list. Some structures return ``NULL`` in this case while others
- return ``item``. The function may also call ``assert()`` (but most
- don't.)
+ on the container. Some structures return ``NULL`` in this case while
+ others return ``item``. The function may also call ``assert()`` (but
+ most don't.)
.. c:function:: itemtype *Z_swap_all(struct Z_head *, struct Z_head *)
@@ -427,8 +451,8 @@ API for sorted structures
-------------------------
Sorted data structures do not need to have an insertion position specified,
-therefore the insertion calls are different from unsorted lists. Also,
-sorted lists can be searched for a value.
+therefore the insertion calls are different from unsorted containers. Also,
+sorted containers can be searched for a value.
.. c:macro:: DECLARE_XXX_UNIQ(Z, type, field, compare_func)
@@ -439,7 +463,7 @@ sorted lists can be searched for a value.
created for this instantiation. ``DECLARE_XXX(foo, ...)``
gives ``struct foo_item``, ``foo_add()``, ``foo_count()``, etc. Note
that this must match the value given in ``PREDECL_XXX(foo)``.
- :param typename type: Specifies the data type of the list items, e.g.
+ :param typename type: Specifies the data type of the items, e.g.
``struct item``. Note that ``struct`` must be added here, it is not
automatically added.
:param token field: References a struct member of ``type`` that must be
@@ -448,29 +472,29 @@ sorted lists can be searched for a value.
:param funcptr compare_func: Item comparison function, must have the
following function signature:
``int function(const itemtype *, const itemtype*)``. This function
- may be static if the list is only used in one file.
+ may be static if the container is only used in one file.
.. c:macro:: DECLARE_XXX_NONUNIQ(Z, type, field, compare_func)
- Same as above, but allow adding multiple items to the list that compare
+ Same as above, but allow adding multiple items to the container that compare
as equal in ``compare_func``. Ordering between these items is undefined
- and depends on the list implementation.
+ and depends on the container implementation.
.. c:function:: itemtype *Z_add(struct Z_head *, itemtype *item)
Insert an item at the appropriate sorted position. If another item exists
- in the list that compares as equal (``compare_func()`` == 0), ``item`` is
- not inserted into the list and the already-existing item in the list is
+ in the container that compares as equal (``compare_func()`` == 0), ``item``
+ is not inserted and the already-existing item in the container is
returned. Otherwise, on successful insertion, ``NULL`` is returned.
- For ``_NONUNIQ`` lists, this function always returns NULL since ``item``
- can always be successfully added to the list.
+ For ``_NONUNIQ`` containers, this function always returns NULL since
+ ``item`` can always be successfully added to the container.
.. c:function:: const itemtype *Z_const_find(const struct Z_head *, const itemtype *ref)
.. c:function:: itemtype *Z_find(struct Z_head *, const itemtype *ref)
- Search the list for an item that compares equal to ``ref``. If no equal
- item is found, return ``NULL``.
+ Search the container for an item that compares equal to ``ref``. If no
+ equal item is found, return ``NULL``.
This function is likely used with a temporary stack-allocated value for
``ref`` like so:
@@ -483,21 +507,21 @@ sorted lists can be searched for a value.
.. note::
- The ``Z_find()`` function is only available for lists that contain
- unique items (i.e. ``DECLARE_XXX_UNIQ``.) This is because on a list
- containing non-unique items, more than one item may compare as equal to
+ The ``Z_find()`` function is only available for containers that contain
+ unique items (i.e. ``DECLARE_XXX_UNIQ``.) This is because on a container
+ with non-unique items, more than one item may compare as equal to
the item that is searched for.
.. c:function:: const itemtype *Z_const_find_gteq(const struct Z_head *, const itemtype *ref)
.. c:function:: itemtype *Z_find_gteq(struct Z_head *, const itemtype *ref)
- Search the list for an item that compares greater or equal to
+ Search the container for an item that compares greater or equal to
``ref``. See :c:func:`Z_find()` above.
.. c:function:: const itemtype *Z_const_find_lt(const struct Z_head *, const itemtype *ref)
.. c:function:: itemtype *Z_find_lt(struct Z_head *, const itemtype *ref)
- Search the list for an item that compares less than
+ Search the container for an item that compares less than
``ref``. See :c:func:`Z_find()` above.
@@ -511,7 +535,7 @@ API for hash tables
created for this instantiation. ``DECLARE_XXX(foo, ...)``
gives ``struct foo_item``, ``foo_add()``, ``foo_count()``, etc. Note
that this must match the value given in ``PREDECL_XXX(foo)``.
- :param typename type: Specifies the data type of the list items, e.g.
+ :param typename type: Specifies the data type of the items, e.g.
``struct item``. Note that ``struct`` must be added here, it is not
automatically added.
:param token field: References a struct member of ``type`` that must be
@@ -520,7 +544,7 @@ API for hash tables
:param funcptr compare_func: Item comparison function, must have the
following function signature:
``int function(const itemtype *, const itemtype*)``. This function
- may be static if the list is only used in one file. For hash tables,
+ may be static if the container is only used in one file. For hash tables,
this function is only used to check for equality, the ordering is
ignored.
:param funcptr hash_func: Hash calculation function, must have the
@@ -725,13 +749,9 @@ Head removal (pop) and deallocation:
FAQ
---
-What are the semantics of ``const`` in the list APIs?
+What are the semantics of ``const`` in the container APIs?
``const`` pointers to list heads and/or items are interpreted to mean that
- both the list itself as well as the data items are read-only.
-
-Why is there no "is this item on a/the list" test?
- It's slow for several of the data structures, and the work of adding it
- just hasn't been done. It can certainly be added if it's needed.
+ both the container itself as well as the data items are read-only.
Why is it ``PREDECL`` + ``DECLARE`` instead of ``DECLARE`` + ``DEFINE``?
The rule is that a ``DEFINE`` must be in a ``.c`` file, and linked exactly
diff --git a/doc/developer/logging.rst b/doc/developer/logging.rst
index 4e6fc04206..7046361204 100644
--- a/doc/developer/logging.rst
+++ b/doc/developer/logging.rst
@@ -163,6 +163,10 @@ Networking data types
- :c:union:`prefixptr` (dereference to get :c:struct:`prefix`)
- :c:union:`prefixconstptr` (dereference to get :c:struct:`prefix`)
+ Options:
+
+ ``%pFXh``: (address only) :frrfmtout:`1.2.3.0` / :frrfmtout:`fe80::1234`
+
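+ A minimal usage sketch (``p`` is assumed to be a ``struct prefix`` already
+ filled in by the caller):
+
+ .. code-block:: c
+
+    /* print the full prefix, then just its address part via the "h" option */
+    zlog_debug("prefix %pFX, host part %pFXh", &p, &p);
+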
.. frrfmt:: %pPSG4 (struct prefix_sg *)
:frrfmtout:`(*,1.2.3.4)`
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index b41181f4e9..6c1d9148d1 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -35,6 +35,9 @@ Installing Topotest Requirements
python2 -m pip install 'exabgp<4.0.0'
useradd -d /var/run/exabgp/ -s /bin/false exabgp
+ # To enable the gRPC topotest, install:
+ python3 -m pip install grpcio grpcio-tools
+
Enable Coredumps
""""""""""""""""
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 7d7335a23f..b9733cd522 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -2935,6 +2935,8 @@ Example configuration:
exit-address-family
!
+.. _bgp-evpn-mh:
+
EVPN Multihoming
^^^^^^^^^^^^^^^^
@@ -3045,6 +3047,55 @@ following zebra command -
.. clicmd:: evpn mh startup-delay (0-3600)
+EAD-per-ES fragmentation
+""""""""""""""""""""""""
+The EAD-per-ES route carries the EVI route targets for all the broadcast
+domains associated with the ES. Depending on the EVI scale, the EAD-per-ES
+route may be fragmented.
+
+The number of EVIs per EAD route can be configured via the following
+BGP command:
+
+.. index:: [no] ead-es-frag evi-limit (1-1000)
+.. clicmd:: [no] ead-es-frag evi-limit (1-1000)
+
+Sample Configuration
+^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: frr
+
+ !
+ router bgp 5556
+ !
+ address-family l2vpn evpn
+ ead-es-frag evi-limit 200
+ exit-address-family
+ !
+ !
+
+EAD-per-ES route-target
+"""""""""""""""""""""""
+The EAD-per-ES route by default carries all the EVI route targets. Depending
+on the EVI scale, that can result in route fragmentation. In some cases it may
+be necessary to avoid this fragmentation; that can be done via the following
+workaround:
+
+1. Configure a single supplementary BD per tenant VRF. This SBD needs to
+   be provisioned on all EVPN PEs associated with the tenant-VRF.
+2. Configure the SBD's RT as the EAD-per-ES route's export RT.
+
+Sample Configuration
+^^^^^^^^^^^^^^^^^^^^^
+.. code-block:: frr
+
+ !
+ router bgp 5556
+ !
+ address-family l2vpn evpn
+ ead-es-route-target export 5556:1001
+ ead-es-route-target export 5556:1004
+ ead-es-route-target export 5556:1008
+ exit-address-family
+ !
+
Support with VRF network namespace backend
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible to separate overlay networks contained in VXLAN interfaces from
diff --git a/doc/user/index.rst b/doc/user/index.rst
index cadf4cb9cf..5a018a5583 100644
--- a/doc/user/index.rst
+++ b/doc/user/index.rst
@@ -54,6 +54,7 @@ Protocols
ospf6d
pathd
pim
+ pimv6
pbr
ripd
ripngd
diff --git a/doc/user/isisd.rst b/doc/user/isisd.rst
index f7d42d8200..d2859670dd 100644
--- a/doc/user/isisd.rst
+++ b/doc/user/isisd.rst
@@ -272,7 +272,7 @@ ISIS interface
Showing ISIS information
========================
-.. clicmd:: show isis summary
+.. clicmd:: show isis [vrf <NAME|all>] summary [json]
Show summary information about ISIS.
@@ -280,17 +280,17 @@ Showing ISIS information
Show information about ISIS node.
-.. clicmd:: show isis interface [detail] [IFNAME]
+.. clicmd:: show isis [vrf <NAME|all>] interface [detail] [IFNAME] [json]
Show state and configuration of ISIS specified interface, or all interfaces
if no interface is given with or without details.
-.. clicmd:: show isis neighbor [detail] [SYSTEMID]
+.. clicmd:: show isis [vrf <NAME|all>] neighbor [detail] [SYSTEMID] [json]
Show state and information of ISIS specified neighbor, or all neighbors if
no system id is given with or without details.
-.. clicmd:: show isis database [detail] [LSPID]
+.. clicmd:: show isis [vrf <NAME|all>] database [detail] [LSPID] [json]
Show the ISIS database globally, for a specific LSP id without or with
details.
diff --git a/doc/user/pim.rst b/doc/user/pim.rst
index 1c3a0110ac..dcea709503 100644
--- a/doc/user/pim.rst
+++ b/doc/user/pim.rst
@@ -505,10 +505,25 @@ cause great confusion.
Display information about a S,G pair and how the RPF would be chosen. This
is especially useful if there are ECMP's available from the RPF lookup.
-.. clicmd:: show ip pim rp-info
+.. clicmd:: show ip pim [vrf NAME] rp-info [A.B.C.D/M] [json]
Display information about RP's that are configured on this router.
+ You can filter the output by specifying an arbitrary group range.
+
+ .. code-block:: frr
+
+ # show ip pim rp-info
+ RP address group/prefix-list OIF I am RP Source Group-Type
+ 192.168.10.123 225.0.0.0/24 eth2 yes Static ASM
+ 192.168.10.123 239.0.0.0/8 eth2 yes Static ASM
+ 192.168.10.123 239.4.0.0/24 eth2 yes Static SSM
+
+ # show ip pim rp-info 239.4.0.0/25
+ RP address group/prefix-list OIF I am RP Source Group-Type
+ 192.168.10.123 239.0.0.0/8 eth2 yes Static ASM
+ 192.168.10.123 239.4.0.0/24 eth2 yes Static SSM
+
.. clicmd:: show ip pim rpf
Display information about currently being used S,G's and their RPF lookup
@@ -609,6 +624,11 @@ the config was written out.
This turns on debugging for PIM nexthop tracking. It will display
information about RPF lookups and information about when a nexthop changes.
+.. clicmd:: debug pim nht detail
+
+ This turns on detailed debugging for PIM nexthop tracking. It is not
+ enabled by default.
+
.. clicmd:: debug pim packet-dump
This turns on an extraordinary amount of data. Each pim packet sent and
diff --git a/doc/user/pimv6.rst b/doc/user/pimv6.rst
new file mode 100644
index 0000000000..e71cf4631c
--- /dev/null
+++ b/doc/user/pimv6.rst
@@ -0,0 +1,288 @@
+.. _pimv6:
+
+*****
+PIMv6
+*****
+
+PIMv6 -- Protocol Independent Multicast for IPv6
+
+*pim6d* supports pim-sm as well as MLD v1 and v2. PIMv6 is
+vrf aware and can work within the context of vrf's in order to
+do S,G mrouting.
+
+.. _starting-and-stopping-pim6d:
+
+Starting and Stopping pim6d
+===========================
+
+The default configuration file name for *pim6d* is :file:`pim6d.conf`. When
+invoked, *pim6d* searches the directory |INSTALL_PREFIX_ETC|. If
+:file:`pim6d.conf` is not there, the current directory is searched next.
+
+*pim6d* requires zebra for proper operation. Additionally, *pim6d* depends
+on routing being properly set up and working in the network it operates in.
+
+::
+
+ # zebra -d
+ # pim6d -d
+
+
+Please note that *zebra* must be invoked before *pim6d*.
+
+To stop *pim6d* please use::
+
+ kill `cat /var/run/pim6d.pid`
+
+Certain signals have special meanings to *pim6d*.
+
++---------+---------------------------------------------------------------------+
+| Signal | Meaning |
++=========+=====================================================================+
+| SIGUSR1 | Rotate the *pim6d* logfile |
++---------+---------------------------------------------------------------------+
+| SIGINT | *pim6d* sweeps all installed PIM mroutes then terminates gracefully.|
+| SIGTERM | |
++---------+---------------------------------------------------------------------+
+
+*pim6d* supports the common invocation options
+(:ref:`common-invocation-options`).
+
+.. clicmd:: ipv6 pim rp X:X::X:X Y:Y::Y:Y/M
+
+ In order to use pimv6, it is necessary to configure an RP for join messages to
+ be sent to. Currently the only methodology to do this is via static rp
+ commands. All routers in the pimv6 network must agree on these values. The
+ first ipv6 address is the RP's address and the second value is the matching
+ prefix of group ranges covered. This command is vrf aware, to configure for
+ a vrf, enter the vrf submode.
+
+.. clicmd:: ipv6 pim rp X:X::X:X prefix-list WORD
+
+ This CLI helps in configuring the RP address for a range of groups specified
+ by the prefix-list.
+
+.. clicmd:: ipv6 pim rp keep-alive-timer (1-65535)
+
+ Modify the timeout value for a S,G flow from 1-65535 seconds at the RP.
+ The normal keepalive period for the KAT(S,G) defaults to 210 seconds.
+ However, at the RP, the keepalive period must be at least the
+ Register_Suppression_Time, or the RP may time out the (S,G) state
+ before the next Null-Register arrives. Thus, the KAT(S,G) is set to
+ max(Keepalive_Period, RP_Keepalive_Period) when a Register-Stop is sent.
+ If choosing a value below 31 seconds be aware that some hardware platforms
+ cannot see data flowing in better than 30 second chunks. This command is
+ vrf aware, to configure for a vrf, enter the vrf submode.
+
+.. clicmd:: ipv6 pim spt-switchover infinity-and-beyond [prefix-list PLIST]
+
+ On the last hop router, if it is desired to not switch over to the SPT tree,
+ configure this command. The optional prefix-list parameter can be used to
+ control which groups switch over and which do not. If a group is PERMIT as
+ per the PLIST, the SPT switchover does not happen for it; if it is DENY,
+ the SPT switchover happens.
+ This command is vrf aware, to configure for a vrf,
+ enter the vrf submode.
+
+.. clicmd:: ipv6 pim join-prune-interval (1-65535)
+
+ Modify the join/prune interval that pim uses to the new value. Time is
+ specified in seconds. This command is vrf aware, to configure for a vrf,
+ enter the vrf submode. The default time is 60 seconds. If you enter
+ a value smaller than 60 seconds be aware that this can and will affect
+ convergence at scale.
+
+.. clicmd:: ipv6 pim keep-alive-timer (1-65535)
+
+ Modify the timeout value for a S,G flow from 1-65535 seconds. If choosing
+ a value below 31 seconds be aware that some hardware platforms cannot see data
+ flowing in better than 30 second chunks. This command is vrf aware, to
+ configure for a vrf, enter the vrf submode.
+
+.. clicmd:: ipv6 pim packets (1-255)
+
+ When processing packets from a neighbor, process this number of incoming
+ packets at one time before moving on to the next task. The default value is
+ 3 packets. This command is only useful at scale when you can possibly have
+ a large number of pim control packets flowing. This command is vrf aware, to
+ configure for a vrf, enter the vrf submode.
+
+.. clicmd:: ipv6 pim register-suppress-time (1-65535)
+
+ Modify the register suppression time, i.e. the interval during which a FHR
+ will suppress sending register notifications. This command is vrf aware, to
+ configure for a vrf, enter the vrf submode.
+
+.. _pimv6-interface-configuration:
+
+PIMv6 Interface Configuration
+=============================
+
+PIMv6 interface commands allow you to configure an interface as either a Receiver
+or an interface that you would like to form pimv6 neighbors on. If the interface
+is in a vrf, enter the interface command with the vrf keyword at the end.
+
+.. clicmd:: ipv6 pim active-active
+
+ Turn on pim active-active configuration for a Vxlan interface. This
+ command will not do anything if you do not have the underlying ability
+ of an mlag implementation.
+
+.. clicmd:: ipv6 pim drpriority (1-4294967295)
+
+ Set the DR Priority for the interface. This command is useful to allow the
+ user to influence what node becomes the DR for a lan segment.
+
+.. clicmd:: ipv6 pim hello (1-65535) (1-65535)
+
+ Set the pim hello and hold interval for an interface.
+
+.. clicmd:: ipv6 pim
+
+ Tell pim that we would like to use this interface to form pim neighbors
+ over. Please note that this command does not enable the reception of MLD
+ reports on the interface. Refer to the next ``ipv6 mld`` command for MLD
+ management.
+
+.. clicmd:: ipv6 pim use-source X:X::X:X
+
+ If you have multiple addresses configured on a particular interface
+ and would like pim to use a specific source address associated with
+ that interface, use this command to select it.
+
+.. clicmd:: ipv6 mld
+
+ Tell pim to receive MLD reports and queries on this interface. The default
+ version is v2. This command is useful on a LHR.
+
+.. clicmd:: ipv6 mld join X:X::X:X [Y:Y::Y:Y]
+
+ Join multicast group or source-group on an interface.
+
+.. clicmd:: ipv6 mld query-interval (1-65535)
+
+ Set the MLD query interval that PIM will use.
+
+.. clicmd:: ipv6 mld query-max-response-time (1-65535)
+
+ Set the MLD query response timeout value. If a report is not returned in
+ the specified time, we will assume the S,G or \*,G has timed out.
+
+.. clicmd:: ipv6 mld version (1-2)
+
+ Set the MLD version used on this interface. The default value is 2.
+
+.. clicmd:: ipv6 multicast boundary oil WORD
+
+ Set a PIMv6 multicast boundary, based upon the WORD prefix-list. If a PIMv6
+ join or MLD report is received on this interface and the Group is denied by
+ the prefix-list, PIMv6 will ignore the join or report.
+
+.. clicmd:: ipv6 mld last-member-query-count (1-255)
+
+ Set the MLD last member query count. The default value is 2. 'no' form of
+ this command is used to configure back to the default value.
+
+.. clicmd:: ipv6 mld last-member-query-interval (1-65535)
+
+ Set the MLD last member query interval in deciseconds. The default value is
+ 10 deciseconds. 'no' form of this command is used to to configure back to the
+ default value.
+
+.. clicmd:: ipv6 mroute INTERFACE X:X::X:X [Y:Y::Y:Y]
+
+ Set a static multicast route for traffic coming in on the current interface to
+ be forwarded on the given interface if the traffic matches the group address
+ and optionally the source address.
+
+.. _show-pimv6-information:
+
+Show PIMv6 Information
+======================
+
+All PIMv6 show commands are vrf aware and typically allow you to insert a
+specified vrf command if information is desired about a specific vrf. If no
+vrf is specified then the default vrf is assumed. Finally the special keyword
+'all' allows you to look at all vrfs for the command. Naming a vrf 'all' will
+cause great confusion.
+
+.. clicmd:: show ipv6 pim [vrf NAME] group-type [json]
+
+ Display SSM group ranges.
+
+.. clicmd:: show ipv6 pim interface
+
+ Display information about interfaces PIM is using.
+
+.. clicmd:: show ipv6 pim [vrf NAME] join [X:X::X:X [X:X::X:X]] [json]
+.. clicmd:: show ipv6 pim vrf all join [json]
+
+ Display information about PIM joins received. If one address is specified
+ then we assume it is the Group we are interested in displaying data on.
+ If the second address is specified then it is Source Group.
+
+.. clicmd:: show ipv6 pim [vrf NAME] local-membership [json]
+
+ Display information about PIM interface local-membership.
+
+.. clicmd:: show ipv6 pim [vrf NAME] neighbor [detail|WORD] [json]
+.. clicmd:: show ipv6 pim vrf all neighbor [detail|WORD] [json]
+
+ Display information about PIM neighbors.
+
+.. clicmd:: show ipv6 pim [vrf NAME] nexthop
+
+ Display information about pim nexthops that are being used.
+
+.. clicmd:: show ipv6 pim [vrf NAME] nexthop-lookup X:X::X:X X:X::X:X
+
+ Display information about a S,G pair and how the RPF would be chosen. This
+ is especially useful if there are ECMP's available from the RPF lookup.
+
+.. clicmd:: show ipv6 pim [vrf NAME] rp-info [json]
+.. clicmd:: show ipv6 pim vrf all rp-info [json]
+
+ Display information about RP's that are configured on this router.
+
+.. clicmd:: show ipv6 pim [vrf NAME] rpf [json]
+.. clicmd:: show ipv6 pim vrf all rpf [json]
+
+ Display information about currently being used S,G's and their RPF lookup
+ information. Additionally display some statistics about what has been
+ happening on the router.
+
+.. clicmd:: show ipv6 pim [vrf NAME] secondary
+
+ Display information about an interface and all the secondary addresses
+ associated with it.
+
+.. clicmd:: show ipv6 pim [vrf NAME] state [X:X::X:X [X:X::X:X]] [json]
+.. clicmd:: show ipv6 pim vrf all state [X:X::X:X [X:X::X:X]] [json]
+
+ Display information about known S,G's and incoming interface as well as the
+ OIL and how they were chosen.
+
+.. clicmd:: show ipv6 pim [vrf NAME] upstream [X:X::X:X [Y:Y::Y:Y]] [json]
+.. clicmd:: show ipv6 pim vrf all upstream [json]
+
+ Display upstream information about a S,G mroute. Allow the user to
+ specify sub Source and Groups that we are interested in.
+
+.. clicmd:: show ipv6 pim [vrf NAME] upstream-join-desired [json]
+
+ Display upstream information for S,G's and if we desire to
+ join the multicast tree.
+
+.. clicmd:: show ipv6 pim [vrf NAME] upstream-rpf [json]
+
+ Display upstream information for S,G's and the RPF data associated with them.
+
+PIMv6 Debug Commands
+====================
+
+The debugging subsystem for PIMv6 behaves in accordance with how FRR handles
+debugging. You can specify debugging at the enable CLI mode as well as the
+configure CLI mode. If you specify debug commands in the configuration CLI
+mode, the debug commands can be persistent across restarts of the FRR pim6d if
+the config was written out.
+
diff --git a/doc/user/sharp.rst b/doc/user/sharp.rst
index e9d4e2763f..8d201a3c06 100644
--- a/doc/user/sharp.rst
+++ b/doc/user/sharp.rst
@@ -296,3 +296,7 @@ keyword. At present, no sharp commands will be preserved in the config.
router# show sharp segment-routing srv6
(nothing)
+
+.. clicmd:: sharp interface IFNAME protodown
+
+ Set an interface protodown.
diff --git a/doc/user/subdir.am b/doc/user/subdir.am
index 31158cb5f7..14ace2c856 100644
--- a/doc/user/subdir.am
+++ b/doc/user/subdir.am
@@ -30,6 +30,7 @@ user_RSTFILES = \
doc/user/packet-dumps.rst \
doc/user/pathd.rst \
doc/user/pim.rst \
+ doc/user/pimv6.rst \
doc/user/ripd.rst \
doc/user/pbr.rst \
doc/user/ripngd.rst \
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 15c6088b7a..0244f7c583 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -29,7 +29,7 @@ Besides the common invocation options (:ref:`common-invocation-options`), the
Zebra, when started, will read in routes. Those routes that Zebra
identifies that it was the originator of will be swept in TIME seconds.
If no time is specified then we will sweep those routes immediately.
- Under the *BSD's, there is no way to properly store the originating
+ Under the \*BSD's, there is no way to properly store the originating
route and the route types in this case will show up as a static route
with an admin distance of 255.
@@ -255,6 +255,17 @@ Link Parameters Commands
for InterASv2 link in OSPF (RFC5392). Note that this option is not yet
supported for ISIS (RFC5316).
+Global Commands
+------------------------
+
+.. clicmd:: zebra protodown reason-bit (0-31)
+
+ This command is only supported on Linux with a kernel newer than 5.1.
+ Change the reason-bit FRR uses for setting protodown. We default to 7, but
+ if another userspace app ever conflicts with this, you can change it here.
+ The descriptor for this bit should exist in :file:`/etc/iproute2/protodown_reasons.d/`
+ to display with :clicmd:`ip -d link show`.
+
Nexthop Tracking
================
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 22a45914a2..e5cea27829 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -167,12 +167,25 @@ enum {
IFLA_NEW_IFINDEX,
IFLA_MIN_MTU,
IFLA_MAX_MTU,
+ IFLA_PROP_LIST,
+ IFLA_ALT_IFNAME, /* Alternative ifname */
+ IFLA_PERM_ADDRESS,
+ IFLA_PROTO_DOWN_REASON,
__IFLA_MAX
};
#define IFLA_MAX (__IFLA_MAX - 1)
+enum {
+ IFLA_PROTO_DOWN_REASON_UNSPEC,
+ IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */
+ IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */
+
+ __IFLA_PROTO_DOWN_REASON_CNT,
+ IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1
+};
+
/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
new file mode 100644
index 0000000000..a0bfdcb53e
--- /dev/null
+++ b/include/linux/mroute.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__LINUX_MROUTE_H
+#define _UAPI__LINUX_MROUTE_H
+
+#ifdef __LINUX__
+#include <linux/sockios.h>
+#include <linux/types.h>
+#endif
+#include <netinet/in.h> /* For struct in_addr. */
+
+/* Based on the MROUTING 3.5 defines primarily to keep
+ * source compatibility with BSD.
+ *
+ * See the mrouted code for the original history.
+ *
+ * Protocol Independent Multicast (PIM) data structures included
+ * Carlos Picoto (cap@di.fc.ul.pt)
+ */
+
+#define MRT_BASE 200
+#define MRT_INIT (MRT_BASE) /* Activate the kernel mroute code */
+#define MRT_DONE (MRT_BASE+1) /* Shutdown the kernel mroute */
+#define MRT_ADD_VIF (MRT_BASE+2) /* Add a virtual interface */
+#define MRT_DEL_VIF (MRT_BASE+3) /* Delete a virtual interface */
+#define MRT_ADD_MFC (MRT_BASE+4) /* Add a multicast forwarding entry */
+#define MRT_DEL_MFC (MRT_BASE+5) /* Delete a multicast forwarding entry */
+#define MRT_VERSION (MRT_BASE+6) /* Get the kernel multicast version */
+#define MRT_ASSERT (MRT_BASE+7) /* Activate PIM assert mode */
+#define MRT_PIM (MRT_BASE+8) /* enable PIM code */
+#define MRT_TABLE (MRT_BASE+9) /* Specify mroute table ID */
+#define MRT_ADD_MFC_PROXY (MRT_BASE+10) /* Add a (*,*|G) mfc entry */
+#define MRT_DEL_MFC_PROXY (MRT_BASE+11) /* Del a (*,*|G) mfc entry */
+#define MRT_FLUSH (MRT_BASE+12) /* Flush all mfc entries and/or vifs */
+#define MRT_MAX (MRT_BASE+12)
+
+#ifndef SIOCGETVIFCNT
+#define SIOCGETVIFCNT SIOCPROTOPRIVATE /* IP protocol privates */
+#define SIOCGETSGCNT (SIOCPROTOPRIVATE+1)
+#define SIOCGETRPF (SIOCPROTOPRIVATE+2)
+#endif
+
+#ifndef MAXVIFS
+#define MAXVIFS 32
+#endif
+/* MRT_FLUSH optional flags */
+#define MRT_FLUSH_MFC 1 /* Flush multicast entries */
+#define MRT_FLUSH_MFC_STATIC 2 /* Flush static multicast entries */
+#define MRT_FLUSH_VIFS 4 /* Flush multicast vifs */
+#define MRT_FLUSH_VIFS_STATIC 8 /* Flush static multicast vifs */
+
+typedef unsigned long vifbitmap_t; /* User mode code depends on this lot */
+typedef unsigned short vifi_t;
+#define ALL_VIFS ((vifi_t)(-1))
+
+/* Same idea as select */
+
+#define VIFM_SET(n,m) ((m)|=(1<<(n)))
+#define VIFM_CLR(n,m) ((m)&=~(1<<(n)))
+#define VIFM_ISSET(n,m) ((m)&(1<<(n)))
+#define VIFM_CLRALL(m) ((m)=0)
+#define VIFM_COPY(mfrom,mto) ((mto)=(mfrom))
+#define VIFM_SAME(m1,m2) ((m1)==(m2))
+
+/* Passed by mrouted for an MRT_ADD_VIF - again we use the
+ * mrouted 3.6 structures for compatibility
+ */
+struct vifctl {
+ vifi_t vifc_vifi; /* Index of VIF */
+ unsigned char vifc_flags; /* VIFF_ flags */
+ unsigned char vifc_threshold; /* ttl limit */
+ unsigned int vifc_rate_limit; /* Rate limiter values (NI) */
+ union {
+ struct in_addr vifc_lcl_addr; /* Local interface address */
+ int vifc_lcl_ifindex; /* Local interface index */
+ };
+ struct in_addr vifc_rmt_addr; /* IPIP tunnel addr */
+};
+
+#define VIFF_TUNNEL 0x1 /* IPIP tunnel */
+#define VIFF_SRCRT 0x2 /* NI */
+#define VIFF_REGISTER 0x4 /* register vif */
+#define VIFF_USE_IFINDEX 0x8 /* use vifc_lcl_ifindex instead of
+ vifc_lcl_addr to find an interface */
+
+/* Cache manipulation structures for mrouted and PIMd */
+struct mfcctl {
+ struct in_addr mfcc_origin; /* Origin of mcast */
+ struct in_addr mfcc_mcastgrp; /* Group in question */
+ vifi_t mfcc_parent; /* Where it arrived */
+ unsigned char mfcc_ttls[MAXVIFS]; /* Where it is going */
+ unsigned int mfcc_pkt_cnt; /* pkt count for src-grp */
+ unsigned int mfcc_byte_cnt;
+ unsigned int mfcc_wrong_if;
+ int mfcc_expire;
+};
+
+/* Group count retrieval for mrouted */
+struct sioc_sg_req {
+ struct in_addr src;
+ struct in_addr grp;
+ unsigned long pktcnt;
+ unsigned long bytecnt;
+ unsigned long wrong_if;
+};
+
+/* To get vif packet counts */
+struct sioc_vif_req {
+ vifi_t vifi; /* Which iface */
+ unsigned long icount; /* In packets */
+ unsigned long ocount; /* Out packets */
+ unsigned long ibytes; /* In bytes */
+ unsigned long obytes; /* Out bytes */
+};
+
+/* This is the format the mroute daemon expects to see IGMP control
+ * data. Magically happens to be like an IP packet as per the original
+ */
+struct igmpmsg {
+ uint32_t unused1,unused2;
+ unsigned char im_msgtype; /* What is this */
+ unsigned char im_mbz; /* Must be zero */
+ unsigned char im_vif; /* Low 8 bits of Interface */
+ unsigned char im_vif_hi; /* High 8 bits of Interface */
+ struct in_addr im_src,im_dst;
+};
+
+/* ipmr netlink table attributes */
+enum {
+ IPMRA_TABLE_UNSPEC,
+ IPMRA_TABLE_ID,
+ IPMRA_TABLE_CACHE_RES_QUEUE_LEN,
+ IPMRA_TABLE_MROUTE_REG_VIF_NUM,
+ IPMRA_TABLE_MROUTE_DO_ASSERT,
+ IPMRA_TABLE_MROUTE_DO_PIM,
+ IPMRA_TABLE_VIFS,
+ IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
+ __IPMRA_TABLE_MAX
+};
+#define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1)
+
+/* ipmr netlink vif attribute format
+ * [ IPMRA_TABLE_VIFS ] - nested attribute
+ * [ IPMRA_VIF ] - nested attribute
+ * [ IPMRA_VIFA_xxx ]
+ */
+enum {
+ IPMRA_VIF_UNSPEC,
+ IPMRA_VIF,
+ __IPMRA_VIF_MAX
+};
+#define IPMRA_VIF_MAX (__IPMRA_VIF_MAX - 1)
+
+/* vif-specific attributes */
+enum {
+ IPMRA_VIFA_UNSPEC,
+ IPMRA_VIFA_IFINDEX,
+ IPMRA_VIFA_VIF_ID,
+ IPMRA_VIFA_FLAGS,
+ IPMRA_VIFA_BYTES_IN,
+ IPMRA_VIFA_BYTES_OUT,
+ IPMRA_VIFA_PACKETS_IN,
+ IPMRA_VIFA_PACKETS_OUT,
+ IPMRA_VIFA_LOCAL_ADDR,
+ IPMRA_VIFA_REMOTE_ADDR,
+ IPMRA_VIFA_PAD,
+ __IPMRA_VIFA_MAX
+};
+#define IPMRA_VIFA_MAX (__IPMRA_VIFA_MAX - 1)
+
+/* ipmr netlink cache report attributes */
+enum {
+ IPMRA_CREPORT_UNSPEC,
+ IPMRA_CREPORT_MSGTYPE,
+ IPMRA_CREPORT_VIF_ID,
+ IPMRA_CREPORT_SRC_ADDR,
+ IPMRA_CREPORT_DST_ADDR,
+ IPMRA_CREPORT_PKT,
+ IPMRA_CREPORT_TABLE,
+ __IPMRA_CREPORT_MAX
+};
+#define IPMRA_CREPORT_MAX (__IPMRA_CREPORT_MAX - 1)
+
+/* That's all usermode folks */
+
+#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */
+
+/* Pseudo messages used by mrouted */
+#define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */
+#define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */
+#define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */
+#define IGMPMSG_WRVIFWHOLE 4 /* For PIM Register and assert processing */
+
+#endif /* _UAPI__LINUX_MROUTE_H */
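Since these definitions are the entire user-space contract with the kernel's IPv4 multicast routing code, a compact stand-alone sketch of driving them may help. Interface index, addresses and vif numbers are placeholders and error handling is reduced to the minimum; this is not FRR code.

/* Illustrative only: initialize the kernel mroute API, add one incoming
 * vif and one (S,G) forwarding entry, then shut the API down again. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/mroute.h>

int mroute_demo(void)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
	int one = 1;
	struct vifctl vif;
	struct mfcctl mfc;

	if (fd < 0)
		return -1;

	/* Take ownership of the kernel multicast routing code. */
	if (setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0)
		goto fail;

	/* vif 0 = ifindex 2, referenced by index rather than address. */
	memset(&vif, 0, sizeof(vif));
	vif.vifc_vifi = 0;
	vif.vifc_flags = VIFF_USE_IFINDEX;
	vif.vifc_threshold = 1;
	vif.vifc_lcl_ifindex = 2;
	if (setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vif, sizeof(vif)) < 0)
		goto fail;

	/* (S,G) entry arriving on vif 0, forwarded out vif 1 with ttl 1. */
	memset(&mfc, 0, sizeof(mfc));
	inet_pton(AF_INET, "192.0.2.1", &mfc.mfcc_origin);
	inet_pton(AF_INET, "239.1.1.1", &mfc.mfcc_mcastgrp);
	mfc.mfcc_parent = 0;
	mfc.mfcc_ttls[1] = 1;
	if (setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mfc, sizeof(mfc)) < 0)
		goto fail;

	setsockopt(fd, IPPROTO_IP, MRT_DONE, NULL, 0);
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}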
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
new file mode 100644
index 0000000000..1fb90ec021
--- /dev/null
+++ b/include/linux/mroute6.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__LINUX_MROUTE6_H
+#define _UAPI__LINUX_MROUTE6_H
+
+#ifdef __LINUX__
+#include <linux/const.h>
+#include <linux/types.h>
+#include <linux/sockios.h>
+#endif
+#include <netinet/in.h> /* For struct sockaddr_in6. */
+
+/*
+ * Based on the MROUTING 3.5 defines primarily to keep
+ * source compatibility with BSD.
+ *
+ * See the pim6sd code for the original history.
+ *
+ * Protocol Independent Multicast (PIM) data structures included
+ * Carlos Picoto (cap@di.fc.ul.pt)
+ *
+ */
+
+#define MRT6_BASE 200
+#define MRT6_INIT (MRT6_BASE) /* Activate the kernel mroute code */
+#define MRT6_DONE (MRT6_BASE+1) /* Shutdown the kernel mroute */
+#define MRT6_ADD_MIF (MRT6_BASE+2) /* Add a virtual interface */
+#define MRT6_DEL_MIF (MRT6_BASE+3) /* Delete a virtual interface */
+#define MRT6_ADD_MFC (MRT6_BASE+4) /* Add a multicast forwarding entry */
+#define MRT6_DEL_MFC (MRT6_BASE+5) /* Delete a multicast forwarding entry */
+#define MRT6_VERSION (MRT6_BASE+6) /* Get the kernel multicast version */
+#define MRT6_ASSERT (MRT6_BASE+7) /* Activate PIM assert mode */
+#define MRT6_PIM (MRT6_BASE+8) /* enable PIM code */
+#define MRT6_TABLE (MRT6_BASE+9) /* Specify mroute table ID */
+#define MRT6_ADD_MFC_PROXY (MRT6_BASE+10) /* Add a (*,*|G) mfc entry */
+#define MRT6_DEL_MFC_PROXY (MRT6_BASE+11) /* Del a (*,*|G) mfc entry */
+#define MRT6_FLUSH (MRT6_BASE+12) /* Flush all mfc entries and/or vifs */
+#define MRT6_MAX (MRT6_BASE+12)
+
+#ifndef SIOCGETMIFCNT_IN6
+#define SIOCGETMIFCNT_IN6 SIOCPROTOPRIVATE /* IP protocol privates */
+#define SIOCGETSGCNT_IN6 (SIOCPROTOPRIVATE+1)
+#define SIOCGETRPF (SIOCPROTOPRIVATE+2)
+#endif
+
+#ifndef MAXMIFS
+#define MAXMIFS 32
+#endif
+/* MRT6_FLUSH optional flags */
+#define MRT6_FLUSH_MFC 1 /* Flush multicast entries */
+#define MRT6_FLUSH_MFC_STATIC 2 /* Flush static multicast entries */
+#define MRT6_FLUSH_MIFS 4 /* Flushing multicast vifs */
+#define MRT6_FLUSH_MIFS_STATIC 8 /* Flush static multicast vifs */
+
+typedef unsigned long mifbitmap_t; /* User mode code depends on this lot */
+typedef unsigned short mifi_t;
+#define ALL_MIFS ((mifi_t)(-1))
+
+#ifndef IF_SETSIZE
+#define IF_SETSIZE 256
+#endif
+
+typedef uint32_t if_mask;
+#define NIFBITS (sizeof(if_mask) * 8) /* bits per mask */
+
+typedef struct if_set {
+ if_mask ifs_bits[__KERNEL_DIV_ROUND_UP(IF_SETSIZE, NIFBITS)];
+} if_set;
+
+#define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS)))
+#define IF_CLR(n, p) ((p)->ifs_bits[(n)/NIFBITS] &= ~(1 << ((n) % NIFBITS)))
+#define IF_ISSET(n, p) ((p)->ifs_bits[(n)/NIFBITS] & (1 << ((n) % NIFBITS)))
+#define IF_COPY(f, t) bcopy(f, t, sizeof(*(f)))
+#define IF_ZERO(p) bzero(p, sizeof(*(p)))
+
+/*
+ * Passed by mrouted for an MRT_ADD_MIF - again we use the
+ * mrouted 3.6 structures for compatibility
+ */
+
+struct mif6ctl {
+ mifi_t mif6c_mifi; /* Index of MIF */
+ unsigned char mif6c_flags; /* MIFF_ flags */
+ unsigned char vifc_threshold; /* ttl limit */
+ __u16 mif6c_pifi; /* the index of the physical IF */
+ unsigned int vifc_rate_limit; /* Rate limiter values (NI) */
+};
+
+#define MIFF_REGISTER 0x1 /* register vif */
+
+/*
+ * Cache manipulation structures for mrouted and PIMd
+ */
+
+struct mf6cctl {
+ struct sockaddr_in6 mf6cc_origin; /* Origin of mcast */
+ struct sockaddr_in6 mf6cc_mcastgrp; /* Group in question */
+ mifi_t mf6cc_parent; /* Where it arrived */
+ struct if_set mf6cc_ifset; /* Where it is going */
+};
+
+/*
+ * Group count retrieval for pim6sd
+ */
+
+struct sioc_sg_req6 {
+ struct sockaddr_in6 src;
+ struct sockaddr_in6 grp;
+ unsigned long pktcnt;
+ unsigned long bytecnt;
+ unsigned long wrong_if;
+};
+
+/*
+ * To get vif packet counts
+ */
+
+struct sioc_mif_req6 {
+ mifi_t mifi; /* Which iface */
+ unsigned long icount; /* In packets */
+ unsigned long ocount; /* Out packets */
+ unsigned long ibytes; /* In bytes */
+ unsigned long obytes; /* Out bytes */
+};
+
+/*
+ * That's all usermode folks
+ */
+
+
+
+/*
+ * Structure used to communicate from kernel to multicast router.
+ * We'll overlay the structure onto an MLD header (not an IPv6 header like igmpmsg{}
+ * used for the IPv4 implementation). This is because this structure will be passed via an
+ * IPv6 raw socket, on which an application will only receive the payload, i.e. the data after
+ * the IPv6 header and all the extension headers. (See section 3 of RFC 3542)
+ */
+
+struct mrt6msg {
+#define MRT6MSG_NOCACHE 1
+#define MRT6MSG_WRONGMIF 2
+#define MRT6MSG_WHOLEPKT 3 /* used for use level encap */
+#define MRT6MSG_WRMIFWHOLE 4 /* For PIM Register and assert processing */
+ __u8 im6_mbz; /* must be zero */
+ __u8 im6_msgtype; /* what type of message */
+ __u16 im6_mif; /* mif rec'd on */
+ __u32 im6_pad; /* padding for 64 bit arch */
+ struct in6_addr im6_src, im6_dst;
+};
+
+/* ip6mr netlink cache report attributes */
+enum {
+ IP6MRA_CREPORT_UNSPEC,
+ IP6MRA_CREPORT_MSGTYPE,
+ IP6MRA_CREPORT_MIF_ID,
+ IP6MRA_CREPORT_SRC_ADDR,
+ IP6MRA_CREPORT_DST_ADDR,
+ IP6MRA_CREPORT_PKT,
+ __IP6MRA_CREPORT_MAX
+};
+#define IP6MRA_CREPORT_MAX (__IP6MRA_CREPORT_MAX - 1)
+
+#endif /* _UAPI__LINUX_MROUTE6_H */
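A small stand-alone sketch of the overlay described in the mrt6msg comment above: on the ICMPv6 raw socket handed to MRT6_INIT, only the payload after the IPv6 and extension headers is delivered, so a kernel upcall can be told apart from an ordinary MLD packet by the zero im6_mbz byte where the ICMPv6 type would otherwise sit. Illustrative only, not FRR code; buffer handling is simplified.

#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <linux/mroute6.h>

static void read_mrt6_upcall(int icmp6_fd)
{
	uint8_t buf[1024];
	ssize_t len = recv(icmp6_fd, buf, sizeof(buf), 0);

	if (len < (ssize_t)sizeof(struct mrt6msg))
		return;

	/* A real MLD packet has a non-zero ICMPv6 type in the first byte,
	 * so im6_mbz == 0 identifies a kernel upcall. */
	struct mrt6msg *msg = (struct mrt6msg *)buf;

	if (msg->im6_mbz != 0)
		return; /* ordinary MLD packet, not an upcall */

	switch (msg->im6_msgtype) {
	case MRT6MSG_NOCACHE:
		printf("cache miss on mif %u\n", msg->im6_mif);
		break;
	case MRT6MSG_WRONGMIF:
	case MRT6MSG_WHOLEPKT:
	case MRT6MSG_WRMIFWHOLE:
	default:
		break;
	}
}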
diff --git a/include/subdir.am b/include/subdir.am
index a06a8e5649..f6328ef38e 100644
--- a/include/subdir.am
+++ b/include/subdir.am
@@ -17,4 +17,6 @@ noinst_HEADERS += \
include/linux/seg6_hmac.h \
include/linux/seg6_iptunnel.h \
include/linux/seg6_local.h \
+ include/linux/mroute.h \
+ include/linux/mroute6.h \
# end
diff --git a/isisd/isis_adjacency.c b/isisd/isis_adjacency.c
index 06909c4306..2729dce382 100644
--- a/isisd/isis_adjacency.c
+++ b/isisd/isis_adjacency.c
@@ -464,6 +464,220 @@ void isis_adj_expire(struct thread *thread)
}
/*
+ * show isis neighbor [detail] json
+ */
+void isis_adj_print_json(struct isis_adjacency *adj, struct json_object *json,
+ char detail)
+{
+ json_object *iface_json, *ipv4_addr_json, *ipv6_link_json,
+ *ipv6_non_link_json, *topo_json, *dis_flaps_json,
+ *area_addr_json, *adj_sid_json;
+ time_t now;
+ struct isis_dynhn *dyn;
+ int level;
+ char buf[256];
+
+ json_object_string_add(json, "adj", isis_adj_name(adj));
+
+ if (detail == ISIS_UI_LEVEL_BRIEF) {
+ if (adj->circuit)
+ json_object_string_add(json, "interface",
+ adj->circuit->interface->name);
+ else
+ json_object_string_add(json, "interface",
+ "NULL circuit!");
+ json_object_int_add(json, "level", adj->level);
+ json_object_string_add(json, "state",
+ adj_state2string(adj->adj_state));
+ now = time(NULL);
+ if (adj->last_upd) {
+ if (adj->last_upd + adj->hold_time < now)
+ json_object_string_add(json, "last-upd",
+ "expiring");
+ else
+ json_object_string_add(
+ json, "expires-in",
+ time2string(adj->last_upd +
+ adj->hold_time - now));
+ }
+ json_object_string_add(json, "snpa", snpa_print(adj->snpa));
+ }
+
+ if (detail == ISIS_UI_LEVEL_DETAIL) {
+ struct sr_adjacency *sra;
+ struct listnode *anode;
+
+ level = adj->level;
+ iface_json = json_object_new_object();
+ json_object_object_add(json, "interface", iface_json);
+ if (adj->circuit)
+ json_object_string_add(iface_json, "name",
+ adj->circuit->interface->name);
+ else
+ json_object_string_add(iface_json, "name",
+ "null-circuit");
+ json_object_int_add(json, "level", adj->level);
+ json_object_string_add(iface_json, "state",
+ adj_state2string(adj->adj_state));
+ now = time(NULL);
+ if (adj->last_upd) {
+ if (adj->last_upd + adj->hold_time < now)
+ json_object_string_add(iface_json, "last-upd",
+ "expiring");
+ else
+ json_object_string_add(
+ json, "expires-in",
+ time2string(adj->last_upd +
+ adj->hold_time - now));
+ } else
+ json_object_string_add(json, "expires-in",
+ time2string(adj->hold_time));
+ json_object_int_add(iface_json, "adj-flaps", adj->flaps);
+ json_object_string_add(iface_json, "last-ago",
+ time2string(now - adj->last_flap));
+ json_object_string_add(iface_json, "circuit-type",
+ circuit_t2string(adj->circuit_t));
+ json_object_string_add(iface_json, "speaks",
+ nlpid2string(&adj->nlpids));
+ if (adj->mt_count != 1 ||
+ adj->mt_set[0] != ISIS_MT_IPV4_UNICAST) {
+ topo_json = json_object_new_object();
+ json_object_object_add(iface_json, "topologies",
+ topo_json);
+ for (unsigned int i = 0; i < adj->mt_count; i++) {
+ snprintfrr(buf, sizeof(buf), "topo-%d", i);
+ json_object_string_add(
+ topo_json, buf,
+ isis_mtid2str(adj->mt_set[i]));
+ }
+ }
+ json_object_string_add(iface_json, "snpa",
+ snpa_print(adj->snpa));
+ if (adj->circuit &&
+ (adj->circuit->circ_type == CIRCUIT_T_BROADCAST)) {
+ dyn = dynhn_find_by_id(adj->circuit->isis, adj->lanid);
+ if (dyn) {
+ snprintfrr(buf, sizeof(buf), "%s-%02x",
+ dyn->hostname,
+ adj->lanid[ISIS_SYS_ID_LEN]);
+ json_object_string_add(iface_json, "lan-id",
+ buf);
+ } else {
+ snprintfrr(buf, sizeof(buf), "%s-%02x",
+ sysid_print(adj->lanid),
+ adj->lanid[ISIS_SYS_ID_LEN]);
+ json_object_string_add(iface_json, "lan-id",
+ buf);
+ }
+
+ json_object_int_add(iface_json, "lan-prio",
+ adj->prio[adj->level - 1]);
+
+ dis_flaps_json = json_object_new_object();
+ json_object_object_add(iface_json, "dis-flaps",
+ dis_flaps_json);
+ json_object_string_add(
+ dis_flaps_json, "dis-record",
+ isis_disflag2string(
+ adj->dis_record[ISIS_LEVELS + level - 1]
+ .dis));
+ json_object_int_add(dis_flaps_json, "last",
+ adj->dischanges[level - 1]);
+ json_object_string_add(
+ dis_flaps_json, "ago",
+ time2string(now - (adj->dis_record[ISIS_LEVELS +
+ level - 1]
+ .last_dis_change)));
+ }
+
+ if (adj->area_address_count) {
+ area_addr_json = json_object_new_object();
+ json_object_object_add(iface_json, "area-address",
+ area_addr_json);
+ for (unsigned int i = 0; i < adj->area_address_count;
+ i++) {
+ json_object_string_add(
+ area_addr_json, "isonet",
+ isonet_print(adj->area_addresses[i]
+ .area_addr,
+ adj->area_addresses[i]
+ .addr_len));
+ }
+ }
+ if (adj->ipv4_address_count) {
+ ipv4_addr_json = json_object_new_object();
+ json_object_object_add(iface_json, "ipv4-address",
+ ipv4_addr_json);
+ for (unsigned int i = 0; i < adj->ipv4_address_count;
+ i++){
+ inet_ntop(AF_INET, &adj->ipv4_addresses[i], buf,
+ sizeof(buf));
+ json_object_string_add(ipv4_addr_json, "ipv4", buf);
+ }
+ }
+ if (adj->ll_ipv6_count) {
+ ipv6_link_json = json_object_new_object();
+ json_object_object_add(iface_json, "ipv6-link-local",
+ ipv6_link_json);
+ for (unsigned int i = 0; i < adj->ll_ipv6_count; i++) {
+ char buf[INET6_ADDRSTRLEN];
+ inet_ntop(AF_INET6, &adj->ll_ipv6_addrs[i], buf,
+ sizeof(buf));
+ json_object_string_add(ipv6_link_json, "ipv6",
+ buf);
+ }
+ }
+ if (adj->global_ipv6_count) {
+ ipv6_non_link_json = json_object_new_object();
+ json_object_object_add(iface_json, "ipv6-global",
+ ipv6_non_link_json);
+ for (unsigned int i = 0; i < adj->global_ipv6_count;
+ i++) {
+ char buf[INET6_ADDRSTRLEN];
+ inet_ntop(AF_INET6, &adj->global_ipv6_addrs[i],
+ buf, sizeof(buf));
+ json_object_string_add(ipv6_non_link_json,
+ "ipv6", buf);
+ }
+ }
+
+ adj_sid_json = json_object_new_object();
+ json_object_object_add(iface_json, "adj-sid", adj_sid_json);
+ for (ALL_LIST_ELEMENTS_RO(adj->adj_sids, anode, sra)) {
+ const char *adj_type;
+ const char *backup;
+ uint32_t sid;
+
+ switch (sra->adj->circuit->circ_type) {
+ case CIRCUIT_T_BROADCAST:
+ adj_type = "LAN Adjacency-SID";
+ sid = sra->u.ladj_sid->sid;
+ break;
+ case CIRCUIT_T_P2P:
+ adj_type = "Adjacency-SID";
+ sid = sra->u.adj_sid->sid;
+ break;
+ default:
+ continue;
+ }
+ backup = (sra->type == ISIS_SR_LAN_BACKUP) ? " (backup)"
+ : "";
+
+ json_object_string_add(adj_sid_json, "nexthop",
+ (sra->nexthop.family == AF_INET)
+ ? "IPv4"
+ : "IPv6");
+ json_object_string_add(adj_sid_json, "adj-type",
+ adj_type);
+ json_object_string_add(adj_sid_json, "is-backup",
+ backup);
+ json_object_int_add(adj_sid_json, "sid", sid);
+ }
+ }
+ return;
+}
+
+/*
* show isis neighbor [detail]
*/
void isis_adj_print_vty(struct isis_adjacency *adj, struct vty *vty,
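A hypothetical caller for the new JSON printer, showing the intended life cycle of the json_object; the surrounding show-command plumbing is assumed, not taken from this change.

/* Illustrative only: emit one neighbor as pretty-printed JSON. */
#include "isisd/isis_adjacency.h"
#include "lib/json.h"
#include "lib/vty.h"

static void show_neighbor_json(struct vty *vty, struct isis_adjacency *adj,
			       char detail)
{
	struct json_object *json = json_object_new_object();

	isis_adj_print_json(adj, json, detail);
	vty_out(vty, "%s\n",
		json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY));
	json_object_free(json);
}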
diff --git a/isisd/isis_adjacency.h b/isisd/isis_adjacency.h
index 4d84c5ca4d..7467a619cb 100644
--- a/isisd/isis_adjacency.h
+++ b/isisd/isis_adjacency.h
@@ -144,6 +144,8 @@ const char *isis_adj_yang_state(enum isis_adj_state state);
void isis_adj_expire(struct thread *thread);
void isis_adj_print_vty(struct isis_adjacency *adj, struct vty *vty,
char detail);
+void isis_adj_print_json(struct isis_adjacency *adj, struct json_object *json,
+ char detail);
void isis_adj_build_neigh_list(struct list *adjdb, struct list *list);
void isis_adj_build_up_list(struct list *adjdb, struct list *list);
int isis_adj_usage2levels(enum isis_adj_usage usage);
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index 1b0447226d..c7bf1e2012 100644
--- a/isisd/isis_circuit.c
+++ b/isisd/isis_circuit.c
@@ -312,8 +312,10 @@ void isis_circuit_add_addr(struct isis_circuit *circuit,
0);
#ifdef EXTREME_DEBUG
- zlog_debug("Added IP address %pFX to circuit %s",
- connected->address, circuit->interface->name);
+ if (IS_DEBUG_EVENTS)
+ zlog_debug("Added IP address %pFX to circuit %s",
+ connected->address,
+ circuit->interface->name);
#endif /* EXTREME_DEBUG */
}
if (connected->address->family == AF_INET6) {
@@ -350,8 +352,10 @@ void isis_circuit_add_addr(struct isis_circuit *circuit,
0);
#ifdef EXTREME_DEBUG
- zlog_debug("Added IPv6 address %pFX to circuit %s",
- connected->address, circuit->interface->name);
+ if (IS_DEBUG_EVENTS)
+ zlog_debug("Added IPv6 address %pFX to circuit %s",
+ connected->address,
+ circuit->interface->name);
#endif /* EXTREME_DEBUG */
}
@@ -681,9 +685,11 @@ int isis_circuit_up(struct isis_circuit *circuit)
ETH_ALEN);
}
#ifdef EXTREME_DEGUG
- zlog_debug("isis_circuit_if_add: if_id %d, isomtu %d snpa %s",
- circuit->interface->ifindex, ISO_MTU(circuit),
- snpa_print(circuit->u.bc.snpa));
+ if (IS_DEBUG_EVENTS)
+ zlog_debug(
+ "isis_circuit_if_add: if_id %d, isomtu %d snpa %s",
+ circuit->interface->ifindex, ISO_MTU(circuit),
+ snpa_print(circuit->u.bc.snpa));
#endif /* EXTREME_DEBUG */
circuit->u.bc.adjdb[0] = list_new();
@@ -933,6 +939,151 @@ void circuit_update_nlpids(struct isis_circuit *circuit)
return;
}
+void isis_circuit_print_json(struct isis_circuit *circuit,
+ struct json_object *json, char detail)
+{
+ int level;
+ json_object *iface_json, *ipv4_addr_json, *ipv6_link_json,
+ *ipv6_non_link_json, *hold_json, *lan_prio_json, *levels_json,
+ *level_json;
+ char buf_prx[INET6_BUFSIZ];
+ char buf[255];
+
+ snprintfrr(buf, sizeof(buf), "0x%x", circuit->circuit_id);
+ if (detail == ISIS_UI_LEVEL_BRIEF) {
+ iface_json = json_object_new_object();
+ json_object_object_add(json, "interface", iface_json);
+ json_object_string_add(iface_json, "name",
+ circuit->interface->name);
+ json_object_string_add(iface_json, "circuit-id", buf);
+ json_object_string_add(iface_json, "state",
+ circuit_state2string(circuit->state));
+ json_object_string_add(iface_json, "type",
+ circuit_type2string(circuit->circ_type));
+ json_object_string_add(iface_json, "level",
+ circuit_t2string(circuit->is_type));
+ }
+
+ if (detail == ISIS_UI_LEVEL_DETAIL) {
+ struct listnode *node;
+ struct prefix *ip_addr;
+
+ iface_json = json_object_new_object();
+ json_object_object_add(json, "interface", iface_json);
+ json_object_string_add(iface_json, "name",
+ circuit->interface->name);
+ json_object_string_add(iface_json, "state",
+ circuit_state2string(circuit->state));
+ if (circuit->is_passive)
+ json_object_string_add(iface_json, "is-passive",
+ "passive");
+ else
+ json_object_string_add(iface_json, "is-passive",
+ "active");
+ json_object_string_add(iface_json, "circuit-id", buf);
+ json_object_string_add(iface_json, "type",
+ circuit_type2string(circuit->circ_type));
+ json_object_string_add(iface_json, "level",
+ circuit_t2string(circuit->is_type));
+ if (circuit->circ_type == CIRCUIT_T_BROADCAST)
+ json_object_string_add(iface_json, "snpa",
+ snpa_print(circuit->u.bc.snpa));
+
+
+ levels_json = json_object_new_array();
+ json_object_object_add(iface_json, "levels", levels_json);
+ for (level = ISIS_LEVEL1; level <= ISIS_LEVELS; level++) {
+ if ((circuit->is_type & level) == 0)
+ continue;
+ level_json = json_object_new_object();
+ json_object_string_add(level_json, "level",
+ circuit_t2string(level));
+ if (circuit->area->newmetric)
+ json_object_int_add(level_json, "metric",
+ circuit->te_metric[0]);
+ else
+ json_object_int_add(level_json, "metric",
+ circuit->metric[0]);
+ if (!circuit->is_passive) {
+ json_object_int_add(level_json,
+ "active-neighbors",
+ circuit->upadjcount[0]);
+ json_object_int_add(level_json,
+ "hello-interval",
+ circuit->hello_interval[0]);
+ hold_json = json_object_new_object();
+ json_object_object_add(level_json, "holddown",
+ hold_json);
+ json_object_int_add(
+ hold_json, "count",
+ circuit->hello_multiplier[0]);
+ json_object_string_add(
+ hold_json, "pad",
+ (circuit->pad_hellos ? "yes" : "no"));
+ json_object_int_add(level_json, "cnsp-interval",
+ circuit->csnp_interval[0]);
+ json_object_int_add(level_json, "psnp-interval",
+ circuit->psnp_interval[0]);
+ if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
+ lan_prio_json =
+ json_object_new_object();
+ json_object_object_add(level_json,
+ "lan",
+ lan_prio_json);
+ json_object_int_add(
+ lan_prio_json, "priority",
+ circuit->priority[0]);
+ json_object_string_add(
+ lan_prio_json, "is-dis",
+ (circuit->u.bc.is_dr[0]
+ ? "yes"
+ : "no"));
+ }
+ }
+ json_object_array_add(levels_json, level_json);
+ }
+
+ if (circuit->ip_addrs && listcount(circuit->ip_addrs) > 0) {
+ ipv4_addr_json = json_object_new_object();
+ json_object_object_add(iface_json, "ip-prefix",
+ ipv4_addr_json);
+ for (ALL_LIST_ELEMENTS_RO(circuit->ip_addrs, node,
+ ip_addr)) {
+ snprintfrr(buf_prx, INET6_BUFSIZ, "%pFX",
+ ip_addr);
+ json_object_string_add(ipv4_addr_json, "ip",
+ buf_prx);
+ }
+ }
+ if (circuit->ipv6_link && listcount(circuit->ipv6_link) > 0) {
+ ipv6_link_json = json_object_new_object();
+ json_object_object_add(iface_json, "ipv6-link-locals",
+ ipv6_link_json);
+ for (ALL_LIST_ELEMENTS_RO(circuit->ipv6_link, node,
+ ip_addr)) {
+ snprintfrr(buf_prx, INET6_BUFSIZ, "%pFX",
+ ip_addr);
+ json_object_string_add(ipv6_link_json, "ipv6",
+ buf_prx);
+ }
+ }
+ if (circuit->ipv6_non_link &&
+ listcount(circuit->ipv6_non_link) > 0) {
+ ipv6_non_link_json = json_object_new_object();
+ json_object_object_add(iface_json, "ipv6-prefixes",
+ ipv6_non_link_json);
+ for (ALL_LIST_ELEMENTS_RO(circuit->ipv6_non_link, node,
+ ip_addr)) {
+ snprintfrr(buf_prx, INET6_BUFSIZ, "%pFX",
+ ip_addr);
+ json_object_string_add(ipv6_non_link_json,
+ "ipv6", buf_prx);
+ }
+ }
+ }
+ return;
+}
+
void isis_circuit_print_vty(struct isis_circuit *circuit, struct vty *vty,
char detail)
{
diff --git a/isisd/isis_circuit.h b/isisd/isis_circuit.h
index 7465780848..5ff0390c26 100644
--- a/isisd/isis_circuit.h
+++ b/isisd/isis_circuit.h
@@ -206,6 +206,8 @@ void isis_circuit_down(struct isis_circuit *);
void circuit_update_nlpids(struct isis_circuit *circuit);
void isis_circuit_print_vty(struct isis_circuit *circuit, struct vty *vty,
char detail);
+void isis_circuit_print_json(struct isis_circuit *circuit,
+ struct json_object *json, char detail);
size_t isis_circuit_pdu_size(struct isis_circuit *circuit);
void isis_circuit_stream(struct isis_circuit *circuit, struct stream **stream);
diff --git a/isisd/isis_lsp.c b/isisd/isis_lsp.c
index 463d26f6c7..eb7e9e725e 100644
--- a/isisd/isis_lsp.c
+++ b/isisd/isis_lsp.c
@@ -733,8 +733,48 @@ static const char *lsp_bits2string(uint8_t lsp_bits, char *buf, size_t buf_size)
}
/* this function prints the lsp on show isis database */
-void lsp_print(struct isis_lsp *lsp, struct vty *vty, char dynhost,
- struct isis *isis)
+void lsp_print_common(struct isis_lsp *lsp, struct vty *vty, struct json_object *json,
+ char dynhost, struct isis *isis)
+{
+ if (json) {
+ return lsp_print_json(lsp, json, dynhost, isis);
+ } else {
+ return lsp_print_vty(lsp, vty, dynhost, isis);
+ }
+}
+
+void lsp_print_json(struct isis_lsp *lsp, struct json_object *json,
+ char dynhost, struct isis *isis)
+{
+ char LSPid[255];
+ char age_out[8];
+ char b[200];
+ json_object *own_json;
+ char buf[256];
+
+ lspid_print(lsp->hdr.lsp_id, LSPid, sizeof(LSPid), dynhost, 1, isis);
+ own_json = json_object_new_object();
+ json_object_object_add(json, "lsp", own_json);
+ json_object_string_add(own_json, "id", LSPid);
+ json_object_string_add(own_json, "own", lsp->own_lsp ? "*" : " ");
+ json_object_int_add(json, "pdu-len", lsp->hdr.pdu_len);
+ snprintfrr(buf, sizeof(buf), "0x%08x", lsp->hdr.seqno);
+ json_object_string_add(json, "seq-number", buf);
+ snprintfrr(buf, sizeof(buf), "0x%04hx", lsp->hdr.checksum);
+ json_object_string_add(json, "chksum", buf);
+ if (lsp->hdr.rem_lifetime == 0) {
+ snprintf(age_out, sizeof(age_out), "(%d)", lsp->age_out);
+ age_out[7] = '\0';
+ json_object_string_add(json, "holdtime", age_out);
+ } else {
+ json_object_int_add(json, "holdtime", lsp->hdr.rem_lifetime);
+ }
+ json_object_string_add(
+ json, "att-p-ol", lsp_bits2string(lsp->hdr.lsp_bits, b, sizeof(b)));
+}
+
+void lsp_print_vty(struct isis_lsp *lsp, struct vty *vty,
+ char dynhost, struct isis *isis)
{
char LSPid[255];
char age_out[8];
@@ -754,30 +794,40 @@ void lsp_print(struct isis_lsp *lsp, struct vty *vty, char dynhost,
vty_out(vty, "%s\n", lsp_bits2string(lsp->hdr.lsp_bits, b, sizeof(b)));
}
-void lsp_print_detail(struct isis_lsp *lsp, struct vty *vty, char dynhost,
- struct isis *isis)
+void lsp_print_detail(struct isis_lsp *lsp, struct vty *vty,
+ struct json_object *json, char dynhost,
+ struct isis *isis)
{
- lsp_print(lsp, vty, dynhost, isis);
- if (lsp->tlvs)
- vty_multiline(vty, " ", "%s", isis_format_tlvs(lsp->tlvs));
- vty_out(vty, "\n");
+ if (json) {
+ lsp_print_json(lsp, json, dynhost, isis);
+ if (lsp->tlvs) {
+ isis_format_tlvs(lsp->tlvs, json);
+ }
+ } else {
+ lsp_print_vty(lsp, vty, dynhost, isis);
+ if (lsp->tlvs)
+ vty_multiline(vty, " ", "%s",
+ isis_format_tlvs(lsp->tlvs, NULL));
+ vty_out(vty, "\n");
+ }
}
/* print all the lsps info in the local lspdb */
-int lsp_print_all(struct vty *vty, struct lspdb_head *head, char detail,
- char dynhost, struct isis *isis)
+int lsp_print_all(struct vty *vty, struct json_object *json,
+ struct lspdb_head *head, char detail, char dynhost,
+ struct isis *isis)
{
struct isis_lsp *lsp;
int lsp_count = 0;
if (detail == ISIS_UI_LEVEL_BRIEF) {
frr_each (lspdb, head, lsp) {
- lsp_print(lsp, vty, dynhost, isis);
+ lsp_print_common(lsp, vty, json, dynhost, isis);
lsp_count++;
}
} else if (detail == ISIS_UI_LEVEL_DETAIL) {
frr_each (lspdb, head, lsp) {
- lsp_print_detail(lsp, vty, dynhost, isis);
+ lsp_print_detail(lsp, vty, json, dynhost, isis);
lsp_count++;
}
}
@@ -1264,7 +1314,7 @@ static void lsp_build(struct isis_lsp *lsp, struct isis_area *area)
if (!fragments) {
zlog_warn("BUG: could not fragment own LSP:");
log_multiline(LOG_WARNING, " ", "%s",
- isis_format_tlvs(tlvs));
+ isis_format_tlvs(tlvs, NULL));
isis_free_tlvs(tlvs);
return;
}
diff --git a/isisd/isis_lsp.h b/isisd/isis_lsp.h
index f42d702b37..b13b2a35e6 100644
--- a/isisd/isis_lsp.h
+++ b/isisd/isis_lsp.h
@@ -120,12 +120,19 @@ void lsp_update(struct isis_lsp *lsp, struct isis_lsp_hdr *hdr,
void lsp_inc_seqno(struct isis_lsp *lsp, uint32_t seqno);
void lspid_print(uint8_t *lsp_id, char *dest, size_t dest_len, char dynhost,
char frag, struct isis *isis);
-void lsp_print(struct isis_lsp *lsp, struct vty *vty, char dynhost,
- struct isis *isis);
-void lsp_print_detail(struct isis_lsp *lsp, struct vty *vty, char dynhost,
+void lsp_print_common(struct isis_lsp *lsp, struct vty *vty,
+ struct json_object *json, char dynhost,
struct isis *isis);
-int lsp_print_all(struct vty *vty, struct lspdb_head *head, char detail,
- char dynhost, struct isis *isis);
+void lsp_print_vty(struct isis_lsp *lsp, struct vty *vty, char dynhost,
+ struct isis *isis);
+void lsp_print_json(struct isis_lsp *lsp, struct json_object *json,
+ char dynhost, struct isis *isis);
+void lsp_print_detail(struct isis_lsp *lsp, struct vty *vty,
+ struct json_object *json, char dynhost,
+ struct isis *isis);
+int lsp_print_all(struct vty *vty, struct json_object *json,
+ struct lspdb_head *head, char detail, char dynhost,
+ struct isis *isis);
/* sets SRMflags for all active circuits of an lsp */
void lsp_set_all_srmflags(struct isis_lsp *lsp, bool set);
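A hypothetical wrapper showing how the reworked prototypes are meant to be driven: the json argument selects the JSON path, while passing NULL keeps the original vty text output. Illustrative only, not taken from the diff.

#include <stdbool.h>
#include "isisd/isis_lsp.h"
#include "isisd/isisd.h"
#include "lib/json.h"
#include "lib/vty.h"

static int show_database(struct vty *vty, struct lspdb_head *head, char detail,
			 char dynhost, struct isis *isis, bool uj)
{
	int count;

	if (uj) {
		struct json_object *json = json_object_new_object();

		count = lsp_print_all(vty, json, head, detail, dynhost, isis);
		vty_out(vty, "%s\n",
			json_object_to_json_string_ext(
				json, JSON_C_TO_STRING_PRETTY));
		json_object_free(json);
	} else {
		count = lsp_print_all(vty, NULL, head, detail, dynhost, isis);
	}
	return count;
}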
diff --git a/isisd/isis_pdu.c b/isisd/isis_pdu.c
index 517c9ec5aa..1a54d47f3c 100644
--- a/isisd/isis_pdu.c
+++ b/isisd/isis_pdu.c
@@ -2209,7 +2209,7 @@ int send_csnp(struct isis_circuit *circuit, int level)
circuit->interface->name,
stream_get_endp(circuit->snd_stream));
log_multiline(LOG_DEBUG, " ", "%s",
- isis_format_tlvs(tlvs));
+ isis_format_tlvs(tlvs, NULL));
if (IS_DEBUG_PACKET_DUMP)
zlog_dump_data(
STREAM_DATA(circuit->snd_stream),
@@ -2368,7 +2368,7 @@ static int send_psnp(int level, struct isis_circuit *circuit)
circuit->interface->name,
stream_get_endp(circuit->snd_stream));
log_multiline(LOG_DEBUG, " ", "%s",
- isis_format_tlvs(tlvs));
+ isis_format_tlvs(tlvs, NULL));
if (IS_DEBUG_PACKET_DUMP)
zlog_dump_data(
STREAM_DATA(circuit->snd_stream),
diff --git a/isisd/isis_pfpacket.c b/isisd/isis_pfpacket.c
index 20224c73a1..d58cd1c5bc 100644
--- a/isisd/isis_pfpacket.c
+++ b/isisd/isis_pfpacket.c
@@ -101,11 +101,13 @@ static int isis_multicast_join(int fd, int registerto, int if_num)
mreq.mr_type = PACKET_MR_ALLMULTI;
}
#ifdef EXTREME_DEBUG
- zlog_debug(
- "isis_multicast_join(): fd=%d, reg_to=%d, if_num=%d, address = %02x:%02x:%02x:%02x:%02x:%02x",
- fd, registerto, if_num, mreq.mr_address[0], mreq.mr_address[1],
- mreq.mr_address[2], mreq.mr_address[3], mreq.mr_address[4],
- mreq.mr_address[5]);
+ if (IS_DEBUG_EVENTS)
+ zlog_debug(
+ "isis_multicast_join(): fd=%d, reg_to=%d, if_num=%d, address = %02x:%02x:%02x:%02x:%02x:%02x",
+ fd, registerto, if_num, mreq.mr_address[0],
+ mreq.mr_address[1], mreq.mr_address[2],
+ mreq.mr_address[3], mreq.mr_address[4],
+ mreq.mr_address[5]);
#endif /* EXTREME_DEBUG */
if (setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq,
sizeof(struct packet_mreq))) {
diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c
index 04b5cf1a67..fd05fb94df 100644
--- a/isisd/isis_spf.c
+++ b/isisd/isis_spf.c
@@ -511,10 +511,12 @@ static struct isis_vertex *isis_spf_add_root(struct isis_spftree *spftree)
isis_vertex_queue_append(&spftree->paths, vertex);
#ifdef EXTREME_DEBUG
- zlog_debug("ISIS-SPF: added this IS %s %s depth %d dist %d to PATHS",
- vtype2string(vertex->type),
- vid2string(vertex, buff, sizeof(buff)), vertex->depth,
- vertex->d_N);
+ if (IS_DEBUG_SPF_EVENTS)
+ zlog_debug(
+ "ISIS-SPF: added this IS %s %s depth %d dist %d to PATHS",
+ vtype2string(vertex->type),
+ vid2string(vertex, buff, sizeof(buff)), vertex->depth,
+ vertex->d_N);
#endif /* EXTREME_DEBUG */
return vertex;
@@ -629,11 +631,13 @@ isis_spf_add2tent(struct isis_spftree *spftree, enum vertextype vtype, void *id,
}
#ifdef EXTREME_DEBUG
- zlog_debug(
- "ISIS-SPF: add to TENT %s %s %s depth %d dist %d adjcount %d",
- print_sys_hostname(vertex->N.id), vtype2string(vertex->type),
- vid2string(vertex, buff, sizeof(buff)), vertex->depth,
- vertex->d_N, listcount(vertex->Adj_N));
+ if (IS_DEBUG_SPF_EVENTS)
+ zlog_debug(
+ "ISIS-SPF: add to TENT %s %s %s depth %d dist %d adjcount %d",
+ print_sys_hostname(vertex->N.id),
+ vtype2string(vertex->type),
+ vid2string(vertex, buff, sizeof(buff)), vertex->depth,
+ vertex->d_N, listcount(vertex->Adj_N));
#endif /* EXTREME_DEBUG */
isis_vertex_queue_insert(&spftree->tents, vertex);
@@ -721,10 +725,12 @@ static void process_N(struct isis_spftree *spftree, enum vertextype vtype,
vertex = isis_find_vertex(&spftree->paths, id, vtype);
if (vertex) {
#ifdef EXTREME_DEBUG
- zlog_debug(
- "ISIS-SPF: process_N %s %s %s dist %d already found from PATH",
- print_sys_hostname(vertex->N.id), vtype2string(vtype),
- vid2string(vertex, buff, sizeof(buff)), dist);
+ if (IS_DEBUG_SPF_EVENTS)
+ zlog_debug(
+ "ISIS-SPF: process_N %s %s %s dist %d already found from PATH",
+ print_sys_hostname(vertex->N.id),
+ vtype2string(vtype),
+ vid2string(vertex, buff, sizeof(buff)), dist);
#endif /* EXTREME_DEBUG */
assert(dist >= vertex->d_N);
return;
@@ -735,12 +741,15 @@ static void process_N(struct isis_spftree *spftree, enum vertextype vtype,
if (vertex) {
/* 1) */
#ifdef EXTREME_DEBUG
- zlog_debug(
- "ISIS-SPF: process_N %s %s %s dist %d parent %s adjcount %d",
- print_sys_hostname(vertex->N.id), vtype2string(vtype),
- vid2string(vertex, buff, sizeof(buff)), dist,
- (parent ? print_sys_hostname(parent->N.id) : "null"),
- (parent ? listcount(parent->Adj_N) : 0));
+ if (IS_DEBUG_SPF_EVENTS)
+ zlog_debug(
+ "ISIS-SPF: process_N %s %s %s dist %d parent %s adjcount %d",
+ print_sys_hostname(vertex->N.id),
+ vtype2string(vtype),
+ vid2string(vertex, buff, sizeof(buff)), dist,
+ (parent ? print_sys_hostname(parent->N.id)
+ : "null"),
+ (parent ? listcount(parent->Adj_N) : 0));
#endif /* EXTREME_DEBUG */
if (vertex->d_N == dist) {
struct listnode *node;
@@ -778,9 +787,11 @@ static void process_N(struct isis_spftree *spftree, enum vertextype vtype,
}
#ifdef EXTREME_DEBUG
- zlog_debug("ISIS-SPF: process_N add2tent %s %s dist %d parent %s",
- print_sys_hostname(id), vtype2string(vtype), dist,
- (parent ? print_sys_hostname(parent->N.id) : "null"));
+ if (IS_DEBUG_SPF_EVENTS)
+ zlog_debug(
+ "ISIS-SPF: process_N add2tent %s %s dist %d parent %s",
+ print_sys_hostname(id), vtype2string(vtype), dist,
+ (parent ? print_sys_hostname(parent->N.id) : "null"));
#endif /* EXTREME_DEBUG */
isis_spf_add2tent(spftree, vtype, id, dist, depth, NULL, psid, parent);
@@ -839,8 +850,9 @@ lspfragloop:
}
#ifdef EXTREME_DEBUG
- zlog_debug("ISIS-SPF: process_lsp %s",
- print_sys_hostname(lsp->hdr.lsp_id));
+ if (IS_DEBUG_SPF_EVENTS)
+ zlog_debug("ISIS-SPF: process_lsp %s",
+ print_sys_hostname(lsp->hdr.lsp_id));
#endif /* EXTREME_DEBUG */
if (no_overload) {
@@ -1477,10 +1489,12 @@ static void add_to_paths(struct isis_spftree *spftree,
isis_vertex_queue_append(&spftree->paths, vertex);
#ifdef EXTREME_DEBUG
- zlog_debug("ISIS-SPF: added %s %s %s depth %d dist %d to PATHS",
- print_sys_hostname(vertex->N.id), vtype2string(vertex->type),
- vid2string(vertex, buff, sizeof(buff)), vertex->depth,
- vertex->d_N);
+ if (IS_DEBUG_SPF_EVENTS)
+ zlog_debug("ISIS-SPF: added %s %s %s depth %d dist %d to PATHS",
+ print_sys_hostname(vertex->N.id),
+ vtype2string(vertex->type),
+ vid2string(vertex, buff, sizeof(buff)),
+ vertex->depth, vertex->d_N);
#endif /* EXTREME_DEBUG */
}
@@ -1647,10 +1661,12 @@ static void isis_spf_loop(struct isis_spftree *spftree,
vertex = isis_vertex_queue_pop(&spftree->tents);
#ifdef EXTREME_DEBUG
- zlog_debug(
- "ISIS-SPF: get TENT node %s %s depth %d dist %d to PATHS",
- print_sys_hostname(vertex->N.id),
- vtype2string(vertex->type), vertex->depth, vertex->d_N);
+ if (IS_DEBUG_SPF_EVENTS)
+ zlog_debug(
+ "ISIS-SPF: get TENT node %s %s depth %d dist %d to PATHS",
+ print_sys_hostname(vertex->N.id),
+ vtype2string(vertex->type), vertex->depth,
+ vertex->d_N);
#endif /* EXTREME_DEBUG */
add_to_paths(spftree, vertex);
@@ -2683,3 +2699,15 @@ void isis_spf_print(struct isis_spftree *spftree, struct vty *vty)
vty_out(vty, " run count : %u\n", spftree->runcount);
}
+void isis_spf_print_json(struct isis_spftree *spftree, struct json_object *json)
+{
+ char uptime[MONOTIME_STRLEN];
+ time_t cur;
+ cur = time(NULL);
+ cur -= spftree->last_run_timestamp;
+ frrtime_to_interval(cur, uptime, sizeof(uptime));
+ json_object_string_add(json, "last-run-elapsed", uptime);
+ json_object_int_add(json, "last-run-duration-usec",
+ spftree->last_run_duration);
+ json_object_int_add(json, "last-run-count", spftree->runcount);
+}
diff --git a/isisd/isis_spf.h b/isisd/isis_spf.h
index 5b3aa59379..815db7b226 100644
--- a/isisd/isis_spf.h
+++ b/isisd/isis_spf.h
@@ -75,6 +75,8 @@ void isis_print_routes(struct vty *vty, struct isis_spftree *spftree,
bool prefix_sid, bool backup);
void isis_spf_init(void);
void isis_spf_print(struct isis_spftree *spftree, struct vty *vty);
+void isis_spf_print_json(struct isis_spftree *spftree,
+ struct json_object *json);
void isis_run_spf(struct isis_spftree *spftree);
struct isis_spftree *isis_run_hopcount_spf(struct isis_area *area,
uint8_t *sysid,
diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c
index f1aae7caf1..d3d59fb435 100644
--- a/isisd/isis_tlvs.c
+++ b/isisd/isis_tlvs.c
@@ -22,6 +22,7 @@
* Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#include <zebra.h>
+#include <json-c/json_object.h>
#ifdef CRYPTO_INTERNAL
@@ -57,7 +58,8 @@ typedef void (*free_item_func)(struct isis_item *i);
typedef int (*unpack_item_func)(uint16_t mtid, uint8_t len, struct stream *s,
struct sbuf *log, void *dest, int indent);
typedef void (*format_item_func)(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent);
+ struct sbuf *buf, struct json_object *json,
+ int indent);
typedef struct isis_item *(*copy_item_func)(struct isis_item *i);
struct tlv_ops {
@@ -208,152 +210,430 @@ copy_item_ext_subtlvs(struct isis_ext_subtlvs *exts, uint16_t mtid)
/* mtid parameter is used to manage multi-topology i.e. IPv4 / IPv6 */
static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts,
- struct sbuf *buf, int indent,
- uint16_t mtid)
+ struct sbuf *buf, struct json_object *json,
+ int indent, uint16_t mtid)
{
+ char aux_buf[255];
+ char cnt_buf[255];
/* Standard metrics */
- if (IS_SUBTLV(exts, EXT_ADM_GRP))
- sbuf_push(buf, indent, "Administrative Group: 0x%x\n",
- exts->adm_group);
+ if (IS_SUBTLV(exts, EXT_ADM_GRP)) {
+ if (json) {
+ snprintfrr(aux_buf, sizeof(aux_buf), "0x%x",
+ exts->adm_group);
+ json_object_string_add(json, "adm-group", aux_buf);
+ } else
+ sbuf_push(buf, indent, "Administrative Group: 0x%x\n",
+ exts->adm_group);
+ }
if (IS_SUBTLV(exts, EXT_LLRI)) {
- sbuf_push(buf, indent, "Link Local ID: %u\n",
- exts->local_llri);
- sbuf_push(buf, indent, "Link Remote ID: %u\n",
- exts->remote_llri);
- }
- if (IS_SUBTLV(exts, EXT_LOCAL_ADDR))
- sbuf_push(buf, indent, "Local Interface IP Address(es): %pI4\n",
- &exts->local_addr);
- if (IS_SUBTLV(exts, EXT_NEIGH_ADDR))
- sbuf_push(buf, indent,
- "Remote Interface IP Address(es): %pI4\n",
- &exts->neigh_addr);
- if (IS_SUBTLV(exts, EXT_LOCAL_ADDR6))
- sbuf_push(buf, indent,
- "Local Interface IPv6 Address(es): %pI6\n",
- &exts->local_addr6);
- if (IS_SUBTLV(exts, EXT_NEIGH_ADDR6))
- sbuf_push(buf, indent,
- "Remote Interface IPv6 Address(es): %pI6\n",
- &exts->neigh_addr6);
- if (IS_SUBTLV(exts, EXT_MAX_BW))
- sbuf_push(buf, indent, "Maximum Bandwidth: %g (Bytes/sec)\n",
- exts->max_bw);
- if (IS_SUBTLV(exts, EXT_MAX_RSV_BW))
- sbuf_push(buf, indent,
- "Maximum Reservable Bandwidth: %g (Bytes/sec)\n",
- exts->max_rsv_bw);
+ if (json) {
+ json_object_int_add(json, "link-local-id",
+ exts->local_llri);
+ json_object_int_add(json, "link-remote-id",
+ exts->remote_llri);
+ } else {
+ sbuf_push(buf, indent, "Link Local ID: %u\n",
+ exts->local_llri);
+ sbuf_push(buf, indent, "Link Remote ID: %u\n",
+ exts->remote_llri);
+ }
+ }
+ if (IS_SUBTLV(exts, EXT_LOCAL_ADDR)) {
+ if (json) {
+ inet_ntop(AF_INET, &exts->local_addr, aux_buf,
+ sizeof(aux_buf));
+ json_object_string_add(json, "local-iface-ip", aux_buf);
+ } else
+ sbuf_push(buf, indent,
+ "Local Interface IP Address(es): %pI4\n",
+ &exts->local_addr);
+ }
+ if (IS_SUBTLV(exts, EXT_NEIGH_ADDR)) {
+ if (json) {
+ inet_ntop(AF_INET, &exts->neigh_addr, aux_buf,
+ sizeof(aux_buf));
+ json_object_string_add(json, "remote-iface-ip",
+ aux_buf);
+ } else
+ sbuf_push(buf, indent,
+ "Remote Interface IP Address(es): %pI4\n",
+ &exts->neigh_addr);
+ }
+ if (IS_SUBTLV(exts, EXT_LOCAL_ADDR6)) {
+ if (json) {
+ inet_ntop(AF_INET6, &exts->local_addr6, aux_buf,
+ sizeof(aux_buf));
+ json_object_string_add(json, "local-iface-ipv6",
+ aux_buf);
+ } else
+ sbuf_push(buf, indent,
+ "Local Interface IPv6 Address(es): %pI6\n",
+ &exts->local_addr6);
+ }
+ if (IS_SUBTLV(exts, EXT_NEIGH_ADDR6)) {
+ if (json) {
+ inet_ntop(AF_INET6, &exts->neigh_addr6, aux_buf,
+ sizeof(aux_buf));
+ json_object_string_add(json, "remote-iface-ipv6",
+ aux_buf);
+ } else
+ sbuf_push(buf, indent,
+ "Remote Interface IPv6 Address(es): %pI6\n",
+ &exts->neigh_addr6);
+ }
+ if (IS_SUBTLV(exts, EXT_MAX_BW)) {
+ if (json) {
+ snprintfrr(aux_buf, sizeof(aux_buf), "%g",
+ exts->max_bw);
+ json_object_string_add(json, "max-bandwith-bytes-sec",
+ aux_buf);
+ } else
+ sbuf_push(buf, indent,
+ "Maximum Bandwidth: %g (Bytes/sec)\n",
+ exts->max_bw);
+ }
+ if (IS_SUBTLV(exts, EXT_MAX_RSV_BW)) {
+ if (json) {
+ snprintfrr(aux_buf, sizeof(aux_buf), "%g",
+ exts->max_rsv_bw);
+ json_object_string_add(
+ json, "max-res-bandwith-bytes-sec", aux_buf);
+ } else
+ sbuf_push(
+ buf, indent,
+ "Maximum Reservable Bandwidth: %g (Bytes/sec)\n",
+ exts->max_rsv_bw);
+ }
if (IS_SUBTLV(exts, EXT_UNRSV_BW)) {
- sbuf_push(buf, indent, "Unreserved Bandwidth:\n");
- for (int j = 0; j < MAX_CLASS_TYPE; j += 2) {
- sbuf_push(buf, indent + 2,
- "[%d]: %g (Bytes/sec),\t[%d]: %g (Bytes/sec)\n",
- j, exts->unrsv_bw[j],
- j + 1, exts->unrsv_bw[j + 1]);
+ if (json) {
+ struct json_object *unrsv_json;
+ unrsv_json = json_object_new_object();
+ json_object_object_add(json, "unrsv-bandwith-bytes-sec",
+ unrsv_json);
+ for (int j = 0; j < MAX_CLASS_TYPE; j += 1) {
+ snprintfrr(cnt_buf, sizeof(cnt_buf), "%d", j);
+ snprintfrr(aux_buf, sizeof(aux_buf), "%g",
+ exts->unrsv_bw[j]);
+ json_object_string_add(unrsv_json, cnt_buf,
+ aux_buf);
+ }
+ } else {
+ sbuf_push(buf, indent, "Unreserved Bandwidth:\n");
+ for (int j = 0; j < MAX_CLASS_TYPE; j += 2) {
+ sbuf_push(
+ buf, indent + 2,
+ "[%d]: %g (Bytes/sec),\t[%d]: %g (Bytes/sec)\n",
+ j, exts->unrsv_bw[j], j + 1,
+ exts->unrsv_bw[j + 1]);
+ }
}
}
- if (IS_SUBTLV(exts, EXT_TE_METRIC))
- sbuf_push(buf, indent, "Traffic Engineering Metric: %u\n",
- exts->te_metric);
- if (IS_SUBTLV(exts, EXT_RMT_AS))
- sbuf_push(buf, indent,
- "Inter-AS TE Remote AS number: %u\n",
- exts->remote_as);
- if (IS_SUBTLV(exts, EXT_RMT_IP))
- sbuf_push(buf, indent,
- "Inter-AS TE Remote ASBR IP address: %pI4\n",
- &exts->remote_ip);
+ if (IS_SUBTLV(exts, EXT_TE_METRIC)) {
+ if (json) {
+ json_object_int_add(json, "te-metric", exts->te_metric);
+ } else
+ sbuf_push(buf, indent,
+ "Traffic Engineering Metric: %u\n",
+ exts->te_metric);
+ }
+ if (IS_SUBTLV(exts, EXT_RMT_AS)) {
+ if (json) {
+ json_object_int_add(json, "inter-as-te-remote-as",
+ exts->remote_as);
+ } else
+ sbuf_push(buf, indent,
+ "Inter-AS TE Remote AS number: %u\n",
+ exts->remote_as);
+ }
+ if (IS_SUBTLV(exts, EXT_RMT_IP)) {
+ if (json) {
+ inet_ntop(AF_INET, &exts->remote_ip, aux_buf,
+ sizeof(aux_buf));
+ json_object_string_add(
+ json, "inter-as-te-remote-asbr-ip", aux_buf);
+ } else
+ sbuf_push(buf, indent,
+ "Inter-AS TE Remote ASBR IP address: %pI4\n",
+ &exts->remote_ip);
+ }
/* Extended metrics */
- if (IS_SUBTLV(exts, EXT_DELAY))
- sbuf_push(buf, indent,
- "%s Average Link Delay: %u (micro-sec)\n",
- IS_ANORMAL(exts->delay) ? "Anomalous" : "Normal",
- exts->delay);
+ if (IS_SUBTLV(exts, EXT_DELAY)) {
+ if (json) {
+ struct json_object *avg_json;
+ avg_json = json_object_new_object();
+ json_object_object_add(json, "avg-delay", avg_json);
+ json_object_string_add(avg_json, "delay",
+ IS_ANORMAL(exts->delay)
+ ? "Anomalous"
+ : "Normal");
+ json_object_int_add(avg_json, "micro-sec", exts->delay);
+ } else
+ sbuf_push(buf, indent,
+ "%s Average Link Delay: %u (micro-sec)\n",
+ IS_ANORMAL(exts->delay) ? "Anomalous"
+ : "Normal",
+ exts->delay);
+ }
if (IS_SUBTLV(exts, EXT_MM_DELAY)) {
- sbuf_push(buf, indent, "%s Min/Max Link Delay: %u / %u (micro-sec)\n",
- IS_ANORMAL(exts->min_delay) ? "Anomalous" : "Normal",
- exts->min_delay & TE_EXT_MASK,
- exts->max_delay & TE_EXT_MASK);
+ if (json) {
+ struct json_object *avg_json;
+ avg_json = json_object_new_object();
+ json_object_object_add(json, "max-min-delay", avg_json);
+ json_object_string_add(avg_json, "delay",
+ IS_ANORMAL(exts->min_delay)
+ ? "Anomalous"
+ : "Normal");
+ snprintfrr(aux_buf, sizeof(aux_buf), "%u / %u",
+ exts->min_delay & TE_EXT_MASK,
+ exts->max_delay & TE_EXT_MASK);
+ json_object_string_add(avg_json, "micro-sec", aux_buf);
+
+ } else
+ sbuf_push(
+ buf, indent,
+ "%s Min/Max Link Delay: %u / %u (micro-sec)\n",
+ IS_ANORMAL(exts->min_delay) ? "Anomalous"
+ : "Normal",
+ exts->min_delay & TE_EXT_MASK,
+ exts->max_delay & TE_EXT_MASK);
}
if (IS_SUBTLV(exts, EXT_DELAY_VAR)) {
- sbuf_push(buf, indent,
- "Delay Variation: %u (micro-sec)\n",
- exts->delay_var & TE_EXT_MASK);
- }
- if (IS_SUBTLV(exts, EXT_PKT_LOSS))
- sbuf_push(buf, indent, "%s Link Packet Loss: %g (%%)\n",
- IS_ANORMAL(exts->pkt_loss) ? "Anomalous" : "Normal",
- (float)((exts->pkt_loss & TE_EXT_MASK)
- * LOSS_PRECISION));
- if (IS_SUBTLV(exts, EXT_RES_BW))
- sbuf_push(buf, indent,
- "Unidir. Residual Bandwidth: %g (Bytes/sec)\n",
- exts->res_bw);
- if (IS_SUBTLV(exts, EXT_AVA_BW))
- sbuf_push(buf, indent,
- "Unidir. Available Bandwidth: %g (Bytes/sec)\n",
- exts->ava_bw);
- if (IS_SUBTLV(exts, EXT_USE_BW))
- sbuf_push(buf, indent,
- "Unidir. Utilized Bandwidth: %g (Bytes/sec)\n",
- exts->use_bw);
+ if (json) {
+ json_object_int_add(json, "delay-variation-micro-sec",
+ exts->delay_var & TE_EXT_MASK);
+ } else
+ sbuf_push(buf, indent,
+ "Delay Variation: %u (micro-sec)\n",
+ exts->delay_var & TE_EXT_MASK);
+ }
+ if (IS_SUBTLV(exts, EXT_PKT_LOSS)) {
+ if (json) {
+ snprintfrr(aux_buf, sizeof(aux_buf), "%g",
+ (float)((exts->pkt_loss & TE_EXT_MASK) *
+ LOSS_PRECISION));
+ struct json_object *link_json;
+ link_json = json_object_new_object();
+ json_object_object_add(json, "link-packet-loss",
+ link_json);
+ json_object_string_add(link_json, "loss",
+ IS_ANORMAL(exts->pkt_loss)
+ ? "Anomalous"
+ : "Normal");
+ json_object_string_add(link_json, "percentaje",
+ aux_buf);
+ } else
+ sbuf_push(buf, indent, "%s Link Packet Loss: %g (%%)\n",
+ IS_ANORMAL(exts->pkt_loss) ? "Anomalous"
+ : "Normal",
+ (float)((exts->pkt_loss & TE_EXT_MASK) *
+ LOSS_PRECISION));
+ }
+ if (IS_SUBTLV(exts, EXT_RES_BW)) {
+ if (json) {
+ snprintfrr(aux_buf, sizeof(aux_buf), "%g",
+ (exts->res_bw));
+ json_object_string_add(json,
+ "unidir-residual-band-bytes-sec",
+ aux_buf);
+ } else
+ sbuf_push(
+ buf, indent,
+ "Unidir. Residual Bandwidth: %g (Bytes/sec)\n",
+ exts->res_bw);
+ }
+ if (IS_SUBTLV(exts, EXT_AVA_BW)) {
+ if (json) {
+ snprintfrr(aux_buf, sizeof(aux_buf), "%g",
+ (exts->ava_bw));
+ json_object_string_add(
+ json, "unidir-available-band-bytes-sec",
+ aux_buf);
+ } else
+ sbuf_push(
+ buf, indent,
+ "Unidir. Available Bandwidth: %g (Bytes/sec)\n",
+ exts->ava_bw);
+ }
+ if (IS_SUBTLV(exts, EXT_USE_BW)) {
+ if (json) {
+ snprintfrr(aux_buf, sizeof(aux_buf), "%g",
+ (exts->use_bw));
+ json_object_string_add(json,
+ "unidir-utilized-band-bytes-sec",
+ aux_buf);
+ } else
+ sbuf_push(
+ buf, indent,
+ "Unidir. Utilized Bandwidth: %g (Bytes/sec)\n",
+ exts->use_bw);
+ }
/* Segment Routing Adjacency as per RFC8667 section #2.2.1 */
if (IS_SUBTLV(exts, EXT_ADJ_SID)) {
struct isis_adj_sid *adj;
- for (adj = (struct isis_adj_sid *)exts->adj_sid.head; adj;
- adj = adj->next) {
- sbuf_push(
- buf, indent,
- "Adjacency-SID: %u, Weight: %hhu, Flags: F:%c B:%c, V:%c, L:%c, S:%c, P:%c\n",
- adj->sid, adj->weight,
- adj->flags & EXT_SUBTLV_LINK_ADJ_SID_FFLG ? '1'
- : '0',
- adj->flags & EXT_SUBTLV_LINK_ADJ_SID_BFLG ? '1'
- : '0',
- adj->flags & EXT_SUBTLV_LINK_ADJ_SID_VFLG ? '1'
- : '0',
- adj->flags & EXT_SUBTLV_LINK_ADJ_SID_LFLG ? '1'
- : '0',
- adj->flags & EXT_SUBTLV_LINK_ADJ_SID_SFLG ? '1'
- : '0',
- adj->flags & EXT_SUBTLV_LINK_ADJ_SID_PFLG
- ? '1'
- : '0');
- }
+ if (json) {
+ struct json_object *arr_adj_json, *flags_json;
+ arr_adj_json = json_object_new_array();
+ json_object_object_add(json, "adj-sid", arr_adj_json);
+ for (adj = (struct isis_adj_sid *)exts->adj_sid.head;
+ adj; adj = adj->next) {
+ snprintfrr(cnt_buf, sizeof(cnt_buf), "%d",
+ adj->sid);
+ flags_json = json_object_new_object();
+ json_object_int_add(flags_json, "sid",
+ adj->sid);
+ json_object_int_add(flags_json, "weight",
+ adj->weight);
+ json_object_string_add(
+ flags_json, "flag-f",
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_FFLG
+ ? "1"
+ : "0");
+ json_object_string_add(
+ flags_json, "flag-b",
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_BFLG
+ ? "1"
+ : "0");
+ json_object_string_add(
+ flags_json, "flag-v",
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_VFLG
+ ? "1"
+ : "0");
+ json_object_string_add(
+ flags_json, "flag-l",
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_LFLG
+ ? "1"
+ : "0");
+ json_object_string_add(
+ flags_json, "flag-s",
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_SFLG
+ ? "1"
+ : "0");
+ json_object_string_add(
+ flags_json, "flag-p",
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_PFLG
+ ? "1"
+ : "0");
+ json_object_array_add(arr_adj_json, flags_json);
+ }
+ } else
+ for (adj = (struct isis_adj_sid *)exts->adj_sid.head;
+ adj; adj = adj->next) {
+ sbuf_push(
+ buf, indent,
+ "Adjacency-SID: %u, Weight: %hhu, Flags: F:%c B:%c, V:%c, L:%c, S:%c, P:%c\n",
+ adj->sid, adj->weight,
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_FFLG
+ ? '1'
+ : '0',
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_BFLG
+ ? '1'
+ : '0',
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_VFLG
+ ? '1'
+ : '0',
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_LFLG
+ ? '1'
+ : '0',
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_SFLG
+ ? '1'
+ : '0',
+ adj->flags & EXT_SUBTLV_LINK_ADJ_SID_PFLG
+ ? '1'
+ : '0');
+ }
}
/* Segment Routing LAN-Adjacency as per RFC8667 section #2.2.2 */
if (IS_SUBTLV(exts, EXT_LAN_ADJ_SID)) {
struct isis_lan_adj_sid *lan;
-
- for (lan = (struct isis_lan_adj_sid *)exts->lan_sid.head;
- lan; lan = lan->next) {
- continue;
- sbuf_push(buf, indent,
- "Lan-Adjacency-SID: %u, Weight: %hhu, Flags: F:%c B:%c, V:%c, L:%c, S:%c, P:%c\n"
- " Neighbor-ID: %s\n",
- lan->sid, lan->weight,
- lan->flags & EXT_SUBTLV_LINK_ADJ_SID_FFLG
- ? '1'
- : '0',
- lan->flags & EXT_SUBTLV_LINK_ADJ_SID_BFLG
- ? '1'
- : '0',
- lan->flags & EXT_SUBTLV_LINK_ADJ_SID_VFLG
- ? '1'
- : '0',
- lan->flags & EXT_SUBTLV_LINK_ADJ_SID_LFLG
- ? '1'
- : '0',
- lan->flags & EXT_SUBTLV_LINK_ADJ_SID_SFLG
- ? '1'
- : '0',
- lan->flags & EXT_SUBTLV_LINK_ADJ_SID_PFLG
- ? '1'
- : '0',
- isis_format_id(lan->neighbor_id, 6));
- }
+ if (json) {
+ struct json_object *arr_adj_json, *flags_json;
+ arr_adj_json = json_object_new_array();
+ json_object_object_add(json, "lan-adj-sid",
+ arr_adj_json);
+ for (lan = (struct isis_lan_adj_sid *)
+ exts->lan_sid.head;
+ lan; lan = lan->next) {
+ if (((mtid == ISIS_MT_IPV4_UNICAST) &&
+ (lan->family != AF_INET)) ||
+ ((mtid == ISIS_MT_IPV6_UNICAST) &&
+ (lan->family != AF_INET6)))
+ continue;
+ snprintfrr(cnt_buf, sizeof(cnt_buf), "%d",
+ lan->sid);
+ flags_json = json_object_new_object();
+ json_object_int_add(flags_json, "sid",
+ lan->sid);
+ json_object_int_add(flags_json, "weight",
+ lan->weight);
+ json_object_string_add(
+ flags_json, "flag-f",
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_FFLG
+ ? "1"
+ : "0");
+ json_object_string_add(
+ flags_json, "flag-b",
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_BFLG
+ ? "1"
+ : "0");
+ json_object_string_add(
+ flags_json, "flag-v",
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_VFLG
+ ? "1"
+ : "0");
+ json_object_string_add(
+ flags_json, "flag-l",
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_LFLG
+ ? "1"
+ : "0");
+ json_object_string_add(
+ flags_json, "flag-s",
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_SFLG
+ ? "1"
+ : "0");
+ json_object_string_add(
+ flags_json, "flag-p",
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_PFLG
+ ? "1"
+ : "0");
+ json_object_array_add(arr_adj_json, flags_json);
+ }
+ } else
+
+ for (lan = (struct isis_lan_adj_sid *)
+ exts->lan_sid.head;
+ lan; lan = lan->next) {
+ if (((mtid == ISIS_MT_IPV4_UNICAST) &&
+ (lan->family != AF_INET)) ||
+ ((mtid == ISIS_MT_IPV6_UNICAST) &&
+ (lan->family != AF_INET6)))
+ continue;
+ sbuf_push(
+ buf, indent,
+ "Lan-Adjacency-SID: %u, Weight: %hhu, Flags: F:%c B:%c, V:%c, L:%c, S:%c, P:%c\n"
+ " Neighbor-ID: %s\n",
+ lan->sid, lan->weight,
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_FFLG
+ ? '1'
+ : '0',
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_BFLG
+ ? '1'
+ : '0',
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_VFLG
+ ? '1'
+ : '0',
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_LFLG
+ ? '1'
+ : '0',
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_SFLG
+ ? '1'
+ : '0',
+ lan->flags & EXT_SUBTLV_LINK_ADJ_SID_PFLG
+ ? '1'
+ : '0',
+ isis_format_id(lan->neighbor_id, 6));
+ }
}
}
@@ -880,26 +1160,64 @@ static struct isis_item *copy_item_prefix_sid(struct isis_item *i)
}
static void format_item_prefix_sid(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json,
+ int indent)
{
struct isis_prefix_sid *sid = (struct isis_prefix_sid *)i;
- sbuf_push(buf, indent, "SR Prefix-SID ");
- if (sid->flags & ISIS_PREFIX_SID_VALUE) {
- sbuf_push(buf, 0, "Label: %u, ", sid->value);
+ if (json) {
+ struct json_object *sr_json;
+ sr_json = json_object_new_object();
+ json_object_object_add(json, "sr", sr_json);
+ if (sid->flags & ISIS_PREFIX_SID_VALUE) {
+ json_object_int_add(sr_json, "label", sid->value);
+ } else {
+ json_object_int_add(sr_json, "index", sid->value);
+ }
+ json_object_int_add(sr_json, "alg", sid->algorithm);
+ json_object_string_add(
+ sr_json, "readvertised",
+ ((sid->flags & ISIS_PREFIX_SID_READVERTISED) ? "yes"
+ : ""));
+ json_object_string_add(
+ sr_json, "node",
+ ((sid->flags & ISIS_PREFIX_SID_NODE) ? "yes" : ""));
+ json_object_string_add(sr_json, "php",
+ ((sid->flags & ISIS_PREFIX_SID_NO_PHP)
+ ? "no-php"
+ : "php"));
+ json_object_string_add(
+ sr_json, "explicit-null",
+ ((sid->flags & ISIS_PREFIX_SID_EXPLICIT_NULL) ? "yes"
+ : ""));
+ json_object_string_add(
+ sr_json, "value",
+ ((sid->flags & ISIS_PREFIX_SID_VALUE) ? "yes" : ""));
+ json_object_string_add(
+ sr_json, "local",
+ ((sid->flags & ISIS_PREFIX_SID_LOCAL) ? "yes" : ""));
+
} else {
- sbuf_push(buf, 0, "Index: %u, ", sid->value);
+ sbuf_push(buf, indent, "SR Prefix-SID ");
+ if (sid->flags & ISIS_PREFIX_SID_VALUE) {
+ sbuf_push(buf, 0, "Label: %u, ", sid->value);
+ } else {
+ sbuf_push(buf, 0, "Index: %u, ", sid->value);
+ }
+ sbuf_push(buf, 0, "Algorithm: %hhu, ", sid->algorithm);
+ sbuf_push(buf, 0, "Flags:%s%s%s%s%s%s\n",
+ sid->flags & ISIS_PREFIX_SID_READVERTISED
+ ? " READVERTISED"
+ : "",
+ sid->flags & ISIS_PREFIX_SID_NODE ? " NODE" : "",
+ sid->flags & ISIS_PREFIX_SID_NO_PHP ? " NO-PHP"
+ : " PHP",
+ sid->flags & ISIS_PREFIX_SID_EXPLICIT_NULL
+ ? " EXPLICIT-NULL"
+ : "",
+ sid->flags & ISIS_PREFIX_SID_VALUE ? " VALUE" : "",
+ sid->flags & ISIS_PREFIX_SID_LOCAL ? " LOCAL" : "");
}
- sbuf_push(buf, 0, "Algorithm: %hhu, ", sid->algorithm);
- sbuf_push(buf, 0, "Flags:%s%s%s%s%s%s\n",
- sid->flags & ISIS_PREFIX_SID_READVERTISED ? " READVERTISED"
- : "",
- sid->flags & ISIS_PREFIX_SID_NODE ? " NODE" : "",
- sid->flags & ISIS_PREFIX_SID_NO_PHP ? " NO-PHP" : " PHP",
- sid->flags & ISIS_PREFIX_SID_EXPLICIT_NULL ? " EXPLICIT-NULL"
- : "",
- sid->flags & ISIS_PREFIX_SID_VALUE ? " VALUE" : "",
- sid->flags & ISIS_PREFIX_SID_LOCAL ? " LOCAL" : "");
}
static void free_item_prefix_sid(struct isis_item *i)
@@ -977,7 +1295,7 @@ static int unpack_item_prefix_sid(uint16_t mtid, uint8_t len, struct stream *s,
sid.value = stream_getl(s);
}
- format_item_prefix_sid(mtid, (struct isis_item *)&sid, log, indent + 2);
+ format_item_prefix_sid(mtid, (struct isis_item *)&sid, log, NULL, indent + 2);
append_item(&subtlvs->prefix_sids, copy_item_prefix_sid((struct isis_item *)&sid));
return 0;
}
@@ -997,14 +1315,21 @@ static struct prefix_ipv6 *copy_subtlv_ipv6_source_prefix(struct prefix_ipv6 *p)
}
static void format_subtlv_ipv6_source_prefix(struct prefix_ipv6 *p,
- struct sbuf *buf, int indent)
+ struct sbuf *buf,
+ struct json_object *json,
+ int indent)
{
if (!p)
return;
char prefixbuf[PREFIX2STR_BUFFER];
- sbuf_push(buf, indent, "IPv6 Source Prefix: %s\n",
- prefix2str(p, prefixbuf, sizeof(prefixbuf)));
+ if (json) {
+ prefix2str(p, prefixbuf, sizeof(prefixbuf));
+ json_object_string_add(json, "ipv6-src-prefix", prefixbuf);
+ } else {
+ sbuf_push(buf, indent, "IPv6 Source Prefix: %s\n",
+ prefix2str(p, prefixbuf, sizeof(prefixbuf)));
+ }
}
static int pack_subtlv_ipv6_source_prefix(struct prefix_ipv6 *p,
@@ -1080,7 +1405,8 @@ static void copy_items(enum isis_tlv_context context, enum isis_tlv_type type,
struct isis_item_list *src, struct isis_item_list *dest);
static void format_items_(uint16_t mtid, enum isis_tlv_context context,
enum isis_tlv_type type, struct isis_item_list *items,
- struct sbuf *buf, int indent);
+ struct sbuf *buf, struct json_object *json,
+ int indent);
#define format_items(...) format_items_(ISIS_MT_IPV4_UNICAST, __VA_ARGS__)
static void free_items(enum isis_tlv_context context, enum isis_tlv_type type,
struct isis_item_list *items);
@@ -1124,12 +1450,12 @@ static struct isis_subtlvs *copy_subtlvs(struct isis_subtlvs *subtlvs)
}
static void format_subtlvs(struct isis_subtlvs *subtlvs, struct sbuf *buf,
- int indent)
+ struct json_object *json, int indent)
{
format_items(subtlvs->context, ISIS_SUBTLV_PREFIX_SID,
- &subtlvs->prefix_sids, buf, indent);
+ &subtlvs->prefix_sids, buf, json, indent);
- format_subtlv_ipv6_source_prefix(subtlvs->source_prefix, buf, indent);
+ format_subtlv_ipv6_source_prefix(subtlvs->source_prefix, buf, json, indent);
}
static void isis_free_subtlvs(struct isis_subtlvs *subtlvs)
@@ -1189,12 +1515,18 @@ static struct isis_item *copy_item_area_address(struct isis_item *i)
}
static void format_item_area_address(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json,
+ int indent)
{
struct isis_area_address *addr = (struct isis_area_address *)i;
- sbuf_push(buf, indent, "Area Address: %s\n",
- isonet_print(addr->addr, addr->len));
+ if (json) {
+ json_object_string_add(json, "area-addr",
+ isonet_print(addr->addr, addr->len));
+ } else {
+ sbuf_push(buf, indent, "Area Address: %s\n",
+ isonet_print(addr->addr, addr->len));
+ }
}
static void free_item_area_address(struct isis_item *i)
@@ -1251,7 +1583,7 @@ static int unpack_item_area_address(uint16_t mtid, uint8_t len,
stream_get(rv->addr, s, rv->len);
format_item_area_address(ISIS_MT_IPV4_UNICAST, (struct isis_item *)rv,
- log, indent + 2);
+ log, NULL, indent + 2);
append_item(&tlvs->area_addresses, (struct isis_item *)rv);
return 0;
out:
@@ -1271,12 +1603,21 @@ static struct isis_item *copy_item_oldstyle_reach(struct isis_item *i)
}
static void format_item_oldstyle_reach(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf,
+ struct json_object *json, int indent)
{
struct isis_oldstyle_reach *r = (struct isis_oldstyle_reach *)i;
- sbuf_push(buf, indent, "IS Reachability: %s (Metric: %hhu)\n",
- isis_format_id(r->id, 7), r->metric);
+ if (json) {
+ struct json_object *old_json;
+ old_json = json_object_new_object();
+ json_object_object_add(json, "old-reach-style", old_json);
+ json_object_string_add(old_json, "is-reach",
+ isis_format_id(r->id, 7));
+ json_object_int_add(old_json, "metric", r->metric);
+ } else
+ sbuf_push(buf, indent, "IS Reachability: %s (Metric: %hhu)\n",
+ isis_format_id(r->id, 7), r->metric);
}
static void free_item_oldstyle_reach(struct isis_item *i)
@@ -1327,7 +1668,7 @@ static int unpack_item_oldstyle_reach(uint16_t mtid, uint8_t len,
stream_forward_getp(s, 3); /* Skip other metrics */
stream_get(rv->id, s, 7);
- format_item_oldstyle_reach(mtid, (struct isis_item *)rv, log,
+ format_item_oldstyle_reach(mtid, (struct isis_item *)rv, log, NULL,
indent + 2);
append_item(&tlvs->oldstyle_reach, (struct isis_item *)rv);
return 0;
@@ -1344,11 +1685,17 @@ static struct isis_item *copy_item_lan_neighbor(struct isis_item *i)
}
static void format_item_lan_neighbor(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json,
+ int indent)
{
struct isis_lan_neighbor *n = (struct isis_lan_neighbor *)i;
- sbuf_push(buf, indent, "LAN Neighbor: %s\n", isis_format_id(n->mac, 6));
+ if (json) {
+ json_object_string_add(json, "lan-neighbor",
+ isis_format_id(n->mac, 6));
+ } else
+ sbuf_push(buf, indent, "LAN Neighbor: %s\n",
+ isis_format_id(n->mac, 6));
}
static void free_item_lan_neighbor(struct isis_item *i)
@@ -1389,7 +1736,7 @@ static int unpack_item_lan_neighbor(uint16_t mtid, uint8_t len,
struct isis_lan_neighbor *rv = XCALLOC(MTYPE_ISIS_TLV, sizeof(*rv));
stream_get(rv->mac, s, 6);
- format_item_lan_neighbor(mtid, (struct isis_item *)rv, log, indent + 2);
+ format_item_lan_neighbor(mtid, (struct isis_item *)rv, log, NULL, indent + 2);
append_item(&tlvs->lan_neighbor, (struct isis_item *)rv);
return 0;
}
@@ -1409,10 +1756,23 @@ static struct isis_item *copy_item_lsp_entry(struct isis_item *i)
}
static void format_item_lsp_entry(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json,
+ int indent)
{
struct isis_lsp_entry *e = (struct isis_lsp_entry *)i;
+ if (json) {
+ char buf[255];
+ struct json_object *lsp_json;
+ lsp_json = json_object_new_object();
+ json_object_object_add(json, "lsp-entry", lsp_json);
+ json_object_string_add(lsp_json, "id", isis_format_id(e->id, 8));
+ snprintfrr(buf, sizeof(buf), "0x%08x", e->seqno);
+ json_object_string_add(lsp_json, "seq", buf);
+ snprintfrr(buf, sizeof(buf), "0x%04hx", e->checksum);
+ json_object_string_add(lsp_json, "chksum", buf);
+ json_object_int_add(lsp_json, "lifetime", e->checksum);
+ } else
sbuf_push(buf, indent,
"LSP Entry: %s, seq 0x%08x, cksum 0x%04hx, lifetime %hus\n",
isis_format_id(e->id, 8), e->seqno, e->checksum,
@@ -1462,7 +1822,7 @@ static int unpack_item_lsp_entry(uint16_t mtid, uint8_t len, struct stream *s,
rv->seqno = stream_getl(s);
rv->checksum = stream_getw(s);
- format_item_lsp_entry(mtid, (struct isis_item *)rv, log, indent + 2);
+ format_item_lsp_entry(mtid, (struct isis_item *)rv, log, NULL, indent + 2);
append_item(&tlvs->lsp_entries, (struct isis_item *)rv);
return 0;
}
@@ -1484,19 +1844,40 @@ static struct isis_item *copy_item_extended_reach(struct isis_item *i)
}
static void format_item_extended_reach(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf,
+ struct json_object *json, int indent)
{
struct isis_extended_reach *r = (struct isis_extended_reach *)i;
- sbuf_push(buf, indent, "%s Reachability: %s (Metric: %u)",
- (mtid == ISIS_MT_IPV4_UNICAST) ? "Extended" : "MT",
- isis_format_id(r->id, 7), r->metric);
- if (mtid != ISIS_MT_IPV4_UNICAST)
- sbuf_push(buf, 0, " %s", isis_mtid2str(mtid));
- sbuf_push(buf, 0, "\n");
+ if (json) {
+ struct json_object *reach_json;
+ reach_json = json_object_new_object();
+ json_object_object_add(json, "ext-reach", reach_json);
+ json_object_string_add(
+ reach_json, "mt-id",
+ (mtid == ISIS_MT_IPV4_UNICAST) ? "Extended" : "MT");
+ json_object_string_add(reach_json, "id",
+ isis_format_id(r->id, 7));
+ json_object_int_add(reach_json, "metric", r->metric);
+ if (mtid != ISIS_MT_IPV4_UNICAST)
+ json_object_string_add(reach_json, "mt-name",
+ isis_mtid2str(mtid));
+
+ if (r->subtlvs)
+ format_item_ext_subtlvs(r->subtlvs, NULL, json,
+ indent + 2, mtid);
+ } else {
+ sbuf_push(buf, indent, "%s Reachability: %s (Metric: %u)",
+ (mtid == ISIS_MT_IPV4_UNICAST) ? "Extended" : "MT",
+ isis_format_id(r->id, 7), r->metric);
+ if (mtid != ISIS_MT_IPV4_UNICAST)
+ sbuf_push(buf, 0, " %s", isis_mtid2str(mtid));
+ sbuf_push(buf, 0, "\n");
- if (r->subtlvs)
- format_item_ext_subtlvs(r->subtlvs, buf, indent + 2, mtid);
+ if (r->subtlvs)
+ format_item_ext_subtlvs(r->subtlvs, buf, NULL,
+ indent + 2, mtid);
+ }
}
static void free_item_extended_reach(struct isis_item *i)
@@ -1579,7 +1960,7 @@ static int unpack_item_extended_reach(uint16_t mtid, uint8_t len,
}
}
- format_item_extended_reach(mtid, (struct isis_item *)rv, log,
+ format_item_extended_reach(mtid, (struct isis_item *)rv, log, NULL,
indent + 2);
append_item(items, (struct isis_item *)rv);
return 0;
@@ -1603,11 +1984,20 @@ static struct isis_item *copy_item_oldstyle_ip_reach(struct isis_item *i)
}
static void format_item_oldstyle_ip_reach(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf,
+ struct json_object *json, int indent)
{
struct isis_oldstyle_ip_reach *r = (struct isis_oldstyle_ip_reach *)i;
char prefixbuf[PREFIX2STR_BUFFER];
+ if (json) {
+ struct json_object *old_json;
+ old_json = json_object_new_object();
+ json_object_object_add(json, "old-ip-reach-style", old_json);
+ json_object_string_add(old_json, "prefix",
+ prefix2str(&r->prefix, prefixbuf, sizeof(prefixbuf)));
+ json_object_int_add(old_json, "metric", r->metric);
+ } else
sbuf_push(buf, indent, "IP Reachability: %s (Metric: %hhu)\n",
prefix2str(&r->prefix, prefixbuf, sizeof(prefixbuf)),
r->metric);
@@ -1669,7 +2059,7 @@ static int unpack_item_oldstyle_ip_reach(uint16_t mtid, uint8_t len,
stream_get(&mask, s, 4);
rv->prefix.prefixlen = ip_masklen(mask);
- format_item_oldstyle_ip_reach(mtid, (struct isis_item *)rv, log,
+ format_item_oldstyle_ip_reach(mtid, (struct isis_item *)rv, log, NULL,
indent + 2);
append_item(dest, (struct isis_item *)rv);
return 0;
@@ -1689,17 +2079,32 @@ static void copy_tlv_protocols_supported(struct isis_protocols_supported *src,
}
static void format_tlv_protocols_supported(struct isis_protocols_supported *p,
- struct sbuf *buf, int indent)
+ struct sbuf *buf,
+ struct json_object *json, int indent)
{
if (!p || !p->count || !p->protocols)
return;
- sbuf_push(buf, indent, "Protocols Supported: ");
- for (uint8_t i = 0; i < p->count; i++) {
- sbuf_push(buf, 0, "%s%s", nlpid2str(p->protocols[i]),
- (i + 1 < p->count) ? ", " : "");
+ if (json) {
+ struct json_object *protocol_json;
+ char buf[255];
+
+ protocol_json = json_object_new_object();
+ json_object_object_add(json, "protocols-supported",
+ protocol_json);
+ for (uint8_t i = 0; i < p->count; i++) {
+ snprintfrr(buf, sizeof(buf), "%d", i);
+ json_object_string_add(protocol_json, buf,
+ nlpid2str(p->protocols[i]));
+ }
+ } else {
+ sbuf_push(buf, indent, "Protocols Supported: ");
+ for (uint8_t i = 0; i < p->count; i++) {
+ sbuf_push(buf, 0, "%s%s", nlpid2str(p->protocols[i]),
+ (i + 1 < p->count) ? ", " : "");
+ }
+ sbuf_push(buf, 0, "\n");
}
- sbuf_push(buf, 0, "\n");
}
static void free_tlv_protocols_supported(struct isis_protocols_supported *p)
@@ -1746,7 +2151,7 @@ static int unpack_tlv_protocols_supported(enum isis_tlv_context context,
tlvs->protocols_supported.protocols = XCALLOC(MTYPE_ISIS_TLV, tlv_len);
stream_get(tlvs->protocols_supported.protocols, s, tlv_len);
- format_tlv_protocols_supported(&tlvs->protocols_supported, log,
+ format_tlv_protocols_supported(&tlvs->protocols_supported, log, NULL,
indent + 2);
return 0;
}
@@ -1762,13 +2167,18 @@ static struct isis_item *copy_item_ipv4_address(struct isis_item *i)
}
static void format_item_ipv4_address(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json,
+ int indent)
{
struct isis_ipv4_address *a = (struct isis_ipv4_address *)i;
char addrbuf[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &a->addr, addrbuf, sizeof(addrbuf));
- sbuf_push(buf, indent, "IPv4 Interface Address: %s\n", addrbuf);
+ if (json) {
+ json_object_string_add(json, "ipv4", addrbuf);
+ } else {
+ sbuf_push(buf, indent, "IPv4 Interface Address: %s\n", addrbuf);
+ }
}
static void free_item_ipv4_address(struct isis_item *i)
@@ -1809,7 +2219,7 @@ static int unpack_item_ipv4_address(uint16_t mtid, uint8_t len,
struct isis_ipv4_address *rv = XCALLOC(MTYPE_ISIS_TLV, sizeof(*rv));
stream_get(&rv->addr, s, 4);
- format_item_ipv4_address(mtid, (struct isis_item *)rv, log, indent + 2);
+ format_item_ipv4_address(mtid, (struct isis_item *)rv, log, NULL, indent + 2);
append_item(&tlvs->ipv4_address, (struct isis_item *)rv);
return 0;
}
@@ -1826,13 +2236,17 @@ static struct isis_item *copy_item_ipv6_address(struct isis_item *i)
}
static void format_item_ipv6_address(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json,
+ int indent)
{
struct isis_ipv6_address *a = (struct isis_ipv6_address *)i;
char addrbuf[INET6_ADDRSTRLEN];
inet_ntop(AF_INET6, &a->addr, addrbuf, sizeof(addrbuf));
- sbuf_push(buf, indent, "IPv6 Interface Address: %s\n", addrbuf);
+ if (json)
+ json_object_string_add(json, "ipv6", addrbuf);
+ else
+ sbuf_push(buf, indent, "IPv6 Interface Address: %s\n", addrbuf);
}
static void free_item_ipv6_address(struct isis_item *i)
@@ -1873,7 +2287,7 @@ static int unpack_item_ipv6_address(uint16_t mtid, uint8_t len,
struct isis_ipv6_address *rv = XCALLOC(MTYPE_ISIS_TLV, sizeof(*rv));
stream_get(&rv->addr, s, IPV6_MAX_BYTELEN);
- format_item_ipv6_address(mtid, (struct isis_item *)rv, log, indent + 2);
+ format_item_ipv6_address(mtid, (struct isis_item *)rv, log, NULL, indent + 2);
append_item(&tlvs->ipv6_address, (struct isis_item *)rv);
return 0;
}
@@ -1890,13 +2304,19 @@ static struct isis_item *copy_item_global_ipv6_address(struct isis_item *i)
}
static void format_item_global_ipv6_address(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf,
+ struct json_object *json,
+ int indent)
{
struct isis_ipv6_address *a = (struct isis_ipv6_address *)i;
char addrbuf[INET6_ADDRSTRLEN];
inet_ntop(AF_INET6, &a->addr, addrbuf, sizeof(addrbuf));
- sbuf_push(buf, indent, "Global IPv6 Interface Address: %s\n", addrbuf);
+ if (json)
+ json_object_string_add(json, "global-ipv6", addrbuf);
+ else
+ sbuf_push(buf, indent, "Global IPv6 Interface Address: %s\n",
+ addrbuf);
}
static void free_item_global_ipv6_address(struct isis_item *i)
@@ -1937,7 +2357,7 @@ static int unpack_item_global_ipv6_address(uint16_t mtid, uint8_t len,
struct isis_ipv6_address *rv = XCALLOC(MTYPE_ISIS_TLV, sizeof(*rv));
stream_get(&rv->addr, s, IPV6_MAX_BYTELEN);
- format_item_global_ipv6_address(mtid, (struct isis_item *)rv, log,
+ format_item_global_ipv6_address(mtid, (struct isis_item *)rv, log, NULL,
indent + 2);
append_item(&tlvs->global_ipv6_address, (struct isis_item *)rv);
return 0;
@@ -1956,14 +2376,23 @@ static struct isis_item *copy_item_mt_router_info(struct isis_item *i)
}
static void format_item_mt_router_info(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf,
+ struct json_object *json, int indent)
{
struct isis_mt_router_info *info = (struct isis_mt_router_info *)i;
- sbuf_push(buf, indent, "MT Router Info: %s%s%s\n",
- isis_mtid2str(info->mtid),
- info->overload ? " Overload" : "",
- info->attached ? " Attached" : "");
+ if (json) {
+ struct json_object *mt_json;
+ mt_json = json_object_new_object();
+ json_object_object_add(json, "mt", mt_json);
+ json_object_int_add(mt_json, "mtid", info->mtid);
+ json_object_string_add(mt_json, "overload", info->overload?"true":"false");
+ json_object_string_add(mt_json, "attached", info->attached?"true":"false");
+ } else
+ sbuf_push(buf, indent, "MT Router Info: %s%s%s\n",
+ isis_mtid2str(info->mtid),
+ info->overload ? " Overload" : "",
+ info->attached ? " Attached" : "");
}
static void free_item_mt_router_info(struct isis_item *i)
@@ -2015,7 +2444,7 @@ static int unpack_item_mt_router_info(uint16_t mtid, uint8_t len,
rv->attached = entry & ISIS_MT_AT_MASK;
rv->mtid = entry & ISIS_MT_MASK;
- format_item_mt_router_info(mtid, (struct isis_item *)rv, log,
+ format_item_mt_router_info(mtid, (struct isis_item *)rv, log, NULL,
indent + 2);
append_item(&tlvs->mt_router_info, (struct isis_item *)rv);
return 0;
@@ -2034,14 +2463,17 @@ static struct in_addr *copy_tlv_te_router_id(const struct in_addr *id)
}
static void format_tlv_te_router_id(const struct in_addr *id, struct sbuf *buf,
- int indent)
+ struct json_object *json, int indent)
{
if (!id)
return;
char addrbuf[INET_ADDRSTRLEN];
inet_ntop(AF_INET, id, addrbuf, sizeof(addrbuf));
- sbuf_push(buf, indent, "TE Router ID: %s\n", addrbuf);
+ if (json)
+ json_object_string_add(json, "te-router-id", addrbuf);
+ else
+ sbuf_push(buf, indent, "TE Router ID: %s\n", addrbuf);
}
static void free_tlv_te_router_id(struct in_addr *id)
@@ -2085,7 +2517,7 @@ static int unpack_tlv_te_router_id(enum isis_tlv_context context,
tlvs->te_router_id = XCALLOC(MTYPE_ISIS_TLV, 4);
stream_get(tlvs->te_router_id, s, 4);
- format_tlv_te_router_id(tlvs->te_router_id, log, indent + 2);
+ format_tlv_te_router_id(tlvs->te_router_id, log, NULL, indent + 2);
return 0;
}
@@ -2107,22 +2539,46 @@ static struct isis_item *copy_item_extended_ip_reach(struct isis_item *i)
}
static void format_item_extended_ip_reach(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf,
+ struct json_object *json, int indent)
{
struct isis_extended_ip_reach *r = (struct isis_extended_ip_reach *)i;
char prefixbuf[PREFIX2STR_BUFFER];
- sbuf_push(buf, indent, "%s IP Reachability: %s (Metric: %u)%s",
- (mtid == ISIS_MT_IPV4_UNICAST) ? "Extended" : "MT",
- prefix2str(&r->prefix, prefixbuf, sizeof(prefixbuf)), r->metric,
- r->down ? " Down" : "");
- if (mtid != ISIS_MT_IPV4_UNICAST)
- sbuf_push(buf, 0, " %s", isis_mtid2str(mtid));
- sbuf_push(buf, 0, "\n");
-
- if (r->subtlvs) {
- sbuf_push(buf, indent, " Subtlvs:\n");
- format_subtlvs(r->subtlvs, buf, indent + 4);
+ if (json) {
+ struct json_object *ext_json;
+ ext_json = json_object_new_object();
+ json_object_object_add(json, "ext-ip-reach", ext_json);
+ json_object_string_add(
+ json, "mt-id",
+ (mtid == ISIS_MT_IPV4_UNICAST) ? "Extended" : "MT");
+ json_object_string_add(
+ json, "ip-reach",
+ prefix2str(&r->prefix, prefixbuf, sizeof(prefixbuf)));
+ json_object_int_add(json, "ip-reach-metric", r->metric);
+ json_object_string_add(json, "down", r->down ? "yes" : "");
+ if (mtid != ISIS_MT_IPV4_UNICAST)
+ json_object_string_add(json, "mt-name",
+ isis_mtid2str(mtid));
+ if (r->subtlvs) {
+ struct json_object *subtlv_json;
+ subtlv_json = json_object_new_object();
+ json_object_object_add(json, "subtlvs", subtlv_json);
+ format_subtlvs(r->subtlvs, NULL, subtlv_json, 0);
+ }
+ } else {
+ sbuf_push(buf, indent, "%s IP Reachability: %s (Metric: %u)%s",
+ (mtid == ISIS_MT_IPV4_UNICAST) ? "Extended" : "MT",
+ prefix2str(&r->prefix, prefixbuf, sizeof(prefixbuf)),
+ r->metric, r->down ? " Down" : "");
+ if (mtid != ISIS_MT_IPV4_UNICAST)
+ sbuf_push(buf, 0, " %s", isis_mtid2str(mtid));
+ sbuf_push(buf, 0, "\n");
+
+ if (r->subtlvs) {
+ sbuf_push(buf, indent, " Subtlvs:\n");
+ format_subtlvs(r->subtlvs, buf, NULL, indent + 4);
+ }
}
}
@@ -2216,7 +2672,7 @@ static int unpack_item_extended_ip_reach(uint16_t mtid, uint8_t len,
if (orig_prefix != rv->prefix.prefix.s_addr)
sbuf_push(log, indent + 2,
"WARNING: Prefix had hostbits set.\n");
- format_item_extended_ip_reach(mtid, (struct isis_item *)rv, log,
+ format_item_extended_ip_reach(mtid, (struct isis_item *)rv, log, NULL,
indent + 2);
if (control & ISIS_EXTENDED_IP_REACH_SUBTLV) {
@@ -2273,12 +2729,15 @@ static char *copy_tlv_dynamic_hostname(const char *hostname)
}
static void format_tlv_dynamic_hostname(const char *hostname, struct sbuf *buf,
- int indent)
+ struct json_object *json, int indent)
{
if (!hostname)
return;
- sbuf_push(buf, indent, "Hostname: %s\n", hostname);
+ if (json)
+ json_object_string_add(json, "hostname", hostname);
+ else
+ sbuf_push(buf, indent, "Hostname: %s\n", hostname);
}
static void free_tlv_dynamic_hostname(char *hostname)
@@ -2356,14 +2815,18 @@ static struct in6_addr *copy_tlv_te_router_id_ipv6(const struct in6_addr *id)
}
static void format_tlv_te_router_id_ipv6(const struct in6_addr *id,
- struct sbuf *buf, int indent)
+ struct sbuf *buf,
+ struct json_object *json, int indent)
{
if (!id)
return;
char addrbuf[INET6_ADDRSTRLEN];
inet_ntop(AF_INET6, id, addrbuf, sizeof(addrbuf));
- sbuf_push(buf, indent, "IPv6 TE Router ID: %s\n", addrbuf);
+ if (json)
+ json_object_string_add(json, "ipv6-te-router-id", addrbuf);
+ else
+ sbuf_push(buf, indent, "IPv6 TE Router ID: %s\n", addrbuf);
}
static void free_tlv_te_router_id_ipv6(struct in6_addr *id)
@@ -2409,7 +2872,7 @@ static int unpack_tlv_te_router_id_ipv6(enum isis_tlv_context context,
tlvs->te_router_id_ipv6 = XCALLOC(MTYPE_ISIS_TLV, IPV6_MAX_BYTELEN);
stream_get(tlvs->te_router_id_ipv6, s, IPV6_MAX_BYTELEN);
- format_tlv_te_router_id_ipv6(tlvs->te_router_id_ipv6, log, indent + 2);
+ format_tlv_te_router_id_ipv6(tlvs->te_router_id_ipv6, log, NULL, indent + 2);
return 0;
}
@@ -2429,26 +2892,50 @@ static struct isis_spine_leaf *copy_tlv_spine_leaf(
}
static void format_tlv_spine_leaf(const struct isis_spine_leaf *spine_leaf,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json,
+ int indent)
{
if (!spine_leaf)
return;
- sbuf_push(buf, indent, "Spine-Leaf-Extension:\n");
- if (spine_leaf->has_tier) {
- if (spine_leaf->tier == ISIS_TIER_UNDEFINED) {
- sbuf_push(buf, indent, " Tier: undefined\n");
- } else {
- sbuf_push(buf, indent, " Tier: %hhu\n",
- spine_leaf->tier);
+ char aux_buf[255];
+
+ if (json) {
+ struct json_object *spine_json;
+ spine_json = json_object_new_object();
+ json_object_object_add(json, "spine-leaf-extension",
+ spine_json);
+ if (spine_leaf->has_tier) {
+ snprintfrr(aux_buf, sizeof(aux_buf), "%hhu",
+ spine_leaf->tier);
+ json_object_string_add(
+ spine_json, "tier",
+ (spine_leaf->tier == ISIS_TIER_UNDEFINED)
+ ? "undefined"
+ : aux_buf);
+ }
+ json_object_string_add(spine_json, "flag-leaf",
+ spine_leaf->is_leaf ? "yes" : "");
+ json_object_string_add(spine_json, "flag-spine",
+ spine_leaf->is_spine ? "yes" : "");
+ json_object_string_add(spine_json, "flag-backup",
+ spine_leaf->is_backup ? "yes" : "");
+ } else {
+ sbuf_push(buf, indent, "Spine-Leaf-Extension:\n");
+ if (spine_leaf->has_tier) {
+ if (spine_leaf->tier == ISIS_TIER_UNDEFINED) {
+ sbuf_push(buf, indent, " Tier: undefined\n");
+ } else {
+ sbuf_push(buf, indent, " Tier: %hhu\n",
+ spine_leaf->tier);
+ }
}
- }
-
- sbuf_push(buf, indent, " Flags:%s%s%s\n",
- spine_leaf->is_leaf ? " LEAF" : "",
- spine_leaf->is_spine ? " SPINE" : "",
- spine_leaf->is_backup ? " BACKUP" : "");
+ sbuf_push(buf, indent, " Flags:%s%s%s\n",
+ spine_leaf->is_leaf ? " LEAF" : "",
+ spine_leaf->is_spine ? " SPINE" : "",
+ spine_leaf->is_backup ? " BACKUP" : "");
+ }
}
static void free_tlv_spine_leaf(struct isis_spine_leaf *spine_leaf)
@@ -2562,25 +3049,45 @@ static struct isis_threeway_adj *copy_tlv_threeway_adj(
return rv;
}
-static void format_tlv_threeway_adj(const struct isis_threeway_adj *threeway_adj,
- struct sbuf *buf, int indent)
+static void
+format_tlv_threeway_adj(const struct isis_threeway_adj *threeway_adj,
+ struct sbuf *buf, struct json_object *json, int indent)
{
if (!threeway_adj)
return;
- sbuf_push(buf, indent, "P2P Three-Way Adjacency:\n");
- sbuf_push(buf, indent, " State: %s (%d)\n",
- isis_threeway_state_name(threeway_adj->state),
- threeway_adj->state);
- sbuf_push(buf, indent, " Extended Local Circuit ID: %u\n",
- threeway_adj->local_circuit_id);
- if (!threeway_adj->neighbor_set)
- return;
+ if (json) {
+ struct json_object *three_json;
+ three_json = json_object_new_object();
+ json_object_object_add(json, "p2p-three-way-adj", three_json);
+ json_object_string_add(
+ three_json, "state-name",
+ isis_threeway_state_name(threeway_adj->state));
+ json_object_int_add(three_json, "state", threeway_adj->state);
+ json_object_int_add(three_json, "ext-local-circuit-id",
+ threeway_adj->local_circuit_id);
+ if (!threeway_adj->neighbor_set)
+ return;
+ json_object_string_add(
+ three_json, "neigh-system-id",
+ isis_format_id(threeway_adj->neighbor_id, 6));
+ json_object_int_add(three_json, "neigh-ext-circuit-id",
+ threeway_adj->neighbor_circuit_id);
+ } else {
+ sbuf_push(buf, indent, "P2P Three-Way Adjacency:\n");
+ sbuf_push(buf, indent, " State: %s (%d)\n",
+ isis_threeway_state_name(threeway_adj->state),
+ threeway_adj->state);
+ sbuf_push(buf, indent, " Extended Local Circuit ID: %u\n",
+ threeway_adj->local_circuit_id);
+ if (!threeway_adj->neighbor_set)
+ return;
- sbuf_push(buf, indent, " Neighbor System ID: %s\n",
- isis_format_id(threeway_adj->neighbor_id, 6));
- sbuf_push(buf, indent, " Neighbor Extended Circuit ID: %u\n",
- threeway_adj->neighbor_circuit_id);
+ sbuf_push(buf, indent, " Neighbor System ID: %s\n",
+ isis_format_id(threeway_adj->neighbor_id, 6));
+ sbuf_push(buf, indent, " Neighbor Extended Circuit ID: %u\n",
+ threeway_adj->neighbor_circuit_id);
+ }
}
static void free_tlv_threeway_adj(struct isis_threeway_adj *threeway_adj)
@@ -2663,24 +3170,51 @@ static struct isis_item *copy_item_ipv6_reach(struct isis_item *i)
}
static void format_item_ipv6_reach(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json,
+ int indent)
{
struct isis_ipv6_reach *r = (struct isis_ipv6_reach *)i;
char prefixbuf[PREFIX2STR_BUFFER];
- sbuf_push(buf, indent, "%sIPv6 Reachability: %s (Metric: %u)%s%s",
- (mtid == ISIS_MT_IPV4_UNICAST) ? "" : "MT ",
- prefix2str(&r->prefix, prefixbuf, sizeof(prefixbuf)),
- r->metric,
- r->down ? " Down" : "",
- r->external ? " External" : "");
- if (mtid != ISIS_MT_IPV4_UNICAST)
- sbuf_push(buf, 0, " %s", isis_mtid2str(mtid));
- sbuf_push(buf, 0, "\n");
-
- if (r->subtlvs) {
- sbuf_push(buf, indent, " Subtlvs:\n");
- format_subtlvs(r->subtlvs, buf, indent + 4);
+ if (json) {
+ struct json_object *reach_json;
+ reach_json = json_object_new_object();
+ json_object_object_add(json, "ipv6-reach", reach_json);
+ json_object_string_add(reach_json, "mt-id",
+ (mtid == ISIS_MT_IPV4_UNICAST) ? ""
+ : "mt");
+ json_object_string_add(
+ reach_json, "prefix",
+ prefix2str(&r->prefix, prefixbuf, sizeof(prefixbuf)));
+ json_object_int_add(reach_json, "metric", r->metric);
+ json_object_string_add(reach_json, "down",
+ r->down ? "yes" : "");
+ json_object_string_add(reach_json, "external",
+ r->external ? "yes" : "");
+ if (mtid != ISIS_MT_IPV4_UNICAST)
+ json_object_string_add(reach_json, "mt-name",
+ isis_mtid2str(mtid));
+ if (r->subtlvs) {
+ struct json_object *subtlvs_json;
+ subtlvs_json = json_object_new_object();
+ json_object_object_add(json, "subtlvs", subtlvs_json);
+ format_subtlvs(r->subtlvs, NULL, subtlvs_json, 0);
+ }
+ } else {
+ sbuf_push(buf, indent,
+ "%sIPv6 Reachability: %s (Metric: %u)%s%s",
+ (mtid == ISIS_MT_IPV4_UNICAST) ? "" : "MT ",
+ prefix2str(&r->prefix, prefixbuf, sizeof(prefixbuf)),
+ r->metric, r->down ? " Down" : "",
+ r->external ? " External" : "");
+ if (mtid != ISIS_MT_IPV4_UNICAST)
+ sbuf_push(buf, 0, " %s", isis_mtid2str(mtid));
+ sbuf_push(buf, 0, "\n");
+
+ if (r->subtlvs) {
+ sbuf_push(buf, indent, " Subtlvs:\n");
+ format_subtlvs(r->subtlvs, buf, NULL, indent + 4);
+ }
}
}
@@ -2773,7 +3307,7 @@ static int unpack_item_ipv6_reach(uint16_t mtid, uint8_t len, struct stream *s,
if (memcmp(&orig_prefix, &rv->prefix.prefix, sizeof(orig_prefix)))
sbuf_push(log, indent + 2,
"WARNING: Prefix had hostbits set.\n");
- format_item_ipv6_reach(mtid, (struct isis_item *)rv, log, indent + 2);
+ format_item_ipv6_reach(mtid, (struct isis_item *)rv, log, NULL, indent + 2);
if (control & ISIS_IPV6_REACH_SUBTLV) {
consume += 1;
@@ -2834,6 +3368,77 @@ static struct isis_router_cap *copy_tlv_router_cap(
return rv;
}
+static void format_tlv_router_cap_json(const struct isis_router_cap *router_cap,
+ struct json_object *json)
+{
+ char addrbuf[INET_ADDRSTRLEN];
+
+ if (!router_cap)
+ return;
+
+ /* Router ID and Flags */
+ struct json_object *cap_json;
+ cap_json = json_object_new_object();
+ json_object_object_add(json, "router-capability", cap_json);
+ inet_ntop(AF_INET, &router_cap->router_id, addrbuf, sizeof(addrbuf));
+ json_object_string_add(cap_json, "id", addrbuf);
+ json_object_string_add(
+ cap_json, "flag-d",
+ router_cap->flags & ISIS_ROUTER_CAP_FLAG_D ? "1" : "0");
+ json_object_string_add(
+ cap_json, "flag-s",
+ router_cap->flags & ISIS_ROUTER_CAP_FLAG_S ? "1" : "0");
+
+ /* Segment Routing Global Block as per RFC8667 section #3.1 */
+ if (router_cap->srgb.range_size != 0) {
+ struct json_object *gb_json;
+ gb_json = json_object_new_object();
+ json_object_object_add(json, "segment-routing-gb", gb_json);
+ json_object_string_add(gb_json, "ipv4",
+ IS_SR_IPV4(&router_cap->srgb) ? "1"
+ : "0");
+ json_object_string_add(gb_json, "ipv6",
+ IS_SR_IPV6(&router_cap->srgb) ? "1"
+ : "0");
+ json_object_int_add(gb_json, "global-block-base",
+ router_cap->srgb.lower_bound);
+ json_object_int_add(gb_json, "global-block-range",
+ router_cap->srgb.range_size);
+ }
+
+ /* Segment Routing Local Block as per RFC8667 section #3.3 */
+ if (router_cap->srlb.range_size != 0) {
+ struct json_object *lb_json;
+ lb_json = json_object_new_object();
+ json_object_object_add(json, "segment-routing-lb", lb_json);
+ json_object_int_add(lb_json, "global-block-base",
+ router_cap->srlb.lower_bound);
+ json_object_int_add(lb_json, "global-block-range",
+ router_cap->srlb.range_size);
+ }
+
+ /* Segment Routing Algorithms as per RFC8667 section #3.2 */
+ if (router_cap->algo[0] != SR_ALGORITHM_UNSET) {
+ char buf[255];
+ struct json_object *alg_json;
+ alg_json = json_object_new_object();
+ json_object_object_add(json, "segment-routing-algorithm",
+ alg_json);
+ for (int i = 0; i < SR_ALGORITHM_COUNT; i++)
+ if (router_cap->algo[i] != SR_ALGORITHM_UNSET) {
+ snprintfrr(buf, sizeof(buf), "%d", i);
+ json_object_string_add(alg_json, buf,
+ router_cap->algo[i] == 0
+ ? "SPF"
+ : "Strict SPF");
+ }
+ }
+
+ /* Segment Routing Node MSD as per RFC8491 section #2 */
+ if (router_cap->msd != 0)
+ json_object_int_add(json, "msd", router_cap->msd);
+}
+
static void format_tlv_router_cap(const struct isis_router_cap *router_cap,
struct sbuf *buf, int indent)
{
@@ -3177,26 +3782,40 @@ static struct isis_item *copy_item_auth(struct isis_item *i)
}
static void format_item_auth(uint16_t mtid, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json,
+ int indent)
{
struct isis_auth *auth = (struct isis_auth *)i;
char obuf[768];
- sbuf_push(buf, indent, "Authentication:\n");
+ if (json)
+ json_object_string_add(json, "test-auth", "ok");
+ else
+ sbuf_push(buf, indent, "Authentication:\n");
switch (auth->type) {
case ISIS_PASSWD_TYPE_CLEARTXT:
zlog_sanitize(obuf, sizeof(obuf), auth->value, auth->length);
- sbuf_push(buf, indent, " Password: %s\n", obuf);
+ if (json)
+ json_object_string_add(json, "auth-pass", obuf);
+ else
+ sbuf_push(buf, indent, " Password: %s\n", obuf);
break;
case ISIS_PASSWD_TYPE_HMAC_MD5:
for (unsigned int j = 0; j < 16; j++) {
- snprintf(obuf + 2 * j, sizeof(obuf) - 2 * j,
- "%02hhx", auth->value[j]);
+ snprintf(obuf + 2 * j, sizeof(obuf) - 2 * j, "%02hhx",
+ auth->value[j]);
}
- sbuf_push(buf, indent, " HMAC-MD5: %s\n", obuf);
+ if (json)
+ json_object_string_add(json, "auth-hmac-md5", obuf);
+ else
+ sbuf_push(buf, indent, " HMAC-MD5: %s\n", obuf);
break;
default:
- sbuf_push(buf, indent, " Unknown (%hhu)\n", auth->type);
+ if (json)
+ json_object_int_add(json, "auth-unknown", auth->type);
+ else
+ sbuf_push(buf, indent, " Unknown (%hhu)\n",
+ auth->type);
break;
}
}
@@ -3270,7 +3889,7 @@ static int unpack_item_auth(uint16_t mtid, uint8_t len, struct stream *s,
rv->offset = stream_get_getp(s);
stream_get(rv->value, s, rv->length);
- format_item_auth(mtid, (struct isis_item *)rv, log, indent + 2);
+ format_item_auth(mtid, (struct isis_item *)rv, log, NULL, indent + 2);
append_item(&tlvs->isis_auth, (struct isis_item *)rv);
return 0;
}
@@ -3294,17 +3913,36 @@ static struct isis_purge_originator *copy_tlv_purge_originator(
}
static void format_tlv_purge_originator(struct isis_purge_originator *poi,
- struct sbuf *buf, int indent)
+ struct sbuf *buf,
+ struct json_object *json, int indent)
{
if (!poi)
return;
- sbuf_push(buf, indent, "Purge Originator Identification:\n");
- sbuf_push(buf, indent, " Generator: %s\n",
- isis_format_id(poi->generator, sizeof(poi->generator)));
- if (poi->sender_set) {
- sbuf_push(buf, indent, " Received-From: %s\n",
- isis_format_id(poi->sender, sizeof(poi->sender)));
+ if (json) {
+ struct json_object *purge_json;
+ purge_json = json_object_new_object();
+ json_object_object_add(json, "purge_originator", purge_json);
+
+ json_object_string_add(
+ purge_json, "id",
+ isis_format_id(poi->generator, sizeof(poi->generator)));
+ if (poi->sender_set) {
+ json_object_string_add(
+ purge_json, "rec-from",
+ isis_format_id(poi->sender,
+ sizeof(poi->sender)));
+ }
+ } else {
+ sbuf_push(buf, indent, "Purge Originator Identification:\n");
+ sbuf_push(
+ buf, indent, " Generator: %s\n",
+ isis_format_id(poi->generator, sizeof(poi->generator)));
+ if (poi->sender_set) {
+ sbuf_push(buf, indent, " Received-From: %s\n",
+ isis_format_id(poi->sender,
+ sizeof(poi->sender)));
+ }
}
}
@@ -3417,12 +4055,12 @@ static void copy_items(enum isis_tlv_context context, enum isis_tlv_type type,
static void format_item(uint16_t mtid, enum isis_tlv_context context,
enum isis_tlv_type type, struct isis_item *i,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json, int indent)
{
const struct tlv_ops *ops = tlv_table[context][type];
if (ops && ops->format_item) {
- ops->format_item(mtid, i, buf, indent);
+ ops->format_item(mtid, i, buf, json, indent);
return;
}
@@ -3431,12 +4069,13 @@ static void format_item(uint16_t mtid, enum isis_tlv_context context,
static void format_items_(uint16_t mtid, enum isis_tlv_context context,
enum isis_tlv_type type, struct isis_item_list *items,
- struct sbuf *buf, int indent)
+ struct sbuf *buf, struct json_object *json,
+ int indent)
{
struct isis_item *i;
for (i = items->head; i; i = i->next)
- format_item(mtid, context, type, i, buf, indent);
+ format_item(mtid, context, type, i, buf, json, indent);
}
static void free_item(enum isis_tlv_context tlv_context,
@@ -3765,12 +4404,12 @@ static void free_mt_items(enum isis_tlv_context context,
static void format_mt_items(enum isis_tlv_context context,
enum isis_tlv_type type,
struct isis_mt_item_list *m, struct sbuf *buf,
- int indent)
+ struct json_object *json, int indent)
{
struct isis_item_list *n;
RB_FOREACH (n, isis_mt_item_list, m) {
- format_items_(n->mtid, context, type, n, buf, indent);
+ format_items_(n->mtid, context, type, n, buf, json, indent);
}
}
@@ -3917,87 +4556,100 @@ struct isis_tlvs *isis_copy_tlvs(struct isis_tlvs *tlvs)
return rv;
}
-static void format_tlvs(struct isis_tlvs *tlvs, struct sbuf *buf, int indent)
+static void format_tlvs(struct isis_tlvs *tlvs, struct sbuf *buf, struct json_object *json, int indent)
{
- format_tlv_protocols_supported(&tlvs->protocols_supported, buf, indent);
+ format_tlv_protocols_supported(&tlvs->protocols_supported, buf, json,
+ indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_AUTH, &tlvs->isis_auth, buf,
- indent);
+ json, indent);
- format_tlv_purge_originator(tlvs->purge_originator, buf, indent);
+ format_tlv_purge_originator(tlvs->purge_originator, buf, json, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_AREA_ADDRESSES,
- &tlvs->area_addresses, buf, indent);
+ &tlvs->area_addresses, buf, json, indent);
if (tlvs->mt_router_info_empty) {
- sbuf_push(buf, indent, "MT Router Info: None\n");
+ if (json)
+ json_object_string_add(json, "mt-router-info", "none");
+ else
+ sbuf_push(buf, indent, "MT Router Info: None\n");
} else {
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_MT_ROUTER_INFO,
- &tlvs->mt_router_info, buf, indent);
+ &tlvs->mt_router_info, buf, json, indent);
}
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_OLDSTYLE_REACH,
- &tlvs->oldstyle_reach, buf, indent);
+ &tlvs->oldstyle_reach, buf, json, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_LAN_NEIGHBORS,
- &tlvs->lan_neighbor, buf, indent);
+ &tlvs->lan_neighbor, buf, json, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_LSP_ENTRY, &tlvs->lsp_entries,
- buf, indent);
-
- format_tlv_dynamic_hostname(tlvs->hostname, buf, indent);
- format_tlv_te_router_id(tlvs->te_router_id, buf, indent);
- format_tlv_te_router_id_ipv6(tlvs->te_router_id_ipv6, buf, indent);
- format_tlv_router_cap(tlvs->router_cap, buf, indent);
+ buf, json, indent);
+
+ format_tlv_dynamic_hostname(tlvs->hostname, buf, json, indent);
+ format_tlv_te_router_id(tlvs->te_router_id, buf, json, indent);
+ format_tlv_te_router_id_ipv6(tlvs->te_router_id_ipv6, buf, json,
+ indent);
+ if (json)
+ format_tlv_router_cap_json(tlvs->router_cap, json);
+ else
+ format_tlv_router_cap(tlvs->router_cap, buf, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_EXTENDED_REACH,
- &tlvs->extended_reach, buf, indent);
+ &tlvs->extended_reach, buf, json, indent);
format_mt_items(ISIS_CONTEXT_LSP, ISIS_TLV_MT_REACH, &tlvs->mt_reach,
- buf, indent);
+ buf, json, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_OLDSTYLE_IP_REACH,
- &tlvs->oldstyle_ip_reach, buf, indent);
+ &tlvs->oldstyle_ip_reach, buf, json, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_OLDSTYLE_IP_REACH_EXT,
- &tlvs->oldstyle_ip_reach_ext, buf, indent);
+ &tlvs->oldstyle_ip_reach_ext, buf, json, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_IPV4_ADDRESS,
- &tlvs->ipv4_address, buf, indent);
+ &tlvs->ipv4_address, buf, json, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_IPV6_ADDRESS,
- &tlvs->ipv6_address, buf, indent);
+ &tlvs->ipv6_address, buf, json, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_GLOBAL_IPV6_ADDRESS,
- &tlvs->global_ipv6_address, buf, indent);
+ &tlvs->global_ipv6_address, buf, json, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_EXTENDED_IP_REACH,
- &tlvs->extended_ip_reach, buf, indent);
+ &tlvs->extended_ip_reach, buf, json, indent);
format_mt_items(ISIS_CONTEXT_LSP, ISIS_TLV_MT_IP_REACH,
- &tlvs->mt_ip_reach, buf, indent);
+ &tlvs->mt_ip_reach, buf, json, indent);
format_items(ISIS_CONTEXT_LSP, ISIS_TLV_IPV6_REACH, &tlvs->ipv6_reach,
- buf, indent);
+ buf, json, indent);
format_mt_items(ISIS_CONTEXT_LSP, ISIS_TLV_MT_IPV6_REACH,
- &tlvs->mt_ipv6_reach, buf, indent);
+ &tlvs->mt_ipv6_reach, buf, json, indent);
- format_tlv_threeway_adj(tlvs->threeway_adj, buf, indent);
+ format_tlv_threeway_adj(tlvs->threeway_adj, buf, json, indent);
- format_tlv_spine_leaf(tlvs->spine_leaf, buf, indent);
+ format_tlv_spine_leaf(tlvs->spine_leaf, buf, json, indent);
}
-const char *isis_format_tlvs(struct isis_tlvs *tlvs)
+const char *isis_format_tlvs(struct isis_tlvs *tlvs, struct json_object *json)
{
- static struct sbuf buf;
+ if (json) {
+ format_tlvs(tlvs, NULL, json, 0);
+ return NULL;
+ } else {
+ static struct sbuf buf;
- if (!sbuf_buf(&buf))
- sbuf_init(&buf, NULL, 0);
+ if (!sbuf_buf(&buf))
+ sbuf_init(&buf, NULL, 0);
- sbuf_reset(&buf);
- format_tlvs(tlvs, &buf, 0);
- return sbuf_buf(&buf);
+ sbuf_reset(&buf);
+ format_tlvs(tlvs, &buf, NULL, 0);
+ return sbuf_buf(&buf);
+ }
}
void isis_free_tlvs(struct isis_tlvs *tlvs)
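To make the new calling convention concrete, here is a minimal sketch (editorial, not part of the diff; the helper name is hypothetical) of how a caller drives the reworked formatter: pass a json_object to receive structured output, or NULL to keep the legacy sbuf text path.

void example_dump_tlvs(struct vty *vty, struct isis_tlvs *tlvs, bool uj)
{
	if (uj) {
		struct json_object *json = json_object_new_object();

		isis_format_tlvs(tlvs, json); /* fills json, returns NULL */
		vty_json(vty, json);          /* prints and frees json */
	} else {
		vty_out(vty, "%s", isis_format_tlvs(tlvs, NULL));
	}
}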
diff --git a/isisd/isis_tlvs.h b/isisd/isis_tlvs.h
index 0c6ed11cb6..364e38aba1 100644
--- a/isisd/isis_tlvs.h
+++ b/isisd/isis_tlvs.h
@@ -549,7 +549,7 @@ void isis_free_tlvs(struct isis_tlvs *tlvs);
struct isis_tlvs *isis_alloc_tlvs(void);
int isis_unpack_tlvs(size_t avail_len, struct stream *stream,
struct isis_tlvs **dest, const char **error_log);
-const char *isis_format_tlvs(struct isis_tlvs *tlvs);
+const char *isis_format_tlvs(struct isis_tlvs *tlvs, struct json_object *json);
struct isis_tlvs *isis_copy_tlvs(struct isis_tlvs *tlvs);
struct list *isis_fragment_tlvs(struct isis_tlvs *tlvs, size_t size);
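Since the prototype now takes a json_object, every existing caller of isis_format_tlvs() needs the extra argument; a text-only call site simply passes NULL (illustrative example, not taken from this diff):

	/* before */
	zlog_debug("LSP TLVs:\n%s", isis_format_tlvs(lsp->tlvs));
	/* after: NULL keeps the plain-text output */
	zlog_debug("LSP TLVs:\n%s", isis_format_tlvs(lsp->tlvs, NULL));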
diff --git a/isisd/isisd.c b/isisd/isisd.c
index 3fa2b7cc20..369b83396a 100644
--- a/isisd/isisd.c
+++ b/isisd/isisd.c
@@ -109,12 +109,19 @@ DEFINE_HOOK(isis_hook_db_overload, (const struct isis_area *area), (area));
int isis_area_get(struct vty *, const char *);
int area_net_title(struct vty *, const char *);
int area_clear_net_title(struct vty *, const char *);
-int show_isis_interface_common(struct vty *, const char *ifname, char,
- const char *vrf_name, bool all_vrf);
-int show_isis_neighbor_common(struct vty *, const char *id, char,
- const char *vrf_name, bool all_vrf);
-int clear_isis_neighbor_common(struct vty *, const char *id, const char *vrf_name,
+int show_isis_interface_common(struct vty *, struct json_object *json,
+ const char *ifname, char, const char *vrf_name,
bool all_vrf);
+int show_isis_interface_common_vty(struct vty *, const char *ifname, char,
+ const char *vrf_name, bool all_vrf);
+int show_isis_interface_common_json(struct json_object *json,
+ const char *ifname, char,
+ const char *vrf_name, bool all_vrf);
+int show_isis_neighbor_common(struct vty *, struct json_object *json,
+ const char *id, char, const char *vrf_name,
+ bool all_vrf);
+int clear_isis_neighbor_common(struct vty *, const char *id,
+ const char *vrf_name, bool all_vrf);
/* Link ISIS instance to VRF. */
void isis_vrf_link(struct isis *isis, struct vrf *vrf)
@@ -202,7 +209,7 @@ struct isis *isis_new(const char *vrf_name)
/*
* Default values
*/
- isis->max_area_addrs = 3;
+ isis->max_area_addrs = ISIS_DEFAULT_MAX_AREA_ADDRESSES;
isis->process_id = getpid();
isis->router_id = 0;
isis->area_list = list_new();
@@ -933,10 +940,125 @@ int area_clear_net_title(struct vty *vty, const char *net_title)
/*
* 'show isis interface' command
*/
-
-int show_isis_interface_common(struct vty *vty, const char *ifname, char detail,
+int show_isis_interface_common(struct vty *vty, struct json_object *json,
+ const char *ifname, char detail,
const char *vrf_name, bool all_vrf)
{
+ if (json) {
+ return show_isis_interface_common_json(json, ifname, detail,
+ vrf_name, all_vrf);
+ } else {
+ return show_isis_interface_common_vty(vty, ifname, detail,
+ vrf_name, all_vrf);
+ }
+}
+
+int show_isis_interface_common_json(struct json_object *json,
+ const char *ifname, char detail,
+ const char *vrf_name, bool all_vrf)
+{
+ struct listnode *anode, *cnode, *inode;
+ struct isis_area *area;
+ struct isis_circuit *circuit;
+ struct isis *isis;
+ struct json_object *areas_json, *area_json;
+ struct json_object *circuits_json, *circuit_json;
+ if (!im) {
+ // IS-IS Routing Process not enabled
+ json_object_string_add(json, "is-is-routing-process-enabled",
+ "no");
+ return CMD_SUCCESS;
+ }
+ if (vrf_name) {
+ if (all_vrf) {
+ for (ALL_LIST_ELEMENTS_RO(im->isis, inode, isis)) {
+ areas_json = json_object_new_array();
+ json_object_object_add(json, "areas",
+ areas_json);
+ for (ALL_LIST_ELEMENTS_RO(isis->area_list,
+ anode, area)) {
+ area_json = json_object_new_object();
+ json_object_string_add(
+ area_json, "area",
+ area->area_tag ? area->area_tag
+ : "null");
+ circuits_json = json_object_new_array();
+ json_object_object_add(area_json,
+ "circuits",
+ circuits_json);
+ for (ALL_LIST_ELEMENTS_RO(
+ area->circuit_list, cnode,
+ circuit)) {
+ circuit_json =
+ json_object_new_object();
+ json_object_int_add(
+ circuit_json, "circuit",
+ circuit->circuit_id);
+ if (!ifname)
+ isis_circuit_print_json(
+ circuit,
+ circuit_json,
+ detail);
+ else if (strcmp(circuit->interface->name, ifname) == 0)
+ isis_circuit_print_json(
+ circuit,
+ circuit_json,
+ detail);
+ json_object_array_add(
+ circuits_json,
+ circuit_json);
+ }
+ json_object_array_add(areas_json,
+ area_json);
+ }
+ }
+ return CMD_SUCCESS;
+ }
+ isis = isis_lookup_by_vrfname(vrf_name);
+ if (isis != NULL) {
+ areas_json = json_object_new_array();
+ json_object_object_add(json, "areas", areas_json);
+ for (ALL_LIST_ELEMENTS_RO(isis->area_list, anode,
+ area)) {
+ area_json = json_object_new_object();
+ json_object_string_add(area_json, "area",
+ area->area_tag
+ ? area->area_tag
+ : "null");
+
+ circuits_json = json_object_new_array();
+ json_object_object_add(area_json, "circuits",
+ circuits_json);
+ for (ALL_LIST_ELEMENTS_RO(area->circuit_list,
+ cnode, circuit)) {
+ circuit_json = json_object_new_object();
+ json_object_int_add(
+ circuit_json, "circuit",
+ circuit->circuit_id);
+ if (!ifname)
+ isis_circuit_print_json(
+ circuit, circuit_json,
+ detail);
+ else if (
+ strcmp(circuit->interface->name,
+ ifname) == 0)
+ isis_circuit_print_json(
+ circuit, circuit_json,
+ detail);
+ json_object_array_add(circuits_json,
+ circuit_json);
+ }
+ json_object_array_add(areas_json, area_json);
+ }
+ }
+ }
+ return CMD_SUCCESS;
+}
+
+int show_isis_interface_common_vty(struct vty *vty, const char *ifname,
+ char detail, const char *vrf_name,
+ bool all_vrf)
+{
struct listnode *anode, *cnode, *inode;
struct isis_area *area;
struct isis_circuit *circuit;
@@ -990,8 +1112,7 @@ int show_isis_interface_common(struct vty *vty, const char *ifname, char detail,
circuit, vty, detail);
else if (
strcmp(circuit->interface->name,
- ifname)
- == 0)
+ ifname) == 0)
isis_circuit_print_vty(
circuit, vty, detail);
}
@@ -1003,63 +1124,90 @@ int show_isis_interface_common(struct vty *vty, const char *ifname, char detail,
DEFUN(show_isis_interface,
show_isis_interface_cmd,
- "show " PROTO_NAME " [vrf <NAME|all>] interface",
+ "show " PROTO_NAME " [vrf <NAME|all>] interface [json]",
SHOW_STR
PROTO_HELP
VRF_CMD_HELP_STR
"All VRFs\n"
+ "json output\n"
"IS-IS interface\n")
{
+ int res = CMD_SUCCESS;
const char *vrf_name = VRF_DEFAULT_NAME;
bool all_vrf = false;
int idx_vrf = 0;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
ISIS_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
- return show_isis_interface_common(vty, NULL, ISIS_UI_LEVEL_BRIEF,
- vrf_name, all_vrf);
+ if (uj)
+ json = json_object_new_object();
+ res = show_isis_interface_common(vty, json, NULL, ISIS_UI_LEVEL_BRIEF,
+ vrf_name, all_vrf);
+ if (uj)
+ vty_json(vty, json);
+ return res;
}
DEFUN(show_isis_interface_detail,
show_isis_interface_detail_cmd,
- "show " PROTO_NAME " [vrf <NAME|all>] interface detail",
+ "show " PROTO_NAME " [vrf <NAME|all>] interface detail [json]",
SHOW_STR
PROTO_HELP
VRF_CMD_HELP_STR
"All VRFs\n"
"IS-IS interface\n"
- "show detailed information\n")
+ "show detailed information\n"
+ "json output\n")
{
+ int res = CMD_SUCCESS;
const char *vrf_name = VRF_DEFAULT_NAME;
bool all_vrf = false;
int idx_vrf = 0;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
ISIS_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
- return show_isis_interface_common(vty, NULL, ISIS_UI_LEVEL_DETAIL,
- vrf_name, all_vrf);
+ if (uj)
+ json = json_object_new_object();
+ res = show_isis_interface_common(vty, json, NULL, ISIS_UI_LEVEL_DETAIL,
+ vrf_name, all_vrf);
+ if (uj)
+ vty_json(vty, json);
+ return res;
}
DEFUN(show_isis_interface_arg,
show_isis_interface_arg_cmd,
- "show " PROTO_NAME " [vrf <NAME|all>] interface WORD",
+ "show " PROTO_NAME " [vrf <NAME|all>] interface WORD [json]",
SHOW_STR
PROTO_HELP
VRF_CMD_HELP_STR
"All VRFs\n"
"IS-IS interface\n"
- "IS-IS interface name\n")
+ "IS-IS interface name\n"
+ "json output\n")
{
+ int res = CMD_SUCCESS;
int idx_word = 0;
const char *vrf_name = VRF_DEFAULT_NAME;
bool all_vrf = false;
int idx_vrf = 0;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
ISIS_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
+ if (uj)
+ json = json_object_new_object();
char *ifname = argv_find(argv, argc, "WORD", &idx_word)
? argv[idx_word]->arg
: NULL;
- return show_isis_interface_common(vty, ifname, ISIS_UI_LEVEL_DETAIL,
- vrf_name, all_vrf);
+ res = show_isis_interface_common(
+ vty, json, ifname, ISIS_UI_LEVEL_DETAIL, vrf_name, all_vrf);
+ if (uj)
+ vty_json(vty, json);
+ return res;
}
static int id_to_sysid(struct isis *isis, const char *id, uint8_t *sysid)
@@ -1079,8 +1227,65 @@ static int id_to_sysid(struct isis *isis, const char *id, uint8_t *sysid)
return 0;
}
-static void isis_neighbor_common(struct vty *vty, const char *id, char detail,
- struct isis *isis, uint8_t *sysid)
+static void isis_neighbor_common_json(struct json_object *json, const char *id,
+ char detail, struct isis *isis,
+ uint8_t *sysid)
+{
+ struct listnode *anode, *cnode, *node;
+ struct isis_area *area;
+ struct isis_circuit *circuit;
+ struct list *adjdb;
+ struct isis_adjacency *adj;
+ struct json_object *areas_json, *area_json;
+ struct json_object *circuits_json, *circuit_json;
+ int i;
+
+ areas_json = json_object_new_array();
+ json_object_object_add(json, "areas", areas_json);
+ for (ALL_LIST_ELEMENTS_RO(isis->area_list, anode, area)) {
+ area_json = json_object_new_object();
+ json_object_string_add(area_json, "area",
+ area->area_tag ? area->area_tag
+ : "null");
+ circuits_json = json_object_new_array();
+ json_object_object_add(area_json, "circuits", circuits_json);
+ for (ALL_LIST_ELEMENTS_RO(area->circuit_list, cnode, circuit)) {
+ circuit_json = json_object_new_object();
+ json_object_int_add(circuit_json, "circuit",
+ circuit->circuit_id);
+ if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
+ for (i = 0; i < 2; i++) {
+ adjdb = circuit->u.bc.adjdb[i];
+ if (adjdb && adjdb->count) {
+ for (ALL_LIST_ELEMENTS_RO(
+ adjdb, node, adj))
+ if (!id ||
+ !memcmp(adj->sysid,
+ sysid,
+ ISIS_SYS_ID_LEN))
+ isis_adj_print_json(
+ adj,
+ circuit_json,
+ detail);
+ }
+ }
+ } else if (circuit->circ_type == CIRCUIT_T_P2P &&
+ circuit->u.p2p.neighbor) {
+ adj = circuit->u.p2p.neighbor;
+ if (!id ||
+ !memcmp(adj->sysid, sysid, ISIS_SYS_ID_LEN))
+ isis_adj_print_json(adj, circuit_json,
+ detail);
+ }
+ json_object_array_add(circuits_json, circuit_json);
+ }
+ json_object_array_add(areas_json, area_json);
+ }
+}
+
+static void isis_neighbor_common_vty(struct vty *vty, const char *id,
+ char detail, struct isis *isis,
+ uint8_t *sysid)
{
struct listnode *anode, *cnode, *node;
struct isis_area *area;
@@ -1103,9 +1308,8 @@ static void isis_neighbor_common(struct vty *vty, const char *id, char detail,
if (adjdb && adjdb->count) {
for (ALL_LIST_ELEMENTS_RO(
adjdb, node, adj))
- if (!id
- || !memcmp(
- adj->sysid,
+ if (!id ||
+ !memcmp(adj->sysid,
sysid,
ISIS_SYS_ID_LEN))
isis_adj_print_vty(
@@ -1114,24 +1318,35 @@ static void isis_neighbor_common(struct vty *vty, const char *id, char detail,
detail);
}
}
- } else if (circuit->circ_type == CIRCUIT_T_P2P
- && circuit->u.p2p.neighbor) {
+ } else if (circuit->circ_type == CIRCUIT_T_P2P &&
+ circuit->u.p2p.neighbor) {
adj = circuit->u.p2p.neighbor;
- if (!id
- || !memcmp(adj->sysid, sysid,
- ISIS_SYS_ID_LEN))
+ if (!id ||
+ !memcmp(adj->sysid, sysid, ISIS_SYS_ID_LEN))
isis_adj_print_vty(adj, vty, detail);
}
}
}
+}
+static void isis_neighbor_common(struct vty *vty, struct json_object *json,
+ const char *id, char detail, struct isis *isis,
+ uint8_t *sysid)
+{
+ if (json) {
+ isis_neighbor_common_json(json, id, detail, isis, sysid);
+ } else {
+ isis_neighbor_common_vty(vty, id, detail, isis, sysid);
+ }
}
+
/*
* 'show isis neighbor' command
*/
-int show_isis_neighbor_common(struct vty *vty, const char *id, char detail,
- const char *vrf_name, bool all_vrf)
+int show_isis_neighbor_common(struct vty *vty, struct json_object *json,
+ const char *id, char detail, const char *vrf_name,
+ bool all_vrf)
{
struct listnode *node;
uint8_t sysid[ISIS_SYS_ID_LEN];
@@ -1150,8 +1365,8 @@ int show_isis_neighbor_common(struct vty *vty, const char *id, char detail,
id);
return CMD_SUCCESS;
}
- isis_neighbor_common(vty, id, detail, isis,
- sysid);
+ isis_neighbor_common(vty, json, id, detail,
+ isis, sysid);
}
return CMD_SUCCESS;
}
@@ -1161,7 +1376,8 @@ int show_isis_neighbor_common(struct vty *vty, const char *id, char detail,
vty_out(vty, "Invalid system id %s\n", id);
return CMD_SUCCESS;
}
- isis_neighbor_common(vty, id, detail, isis, sysid);
+ isis_neighbor_common(vty, json, id, detail, isis,
+ sysid);
}
}
@@ -1254,64 +1470,91 @@ int clear_isis_neighbor_common(struct vty *vty, const char *id, const char *vrf_
DEFUN(show_isis_neighbor,
show_isis_neighbor_cmd,
- "show " PROTO_NAME " [vrf <NAME|all>] neighbor",
+ "show " PROTO_NAME " [vrf <NAME|all>] neighbor [json]",
SHOW_STR
PROTO_HELP
VRF_CMD_HELP_STR
"All vrfs\n"
- "IS-IS neighbor adjacencies\n")
+ "IS-IS neighbor adjacencies\n"
+ "json output\n")
{
+ int res = CMD_SUCCESS;
const char *vrf_name = VRF_DEFAULT_NAME;
bool all_vrf = false;
int idx_vrf = 0;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
ISIS_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
- return show_isis_neighbor_common(vty, NULL, ISIS_UI_LEVEL_BRIEF,
- vrf_name, all_vrf);
+ if (uj)
+ json = json_object_new_object();
+ res = show_isis_neighbor_common(vty, json, NULL, ISIS_UI_LEVEL_BRIEF,
+ vrf_name, all_vrf);
+ if (uj)
+ vty_json(vty, json);
+ return res;
}
DEFUN(show_isis_neighbor_detail,
show_isis_neighbor_detail_cmd,
- "show " PROTO_NAME " [vrf <NAME|all>] neighbor detail",
+ "show " PROTO_NAME " [vrf <NAME|all>] neighbor detail [json]",
SHOW_STR
PROTO_HELP
VRF_CMD_HELP_STR
"all vrfs\n"
"IS-IS neighbor adjacencies\n"
- "show detailed information\n")
+ "show detailed information\n"
+ "json output\n")
{
+ int res = CMD_SUCCESS;
const char *vrf_name = VRF_DEFAULT_NAME;
bool all_vrf = false;
int idx_vrf = 0;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
ISIS_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
+ if (uj)
+ json = json_object_new_object();
- return show_isis_neighbor_common(vty, NULL, ISIS_UI_LEVEL_DETAIL,
- vrf_name, all_vrf);
+ res = show_isis_neighbor_common(vty, json, NULL, ISIS_UI_LEVEL_DETAIL,
+ vrf_name, all_vrf);
+ if (uj)
+ vty_json(vty, json);
+ return res;
}
DEFUN(show_isis_neighbor_arg,
show_isis_neighbor_arg_cmd,
- "show " PROTO_NAME " [vrf <NAME|all>] neighbor WORD",
+ "show " PROTO_NAME " [vrf <NAME|all>] neighbor WORD [json]",
SHOW_STR
PROTO_HELP
VRF_CMD_HELP_STR
"All vrfs\n"
"IS-IS neighbor adjacencies\n"
- "System id\n")
+ "System id\n"
+ "json output\n")
{
+ int res = CMD_SUCCESS;
int idx_word = 0;
const char *vrf_name = VRF_DEFAULT_NAME;
bool all_vrf = false;
int idx_vrf = 0;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
ISIS_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
+ if (uj)
+ json = json_object_new_object();
char *id = argv_find(argv, argc, "WORD", &idx_word)
? argv[idx_word]->arg
: NULL;
- return show_isis_neighbor_common(vty, id, ISIS_UI_LEVEL_DETAIL,
- vrf_name, all_vrf);
+ res = show_isis_neighbor_common(vty, json, id, ISIS_UI_LEVEL_DETAIL,
+ vrf_name, all_vrf);
+ if (uj)
+ vty_json(vty, json);
+ return res;
}
DEFUN(clear_isis_neighbor,
@@ -2056,7 +2299,152 @@ DEFUN(show_isis_spf_ietf, show_isis_spf_ietf_cmd,
return CMD_SUCCESS;
}
-static void common_isis_summary(struct vty *vty, struct isis *isis)
+
+static const char *pdu_counter_index_to_name_json(enum pdu_counter_index index)
+{
+ switch (index) {
+ case L1_LAN_HELLO_INDEX:
+ return "l1-iih";
+ case L2_LAN_HELLO_INDEX:
+ return "l2-iih";
+ case P2P_HELLO_INDEX:
+ return "p2p-iih";
+ case L1_LINK_STATE_INDEX:
+ return "l1-lsp";
+ case L2_LINK_STATE_INDEX:
+ return "l2-lsp";
+ case FS_LINK_STATE_INDEX:
+ return "fs-lsp";
+ case L1_COMPLETE_SEQ_NUM_INDEX:
+ return "l1-csnp";
+ case L2_COMPLETE_SEQ_NUM_INDEX:
+ return "l2-csnp";
+ case L1_PARTIAL_SEQ_NUM_INDEX:
+ return "l1-psnp";
+ case L2_PARTIAL_SEQ_NUM_INDEX:
+ return "l2-psnp";
+ default:
+ return "???????";
+ }
+}
+
+static void common_isis_summary_json(struct json_object *json,
+ struct isis *isis)
+{
+ int level;
+ json_object *areas_json, *area_json, *tx_pdu_json, *rx_pdu_json,
+ *levels_json, *level_json;
+ struct listnode *node, *node2;
+ struct isis_area *area;
+ time_t cur;
+ char uptime[MONOTIME_STRLEN];
+ char stier[5];
+ json_object_string_add(json, "vrf", isis->name);
+ json_object_int_add(json, "process-id", isis->process_id);
+ if (isis->sysid_set)
+ json_object_string_add(json, "system-id",
+ sysid_print(isis->sysid));
+
+ cur = time(NULL);
+ cur -= isis->uptime;
+ frrtime_to_interval(cur, uptime, sizeof(uptime));
+ json_object_string_add(json, "up-time", uptime);
+ if (isis->area_list)
+ json_object_int_add(json, "number-areas",
+ isis->area_list->count);
+ areas_json = json_object_new_array();
+ json_object_object_add(json, "areas", areas_json);
+ for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area)) {
+ area_json = json_object_new_object();
+ json_object_string_add(area_json, "area",
+ area->area_tag ? area->area_tag
+ : "null");
+
+ if (fabricd) {
+ uint8_t tier = fabricd_tier(area);
+ snprintfrr(stier, sizeof(stier), "%s", &tier);
+ json_object_string_add(area_json, "tier",
+ tier == ISIS_TIER_UNDEFINED
+ ? "undefined"
+ : stier);
+ }
+
+ if (listcount(area->area_addrs) > 0) {
+ struct area_addr *area_addr;
+ for (ALL_LIST_ELEMENTS_RO(area->area_addrs, node2,
+ area_addr)) {
+ json_object_string_add(
+ area_json, "net",
+ isonet_print(area_addr->area_addr,
+ area_addr->addr_len +
+ ISIS_SYS_ID_LEN +
+ 1));
+ }
+ }
+
+ tx_pdu_json = json_object_new_object();
+ json_object_object_add(area_json, "tx-pdu-type", tx_pdu_json);
+ for (int i = 0; i < PDU_COUNTER_SIZE; i++) {
+ if (!area->pdu_tx_counters[i])
+ continue;
+ json_object_int_add(tx_pdu_json,
+ pdu_counter_index_to_name_json(i),
+ area->pdu_tx_counters[i]);
+ }
+ json_object_int_add(tx_pdu_json, "lsp-rxmt",
+ area->lsp_rxmt_count);
+
+ rx_pdu_json = json_object_new_object();
+ json_object_object_add(area_json, "rx-pdu-type", rx_pdu_json);
+ for (int i = 0; i < PDU_COUNTER_SIZE; i++) {
+ if (!area->pdu_rx_counters[i])
+ continue;
+ json_object_int_add(rx_pdu_json,
+ pdu_counter_index_to_name_json(i),
+ area->pdu_rx_counters[i]);
+ }
+
+ levels_json = json_object_new_array();
+ json_object_object_add(area_json, "levels", levels_json);
+ for (level = ISIS_LEVEL1; level <= ISIS_LEVELS; level++) {
+ if ((area->is_type & level) == 0)
+ continue;
+ level_json = json_object_new_object();
+ json_object_int_add(level_json, "id", level);
+ json_object_int_add(level_json, "lsp0-regenerated",
+ area->lsp_gen_count[level - 1]);
+ json_object_int_add(level_json, "lsp-purged",
+ area->lsp_purge_count[level - 1]);
+ if (area->spf_timer[level - 1])
+ json_object_string_add(level_json, "spf",
+ "pending");
+ else
+ json_object_string_add(level_json, "spf",
+ "no pending");
+ json_object_int_add(level_json, "minimum-interval",
+ area->min_spf_interval[level - 1]);
+ if (area->spf_delay_ietf[level - 1])
+ json_object_string_add(
+ level_json, "ietf-spf-delay-activated",
+ "not used");
+ if (area->ip_circuits) {
+ isis_spf_print_json(
+ area->spftree[SPFTREE_IPV4][level - 1],
+ level_json);
+ }
+ if (area->ipv6_circuits) {
+ isis_spf_print_json(
+ area->spftree[SPFTREE_IPV6][level - 1],
+ level_json);
+ }
+ json_object_array_add(levels_json, level_json);
+ }
+ json_object_array_add(areas_json, area_json);
+ }
+}
+
+static void common_isis_summary_vty(struct vty *vty, struct isis *isis)
{
struct listnode *node, *node2;
struct isis_area *area;
@@ -2156,10 +2544,21 @@ static void common_isis_summary(struct vty *vty, struct isis *isis)
}
}
+static void common_isis_summary(struct vty *vty, struct json_object *json,
+ struct isis *isis)
+{
+ if (json) {
+ common_isis_summary_json(json, isis);
+ } else {
+ common_isis_summary_vty(vty, isis);
+ }
+}
+
DEFUN(show_isis_summary, show_isis_summary_cmd,
- "show " PROTO_NAME " [vrf <NAME|all>] summary",
+ "show " PROTO_NAME " [vrf <NAME|all>] summary [json]",
SHOW_STR PROTO_HELP VRF_CMD_HELP_STR
"All VRFs\n"
- "summary\n")
+ "summary\n"
+ "json output\n")
{
struct listnode *node;
@@ -2167,25 +2566,30 @@ DEFUN(show_isis_summary, show_isis_summary_cmd,
struct isis *isis;
const char *vrf_name = VRF_DEFAULT_NAME;
bool all_vrf = false;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
ISIS_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf)
if (!im) {
vty_out(vty, PROTO_NAME " is not running\n");
return CMD_SUCCESS;
}
+ if (uj)
+ json = json_object_new_object();
if (vrf_name) {
if (all_vrf) {
for (ALL_LIST_ELEMENTS_RO(im->isis, node, isis))
- common_isis_summary(vty, isis);
+ common_isis_summary(vty, json, isis);
return CMD_SUCCESS;
}
isis = isis_lookup_by_vrfname(vrf_name);
if (isis != NULL)
- common_isis_summary(vty, isis);
+ common_isis_summary(vty, json, isis);
}
- vty_out(vty, "\n");
+ if (uj)
+ vty_json(vty, json);
return CMD_SUCCESS;
}
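For orientation, the object built by common_isis_summary_json() above hangs everything off a few top-level keys ("vrf", "process-id", "areas", ...), with one entry per area in the "areas" array. A minimal consumer sketch against the plain json-c API follows; the keys are taken from the code above, while the function itself and its printf output are purely illustrative:

#include <stdio.h>
#include <json-c/json.h>

/* hypothetical reader for the "show isis summary json" output */
static void dump_isis_summary(struct json_object *json)
{
	struct json_object *vrf, *areas, *area, *tag;

	if (json_object_object_get_ex(json, "vrf", &vrf))
		printf("vrf: %s\n", json_object_get_string(vrf));
	if (!json_object_object_get_ex(json, "areas", &areas))
		return;
	for (size_t i = 0; i < json_object_array_length(areas); i++) {
		area = json_object_array_get_idx(areas, i);
		if (json_object_object_get_ex(area, "area", &tag))
			printf("  area: %s\n", json_object_get_string(tag));
	}
}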
@@ -2250,9 +2654,40 @@ struct isis_lsp *lsp_for_sysid(struct lspdb_head *head, const char *sysid_str,
return lsp;
}
-void show_isis_database_lspdb(struct vty *vty, struct isis_area *area,
- int level, struct lspdb_head *lspdb,
- const char *sysid_str, int ui_level)
+void show_isis_database_lspdb_json(struct json_object *json,
+ struct isis_area *area, int level,
+ struct lspdb_head *lspdb,
+ const char *sysid_str, int ui_level)
+{
+ struct isis_lsp *lsp;
+ int lsp_count;
+
+ if (lspdb_count(lspdb) > 0) {
+ lsp = lsp_for_sysid(lspdb, sysid_str, area->isis);
+
+ if (lsp != NULL || sysid_str == NULL) {
+ json_object_int_add(json, "id", level + 1);
+ }
+
+ if (lsp) {
+ if (ui_level == ISIS_UI_LEVEL_DETAIL)
+ lsp_print_detail(lsp, NULL, json,
+ area->dynhostname, area->isis);
+ else
+ lsp_print_json(lsp, json, area->dynhostname,
+ area->isis);
+ } else if (sysid_str == NULL) {
+ lsp_count =
+ lsp_print_all(NULL, json, lspdb, ui_level,
+ area->dynhostname, area->isis);
+
+ json_object_int_add(json, "count", lsp_count);
+ }
+ }
+}
+
+void show_isis_database_lspdb_vty(struct vty *vty, struct isis_area *area,
+ int level, struct lspdb_head *lspdb,
+ const char *sysid_str, int ui_level)
{
struct isis_lsp *lsp;
int lsp_count;
@@ -2271,14 +2706,14 @@ void show_isis_database_lspdb(struct vty *vty, struct isis_area *area,
if (lsp) {
if (ui_level == ISIS_UI_LEVEL_DETAIL)
- lsp_print_detail(lsp, vty, area->dynhostname,
- area->isis);
+ lsp_print_detail(lsp, vty, NULL,
+ area->dynhostname, area->isis);
else
- lsp_print(lsp, vty, area->dynhostname,
- area->isis);
+ lsp_print_vty(lsp, vty, area->dynhostname,
+ area->isis);
} else if (sysid_str == NULL) {
lsp_count =
- lsp_print_all(vty, lspdb, ui_level,
+ lsp_print_all(vty, NULL, lspdb, ui_level,
area->dynhostname, area->isis);
vty_out(vty, " %u LSPs\n\n", lsp_count);
@@ -2286,7 +2721,43 @@ void show_isis_database_lspdb(struct vty *vty, struct isis_area *area,
}
}
-static void show_isis_database_common(struct vty *vty, const char *sysid_str,
+static void show_isis_database_json(struct json_object *json,
+ const char *sysid_str,
+ int ui_level, struct isis *isis)
+{
+ struct listnode *node;
+ struct isis_area *area;
+ int level;
+ struct json_object *tag_area_json, *area_json, *lsp_json;
+ struct json_object *area_arr_json, *arr_json;
+ uint8_t area_cnt = 0;
+
+ if (isis->area_list->count == 0)
+ return;
+
+ area_arr_json = json_object_new_array();
+ json_object_object_add(json, "areas", area_arr_json);
+ for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area)) {
+ area_json = json_object_new_object();
+ tag_area_json = json_object_new_object();
+ json_object_string_add(tag_area_json, "name",
+ area->area_tag ? area->area_tag
+ : "null");
+
+ arr_json = json_object_new_array();
+ json_object_object_add(area_json, "area", tag_area_json);
+ json_object_object_add(area_json, "levels", arr_json);
+ for (level = 0; level < ISIS_LEVELS; level++) {
+ lsp_json = json_object_new_object();
+ show_isis_database_lspdb_json(lsp_json, area, level,
+ &area->lspdb[level],
+ sysid_str, ui_level);
+ json_object_array_add(arr_json, lsp_json);
+ }
+ json_object_array_add(area_arr_json, area_json);
+ area_cnt++;
+ }
+}
+
+static void show_isis_database_vty(struct vty *vty, const char *sysid_str,
int ui_level, struct isis *isis)
{
struct listnode *node;
@@ -2301,11 +2772,22 @@ static void show_isis_database_common(struct vty *vty, const char *sysid_str,
area->area_tag ? area->area_tag : "null");
for (level = 0; level < ISIS_LEVELS; level++)
- show_isis_database_lspdb(vty, area, level,
+ show_isis_database_lspdb_vty(vty, area, level,
&area->lspdb[level], sysid_str,
ui_level);
}
}
+
+static void show_isis_database_common(struct vty *vty,
+ struct json_object *json,
+ const char *sysid_str, int ui_level,
+ struct isis *isis)
+{
+ if (json) {
+ show_isis_database_json(json, sysid_str, ui_level, isis);
+ } else {
+ show_isis_database_vty(vty, sysid_str, ui_level, isis);
+ }
+}
+
/*
* This function supports following display options:
* [ show isis database [detail] ]
@@ -2322,7 +2804,7 @@ static void show_isis_database_common(struct vty *vty, const char *sysid_str,
* [ show isis database detail <sysid>.<pseudo-id>-<fragment-number> ]
* [ show isis database detail <hostname>.<pseudo-id>-<fragment-number> ]
*/
-static int show_isis_database(struct vty *vty, const char *sysid_str,
+static int show_isis_database(struct vty *vty, struct json_object *json,
+ const char *sysid_str,
int ui_level, const char *vrf_name, bool all_vrf)
{
struct listnode *node;
@@ -2331,28 +2813,30 @@ static int show_isis_database(struct vty *vty, const char *sysid_str,
if (vrf_name) {
if (all_vrf) {
for (ALL_LIST_ELEMENTS_RO(im->isis, node, isis))
- show_isis_database_common(vty, sysid_str,
+ show_isis_database_common(vty, json, sysid_str,
ui_level, isis);
return CMD_SUCCESS;
}
isis = isis_lookup_by_vrfname(vrf_name);
if (isis)
- show_isis_database_common(vty, sysid_str, ui_level,
- isis);
+ show_isis_database_common(vty, json, sysid_str,
+ ui_level, isis);
}
return CMD_SUCCESS;
}
DEFUN(show_database, show_database_cmd,
- "show " PROTO_NAME " [vrf <NAME|all>] database [detail] [WORD]",
+ "show " PROTO_NAME " [vrf <NAME|all>] database [detail] [WORD] [json]",
SHOW_STR PROTO_HELP VRF_CMD_HELP_STR
"All VRFs\n"
"Link state database\n"
"Detailed information\n"
- "LSP ID\n")
+ "LSP ID\n"
+ "json output\n")
{
+ int res = CMD_SUCCESS;
int idx = 0;
int idx_vrf = 0;
const char *vrf_name = VRF_DEFAULT_NAME;
@@ -2361,8 +2845,17 @@ DEFUN(show_database, show_database_cmd,
? ISIS_UI_LEVEL_DETAIL
: ISIS_UI_LEVEL_BRIEF;
char *id = argv_find(argv, argc, "WORD", &idx) ? argv[idx]->arg : NULL;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
+
ISIS_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
- return show_isis_database(vty, id, uilevel, vrf_name, all_vrf);
+ if (uj)
+ json = json_object_new_object();
+
+ res = show_isis_database(vty, json, id, uilevel, vrf_name, all_vrf);
+ if (uj)
+ vty_json(vty, json);
+ return res;
}
#ifdef FABRICD
diff --git a/isisd/isisd.h b/isisd/isisd.h
index 7f8474a5f2..c313fd9ef7 100644
--- a/isisd/isisd.h
+++ b/isisd/isisd.h
@@ -89,6 +89,8 @@ struct isis_master {
};
#define F_ISIS_UNIT_TEST 0x01
+#define ISIS_DEFAULT_MAX_AREA_ADDRESSES 3
+
struct isis {
vrf_id_t vrf_id;
char *name;
@@ -305,9 +307,13 @@ int isis_area_passwd_cleartext_set(struct isis_area *area, int level,
const char *passwd, uint8_t snp_auth);
int isis_area_passwd_hmac_md5_set(struct isis_area *area, int level,
const char *passwd, uint8_t snp_auth);
-void show_isis_database_lspdb(struct vty *vty, struct isis_area *area,
- int level, struct lspdb_head *lspdb,
- const char *argv, int ui_level);
+void show_isis_database_lspdb_json(struct json_object *json,
+ struct isis_area *area, int level,
+ struct lspdb_head *lspdb, const char *argv,
+ int ui_level);
+void show_isis_database_lspdb_vty(struct vty *vty, struct isis_area *area,
+ int level, struct lspdb_head *lspdb,
+ const char *argv, int ui_level);
/* YANG paths */
#define ISIS_INSTANCE "/frr-isisd:isis/instance"
diff --git a/ldpd/ldp_vty_exec.c b/ldpd/ldp_vty_exec.c
index 7bad1dca7c..c4053f5374 100644
--- a/ldpd/ldp_vty_exec.c
+++ b/ldpd/ldp_vty_exec.c
@@ -1472,7 +1472,11 @@ show_l2vpn_pw_msg_json(struct imsg *imsg, struct show_params *params,
json_pw = json_object_new_object();
json_object_string_addf(json_pw, "peerId", "%pI4", &pw->lsr_id);
json_object_int_add(json_pw, "vcId", pw->pwid);
+#if CONFDATE > 20230131
+CPP_NOTICE("Remove JSON object commands with keys starting with capital")
+#endif
json_object_string_add(json_pw, "VpnName", pw->l2vpn_name);
+ json_object_string_add(json_pw, "vpnName", pw->l2vpn_name);
if (pw->status == PW_FORWARDING)
json_object_string_add(json_pw, "status", "up");
else
diff --git a/lib/json.c b/lib/json.c
index 854a3d59d1..d85a21215c 100644
--- a/lib/json.c
+++ b/lib/json.c
@@ -74,6 +74,19 @@ void json_object_string_addv(struct json_object *obj, const char *key,
json_object_object_add(obj, key, json_object_new_stringv(fmt, args));
}
+void json_object_object_addv(struct json_object *parent,
+ struct json_object *child, const char *keyfmt,
+ va_list args)
+{
+ char *text, buf[256];
+
+ text = vasnprintfrr(MTYPE_TMP, buf, sizeof(buf), keyfmt, args);
+ json_object_object_add(parent, text, child);
+
+ if (text != buf)
+ XFREE(MTYPE_TMP, text);
+}
+
void json_object_int_add(struct json_object *obj, const char *key, int64_t i)
{
json_object_object_add(obj, key, json_object_new_int64(i));
diff --git a/lib/json.h b/lib/json.h
index fcaa84c816..78c3836515 100644
--- a/lib/json.h
+++ b/lib/json.h
@@ -116,6 +116,28 @@ static inline struct json_object *json_object_new_stringf(const char *fmt, ...)
return ret;
}
+/* NOTE: argument order differs! (due to varargs)
+ * json_object_object_add(parent, key, child)
+ * json_object_object_addv(parent, child, key, va)
+ * json_object_object_addf(parent, child, key, ...)
+ * (would be weird to have the child in between the format string and args)
+ */
+PRINTFRR(3, 0)
+extern void json_object_object_addv(struct json_object *parent,
+ struct json_object *child,
+ const char *keyfmt, va_list args);
+PRINTFRR(3, 4)
+static inline void json_object_object_addf(struct json_object *parent,
+ struct json_object *child,
+ const char *keyfmt, ...)
+{
+ va_list args;
+
+ va_start(args, keyfmt);
+ json_object_object_addv(parent, child, keyfmt, args);
+ va_end(args);
+}
+
#define JSON_STR "JavaScript Object Notation\n"
/* NOTE: json-c lib has following commit 316da85 which
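A hedged usage sketch for the new addf variant; the parent object, vrf_id and route count below are illustrative and not taken from this patch. The point is that the formatted key replaces a manual snprintf() plus json_object_object_add() pair, with the child argument placed before the format string as the NOTE explains:

	json_object *vrf_json = json_object_new_object();

	json_object_int_add(vrf_json, "routeCount", nroutes);
	/* equivalent to snprintf(buf, sizeof(buf), "vrf-%u", vrf_id);
	 * followed by json_object_object_add(json, buf, vrf_json) */
	json_object_object_addf(json, vrf_json, "vrf-%u", vrf_id);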
diff --git a/lib/libfrr.c b/lib/libfrr.c
index 10b3aad89e..042c9d3704 100644
--- a/lib/libfrr.c
+++ b/lib/libfrr.c
@@ -333,6 +333,8 @@ void frr_preinit(struct frr_daemon_info *daemon, int argc, char **argv)
umask(0027);
+ log_args_init(daemon->early_logging);
+
opt_extend(&os_always);
if (!(di->flags & FRR_NO_SPLIT_CONFIG))
opt_extend(&os_cfg);
@@ -431,6 +433,8 @@ static int frr_opt(int opt)
static int vty_port_set = 0;
static int vty_addr_set = 0;
struct option_chain *oc;
+ struct log_arg *log_arg;
+ size_t arg_len;
char *err;
switch (opt) {
@@ -613,7 +617,10 @@ static int frr_opt(int opt)
di->privs->group = optarg;
break;
case OPTION_LOG:
- di->early_logging = optarg;
+ arg_len = strlen(optarg) + 1;
+ log_arg = XCALLOC(MTYPE_TMP, sizeof(*log_arg) + arg_len);
+ memcpy(log_arg->target, optarg, arg_len);
+ log_args_add_tail(di->early_logging, log_arg);
break;
case OPTION_LOGLEVEL:
di->early_loglevel = optarg;
@@ -706,10 +713,12 @@ static struct thread_master *master;
struct thread_master *frr_init(void)
{
struct option_chain *oc;
+ struct log_arg *log_arg;
struct frrmod_runtime *module;
struct zprivs_ids_t ids;
char p_instance[16] = "", p_pathspace[256] = "";
const char *dir;
+
dir = di->module_path ? di->module_path : frr_moduledir;
srandom(time(NULL));
@@ -739,7 +748,11 @@ struct thread_master *frr_init(void)
zlog_init(di->progname, di->logname, di->instance,
ids.uid_normal, ids.gid_normal);
- command_setup_early_logging(di->early_logging, di->early_loglevel);
+ while ((log_arg = log_args_pop(di->early_logging))) {
+ command_setup_early_logging(log_arg->target,
+ di->early_loglevel);
+ XFREE(MTYPE_TMP, log_arg);
+ }
if (!frr_zclient_addr(&zclient_addr, &zclient_addr_len,
frr_zclientpath)) {
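Combined with the libfrr.h hunk below, the practical effect is that --log can now be given more than once (for example --log syslog --log file:/var/log/frr/early.log) and every target is queued, then drained and applied in frr_init(). A compressed sketch of the queueing side, mirroring the OPTION_LOG case above; the target strings are examples only and di is the daemon-info pointer already used in this file:

	const char *targets[] = { "syslog", "file:/var/log/frr/early.log" };
	struct log_arg *la;

	for (size_t i = 0; i < array_size(targets); i++) {
		size_t len = strlen(targets[i]) + 1;

		la = XCALLOC(MTYPE_TMP, sizeof(*la) + len);
		memcpy(la->target, targets[i], len);
		log_args_add_tail(di->early_logging, la);
	}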
diff --git a/lib/libfrr.h b/lib/libfrr.h
index 65c1df9675..69054e4264 100644
--- a/lib/libfrr.h
+++ b/lib/libfrr.h
@@ -21,6 +21,7 @@
#ifndef _ZEBRA_FRR_H
#define _ZEBRA_FRR_H
+#include "typesafe.h"
#include "sigevent.h"
#include "privs.h"
#include "thread.h"
@@ -52,6 +53,14 @@ extern "C" {
*/
#define FRR_DETACH_LATER (1 << 6)
+PREDECL_DLIST(log_args);
+struct log_arg {
+ struct log_args_item itm;
+
+ char target[0];
+};
+DECLARE_DLIST(log_args, struct log_arg, itm);
+
enum frr_cli_mode {
FRR_CLI_CLASSIC = 0,
FRR_CLI_TRANSACTIONAL,
@@ -88,7 +97,7 @@ struct frr_daemon_info {
const char *pathspace;
bool zpathspace;
- const char *early_logging;
+ struct log_args_head early_logging[1];
const char *early_loglevel;
const char *proghelp;
diff --git a/lib/log.c b/lib/log.c
index fb12c08aae..5c453569ee 100644
--- a/lib/log.c
+++ b/lib/log.c
@@ -239,7 +239,7 @@ void zlog_backtrace(int priority)
{
#ifdef HAVE_LIBUNWIND
char buf[100];
- unw_cursor_t cursor;
+ unw_cursor_t cursor = {};
unw_context_t uc;
unw_word_t ip, off, sp;
Dl_info dlinfo;
diff --git a/lib/log_vty.c b/lib/log_vty.c
index 682c9ea372..81280f302f 100644
--- a/lib/log_vty.c
+++ b/lib/log_vty.c
@@ -159,6 +159,7 @@ void zlog_rotate(void)
{
zlog_file_rotate(&zt_file);
zlog_file_rotate(&zt_filterfile.parent);
+ zlog_file_rotate(&zt_file_cmdline);
hook_call(zlog_rotate);
}
@@ -427,6 +428,22 @@ void command_setup_early_logging(const char *dest, const char *level)
set_log_file(&zt_file_cmdline, NULL, sep, nlevel);
return;
}
+ if (strcmp(type, "monitor") == 0 && sep) {
+ struct zlog_live_cfg cfg = {};
+ unsigned long fd;
+ char *endp;
+
+ sep++;
+ fd = strtoul(sep, &endp, 10);
+ if (!*sep || *endp) {
+ fprintf(stderr, "invalid monitor fd \"%s\"\n", sep);
+ exit(1);
+ }
+
+ zlog_live_open_fd(&cfg, nlevel, fd);
+ zlog_live_disown(&cfg);
+ return;
+ }
fprintf(stderr, "invalid log target \"%s\" (\"%s\")\n", type, dest);
exit(1);
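The new "monitor:<fd>" target is aimed at a supervising process that hands the daemon an already-open descriptor and reads live log records from the other end; zlog_live_open_fd() above simply adopts that fd. A purely hypothetical parent-side sketch, not part of this patch — a socketpair is assumed here so each record keeps its framing:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

static int spawn_with_live_log(void)
{
	int fds[2];
	char target[32];

	if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds) < 0)
		return -1;
	if (fork() == 0) {
		/* child: fds[1] survives exec and becomes the log sink */
		snprintf(target, sizeof(target), "monitor:%d", fds[1]);
		execlp("zebra", "zebra", "--log", target,
		       "--log-level", "debug", (char *)NULL);
		_exit(1);
	}
	close(fds[1]);
	return fds[0]; /* parent reads live log records from here */
}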
diff --git a/lib/monotime.h b/lib/monotime.h
index 15b6933955..89616c5427 100644
--- a/lib/monotime.h
+++ b/lib/monotime.h
@@ -81,9 +81,9 @@ static inline time_t monotime(struct timeval *tvo)
return ts.tv_sec;
}
-#define ONE_DAY_SECOND 60*60*24
-#define ONE_WEEK_SECOND ONE_DAY_SECOND*7
-#define ONE_YEAR_SECOND ONE_DAY_SECOND*365
+#define ONE_DAY_SECOND (60 * 60 * 24)
+#define ONE_WEEK_SECOND (ONE_DAY_SECOND * 7)
+#define ONE_YEAR_SECOND (ONE_DAY_SECOND * 365)
/* the following two return microseconds, not time_t!
*
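The added parentheses matter once the macro sits inside a larger expression; a small illustration with an assumed uptime variable:

	/* old: ONE_DAY_SECOND expanded to 60*60*24, so                 */
	/*   uptime / ONE_DAY_SECOND  ==  (uptime / 60) * 60 * 24,      */
	/*   roughly uptime * 24 instead of the intended uptime / 86400 */
	/* new: it expands to (60 * 60 * 24), giving the division       */
	time_t days = uptime / ONE_DAY_SECOND;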
diff --git a/lib/northbound_grpc.cpp b/lib/northbound_grpc.cpp
index 34bb1e4986..9cb999110b 100644
--- a/lib/northbound_grpc.cpp
+++ b/lib/northbound_grpc.cpp
@@ -1,7 +1,7 @@
//
+// Copyright (c) 2021-2022, LabN Consulting, L.L.C
// Copyright (C) 2019 NetDEF, Inc.
// Renato Westphal
-// Copyright (c) 2021, LabN Consulting, L.L.C
//
// This program is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
@@ -39,6 +39,11 @@
#define GRPC_DEFAULT_PORT 50051
+
+// ------------------------------------------------------
+// File Local Variables
+// ------------------------------------------------------
+
/*
* NOTE: we can't use the FRR debugging infrastructure here since it uses
* atomics and C++ has a different atomics API. Enable gRPC debugging
@@ -50,6 +55,8 @@ static struct thread_master *main_master;
static struct frr_pthread *fpt;
+static bool grpc_running;
+
#define grpc_debug(...) \
do { \
if (nb_dbg_client_grpc) \
@@ -76,7 +83,7 @@ class Candidates
{
// Delete candidates.
for (auto it = _cdb.begin(); it != _cdb.end(); it++)
- delete_candidate(&it->second);
+ delete_candidate(it->first);
}
struct candidate *create_candidate(void)
@@ -92,123 +99,106 @@ class Candidates
return c;
}
- void delete_candidate(struct candidate *c)
+ bool contains(uint64_t candidate_id)
+ {
+ return _cdb.count(candidate_id) > 0;
+ }
+
+ void delete_candidate(uint64_t candidate_id)
{
+ struct candidate *c = &_cdb[candidate_id];
char errmsg[BUFSIZ] = {0};
- _cdb.erase(c->id);
nb_config_free(c->config);
if (c->transaction)
nb_candidate_commit_abort(c->transaction, errmsg,
sizeof(errmsg));
+ _cdb.erase(c->id);
}
- struct candidate *get_candidate(uint32_t id)
+ struct candidate *get_candidate(uint64_t id)
{
return _cdb.count(id) == 0 ? NULL : &_cdb[id];
}
private:
uint64_t _next_id = 0;
- std::map<uint32_t, struct candidate> _cdb;
+ std::map<uint64_t, struct candidate> _cdb;
};
+/*
+ * RpcStateBase is the common base class used to track a gRPC RPC.
+ */
class RpcStateBase
{
public:
- virtual CallState doCallback() = 0;
virtual void do_request(::frr::Northbound::AsyncService *service,
- ::grpc::ServerCompletionQueue *cq) = 0;
-};
+ ::grpc::ServerCompletionQueue *cq,
+ bool no_copy) = 0;
-/*
- * The RPC state class is used to track the execution of an RPC.
- */
-template <typename Q, typename S> class NewRpcState : RpcStateBase
-{
- typedef void (frr::Northbound::AsyncService::*reqfunc_t)(
- ::grpc::ServerContext *, Q *,
- ::grpc::ServerAsyncResponseWriter<S> *,
- ::grpc::CompletionQueue *, ::grpc::ServerCompletionQueue *,
- void *);
- typedef void (frr::Northbound::AsyncService::*reqsfunc_t)(
- ::grpc::ServerContext *, Q *, ::grpc::ServerAsyncWriter<S> *,
- ::grpc::CompletionQueue *, ::grpc::ServerCompletionQueue *,
- void *);
+ RpcStateBase(const char *name) : name(name){};
- public:
- NewRpcState(Candidates *cdb, reqfunc_t rfunc,
- void (*cb)(NewRpcState<Q, S> *), const char *name)
- : requestf(rfunc), callback(cb), responder(&ctx),
- async_responder(&ctx), name(name), cdb(cdb){};
- NewRpcState(Candidates *cdb, reqsfunc_t rfunc,
- void (*cb)(NewRpcState<Q, S> *), const char *name)
- : requestsf(rfunc), callback(cb), responder(&ctx),
- async_responder(&ctx), name(name), cdb(cdb){};
-
- CallState doCallback() override
+ virtual ~RpcStateBase() = default;
+
+ CallState get_state() const
{
- CallState enter_state = this->state;
- CallState new_state;
- if (enter_state == FINISH) {
- grpc_debug("%s RPC FINISH -> DELETED", name);
- new_state = FINISH;
- } else {
- grpc_debug("%s RPC: %s -> PROCESS", name,
- call_states[this->state]);
- new_state = PROCESS;
- }
+ return state;
+ }
+
+ bool is_initial_process() const
+ {
+ /* Will always be true for Unary */
+ return entered_state == CREATE;
+ }
+
+ // Returns the "more" status; if false, the caller can delete this object
+ bool run(frr::Northbound::AsyncService *service,
+ grpc::ServerCompletionQueue *cq)
+ {
+ /*
+ * We enter in either CREATE or MORE state, and transition to
+ * PROCESS state.
+ */
+ this->entered_state = this->state;
+ this->state = PROCESS;
+ grpc_debug("%s RPC: %s -> %s on grpc-io-thread", name,
+ call_states[this->entered_state],
+ call_states[this->state]);
/*
- * We are either in state CREATE, MORE or FINISH. If CREATE or
- * MORE move back to PROCESS, otherwise we are cleaning up
- * (FINISH) so leave it in that state. Run the callback on the
- * main threadmaster/pthread; and wait for expected transition
- * from main thread. If transition is to FINISH->DELETED.
- * delete us.
- *
- * We update the state prior to scheduling the callback which
- * may then update the state in the master pthread. Then we
- * obtain the lock in the condvar-check-loop as the callback
- * will be modifying updating the state value.
+ * We schedule the callback on the main pthread, and wait for
+ * the state to transition out of the PROCESS state. The new
+ * state will either be MORE or FINISH. It will always be FINISH
+ * for Unary RPCs.
*/
- this->state = new_state;
thread_add_event(main_master, c_callback, (void *)this, 0,
NULL);
+
pthread_mutex_lock(&this->cmux);
- while (this->state == new_state)
+ while (this->state == PROCESS)
pthread_cond_wait(&this->cond, &this->cmux);
pthread_mutex_unlock(&this->cmux);
- if (this->state == DELETED) {
- grpc_debug("%s RPC: -> [DELETED]", name);
- delete this;
- return DELETED;
- }
- return this->state;
- }
-
- void do_request(::frr::Northbound::AsyncService *service,
- ::grpc::ServerCompletionQueue *cq) override
- {
- grpc_debug("%s, posting a request for: %s", __func__, name);
- if (requestf) {
- NewRpcState<Q, S> *copy =
- new NewRpcState(cdb, requestf, callback, name);
- (service->*requestf)(&copy->ctx, &copy->request,
- &copy->responder, cq, cq, copy);
- } else {
- NewRpcState<Q, S> *copy =
- new NewRpcState(cdb, requestsf, callback, name);
- (service->*requestsf)(&copy->ctx, &copy->request,
- &copy->async_responder, cq, cq,
- copy);
+ grpc_debug("%s RPC in %s on grpc-io-thread", name,
+ call_states[this->state]);
+
+ if (this->state == FINISH) {
+ /*
+ * Server is done (FINISH) so prep to receive a new
+ * request of this type. We could do this earlier but
+ * that would mean we could be handling multiple
+ * requests of the same type in parallel without limit.
+ */
+ this->do_request(service, cq, false);
}
+ return true;
}
+ protected:
+ virtual CallState run_mainthread(struct thread *thread) = 0;
static void c_callback(struct thread *thread)
{
- auto _tag = static_cast<NewRpcState<Q, S> *>(thread->arg);
+ auto _tag = static_cast<RpcStateBase *>(thread->arg);
/*
* We hold the lock until the callback finishes and has updated
* _tag->state, then we signal done and release.
@@ -216,36 +206,131 @@ template <typename Q, typename S> class NewRpcState : RpcStateBase
pthread_mutex_lock(&_tag->cmux);
CallState enter_state = _tag->state;
- grpc_debug("%s RPC running on main thread", _tag->name);
+ grpc_debug("%s RPC: running %s on main thread", _tag->name,
+ call_states[enter_state]);
- _tag->callback(_tag);
+ _tag->state = _tag->run_mainthread(thread);
- grpc_debug("%s RPC: %s -> %s", _tag->name,
+ grpc_debug("%s RPC: %s -> %s [main thread]", _tag->name,
call_states[enter_state], call_states[_tag->state]);
pthread_cond_signal(&_tag->cond);
pthread_mutex_unlock(&_tag->cmux);
return;
}
- NewRpcState<Q, S> *orig;
- const char *name;
grpc::ServerContext ctx;
+ pthread_mutex_t cmux = PTHREAD_MUTEX_INITIALIZER;
+ pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+ CallState state = CREATE;
+ CallState entered_state = CREATE;
+
+ public:
+ const char *name;
+};
+
+/*
+ * The UnaryRpcState class is used to track the execution of a Unary RPC.
+ *
+ * Template Args:
+ * Q - the request type for a given unary RPC
+ * S - the response type for a given unary RPC
+ */
+template <typename Q, typename S> class UnaryRpcState : public RpcStateBase
+{
+ public:
+ typedef void (frr::Northbound::AsyncService::*reqfunc_t)(
+ ::grpc::ServerContext *, Q *,
+ ::grpc::ServerAsyncResponseWriter<S> *,
+ ::grpc::CompletionQueue *, ::grpc::ServerCompletionQueue *,
+ void *);
+
+ UnaryRpcState(Candidates *cdb, reqfunc_t rfunc,
+ grpc::Status (*cb)(UnaryRpcState<Q, S> *),
+ const char *name)
+ : RpcStateBase(name), cdb(cdb), requestf(rfunc), callback(cb),
+ responder(&ctx){};
+
+ void do_request(::frr::Northbound::AsyncService *service,
+ ::grpc::ServerCompletionQueue *cq,
+ bool no_copy) override
+ {
+ grpc_debug("%s, posting a request for: %s", __func__, name);
+ auto copy = no_copy ? this
+ : new UnaryRpcState(cdb, requestf, callback,
+ name);
+ (service->*requestf)(&copy->ctx, &copy->request,
+ &copy->responder, cq, cq, copy);
+ }
+
+ CallState run_mainthread(struct thread *thread) override
+ {
+ // Unary RPCs are always finished, see "Unary" :)
+ grpc::Status status = this->callback(this);
+ responder.Finish(response, status, this);
+ return FINISH;
+ }
+
+ Candidates *cdb;
+
Q request;
S response;
grpc::ServerAsyncResponseWriter<S> responder;
- grpc::ServerAsyncWriter<S> async_responder;
- Candidates *cdb;
- void (*callback)(NewRpcState<Q, S> *);
- reqfunc_t requestf;
- reqsfunc_t requestsf;
+ grpc::Status (*callback)(UnaryRpcState<Q, S> *);
+ reqfunc_t requestf = NULL;
+};
- pthread_mutex_t cmux = PTHREAD_MUTEX_INITIALIZER;
- pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
- void *context;
+/*
+ * The StreamRpcState class is used to track the execution of a Streaming RPC.
+ *
+ * Template Args:
+ * Q - the request type for a given streaming RPC
+ * S - the response type for a given streaming RPC
+ * X - the type used to track the streaming state
+ */
+template <typename Q, typename S, typename X>
+class StreamRpcState : public RpcStateBase
+{
+ public:
+ typedef void (frr::Northbound::AsyncService::*reqsfunc_t)(
+ ::grpc::ServerContext *, Q *, ::grpc::ServerAsyncWriter<S> *,
+ ::grpc::CompletionQueue *, ::grpc::ServerCompletionQueue *,
+ void *);
- CallState state = CREATE;
+ StreamRpcState(reqsfunc_t rfunc, bool (*cb)(StreamRpcState<Q, S, X> *),
+ const char *name)
+ : RpcStateBase(name), requestsf(rfunc), callback(cb),
+ async_responder(&ctx){};
+
+ void do_request(::frr::Northbound::AsyncService *service,
+ ::grpc::ServerCompletionQueue *cq,
+ bool no_copy) override
+ {
+ grpc_debug("%s, posting a request for: %s", __func__, name);
+ auto copy =
+ no_copy ? this
+ : new StreamRpcState(requestsf, callback, name);
+ (service->*requestsf)(&copy->ctx, &copy->request,
+ &copy->async_responder, cq, cq, copy);
+ }
+
+ CallState run_mainthread(struct thread *thread) override
+ {
+ if (this->callback(this))
+ return MORE;
+ else
+ return FINISH;
+ }
+
+ Q request;
+ S response;
+ grpc::ServerAsyncWriter<S> async_responder;
+
+ bool (*callback)(StreamRpcState<Q, S, X> *);
+ reqsfunc_t requestsf = NULL;
+
+ X context;
};
// ------------------------------------------------------
@@ -268,10 +353,10 @@ static LYD_FORMAT encoding2lyd_format(enum frr::Encoding encoding)
}
static int yang_dnode_edit(struct lyd_node *dnode, const std::string &path,
- const std::string &value)
+ const char *value)
{
- LY_ERR err = lyd_new_path(dnode, ly_native_ctx, path.c_str(),
- value.c_str(), LYD_NEW_PATH_UPDATE, &dnode);
+ LY_ERR err = lyd_new_path(dnode, ly_native_ctx, path.c_str(), value,
+ LYD_NEW_PATH_UPDATE, &dnode);
if (err != LY_SUCCESS) {
flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed: %s",
__func__, ly_errmsg(ly_native_ctx));
@@ -464,15 +549,11 @@ static grpc::Status get_path(frr::DataTree *dt, const std::string &path,
// RPC Callback Functions: run on main thread
// ------------------------------------------------------
-void HandleUnaryGetCapabilities(NewRpcState<frr::GetCapabilitiesRequest,
- frr::GetCapabilitiesResponse> *tag)
+grpc::Status HandleUnaryGetCapabilities(
+ UnaryRpcState<frr::GetCapabilitiesRequest, frr::GetCapabilitiesResponse>
+ *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
-
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
+ grpc_debug("%s: entered", __func__);
// Response: string frr_version = 1;
tag->response.set_frr_version(FRR_VERSION);
@@ -498,30 +579,24 @@ void HandleUnaryGetCapabilities(NewRpcState<frr::GetCapabilitiesRequest,
tag->response.add_supported_encodings(frr::JSON);
tag->response.add_supported_encodings(frr::XML);
- /* Should we do this in the async process call? */
- tag->responder.Finish(tag->response, grpc::Status::OK, tag);
-
- /* Indicate we are done. */
- tag->state = FINISH;
+ return grpc::Status::OK;
}
-void HandleStreamingGet(NewRpcState<frr::GetRequest, frr::GetResponse> *tag)
-{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
+// Define the context variable type for this streaming handler
+typedef std::list<std::string> GetContextType;
- if (tag->state == FINISH) {
- delete static_cast<std::list<std::string> *>(tag->context);
- tag->state = DELETED;
- return;
- }
+bool HandleStreamingGet(
+ StreamRpcState<frr::GetRequest, frr::GetResponse, GetContextType> *tag)
+{
+ grpc_debug("%s: entered", __func__);
- if (!tag->context) {
- /* Creating, first time called for this RPC */
- auto mypaths = new std::list<std::string>();
- tag->context = mypaths;
+ auto mypathps = &tag->context;
+ if (tag->is_initial_process()) {
+ // Fill our context container first time through
+ grpc_debug("%s: initialize streaming state", __func__);
auto paths = tag->request.path();
for (const std::string &path : paths) {
- mypaths->push_back(std::string(path));
+ mypathps->push_back(std::string(path));
}
}
@@ -532,11 +607,9 @@ void HandleStreamingGet(NewRpcState<frr::GetRequest, frr::GetResponse> *tag)
// Request: bool with_defaults = 3;
bool with_defaults = tag->request.with_defaults();
- auto mypathps = static_cast<std::list<std::string> *>(tag->context);
if (mypathps->empty()) {
tag->async_responder.Finish(grpc::Status::OK, tag);
- tag->state = FINISH;
- return;
+ return false;
}
frr::GetResponse response;
@@ -554,86 +627,57 @@ void HandleStreamingGet(NewRpcState<frr::GetRequest, frr::GetResponse> *tag)
if (!status.ok()) {
tag->async_responder.WriteAndFinish(
response, grpc::WriteOptions(), status, tag);
- tag->state = FINISH;
- return;
+ return false;
}
mypathps->pop_back();
if (mypathps->empty()) {
tag->async_responder.WriteAndFinish(
response, grpc::WriteOptions(), grpc::Status::OK, tag);
- tag->state = FINISH;
+ return false;
} else {
tag->async_responder.Write(response, tag);
- tag->state = MORE;
+ return true;
}
}
-void HandleUnaryCreateCandidate(NewRpcState<frr::CreateCandidateRequest,
- frr::CreateCandidateResponse> *tag)
+grpc::Status HandleUnaryCreateCandidate(
+ UnaryRpcState<frr::CreateCandidateRequest, frr::CreateCandidateResponse>
+ *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
-
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
+ grpc_debug("%s: entered", __func__);
struct candidate *candidate = tag->cdb->create_candidate();
- if (!candidate) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::RESOURCE_EXHAUSTED,
- "Can't create candidate configuration"),
- tag);
- } else {
- tag->response.set_candidate_id(candidate->id);
- tag->responder.Finish(tag->response, grpc::Status::OK, tag);
- }
-
- tag->state = FINISH;
+ if (!candidate)
+ return grpc::Status(grpc::StatusCode::RESOURCE_EXHAUSTED,
+ "Can't create candidate configuration");
+ tag->response.set_candidate_id(candidate->id);
+ return grpc::Status::OK;
}
-void HandleUnaryDeleteCandidate(NewRpcState<frr::DeleteCandidateRequest,
- frr::DeleteCandidateResponse> *tag)
+grpc::Status HandleUnaryDeleteCandidate(
+ UnaryRpcState<frr::DeleteCandidateRequest, frr::DeleteCandidateResponse>
+ *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
+ grpc_debug("%s: entered", __func__);
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
-
- // Request: uint32 candidate_id = 1;
uint32_t candidate_id = tag->request.candidate_id();
grpc_debug("%s(candidate_id: %u)", __func__, candidate_id);
- struct candidate *candidate = tag->cdb->get_candidate(candidate_id);
- if (!candidate) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::NOT_FOUND,
- "candidate configuration not found"),
- tag);
- } else {
- tag->cdb->delete_candidate(candidate);
- tag->responder.Finish(tag->response, grpc::Status::OK, tag);
- }
- tag->state = FINISH;
+ if (!tag->cdb->contains(candidate_id))
+ return grpc::Status(grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found");
+ tag->cdb->delete_candidate(candidate_id);
+ return grpc::Status::OK;
}
-void HandleUnaryUpdateCandidate(NewRpcState<frr::UpdateCandidateRequest,
- frr::UpdateCandidateResponse> *tag)
+grpc::Status HandleUnaryUpdateCandidate(
+ UnaryRpcState<frr::UpdateCandidateRequest, frr::UpdateCandidateResponse>
+ *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
-
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
+ grpc_debug("%s: entered", __func__);
- // Request: uint32 candidate_id = 1;
uint32_t candidate_id = tag->request.candidate_id();
grpc_debug("%s(candidate_id: %u)", __func__, candidate_id);
@@ -641,76 +685,45 @@ void HandleUnaryUpdateCandidate(NewRpcState<frr::UpdateCandidateRequest,
struct candidate *candidate = tag->cdb->get_candidate(candidate_id);
if (!candidate)
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::NOT_FOUND,
- "candidate configuration not found"),
- tag);
- else if (candidate->transaction)
- tag->responder.Finish(
- tag->response,
- grpc::Status(
- grpc::StatusCode::FAILED_PRECONDITION,
- "candidate is in the middle of a transaction"),
- tag);
- else if (nb_candidate_update(candidate->config) != NB_OK)
- tag->responder.Finish(
- tag->response,
- grpc::Status(
- grpc::StatusCode::INTERNAL,
- "failed to update candidate configuration"),
- tag);
-
- else
- tag->responder.Finish(tag->response, grpc::Status::OK, tag);
+ return grpc::Status(grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found");
+ if (candidate->transaction)
+ return grpc::Status(
+ grpc::StatusCode::FAILED_PRECONDITION,
+ "candidate is in the middle of a transaction");
+ if (nb_candidate_update(candidate->config) != NB_OK)
+ return grpc::Status(grpc::StatusCode::INTERNAL,
+ "failed to update candidate configuration");
- tag->state = FINISH;
+ return grpc::Status::OK;
}
-void HandleUnaryEditCandidate(
- NewRpcState<frr::EditCandidateRequest, frr::EditCandidateResponse> *tag)
+grpc::Status HandleUnaryEditCandidate(
+ UnaryRpcState<frr::EditCandidateRequest, frr::EditCandidateResponse>
+ *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
+ grpc_debug("%s: entered", __func__);
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
-
- // Request: uint32 candidate_id = 1;
uint32_t candidate_id = tag->request.candidate_id();
grpc_debug("%s(candidate_id: %u)", __func__, candidate_id);
struct candidate *candidate = tag->cdb->get_candidate(candidate_id);
-
- if (!candidate) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::NOT_FOUND,
- "candidate configuration not found"),
- tag);
- tag->state = FINISH;
- return;
- }
+ if (!candidate)
+ return grpc::Status(grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found");
struct nb_config *candidate_tmp = nb_config_dup(candidate->config);
auto pvs = tag->request.update();
for (const frr::PathValue &pv : pvs) {
- if (yang_dnode_edit(candidate_tmp->dnode, pv.path(), pv.value())
- != 0) {
+ if (yang_dnode_edit(candidate_tmp->dnode, pv.path(),
+ pv.value().c_str()) != 0) {
nb_config_free(candidate_tmp);
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Failed to update \"" + pv.path()
- + "\""),
- tag);
-
- tag->state = FINISH;
- return;
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "Failed to update \"" + pv.path() +
+ "\"");
}
}
@@ -718,36 +731,23 @@ void HandleUnaryEditCandidate(
for (const frr::PathValue &pv : pvs) {
if (yang_dnode_delete(candidate_tmp->dnode, pv.path()) != 0) {
nb_config_free(candidate_tmp);
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Failed to remove \"" + pv.path()
- + "\""),
- tag);
- tag->state = FINISH;
- return;
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "Failed to remove \"" + pv.path() +
+ "\"");
}
}
// No errors, accept all changes.
nb_config_replace(candidate->config, candidate_tmp, false);
-
- tag->responder.Finish(tag->response, grpc::Status::OK, tag);
-
- tag->state = FINISH;
+ return grpc::Status::OK;
}
-void HandleUnaryLoadToCandidate(NewRpcState<frr::LoadToCandidateRequest,
- frr::LoadToCandidateResponse> *tag)
+grpc::Status HandleUnaryLoadToCandidate(
+ UnaryRpcState<frr::LoadToCandidateRequest, frr::LoadToCandidateResponse>
+ *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
-
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
+ grpc_debug("%s: entered", __func__);
- // Request: uint32 candidate_id = 1;
uint32_t candidate_id = tag->request.candidate_id();
grpc_debug("%s(candidate_id: %u)", __func__, candidate_id);
@@ -757,59 +757,31 @@ void HandleUnaryLoadToCandidate(NewRpcState<frr::LoadToCandidateRequest,
// Request: DataTree config = 3;
auto config = tag->request.config();
-
struct candidate *candidate = tag->cdb->get_candidate(candidate_id);
-
- if (!candidate) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::NOT_FOUND,
- "candidate configuration not found"),
- tag);
- tag->state = FINISH;
- return;
- }
+ if (!candidate)
+ return grpc::Status(grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found");
struct lyd_node *dnode = dnode_from_data_tree(&config, true);
- if (!dnode) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::INTERNAL,
- "Failed to parse the configuration"),
- tag);
- tag->state = FINISH;
- return;
- }
+ if (!dnode)
+ return grpc::Status(grpc::StatusCode::INTERNAL,
+ "Failed to parse the configuration");
struct nb_config *loaded_config = nb_config_new(dnode);
-
if (load_type == frr::LoadToCandidateRequest::REPLACE)
nb_config_replace(candidate->config, loaded_config, false);
- else if (nb_config_merge(candidate->config, loaded_config, false)
- != NB_OK) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(
- grpc::StatusCode::INTERNAL,
- "Failed to merge the loaded configuration"),
- tag);
- tag->state = FINISH;
- return;
- }
+ else if (nb_config_merge(candidate->config, loaded_config, false) !=
+ NB_OK)
+ return grpc::Status(grpc::StatusCode::INTERNAL,
+ "Failed to merge the loaded configuration");
- tag->responder.Finish(tag->response, grpc::Status::OK, tag);
- tag->state = FINISH;
+ return grpc::Status::OK;
}
-void HandleUnaryCommit(
- NewRpcState<frr::CommitRequest, frr::CommitResponse> *tag)
+grpc::Status
+HandleUnaryCommit(UnaryRpcState<frr::CommitRequest, frr::CommitResponse> *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
-
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
+ grpc_debug("%s: entered", __func__);
// Request: uint32 candidate_id = 1;
uint32_t candidate_id = tag->request.candidate_id();
@@ -823,15 +795,9 @@ void HandleUnaryCommit(
// Find candidate configuration.
struct candidate *candidate = tag->cdb->get_candidate(candidate_id);
- if (!candidate) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::NOT_FOUND,
- "candidate configuration not found"),
- tag);
- tag->state = FINISH;
- return;
- }
+ if (!candidate)
+ return grpc::Status(grpc::StatusCode::NOT_FOUND,
+ "candidate configuration not found");
int ret = NB_OK;
uint32_t transaction_id = 0;
@@ -840,29 +806,17 @@ void HandleUnaryCommit(
switch (phase) {
case frr::CommitRequest::PREPARE:
case frr::CommitRequest::ALL:
- if (candidate->transaction) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(
- grpc::StatusCode::FAILED_PRECONDITION,
- "candidate is in the middle of a transaction"),
- tag);
- tag->state = FINISH;
- return;
- }
+ if (candidate->transaction)
+ return grpc::Status(
+ grpc::StatusCode::FAILED_PRECONDITION,
+ "candidate is in the middle of a transaction");
break;
case frr::CommitRequest::ABORT:
case frr::CommitRequest::APPLY:
- if (!candidate->transaction) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(
- grpc::StatusCode::FAILED_PRECONDITION,
- "no transaction in progress"),
- tag);
- tag->state = FINISH;
- return;
- }
+ if (!candidate->transaction)
+ return grpc::Status(
+ grpc::StatusCode::FAILED_PRECONDITION,
+ "no transaction in progress");
break;
default:
break;
@@ -942,53 +896,30 @@ void HandleUnaryCommit(
if (strlen(errmsg) > 0)
tag->response.set_error_message(errmsg);
- tag->responder.Finish(tag->response, status, tag);
- tag->state = FINISH;
+ return status;
}
-void HandleUnaryLockConfig(
- NewRpcState<frr::LockConfigRequest, frr::LockConfigResponse> *tag)
+grpc::Status HandleUnaryLockConfig(
+ UnaryRpcState<frr::LockConfigRequest, frr::LockConfigResponse> *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
+ grpc_debug("%s: entered", __func__);
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
-
- if (nb_running_lock(NB_CLIENT_GRPC, NULL)) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::FAILED_PRECONDITION,
- "running configuration is locked already"),
- tag);
- } else {
- tag->responder.Finish(tag->response, grpc::Status::OK, tag);
- }
- tag->state = FINISH;
+ if (nb_running_lock(NB_CLIENT_GRPC, NULL))
+ return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION,
+ "running configuration is locked already");
+ return grpc::Status::OK;
}
-void HandleUnaryUnlockConfig(
- NewRpcState<frr::UnlockConfigRequest, frr::UnlockConfigResponse> *tag)
+grpc::Status HandleUnaryUnlockConfig(
+ UnaryRpcState<frr::UnlockConfigRequest, frr::UnlockConfigResponse> *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
+ grpc_debug("%s: entered", __func__);
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
-
- if (nb_running_unlock(NB_CLIENT_GRPC, NULL)) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(
- grpc::StatusCode::FAILED_PRECONDITION,
- "failed to unlock the running configuration"),
- tag);
- } else {
- tag->responder.Finish(tag->response, grpc::Status::OK, tag);
- }
- tag->state = FINISH;
+ if (nb_running_unlock(NB_CLIENT_GRPC, NULL))
+ return grpc::Status(
+ grpc::StatusCode::FAILED_PRECONDITION,
+ "failed to unlock the running configuration");
+ return grpc::Status::OK;
}
static void list_transactions_cb(void *arg, int transaction_id,
@@ -1002,45 +933,34 @@ static void list_transactions_cb(void *arg, int transaction_id,
std::string(date), std::string(comment)));
}
-void HandleStreamingListTransactions(
- NewRpcState<frr::ListTransactionsRequest, frr::ListTransactionsResponse>
- *tag)
-{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
-
- if (tag->state == FINISH) {
- delete static_cast<std::list<std::tuple<
- int, std::string, std::string, std::string>> *>(
- tag->context);
- tag->state = DELETED;
- return;
- }
-
- if (!tag->context) {
- /* Creating, first time called for this RPC */
- auto new_list =
- new std::list<std::tuple<int, std::string, std::string,
- std::string>>();
- tag->context = new_list;
- nb_db_transactions_iterate(list_transactions_cb, tag->context);
+// Define the context variable type for this streaming handler
+typedef std::list<std::tuple<int, std::string, std::string, std::string>>
+ ListTransactionsContextType;
- new_list->push_back(std::make_tuple(
+bool HandleStreamingListTransactions(
+ StreamRpcState<frr::ListTransactionsRequest,
+ frr::ListTransactionsResponse,
+ ListTransactionsContextType> *tag)
+{
+ grpc_debug("%s: entered", __func__);
+
+ auto list = &tag->context;
+ if (tag->is_initial_process()) {
+ grpc_debug("%s: initialize streaming state", __func__);
+ // Fill our context container first time through
+ nb_db_transactions_iterate(list_transactions_cb, list);
+ list->push_back(std::make_tuple(
0xFFFF, std::string("fake client"),
std::string("fake date"), std::string("fake comment")));
- new_list->push_back(
- std::make_tuple(0xFFFE, std::string("fake client2"),
- std::string("fake date"),
- std::string("fake comment2")));
+ list->push_back(std::make_tuple(0xFFFE,
+ std::string("fake client2"),
+ std::string("fake date"),
+ std::string("fake comment2")));
}
- auto list = static_cast<std::list<
- std::tuple<int, std::string, std::string, std::string>> *>(
- tag->context);
-
if (list->empty()) {
tag->async_responder.Finish(grpc::Status::OK, tag);
- tag->state = FINISH;
- return;
+ return false;
}
auto item = list->back();
@@ -1063,22 +983,18 @@ void HandleStreamingListTransactions(
if (list->empty()) {
tag->async_responder.WriteAndFinish(
response, grpc::WriteOptions(), grpc::Status::OK, tag);
- tag->state = FINISH;
+ return false;
} else {
tag->async_responder.Write(response, tag);
- tag->state = MORE;
+ return true;
}
}
-void HandleUnaryGetTransaction(NewRpcState<frr::GetTransactionRequest,
- frr::GetTransactionResponse> *tag)
+grpc::Status HandleUnaryGetTransaction(
+ UnaryRpcState<frr::GetTransactionRequest, frr::GetTransactionResponse>
+ *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
-
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
+ grpc_debug("%s: entered", __func__);
// Request: uint32 transaction_id = 1;
uint32_t transaction_id = tag->request.transaction_id();
@@ -1094,15 +1010,9 @@ void HandleUnaryGetTransaction(NewRpcState<frr::GetTransactionRequest,
// Load configuration from the transactions database.
nb_config = nb_db_transaction_load(transaction_id);
- if (!nb_config) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Transaction not found"),
- tag);
- tag->state = FINISH;
- return;
- }
+ if (!nb_config)
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "Transaction not found");
// Response: DataTree config = 1;
auto config = tag->response.mutable_config();
@@ -1113,29 +1023,19 @@ void HandleUnaryGetTransaction(NewRpcState<frr::GetTransactionRequest,
encoding2lyd_format(encoding), with_defaults)
!= 0) {
nb_config_free(nb_config);
- tag->responder.Finish(tag->response,
- grpc::Status(grpc::StatusCode::INTERNAL,
- "Failed to dump data"),
- tag);
- tag->state = FINISH;
- return;
+ return grpc::Status(grpc::StatusCode::INTERNAL,
+ "Failed to dump data");
}
nb_config_free(nb_config);
- tag->responder.Finish(tag->response, grpc::Status::OK, tag);
- tag->state = FINISH;
+ return grpc::Status::OK;
}
-void HandleUnaryExecute(
- NewRpcState<frr::ExecuteRequest, frr::ExecuteResponse> *tag)
+grpc::Status HandleUnaryExecute(
+ UnaryRpcState<frr::ExecuteRequest, frr::ExecuteResponse> *tag)
{
- grpc_debug("%s: state: %s", __func__, call_states[tag->state]);
-
- if (tag->state == FINISH) {
- tag->state = DELETED;
- return;
- }
+ grpc_debug("%s: entered", __func__);
struct nb_node *nb_node;
struct list *input_list;
@@ -1150,26 +1050,14 @@ void HandleUnaryExecute(
grpc_debug("%s(path: \"%s\")", __func__, xpath);
- if (tag->request.path().empty()) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Data path is empty"),
- tag);
- tag->state = FINISH;
- return;
- }
+ if (tag->request.path().empty())
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "Data path is empty");
nb_node = nb_node_find(xpath);
- if (!nb_node) {
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
- "Unknown data path"),
- tag);
- tag->state = FINISH;
- return;
- }
+ if (!nb_node)
+ return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT,
+ "Unknown data path");
input_list = yang_data_list_new();
output_list = yang_data_list_new();
@@ -1191,12 +1079,7 @@ void HandleUnaryExecute(
list_delete(&input_list);
list_delete(&output_list);
- tag->responder.Finish(
- tag->response,
- grpc::Status(grpc::StatusCode::INTERNAL, "RPC failed"),
- tag);
- tag->state = FINISH;
- return;
+ return grpc::Status(grpc::StatusCode::INTERNAL, "RPC failed");
}
// Process output parameters.
@@ -1211,8 +1094,7 @@ void HandleUnaryExecute(
list_delete(&input_list);
list_delete(&output_list);
- tag->responder.Finish(tag->response, grpc::Status::OK, tag);
- tag->state = FINISH;
+ return grpc::Status::OK;
}
// ------------------------------------------------------
@@ -1222,20 +1104,21 @@ void HandleUnaryExecute(
#define REQUEST_NEWRPC(NAME, cdb) \
do { \
- auto _rpcState = new NewRpcState<frr::NAME##Request, \
- frr::NAME##Response>( \
+ auto _rpcState = new UnaryRpcState<frr::NAME##Request, \
+ frr::NAME##Response>( \
(cdb), &frr::Northbound::AsyncService::Request##NAME, \
&HandleUnary##NAME, #NAME); \
- _rpcState->do_request(service, s_cq); \
+ _rpcState->do_request(&service, cq.get(), true); \
} while (0)
-#define REQUEST_NEWRPC_STREAMING(NAME, cdb) \
+#define REQUEST_NEWRPC_STREAMING(NAME) \
do { \
- auto _rpcState = new NewRpcState<frr::NAME##Request, \
- frr::NAME##Response>( \
- (cdb), &frr::Northbound::AsyncService::Request##NAME, \
+ auto _rpcState = new StreamRpcState<frr::NAME##Request, \
+ frr::NAME##Response, \
+ NAME##ContextType>( \
+ &frr::Northbound::AsyncService::Request##NAME, \
&HandleStreaming##NAME, #NAME); \
- _rpcState->do_request(service, s_cq); \
+ _rpcState->do_request(&service, cq.get(), true); \
} while (0)
struct grpc_pthread_attr {
@@ -1244,8 +1127,8 @@ struct grpc_pthread_attr {
};
// Capture these objects so we can try to shut down cleanly
-static std::unique_ptr<grpc::Server> s_server;
-static grpc::ServerCompletionQueue *s_cq;
+static pthread_mutex_t s_server_lock = PTHREAD_MUTEX_INITIALIZER;
+static grpc::Server *s_server;
static void *grpc_pthread_start(void *arg)
{
@@ -1255,20 +1138,26 @@ static void *grpc_pthread_start(void *arg)
Candidates candidates;
grpc::ServerBuilder builder;
std::stringstream server_address;
- frr::Northbound::AsyncService *service =
- new frr::Northbound::AsyncService();
+ frr::Northbound::AsyncService service;
frr_pthread_set_name(fpt);
server_address << "0.0.0.0:" << port;
builder.AddListeningPort(server_address.str(),
grpc::InsecureServerCredentials());
- builder.RegisterService(service);
- auto cq = builder.AddCompletionQueue();
- s_cq = cq.get();
- s_server = builder.BuildAndStart();
-
- /* Schedule all RPC handlers */
+ builder.RegisterService(&service);
+ builder.AddChannelArgument(
+ GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS, 5000);
+ std::unique_ptr<grpc::ServerCompletionQueue> cq =
+ builder.AddCompletionQueue();
+ std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
+ s_server = server.get();
+
+ pthread_mutex_lock(&s_server_lock); // Make coverity happy
+ grpc_running = true;
+ pthread_mutex_unlock(&s_server_lock); // Make coverity happy
+
+ /* Schedule unary RPC handlers */
REQUEST_NEWRPC(GetCapabilities, NULL);
REQUEST_NEWRPC(CreateCandidate, &candidates);
REQUEST_NEWRPC(DeleteCandidate, &candidates);
@@ -1280,40 +1169,60 @@ static void *grpc_pthread_start(void *arg)
REQUEST_NEWRPC(LockConfig, NULL);
REQUEST_NEWRPC(UnlockConfig, NULL);
REQUEST_NEWRPC(Execute, NULL);
- REQUEST_NEWRPC_STREAMING(Get, NULL);
- REQUEST_NEWRPC_STREAMING(ListTransactions, NULL);
+
+ /* Schedule streaming RPC handlers */
+ REQUEST_NEWRPC_STREAMING(Get);
+ REQUEST_NEWRPC_STREAMING(ListTransactions);
zlog_notice("gRPC server listening on %s",
server_address.str().c_str());
/* Process inbound RPCs */
+ bool ok;
+ void *tag;
while (true) {
- void *tag;
- bool ok;
-
- s_cq->Next(&tag, &ok);
- if (!ok)
+ if (!cq->Next(&tag, &ok)) {
+ grpc_debug("%s: CQ empty exiting", __func__);
break;
+ }
+
+ grpc_debug("%s: got next from CQ tag: %p ok: %d", __func__, tag,
+ ok);
- grpc_debug("%s: Got next from CompletionQueue, %p %d", __func__,
- tag, ok);
+ if (!ok) {
+ delete static_cast<RpcStateBase *>(tag);
+ break;
+ }
RpcStateBase *rpc = static_cast<RpcStateBase *>(tag);
- CallState state = rpc->doCallback();
- grpc_debug("%s: Callback returned RPC State: %s", __func__,
- call_states[state]);
+ if (rpc->get_state() != FINISH)
+ rpc->run(&service, cq.get());
+ else {
+ grpc_debug("%s RPC FINISH -> [delete]", rpc->name);
+ delete rpc;
+ }
+ }
- /*
- * Our side is done (FINISH) receive new requests of this type
- * We could do this earlier but that would mean we could be
- * handling multiple same type requests in parallel. We expect
- * to be called back once more in the FINISH state (from the
- * user indicating Finish() for cleanup.
- */
- if (state == FINISH)
- rpc->do_request(service, s_cq);
+ /* Shutdown was probably already initiated for us to get here, but be safe */
+ pthread_mutex_lock(&s_server_lock);
+ grpc_running = false;
+ if (s_server) {
+ grpc_debug("%s: shutdown server and CQ", __func__);
+ server->Shutdown();
+ s_server = NULL;
+ }
+ pthread_mutex_unlock(&s_server_lock);
+
+ grpc_debug("%s: shutting down CQ", __func__);
+ cq->Shutdown();
+
+ grpc_debug("%s: draining the CQ", __func__);
+ while (cq->Next(&tag, &ok)) {
+ grpc_debug("%s: drain tag %p", __func__, tag);
+ delete static_cast<RpcStateBase *>(tag);
}
+ zlog_info("%s: exiting from grpc pthread", __func__);
return NULL;
}
@@ -1325,6 +1234,8 @@ static int frr_grpc_init(uint port)
.stop = NULL,
};
+ grpc_debug("%s: entered", __func__);
+
fpt = frr_pthread_new(&attr, "frr-grpc", "frr-grpc");
fpt->data = reinterpret_cast<void *>((intptr_t)port);
@@ -1340,23 +1251,31 @@ static int frr_grpc_init(uint port)
static int frr_grpc_finish(void)
{
- // Shutdown the grpc server
- if (s_server) {
- s_server->Shutdown();
- s_cq->Shutdown();
+ grpc_debug("%s: entered", __func__);
- // And drain the queue
- void *ignore;
- bool ok;
+ if (!fpt)
+ return 0;
- while (s_cq->Next(&ignore, &ok))
- ;
+ /*
+ * Shut the server down here in main thread. This will cause the wait on
+ * the completion queue (cq.Next()) to exit and cleanup everything else.
+ */
+ pthread_mutex_lock(&s_server_lock);
+ grpc_running = false;
+ if (s_server) {
+ grpc_debug("%s: shutdown server", __func__);
+ s_server->Shutdown();
+ s_server = NULL;
}
+ pthread_mutex_unlock(&s_server_lock);
- if (fpt) {
- pthread_join(fpt->thread, NULL);
- frr_pthread_destroy(fpt);
- }
+ grpc_debug("%s: joining and destroy grpc thread", __func__);
+ pthread_join(fpt->thread, NULL);
+ frr_pthread_destroy(fpt);
+
+ // Fix protobuf 'memory leaks' during shutdown.
+ // https://groups.google.com/g/protobuf/c/4y_EmQiCGgs
+ google::protobuf::ShutdownProtobufLibrary();
return 0;
}
diff --git a/lib/northbound_sysrepo.c b/lib/northbound_sysrepo.c
index 0158d8ea0a..8a64347871 100644
--- a/lib/northbound_sysrepo.c
+++ b/lib/northbound_sysrepo.c
@@ -77,11 +77,11 @@ static int yang_data_frr2sr(struct yang_data *frr_data, sr_val_t *sr_data)
return 0;
case LYS_LEAF:
sleaf = (struct lysc_node_leaf *)snode;
- type = sleaf->type.base;
+ type = sleaf->type->basetype;
break;
case LYS_LEAFLIST:
sleaflist = (struct lysc_node_leaflist *)snode;
- type = sleaflist->type.base;
+ type = sleaflist->type->basetype;
break;
default:
return -1;
@@ -301,7 +301,7 @@ static int frr_sr_config_change_cb_prepare(sr_session_ctx_t *session,
case NB_ERR_LOCKED:
return SR_ERR_LOCKED;
case NB_ERR_RESOURCE:
- return SR_ERR_NOMEM;
+ return SR_ERR_NO_MEMORY;
default:
return SR_ERR_VALIDATION_FAILED;
}
@@ -339,7 +339,7 @@ static int frr_sr_config_change_cb_abort(sr_session_ctx_t *session,
}
/* Callback for changes in the running configuration. */
-static int frr_sr_config_change_cb(sr_session_ctx_t *session,
+static int frr_sr_config_change_cb(sr_session_ctx_t *session, uint32_t sub_id,
const char *module_name, const char *xpath,
sr_event_t sr_ev, uint32_t request_id,
void *private_data)
@@ -364,10 +364,11 @@ static int frr_sr_state_data_iter_cb(const struct lysc_node *snode,
struct yang_data *data, void *arg)
{
struct lyd_node *dnode = arg;
+ LY_ERR ly_errno;
ly_errno = 0;
- dnode = lyd_new_path(dnode, ly_native_ctx, data->xpath, data->value, 0,
- LYD_PATH_OPT_UPDATE);
+ ly_errno = lyd_new_path(NULL, ly_native_ctx, data->xpath, data->value,
+ 0, &dnode);
if (!dnode && ly_errno) {
flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
__func__);
@@ -380,10 +381,10 @@ static int frr_sr_state_data_iter_cb(const struct lysc_node *snode,
}
/* Callback for state retrieval. */
-static int frr_sr_state_cb(sr_session_ctx_t *session, const char *module_name,
- const char *xpath, const char *request_xpath,
- uint32_t request_id, struct lyd_node **parent,
- void *private_ctx)
+static int frr_sr_state_cb(sr_session_ctx_t *session, uint32_t sub_id,
+ const char *module_name, const char *xpath,
+ const char *request_xpath, uint32_t request_id,
+ struct lyd_node **parent, void *private_ctx)
{
struct lyd_node *dnode;
@@ -401,9 +402,8 @@ static int frr_sr_state_cb(sr_session_ctx_t *session, const char *module_name,
return SR_ERR_OK;
}
-
-static int frr_sr_config_rpc_cb(sr_session_ctx_t *session, const char *xpath,
- const sr_val_t *sr_input,
+static int frr_sr_config_rpc_cb(sr_session_ctx_t *session, uint32_t sub_id,
+ const char *xpath, const sr_val_t *sr_input,
const size_t input_cnt, sr_event_t sr_ev,
uint32_t request_id, sr_val_t **sr_output,
size_t *sr_output_cnt, void *private_ctx)
@@ -515,7 +515,7 @@ static int frr_sr_notification_send(const char *xpath, struct list *arguments)
}
}
- ret = sr_event_notif_send(session, xpath, values, values_cnt);
+ ret = sr_event_notif_send(session, xpath, values, values_cnt, 0, 0);
if (ret != SR_ERR_OK) {
flog_err(EC_LIB_LIBSYSREPO,
"%s: sr_event_notif_send() failed for xpath %s",
diff --git a/lib/plist.c b/lib/plist.c
index d6a63c1b0c..e7647fb2a7 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -31,6 +31,7 @@
#include "lib/json.h"
#include "libfrr.h"
+#include <typesafe.h>
#include "plist_int.h"
DEFINE_MTYPE_STATIC(LIB, PREFIX_LIST, "Prefix List");
@@ -58,17 +59,8 @@ struct pltrie_table {
struct pltrie_entry entries[PLC_LEN];
};
-/* List of struct prefix_list. */
-struct prefix_list_list {
- struct prefix_list *head;
- struct prefix_list *tail;
-};
-
/* Master structure of prefix_list. */
struct prefix_master {
- /* List of prefix_list which name is string. */
- struct prefix_list_list str;
-
/* The latest update. */
struct prefix_list *recent;
@@ -80,26 +72,32 @@ struct prefix_master {
/* number of bytes that have a trie level */
size_t trie_depth;
+
+ struct plist_head str;
};
+static int prefix_list_compare_func(const struct prefix_list *a,
+ const struct prefix_list *b);
+DECLARE_RBTREE_UNIQ(plist, struct prefix_list, plist_item,
+ prefix_list_compare_func);
/* Static structure of IPv4 prefix_list's master. */
static struct prefix_master prefix_master_ipv4 = {
- {NULL, NULL}, NULL, NULL, NULL, PLC_MAXLEVELV4,
+ NULL, NULL, NULL, PLC_MAXLEVELV4,
};
/* Static structure of IPv6 prefix-list's master. */
static struct prefix_master prefix_master_ipv6 = {
- {NULL, NULL}, NULL, NULL, NULL, PLC_MAXLEVELV6,
+ NULL, NULL, NULL, PLC_MAXLEVELV6,
};
/* Static structure of BGP ORF prefix_list's master. */
static struct prefix_master prefix_master_orf_v4 = {
- {NULL, NULL}, NULL, NULL, NULL, PLC_MAXLEVELV4,
+ NULL, NULL, NULL, PLC_MAXLEVELV4,
};
/* Static structure of BGP ORF prefix_list's master. */
static struct prefix_master prefix_master_orf_v6 = {
- {NULL, NULL}, NULL, NULL, NULL, PLC_MAXLEVELV6,
+ NULL, NULL, NULL, PLC_MAXLEVELV6,
};
static struct prefix_master *prefix_master_get(afi_t afi, int orf)
@@ -124,11 +122,17 @@ afi_t prefix_list_afi(struct prefix_list *plist)
return AFI_IP6;
}
+static int prefix_list_compare_func(const struct prefix_list *a,
+ const struct prefix_list *b)
+{
+ return strcmp(a->name, b->name);
+}
+
/* Lookup prefix_list from list of prefix_list by name. */
static struct prefix_list *prefix_list_lookup_do(afi_t afi, int orf,
const char *name)
{
- struct prefix_list *plist;
+ struct prefix_list *plist, lookup;
struct prefix_master *master;
if (name == NULL)
@@ -138,11 +142,10 @@ static struct prefix_list *prefix_list_lookup_do(afi_t afi, int orf,
if (master == NULL)
return NULL;
- for (plist = master->str.head; plist; plist = plist->next)
- if (strcmp(plist->name, name) == 0)
- return plist;
-
- return NULL;
+ lookup.name = XSTRDUP(MTYPE_TMP, name);
+ plist = plist_find(&master->str, &lookup);
+ XFREE(MTYPE_TMP, lookup.name);
+ return plist;
}
struct prefix_list *prefix_list_lookup(afi_t afi, const char *name)
@@ -188,8 +191,6 @@ static struct prefix_list *prefix_list_insert(afi_t afi, int orf,
const char *name)
{
struct prefix_list *plist;
- struct prefix_list *point;
- struct prefix_list_list *list;
struct prefix_master *master;
master = prefix_master_get(afi, orf);
@@ -203,43 +204,7 @@ static struct prefix_list *prefix_list_insert(afi_t afi, int orf,
plist->trie =
XCALLOC(MTYPE_PREFIX_LIST_TRIE, sizeof(struct pltrie_table));
- /* Set prefix_list to string list. */
- list = &master->str;
-
- /* Set point to insertion point. */
- for (point = list->head; point; point = point->next)
- if (strcmp(point->name, name) >= 0)
- break;
-
- /* In case of this is the first element of master. */
- if (list->head == NULL) {
- list->head = list->tail = plist;
- return plist;
- }
-
- /* In case of insertion is made at the tail of access_list. */
- if (point == NULL) {
- plist->prev = list->tail;
- list->tail->next = plist;
- list->tail = plist;
- return plist;
- }
-
- /* In case of insertion is made at the head of access_list. */
- if (point == list->head) {
- plist->next = list->head;
- list->head->prev = plist;
- list->head = plist;
- return plist;
- }
-
- /* Insertion is made at middle of the access_list. */
- plist->next = point;
- plist->prev = point->prev;
-
- if (point->prev)
- point->prev->next = plist;
- point->prev = plist;
+ plist_add(&master->str, plist);
return plist;
}
@@ -261,7 +226,6 @@ static void prefix_list_trie_del(struct prefix_list *plist,
/* Delete prefix-list from prefix_list_master and free it. */
void prefix_list_delete(struct prefix_list *plist)
{
- struct prefix_list_list *list;
struct prefix_master *master;
struct prefix_list_entry *pentry;
struct prefix_list_entry *next;
@@ -278,17 +242,7 @@ void prefix_list_delete(struct prefix_list *plist)
master = plist->master;
- list = &master->str;
-
- if (plist->next)
- plist->next->prev = plist->prev;
- else
- list->tail = plist->prev;
-
- if (plist->prev)
- plist->prev->next = plist->next;
- else
- list->head = plist->next;
+ plist_del(&master->str, plist);
XFREE(MTYPE_TMP, plist->desc);
@@ -1120,7 +1074,7 @@ static int vty_show_prefix_list(struct vty *vty, afi_t afi, const char *name,
master->recent->name);
}
- for (plist = master->str.head; plist; plist = plist->next)
+ frr_each (plist, &master->str, plist)
vty_show_prefix_entry(vty, json_proto, afi, plist,
master, dtype, seqnum);
}
@@ -1208,7 +1162,7 @@ static int vty_clear_prefix_list(struct vty *vty, afi_t afi, const char *name,
return CMD_WARNING;
if (name == NULL && prefix == NULL) {
- for (plist = master->str.head; plist; plist = plist->next)
+ frr_each (plist, &master->str, plist)
for (pentry = plist->head; pentry;
pentry = pentry->next)
pentry->hitcnt = 0;
@@ -1608,20 +1562,14 @@ int prefix_bgp_show_prefix_list(struct vty *vty, afi_t afi, char *name,
static void prefix_list_reset_afi(afi_t afi, int orf)
{
struct prefix_list *plist;
- struct prefix_list *next;
struct prefix_master *master;
master = prefix_master_get(afi, orf);
if (master == NULL)
return;
- for (plist = master->str.head; plist; plist = next) {
- next = plist->next;
+ while ((plist = plist_pop(&master->str)))
prefix_list_delete(plist);
- }
-
- assert(master->str.head == NULL);
- assert(master->str.tail == NULL);
master->recent = NULL;
}
@@ -1643,7 +1591,7 @@ static void plist_autocomplete_afi(afi_t afi, vector comps,
if (master == NULL)
return;
- for (plist = master->str.head; plist; plist = plist->next)
+ frr_each (plist, &master->str, plist)
vector_set(comps, XSTRDUP(MTYPE_COMPLETION, plist->name));
}
@@ -1696,6 +1644,11 @@ static void prefix_list_init_ipv6(void)
void prefix_list_init(void)
{
+ plist_init(&prefix_master_ipv4.str);
+ plist_init(&prefix_master_orf_v4.str);
+ plist_init(&prefix_master_ipv6.str);
+ plist_init(&prefix_master_orf_v6.str);
+
cmd_variable_handler_register(plist_var_handlers);
prefix_list_init_ipv4();
diff --git a/lib/plist_int.h b/lib/plist_int.h
index 571978a517..397557b37f 100644
--- a/lib/plist_int.h
+++ b/lib/plist_int.h
@@ -28,6 +28,8 @@ extern "C" {
struct pltrie_table;
+PREDECL_RBTREE_UNIQ(plist);
+
struct prefix_list {
char *name;
char *desc;
@@ -37,13 +39,12 @@ struct prefix_list {
int count;
int rangecount;
+ struct plist_item plist_item;
+
struct prefix_list_entry *head;
struct prefix_list_entry *tail;
struct pltrie_table *trie;
-
- struct prefix_list *next;
- struct prefix_list *prev;
};
/* Each prefix-list's entry. */
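
Note: for reference, the PREDECL_RBTREE_UNIQ/DECLARE_RBTREE_UNIQ pattern adopted for the prefix-list master above looks roughly like this as a self-contained sketch (the struct item type and all names here are hypothetical):

#include <stdio.h>
#include <string.h>

#include <typesafe.h>

PREDECL_RBTREE_UNIQ(itemtree);

struct item {
	const char *name;
	struct itemtree_item titem;	/* embedded tree node */
};

static int item_cmp(const struct item *a, const struct item *b)
{
	return strcmp(a->name, b->name);
}

DECLARE_RBTREE_UNIQ(itemtree, struct item, titem, item_cmp);

static struct itemtree_head head;

static void demo(struct item *i1, struct item *i2)
{
	struct item *it, ref = { .name = "foo" };

	itemtree_init(&head);
	itemtree_add(&head, i1);	/* NULL on success, or the colliding item */
	itemtree_add(&head, i2);

	it = itemtree_find(&head, &ref);	/* key lookup, as in plist.c */

	frr_each (itemtree, &head, it)		/* sorted iteration */
		printf("%s\n", it->name);

	while ((it = itemtree_pop(&head)))	/* drain, as in the reset path */
		;
}
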
diff --git a/lib/prefix.c b/lib/prefix.c
index 90ab48a13b..4db0c2478b 100644
--- a/lib/prefix.c
+++ b/lib/prefix.c
@@ -915,12 +915,13 @@ static const char *prefixevpn_ead2str(const struct prefix_evpn *p, char *str,
char buf1[INET6_ADDRSTRLEN];
family = IS_IPADDR_V4(&p->prefix.ead_addr.ip) ? AF_INET : AF_INET6;
- snprintf(str, size, "[%d]:[%u]:[%s]:[%d]:[%s]", p->prefix.route_type,
- p->prefix.ead_addr.eth_tag,
+ snprintf(str, size, "[%d]:[%u]:[%s]:[%d]:[%s]:[%u]",
+ p->prefix.route_type, p->prefix.ead_addr.eth_tag,
esi_to_str(&p->prefix.ead_addr.esi, buf, sizeof(buf)),
(family == AF_INET) ? IPV4_MAX_BITLEN : IPV6_MAX_BITLEN,
inet_ntop(family, &p->prefix.ead_addr.ip.ipaddr_v4, buf1,
- sizeof(buf1)));
+ sizeof(buf1)),
+ p->prefix.ead_addr.frag_id);
return str;
}
@@ -1071,6 +1072,26 @@ const char *prefix2str(union prefixconstptr pu, char *str, int size)
return str;
}
+static ssize_t prefixhost2str(struct fbuf *fbuf, union prefixconstptr pu)
+{
+ const struct prefix *p = pu.p;
+ char buf[PREFIX2STR_BUFFER];
+
+ switch (p->family) {
+ case AF_INET:
+ case AF_INET6:
+ inet_ntop(p->family, &p->u.prefix, buf, sizeof(buf));
+ return bputs(fbuf, buf);
+
+ case AF_ETHERNET:
+ prefix_mac2str(&p->u.prefix_eth, buf, sizeof(buf));
+ return bputs(fbuf, buf);
+
+ default:
+ return bprintfrr(fbuf, "{prefix.af=%dPF}", p->family);
+ }
+}
+
void prefix_mcast_inet4_dump(const char *onfail, struct in_addr addr,
char *buf, int buf_size)
{
@@ -1458,13 +1479,24 @@ printfrr_ext_autoreg_p("FX", printfrr_pfx);
static ssize_t printfrr_pfx(struct fbuf *buf, struct printfrr_eargs *ea,
const void *ptr)
{
- char cbuf[PREFIX_STRLEN];
+ bool host_only = false;
+
+ if (ea->fmt[0] == 'h') {
+ ea->fmt++;
+ host_only = true;
+ }
if (!ptr)
return bputs(buf, "(null)");
- prefix2str(ptr, cbuf, sizeof(cbuf));
- return bputs(buf, cbuf);
+ if (host_only)
+ return prefixhost2str(buf, (struct prefix *)ptr);
+ else {
+ char cbuf[PREFIX_STRLEN];
+
+ prefix2str(ptr, cbuf, sizeof(cbuf));
+ return bputs(buf, cbuf);
+ }
}
printfrr_ext_autoreg_p("PSG4", printfrr_psg);
diff --git a/lib/prefix.h b/lib/prefix.h
index b3545a72b4..816a1517e1 100644
--- a/lib/prefix.h
+++ b/lib/prefix.h
@@ -95,6 +95,7 @@ struct evpn_ead_addr {
esi_t esi;
uint32_t eth_tag;
struct ipaddr ip;
+ uint16_t frag_id;
};
struct evpn_macip_addr {
diff --git a/lib/routemap.c b/lib/routemap.c
index 7f733c8114..46161fd817 100644
--- a/lib/routemap.c
+++ b/lib/routemap.c
@@ -100,6 +100,7 @@ static void route_map_del_plist_entries(afi_t afi,
struct prefix_list_entry *entry);
static struct hash *route_map_get_dep_hash(route_map_event_t event);
+static void route_map_free_map(struct route_map *map);
struct route_map_match_set_hooks rmap_match_set_hook;
@@ -566,15 +567,8 @@ static bool route_map_hash_cmp(const void *p1, const void *p2)
const struct route_map *map1 = p1;
const struct route_map *map2 = p2;
- if (map1->deleted == map2->deleted) {
- if (map1->name && map2->name) {
- if (!strcmp(map1->name, map2->name)) {
- return true;
- }
- } else if (!map1->name && !map2->name) {
- return true;
- }
- }
+ if (!strcmp(map1->name, map2->name))
+ return true;
return false;
}
@@ -636,13 +630,25 @@ static struct route_map *route_map_new(const char *name)
/* Add new name to route_map. */
static struct route_map *route_map_add(const char *name)
{
- struct route_map *map;
+ struct route_map *map, *exist;
struct route_map_list *list;
map = route_map_new(name);
list = &route_map_master;
- /* Add map to the hash */
+ /*
+ * Add map to the hash
+ *
+ * If the map already exists in the hash, then we know that
+ * FRR is now in a sequence of delete/create.
+ * All FRR needs to do here is set the to_be_processed
+ * bit (to inherit it from the old one).
+ */
+ exist = hash_release(route_map_master_hash, map);
+ if (exist) {
+ map->to_be_processed = exist->to_be_processed;
+ route_map_free_map(exist);
+ }
hash_get(route_map_master_hash, map, hash_alloc_intern);
/* Add new entry to the head of the list to match how it is added in the
@@ -752,11 +758,15 @@ struct route_map *route_map_lookup_by_name(const char *name)
if (!name)
return NULL;
- // map.deleted is 0 via memset
+ // map.deleted is false via memset
memset(&tmp_map, 0, sizeof(struct route_map));
tmp_map.name = XSTRDUP(MTYPE_ROUTE_MAP_NAME, name);
map = hash_lookup(route_map_master_hash, &tmp_map);
XFREE(MTYPE_ROUTE_MAP_NAME, tmp_map.name);
+
+ if (map && map->deleted)
+ return NULL;
+
return map;
}
@@ -1799,12 +1809,11 @@ static struct list *route_map_get_index_list(struct route_node **rn,
/*
* This function returns the route-map index that best matches the prefix.
*/
-static struct route_map_index *route_map_get_index(struct route_map *map,
- const struct prefix *prefix,
- void *object,
- uint8_t *match_ret)
+static struct route_map_index *
+route_map_get_index(struct route_map *map, const struct prefix *prefix,
+ void *object, enum route_map_cmd_result_t *match_ret)
{
- int ret = 0;
+ enum route_map_cmd_result_t ret = RMAP_NOMATCH;
struct list *candidate_rmap_list = NULL;
struct route_node *rn = NULL;
struct listnode *ln = NULL, *nn = NULL;
@@ -2559,7 +2568,7 @@ route_map_result_t route_map_apply_ext(struct route_map *map,
if ((!map->optimization_disabled)
&& (map->ipv4_prefix_table || map->ipv6_prefix_table)) {
index = route_map_get_index(map, prefix, match_object,
- (uint8_t *)&match_ret);
+ &match_ret);
if (index) {
index->applied++;
if (rmap_debug)
diff --git a/lib/sockunion.c b/lib/sockunion.c
index 006ac142aa..9763b38e28 100644
--- a/lib/sockunion.c
+++ b/lib/sockunion.c
@@ -351,21 +351,6 @@ int sockopt_ttl(int family, int sock, int ttl)
return 0;
}
-/*
- * This function called setsockopt(.., TCP_CORK,...)
- * Which on linux is a no-op since it is enabled by
- * default and on BSD it uses TCP_NOPUSH to do
- * the same thing( which it was not configured to
- * use). This cleanup of the api occurred on 8/1/17
- * I imagine if after more than 1 year of no-one
- * complaining, and a major upgrade release we
- * can deprecate and remove this function call
- */
-int sockopt_cork(int sock, int onoff)
-{
- return 0;
-}
-
int sockopt_minttl(int family, int sock, int minttl)
{
#ifdef IP_MINTTL
diff --git a/lib/sockunion.h b/lib/sockunion.h
index 9e6719ccf9..8ace3e4781 100644
--- a/lib/sockunion.h
+++ b/lib/sockunion.h
@@ -95,7 +95,6 @@ extern int sockunion_bind(int sock, union sockunion *, unsigned short,
union sockunion *);
extern int sockopt_ttl(int family, int sock, int ttl);
extern int sockopt_minttl(int family, int sock, int minttl);
-extern int sockopt_cork(int sock, int onoff);
extern int sockunion_socket(const union sockunion *su);
extern const char *inet_sutop(const union sockunion *su, char *str);
extern enum connect_result sockunion_connect(int fd, const union sockunion *su,
diff --git a/lib/subdir.am b/lib/subdir.am
index b505e235ca..d1df9cb3d9 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -523,6 +523,11 @@ lib/clippy-command_parse.$(OBJEXT): lib/command_lex.h
lib/lib_clippy-command_lex.$(OBJEXT): lib/command_parse.h
lib/lib_clippy-command_parse.$(OBJEXT): lib/command_lex.h
+DISTCLEANFILES += lib/command_lex.h \
+ lib/command_lex.c \
+ lib/command_parse.h \
+ lib/command_parse.c
+
rt_enabled =
if BABELD
diff --git a/lib/typerb.c b/lib/typerb.c
index e1346df191..fe142ff354 100644
--- a/lib/typerb.c
+++ b/lib/typerb.c
@@ -468,6 +468,28 @@ struct rb_entry *typed_rb_next(const struct rb_entry *rbe_const)
return rbe;
}
+struct rb_entry *typed_rb_prev(const struct rb_entry *rbe_const)
+{
+ struct rb_entry *rbe = (struct rb_entry *)rbe_const;
+
+ if (RBE_LEFT(rbe)) {
+ rbe = RBE_LEFT(rbe);
+ while (RBE_RIGHT(rbe))
+ rbe = RBE_RIGHT(rbe);
+ } else {
+ if (RBE_PARENT(rbe) && (rbe == RBE_RIGHT(RBE_PARENT(rbe))))
+ rbe = RBE_PARENT(rbe);
+ else {
+ while (RBE_PARENT(rbe)
+ && (rbe == RBE_LEFT(RBE_PARENT(rbe))))
+ rbe = RBE_PARENT(rbe);
+ rbe = RBE_PARENT(rbe);
+ }
+ }
+
+ return rbe;
+}
+
struct rb_entry *typed_rb_min(const struct rbt_tree *rbt)
{
struct rb_entry *rbe = RBH_ROOT(rbt);
@@ -481,6 +503,19 @@ struct rb_entry *typed_rb_min(const struct rbt_tree *rbt)
return parent;
}
+struct rb_entry *typed_rb_max(const struct rbt_tree *rbt)
+{
+ struct rb_entry *rbe = RBH_ROOT(rbt);
+ struct rb_entry *parent = NULL;
+
+ while (rbe != NULL) {
+ parent = rbe;
+ rbe = RBE_RIGHT(rbe);
+ }
+
+ return parent;
+}
+
bool typed_rb_member(const struct typed_rb_root *rbt,
const struct typed_rb_entry *rbe)
{
diff --git a/lib/typerb.h b/lib/typerb.h
index 75a1de77b3..8ac1821742 100644
--- a/lib/typerb.h
+++ b/lib/typerb.h
@@ -62,6 +62,8 @@ const struct typed_rb_entry *typed_rb_find_lt(const struct typed_rb_root *rbt,
const struct typed_rb_entry *a,
const struct typed_rb_entry *b));
struct typed_rb_entry *typed_rb_min(const struct typed_rb_root *rbt);
+struct typed_rb_entry *typed_rb_max(const struct typed_rb_root *rbt);
+struct typed_rb_entry *typed_rb_prev(const struct typed_rb_entry *rbe);
struct typed_rb_entry *typed_rb_next(const struct typed_rb_entry *rbe);
bool typed_rb_member(const struct typed_rb_root *rbt,
const struct typed_rb_entry *rbe);
@@ -135,12 +137,32 @@ macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
return container_of_null(re, type, field.re); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
+macro_pure const type *prefix ## _const_last(const struct prefix##_head *h) \
+{ \
+ const struct typed_rb_entry *re; \
+ re = typed_rb_max(&h->rr); \
+ return container_of_null(re, type, field.re); \
+} \
+macro_pure const type *prefix ## _const_prev(const struct prefix##_head *h, \
+ const type *item) \
+{ \
+ const struct typed_rb_entry *re; \
+ re = typed_rb_prev(&item->field.re); \
+ return container_of_null(re, type, field.re); \
+} \
+TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
struct typed_rb_entry *re; \
re = item ? typed_rb_next(&item->field.re) : NULL; \
return container_of_null(re, type, field.re); \
} \
+macro_pure type *prefix ## _prev_safe(struct prefix##_head *h, type *item) \
+{ \
+ struct typed_rb_entry *re; \
+ re = item ? typed_rb_prev(&item->field.re) : NULL; \
+ return container_of_null(re, type, field.re); \
+} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
return h->rr.count; \
diff --git a/lib/typesafe.h b/lib/typesafe.h
index b284397d98..06fdc52e78 100644
--- a/lib/typesafe.h
+++ b/lib/typesafe.h
@@ -43,6 +43,22 @@ extern "C" {
item; \
item = from, from = prefix##_next_safe(head, from))
+/* reverse direction, only supported by a few containers */
+
+#define frr_rev_each(prefix, head, item) \
+ for (item = prefix##_last(head); item; \
+ item = prefix##_prev(head, item))
+#define frr_rev_each_safe(prefix, head, item) \
+ for (typeof(prefix##_prev_safe(head, NULL)) prefix##_safe = \
+ prefix##_prev_safe(head, \
+ (item = prefix##_last(head))); \
+ item; \
+ item = prefix##_safe, \
+ prefix##_safe = prefix##_prev_safe(head, prefix##_safe))
+#define frr_rev_each_from(prefix, head, item, from) \
+ for (item = from, from = prefix##_prev_safe(head, item); \
+ item; \
+ item = from, from = prefix##_prev_safe(head, from))
/* non-const variants. these wrappers are the same for all the types, so
* bundle them together here.
@@ -57,6 +73,16 @@ macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
return (type *)prefix ## _const_next(h, item); \
} \
/* ... */
+#define TYPESAFE_LAST_PREV(prefix, type) \
+macro_pure type *prefix ## _last(struct prefix##_head *h) \
+{ \
+ return (type *)prefix ## _const_last(h); \
+} \
+macro_pure type *prefix ## _prev(struct prefix##_head *h, type *item) \
+{ \
+ return (type *)prefix ## _const_prev(h, item); \
+} \
+/* ... */
#define TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _find(struct prefix##_head *h, \
const type *item) \
@@ -398,12 +424,34 @@ macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
return container_of(ditem->next, type, field.di); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
+macro_pure const type *prefix ## _const_last(const struct prefix##_head *h) \
+{ \
+ const struct dlist_item *ditem = h->dh.hitem.prev; \
+ if (ditem == &h->dh.hitem) \
+ return NULL; \
+ return container_of(ditem, type, field.di); \
+} \
+macro_pure const type *prefix ## _const_prev(const struct prefix##_head *h, \
+ const type *item) \
+{ \
+ const struct dlist_item *ditem = &item->field.di; \
+ if (ditem->prev == &h->dh.hitem) \
+ return NULL; \
+ return container_of(ditem->prev, type, field.di); \
+} \
+TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
if (!item) \
return NULL; \
return prefix ## _next(h, item); \
} \
+macro_pure type *prefix ## _prev_safe(struct prefix##_head *h, type *item) \
+{ \
+ if (!item) \
+ return NULL; \
+ return prefix ## _prev(h, item); \
+} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
return h->dh.count; \
diff --git a/lib/vty.c b/lib/vty.c
index 6aa8a0bbb5..619d51e1ce 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -65,7 +65,7 @@ DEFINE_MTYPE_STATIC(LIB, VTY_HIST, "VTY history");
DECLARE_DLIST(vtys, struct vty, itm);
/* Vty events */
-enum event {
+enum vty_event {
VTY_SERV,
VTY_READ,
VTY_WRITE,
@@ -90,8 +90,8 @@ struct vty_serv {
DECLARE_DLIST(vtyservs, struct vty_serv, itm);
-static void vty_event_serv(enum event event, struct vty_serv *);
-static void vty_event(enum event, struct vty *);
+static void vty_event_serv(enum vty_event event, struct vty_serv *);
+static void vty_event(enum vty_event, struct vty *);
/* Extern host structure from command.c */
extern struct host host;
@@ -2033,6 +2033,7 @@ static int vtysh_do_pass_fd(struct vty *vty)
struct cmsghdr *cmh = CMSG_FIRSTHDR(&mh);
ssize_t ret;
+ memset(&u.buf, 0, sizeof(u.buf));
cmh->cmsg_level = SOL_SOCKET;
cmh->cmsg_type = SCM_RIGHTS;
cmh->cmsg_len = CMSG_LEN(sizeof(int));
@@ -2683,7 +2684,7 @@ int vty_config_node_exit(struct vty *vty)
/* Master of the threads. */
static struct thread_master *vty_master;
-static void vty_event_serv(enum event event, struct vty_serv *vty_serv)
+static void vty_event_serv(enum vty_event event, struct vty_serv *vty_serv)
{
switch (event) {
case VTY_SERV:
@@ -2701,7 +2702,7 @@ static void vty_event_serv(enum event event, struct vty_serv *vty_serv)
}
}
-static void vty_event(enum event event, struct vty *vty)
+static void vty_event(enum vty_event event, struct vty *vty)
{
switch (event) {
#ifdef VTYSH
diff --git a/lib/wheel.c b/lib/wheel.c
index 463410bea4..6e9c88de9d 100644
--- a/lib/wheel.c
+++ b/lib/wheel.c
@@ -40,7 +40,6 @@ static void wheel_timer_thread_helper(struct thread *t)
void *data;
wheel = THREAD_ARG(t);
- THREAD_OFF(wheel->timer);
wheel->curr_slot += wheel->slots_to_skip;
@@ -95,7 +94,7 @@ struct timer_wheel *wheel_init(struct thread_master *master, int period,
wheel->nexttime = period / slots;
wheel->wheel_slot_lists = XCALLOC(MTYPE_TIMER_WHEEL_LIST,
- slots * sizeof(struct listnode *));
+ slots * sizeof(struct list *));
for (i = 0; i < slots; i++)
wheel->wheel_slot_lists[i] = list_new();
diff --git a/lib/zclient.c b/lib/zclient.c
index cfccb21667..0c34214151 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -47,10 +47,10 @@ DEFINE_MTYPE_STATIC(LIB, ZCLIENT, "Zclient");
DEFINE_MTYPE_STATIC(LIB, REDIST_INST, "Redistribution instance IDs");
/* Zebra client events. */
-enum event { ZCLIENT_SCHEDULE, ZCLIENT_READ, ZCLIENT_CONNECT };
+enum zclient_event { ZCLIENT_SCHEDULE, ZCLIENT_READ, ZCLIENT_CONNECT };
/* Prototype for event manager. */
-static void zclient_event(enum event, struct zclient *);
+static void zclient_event(enum zclient_event, struct zclient *);
static void zebra_interface_if_set_value(struct stream *s,
struct interface *ifp);
@@ -768,9 +768,9 @@ static void zclient_connect(struct thread *t)
}
enum zclient_send_status zclient_send_rnh(struct zclient *zclient, int command,
- const struct prefix *p,
- bool connected,
- bool resolve_via_def, vrf_id_t vrf_id)
+ const struct prefix *p, safi_t safi,
+ bool connected, bool resolve_via_def,
+ vrf_id_t vrf_id)
{
struct stream *s;
@@ -779,7 +779,7 @@ enum zclient_send_status zclient_send_rnh(struct zclient *zclient, int command,
zclient_create_header(s, command, vrf_id);
stream_putc(s, (connected) ? 1 : 0);
stream_putc(s, (resolve_via_def) ? 1 : 0);
- stream_putw(s, SAFI_UNICAST);
+ stream_putw(s, safi);
stream_putw(s, PREFIX_FAMILY(p));
stream_putc(s, p->prefixlen);
switch (PREFIX_FAMILY(p)) {
@@ -1924,7 +1924,8 @@ const char *zapi_nexthop2str(const struct zapi_nexthop *znh, char *buf,
/*
* Decode the nexthop-tracking update message
*/
-bool zapi_nexthop_update_decode(struct stream *s, struct zapi_route *nhr)
+bool zapi_nexthop_update_decode(struct stream *s, struct prefix *match,
+ struct zapi_route *nhr)
{
uint32_t i;
@@ -1932,6 +1933,22 @@ bool zapi_nexthop_update_decode(struct stream *s, struct zapi_route *nhr)
STREAM_GETL(s, nhr->message);
STREAM_GETW(s, nhr->safi);
+ STREAM_GETW(s, match->family);
+ STREAM_GETC(s, match->prefixlen);
+ /*
+ * What we got told to match against
+ */
+ switch (match->family) {
+ case AF_INET:
+ STREAM_GET(&match->u.prefix4.s_addr, s, IPV4_MAX_BYTELEN);
+ break;
+ case AF_INET6:
+ STREAM_GET(&match->u.prefix6, s, IPV6_MAX_BYTELEN);
+ break;
+ }
+ /*
+ * What we matched against
+ */
STREAM_GETW(s, nhr->prefix.family);
STREAM_GETC(s, nhr->prefix.prefixlen);
switch (nhr->prefix.family) {
@@ -4038,7 +4055,7 @@ void zclient_redistribute_default(int command, struct zclient *zclient,
zebra_redistribute_default_send(command, zclient, afi, vrf_id);
}
-static void zclient_event(enum event event, struct zclient *zclient)
+static void zclient_event(enum zclient_event event, struct zclient *zclient)
{
switch (event) {
case ZCLIENT_SCHEDULE:
diff --git a/lib/zclient.h b/lib/zclient.h
index ca62b1afeb..7e1283d830 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -1071,7 +1071,8 @@ extern enum zclient_send_status zclient_route_send(uint8_t, struct zclient *,
struct zapi_route *);
extern enum zclient_send_status
zclient_send_rnh(struct zclient *zclient, int command, const struct prefix *p,
- bool connected, bool resolve_via_default, vrf_id_t vrf_id);
+ safi_t safi, bool connected, bool resolve_via_default,
+ vrf_id_t vrf_id);
int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh,
uint32_t api_flags, uint32_t api_message);
extern int zapi_route_encode(uint8_t, struct stream *, struct zapi_route *);
@@ -1111,7 +1112,17 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh,
const struct nexthop *nh);
int zapi_backup_nexthop_from_nexthop(struct zapi_nexthop *znh,
const struct nexthop *nh);
-extern bool zapi_nexthop_update_decode(struct stream *s,
+/*
+ * match -> the prefix that the calling daemon asked to be matched
+ * against.
+ * nhr->prefix -> the actual prefix that was matched against in the
+ * rib itself.
+ *
+ * The two can differ because the longest-prefix match may resolve via a
+ * covering route. This lets the upper-level protocol decide whether or
+ * not it wants to use the matched prefix.
+ */
+extern bool zapi_nexthop_update_decode(struct stream *s, struct prefix *match,
struct zapi_route *nhr);
const char *zapi_nexthop2str(const struct zapi_nexthop *znh, char *buf,
int bufsize);
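
Note: taken together, the two prototype changes above mean a daemon now passes the SAFI when registering and receives both prefixes when decoding an update. A rough caller sketch (function names and surrounding logic are hypothetical; the ospf6d and pbrd hunks below show the real conversions):

#include "zclient.h"
#include "log.h"

/* registration side: the SAFI is now explicit */
static void track_nexthop(struct zclient *zc, const struct prefix *p,
			  vrf_id_t vrf_id)
{
	if (zclient_send_rnh(zc, ZEBRA_NEXTHOP_REGISTER, p, SAFI_UNICAST,
			     false, false, vrf_id) == ZCLIENT_SEND_FAILURE)
		zlog_warn("failed to send RNH registration to zebra");
}

/* update side: 'matched' is what this daemon asked to track, 'nhr.prefix'
 * is the covering route zebra actually resolved it against */
static int my_nexthop_update(ZAPI_CALLBACK_ARGS)
{
	struct prefix matched;
	struct zapi_route nhr;

	if (!zapi_nexthop_update_decode(zclient->ibuf, &matched, &nhr)) {
		zlog_err("%s: failed to decode nexthop update", __func__);
		return 0;
	}

	/* ... compare matched vs. nhr.prefix and act accordingly ... */
	return 0;
}
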
diff --git a/lib/zlog.c b/lib/zlog.c
index 85606d2624..e0bb34a258 100644
--- a/lib/zlog.c
+++ b/lib/zlog.c
@@ -401,7 +401,7 @@ void zlog_tls_buffer_flush(void)
return;
rcu_read_lock();
- frr_each (zlog_targets, &zlog_targets, zt) {
+ frr_each_safe (zlog_targets, &zlog_targets, zt) {
if (!zt->logfn)
continue;
@@ -431,7 +431,7 @@ static void vzlog_notls(const struct xref_logmsg *xref, int prio,
msg->stackbufsz = sizeof(stackbuf);
rcu_read_lock();
- frr_each (zlog_targets, &zlog_targets, zt) {
+ frr_each_safe (zlog_targets, &zlog_targets, zt) {
if (prio > zt->prio_min)
continue;
if (!zt->logfn)
diff --git a/lib/zlog_live.c b/lib/zlog_live.c
index fbe0e5ee49..931aa3461d 100644
--- a/lib/zlog_live.c
+++ b/lib/zlog_live.c
@@ -22,6 +22,7 @@
#include "frrcu.h"
#include "zlog.h"
#include "printfrr.h"
+#include "network.h"
DEFINE_MTYPE_STATIC(LOG, LOG_LIVE, "log vtysh live target");
@@ -39,6 +40,7 @@ struct zlt_live {
struct rcu_head head_self;
atomic_uint_fast32_t state;
+ atomic_uint_fast32_t lost_msgs;
};
static void zlog_live(struct zlog_target *zt, struct zlog_msg *msgs[],
@@ -63,14 +65,16 @@ static void zlog_live(struct zlog_target *zt, struct zlog_msg *msgs[],
for (i = 0; i < nmsgs; i++) {
const struct fmt_outpos *argpos;
- size_t n_argpos, arghdrlen;
+ size_t n_argpos, texthdrlen;
struct zlog_msg *msg = msgs[i];
int prio = zlog_msg_prio(msg);
+ const struct xref_logmsg *xref;
+ intmax_t pid, tid;
if (prio > zt->prio_min)
continue;
- zlog_msg_args(msg, &arghdrlen, &n_argpos, &argpos);
+ zlog_msg_args(msg, &texthdrlen, &n_argpos, &argpos);
mmh->msg_hdr.msg_iov = iov;
@@ -89,14 +93,29 @@ static void zlog_live(struct zlog_target *zt, struct zlog_msg *msgs[],
iov++;
zlog_msg_tsraw(msg, &ts);
+ zlog_msg_pid(msg, &pid, &tid);
+ xref = zlog_msg_xref(msg);
hdr->ts_sec = ts.tv_sec;
hdr->ts_nsec = ts.tv_nsec;
- hdr->prio = zlog_msg_prio(msg);
+ hdr->pid = pid;
+ hdr->tid = tid;
+ hdr->lost_msgs = atomic_load_explicit(&zte->lost_msgs,
+ memory_order_relaxed);
+ hdr->prio = prio;
hdr->flags = 0;
hdr->textlen = textlen;
- hdr->arghdrlen = arghdrlen;
+ hdr->texthdrlen = texthdrlen;
hdr->n_argpos = n_argpos;
+ if (xref) {
+ memcpy(hdr->uid, xref->xref.xrefdata->uid,
+ sizeof(hdr->uid));
+ hdr->ec = xref->ec;
+ } else {
+ memset(hdr->uid, 0, sizeof(hdr->uid));
+ hdr->ec = 0;
+ }
+ hdr->hdrlen = sizeof(*hdr) + sizeof(*argpos) * n_argpos;
mmh->msg_hdr.msg_iovlen = iov - mmh->msg_hdr.msg_iov;
mmh++;
@@ -109,6 +128,12 @@ static void zlog_live(struct zlog_target *zt, struct zlog_msg *msgs[],
for (size_t msgpos = 0; msgpos < msgtotal; msgpos += sent) {
sent = sendmmsg(fd, mmhs + msgpos, msgtotal - msgpos, 0);
+ if (sent <= 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+ atomic_fetch_add_explicit(&zte->lost_msgs,
+ msgtotal - msgpos,
+ memory_order_relaxed);
+ break;
+ }
if (sent <= 0)
goto out_err;
}
@@ -134,7 +159,7 @@ static void zlog_live_sigsafe(struct zlog_target *zt, const char *text,
size_t len)
{
struct zlt_live *zte = container_of(zt, struct zlt_live, zt);
- struct zlog_live_hdr hdr[1];
+ struct zlog_live_hdr hdr[1] = {};
struct iovec iovs[2], *iov = iovs;
struct timespec ts;
int fd;
@@ -143,14 +168,12 @@ static void zlog_live_sigsafe(struct zlog_target *zt, const char *text,
if (fd < 0)
return;
- clock_gettime(CLOCK_MONOTONIC, &ts);
+ clock_gettime(CLOCK_REALTIME, &ts);
hdr->ts_sec = ts.tv_sec;
hdr->ts_nsec = ts.tv_nsec;
hdr->prio = LOG_CRIT;
- hdr->flags = 0;
hdr->textlen = len;
- hdr->n_argpos = 0;
iov->iov_base = (char *)hdr;
iov->iov_len = sizeof(hdr);
@@ -166,8 +189,6 @@ static void zlog_live_sigsafe(struct zlog_target *zt, const char *text,
void zlog_live_open(struct zlog_live_cfg *cfg, int prio_min, int *other_fd)
{
int sockets[2];
- struct zlt_live *zte;
- struct zlog_target *zt;
if (cfg->target)
zlog_live_close(cfg);
@@ -192,12 +213,23 @@ void zlog_live_open(struct zlog_live_cfg *cfg, int prio_min, int *other_fd)
shutdown(sockets[0], SHUT_RD);
*other_fd = sockets[1];
+ zlog_live_open_fd(cfg, prio_min, sockets[0]);
+}
+
+void zlog_live_open_fd(struct zlog_live_cfg *cfg, int prio_min, int fd)
+{
+ struct zlt_live *zte;
+ struct zlog_target *zt;
+
+ if (cfg->target)
+ zlog_live_close(cfg);
zt = zlog_target_clone(MTYPE_LOG_LIVE, NULL, sizeof(*zte));
zte = container_of(zt, struct zlt_live, zt);
cfg->target = zte;
- zte->fd = sockets[0];
+ set_nonblocking(fd);
+ zte->fd = fd;
zte->zt.prio_min = prio_min;
zte->zt.logfn = zlog_live;
zte->zt.logfn_sigsafe = zlog_live_sigsafe;
diff --git a/lib/zlog_live.h b/lib/zlog_live.h
index c948baeab1..55e60ae674 100644
--- a/lib/zlog_live.h
+++ b/lib/zlog_live.h
@@ -20,13 +20,42 @@
#include "printfrr.h"
struct zlog_live_hdr {
+ /* timestamp (CLOCK_REALTIME) */
uint64_t ts_sec;
uint32_t ts_nsec;
+
+ /* length of zlog_live_hdr, including variable length bits and
+ * possible future extensions - aka start of text
+ */
+ uint32_t hdrlen;
+
+ /* process & thread ID, meaning depends on OS */
+ int64_t pid;
+ int64_t tid;
+
+ /* number of lost messages due to best-effort non-blocking mode */
+ uint32_t lost_msgs;
+ /* syslog priority value */
uint32_t prio;
+ /* flags: currently unused */
uint32_t flags;
+ /* length of message text - extra data (e.g. future key/value metadata)
+ * may follow after it
+ */
uint32_t textlen;
+ /* length of "[XXXXX-XXXXX][EC 0] " header; consumer may want to skip
+ * over it if using the raw values below. Note that this text may be
+ * absent depending on "log error-category" and "log unique-id"
+ * settings
+ */
+ uint32_t texthdrlen;
+
+ /* xref unique identifier, "XXXXX-XXXXX\0" = 12 bytes */
+ char uid[12];
+ /* EC value */
+ uint32_t ec;
- uint32_t arghdrlen;
+ /* recorded printf formatting argument positions (variable length) */
uint32_t n_argpos;
struct fmt_outpos argpos[0];
};
@@ -41,6 +70,7 @@ struct zlog_live_cfg {
extern void zlog_live_open(struct zlog_live_cfg *cfg, int prio_min,
int *other_fd);
+extern void zlog_live_open_fd(struct zlog_live_cfg *cfg, int prio_min, int fd);
static inline bool zlog_live_is_null(struct zlog_live_cfg *cfg)
{
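
Note: with the extended header above, a consumer on the receiving fd can locate the message text via hdrlen and optionally skip the "[UID][EC n] " prefix via texthdrlen. A minimal reader sketch (assumes one complete record per read and omits argpos decoding and bounds checking):

#include <stdio.h>
#include <unistd.h>

#include "zlog_live.h"

static void print_one_record(int fd)
{
	union {
		struct zlog_live_hdr hdr;
		char raw[8192];
	} u;
	ssize_t n = read(fd, u.raw, sizeof(u.raw));

	if (n < (ssize_t)sizeof(u.hdr) || n < (ssize_t)u.hdr.hdrlen)
		return;

	const char *text = u.raw + u.hdr.hdrlen;   /* hdrlen = start of text */
	const char *msg = text + u.hdr.texthdrlen; /* past "[UID][EC n] ", if any */

	printf("prio=%u uid=%.12s ec=%u lost=%u: %.*s\n",
	       u.hdr.prio, u.hdr.uid, u.hdr.ec, u.hdr.lost_msgs,
	       (int)(u.hdr.textlen - u.hdr.texthdrlen), msg);
}
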
diff --git a/ospf6d/ospf6_flood.c b/ospf6d/ospf6_flood.c
index 5fed6dfe17..cc82084e5e 100644
--- a/ospf6d/ospf6_flood.c
+++ b/ospf6d/ospf6_flood.c
@@ -464,11 +464,28 @@ void ospf6_flood_interface(struct ospf6_neighbor *from, struct ospf6_lsa *lsa,
lsa->header->type, lsa->header->id,
lsa->header->adv_router, on->retrans_list);
if (!old) {
+ struct ospf6_lsa *orig;
+ struct ospf6_lsdb *lsdb;
+
if (is_debug)
zlog_debug(
"Increment %s from retrans_list of %s",
lsa->name, on->name);
- ospf6_increment_retrans_count(lsa);
+
+ /* Increment the retrans count on the original
+ * copy of the LSA, if present, to keep the
+ * counter consistent.
+ */
+
+ lsdb = ospf6_get_scoped_lsdb(lsa);
+ orig = ospf6_lsdb_lookup(
+ lsa->header->type, lsa->header->id,
+ lsa->header->adv_router, lsdb);
+ if (orig)
+ ospf6_increment_retrans_count(orig);
+ else
+ ospf6_increment_retrans_count(lsa);
+
ospf6_lsdb_add(ospf6_lsa_copy(lsa),
on->retrans_list);
thread_add_timer(
diff --git a/ospf6d/ospf6_gr.c b/ospf6d/ospf6_gr.c
index d618ed86e0..87407245b3 100644
--- a/ospf6d/ospf6_gr.c
+++ b/ospf6d/ospf6_gr.c
@@ -689,7 +689,7 @@ DEFPY(ospf6_graceful_restart_prepare, ospf6_graceful_restart_prepare_cmd,
"graceful-restart prepare ipv6 ospf",
"Graceful Restart commands\n"
"Prepare upcoming graceful restart\n" IPV6_STR
- "Prepare to restart the OSPFv3 process")
+ "Prepare to restart the OSPFv3 process\n")
{
ospf6_gr_prepare();
diff --git a/ospf6d/ospf6_gr_helper.c b/ospf6d/ospf6_gr_helper.c
index 5f9b7f0294..7b5ffc920b 100644
--- a/ospf6d/ospf6_gr_helper.c
+++ b/ospf6d/ospf6_gr_helper.c
@@ -960,13 +960,22 @@ static void show_ospf6_gr_helper_details(struct vty *vty, struct ospf6 *ospf6,
json, "supportedGracePeriod",
ospf6->ospf6_helper_cfg.supported_grace_time);
- if (ospf6->ospf6_helper_cfg.last_exit_reason
- != OSPF6_GR_HELPER_EXIT_NONE)
+#if CONFDATE > 20230131
+CPP_NOTICE("Remove JSON object commands with keys starting with capital")
+#endif
+ if (ospf6->ospf6_helper_cfg.last_exit_reason !=
+ OSPF6_GR_HELPER_EXIT_NONE) {
json_object_string_add(
json, "LastExitReason",
ospf6_exit_reason_desc
[ospf6->ospf6_helper_cfg
.last_exit_reason]);
+ json_object_string_add(
+ json, "lastExitReason",
+ ospf6_exit_reason_desc
+ [ospf6->ospf6_helper_cfg
+ .last_exit_reason]);
+ }
if (OSPF6_HELPER_ENABLE_RTR_COUNT(ospf6)) {
struct json_object *json_rid_array =
@@ -995,12 +1004,18 @@ static void show_ospf6_gr_helper_details(struct vty *vty, struct ospf6 *ospf6,
json_object_object_get_ex(
json, "Neighbors",
&json_neighbors);
+ json_object_object_get_ex(
+ json, "neighbors",
+ &json_neighbors);
if (!json_neighbors) {
json_neighbors =
json_object_new_object();
json_object_object_add(
json, "Neighbors",
json_neighbors);
+ json_object_object_add(
+ json, "neighbors",
+ json_neighbors);
}
}
diff --git a/ospf6d/ospf6_lsa.c b/ospf6d/ospf6_lsa.c
index 740a94ba84..779076f387 100644
--- a/ospf6d/ospf6_lsa.c
+++ b/ospf6d/ospf6_lsa.c
@@ -85,9 +85,13 @@ static int ospf6_unknown_lsa_show(struct vty *vty, struct ospf6_lsa *lsa,
start = (uint8_t *)lsa->header + sizeof(struct ospf6_lsa_header);
end = (uint8_t *)lsa->header + ntohs(lsa->header->length);
- if (use_json)
+#if CONFDATE > 20230131
+CPP_NOTICE("Remove JSON object commands with keys starting with capital")
+#endif
+ if (use_json) {
json_object_string_add(json_obj, "LsaType", "unknown");
- else {
+ json_object_string_add(json_obj, "lsaType", "unknown");
+ } else {
vty_out(vty, " Unknown contents:\n");
for (current = start; current < end; current++) {
if ((current - start) % 16 == 0)
diff --git a/ospf6d/ospf6_lsdb.c b/ospf6d/ospf6_lsdb.c
index 889ab16b11..7a4c49a158 100644
--- a/ospf6d/ospf6_lsdb.c
+++ b/ospf6d/ospf6_lsdb.c
@@ -132,6 +132,8 @@ void ospf6_lsdb_add(struct ospf6_lsa *lsa, struct ospf6_lsdb *lsdb)
(*lsdb->hook_add)(lsa);
}
} else {
+ lsa->retrans_count = old->retrans_count;
+
if (OSPF6_LSA_IS_CHANGED(old, lsa)) {
if (OSPF6_LSA_IS_MAXAGE(lsa)) {
if (lsdb->hook_remove) {
diff --git a/ospf6d/ospf6_nssa.c b/ospf6d/ospf6_nssa.c
index 1220c32783..53b45d6ca3 100644
--- a/ospf6d/ospf6_nssa.c
+++ b/ospf6d/ospf6_nssa.c
@@ -613,7 +613,8 @@ struct ospf6_lsa *ospf6_translated_nssa_refresh(struct ospf6_area *area,
return new;
}
-static void ospf6_abr_translate_nssa(struct ospf6_area *area, struct ospf6_lsa *lsa)
+static void ospf6_abr_translate_nssa(struct ospf6_area *area,
+ struct ospf6_lsa *lsa)
{
/* Incoming Type-7 or aggregated Type-7
*
@@ -625,7 +626,7 @@ static void ospf6_abr_translate_nssa(struct ospf6_area *area, struct ospf6_lsa *
* Later, any Unapproved Translated Type-5's are flushed/discarded
*/
- struct ospf6_lsa *old = NULL, *new = NULL;
+ struct ospf6_lsa *old = NULL;
struct ospf6_as_external_lsa *nssa_lsa;
struct prefix prefix;
struct ospf6_route *match;
@@ -661,11 +662,36 @@ static void ospf6_abr_translate_nssa(struct ospf6_area *area, struct ospf6_lsa *
return;
}
+ /* Find the type-5 LSA in the area-range table */
+ match = ospf6_route_lookup_bestmatch(&prefix, area->nssa_range_table);
+ if (match && CHECK_FLAG(match->flag, OSPF6_ROUTE_NSSA_RANGE)) {
+ if (prefix_same(&prefix, &match->prefix)) {
+ /* The prefix range is being removed,
+ * no need to refresh
+ */
+ if (CHECK_FLAG(match->flag, OSPF6_ROUTE_REMOVE))
+ return;
+ } else {
+ if (!CHECK_FLAG(match->flag, OSPF6_ROUTE_REMOVE)) {
+ if (IS_OSPF6_DEBUG_NSSA)
+ zlog_debug(
+ "%s: LSA Id %pI4 suppressed by range %pFX of area %s",
+ __func__, &lsa->header->id,
+ &match->prefix, area->name);
+ /* LSA will be suppressed by area-range command,
+ * no need to refresh
+ */
+ return;
+ }
+ }
+ }
+
/* Find the existing AS-External LSA for this prefix */
- match = ospf6_route_lookup(&prefix, ospf6->external_table);
+ match = ospf6_route_lookup(&prefix, ospf6->route_table);
if (match) {
- old = ospf6_lsdb_lookup(OSPF6_LSTYPE_AS_EXTERNAL,
- match->path.origin.id, ospf6->router_id,
+ old = ospf6_lsdb_lookup(htons(OSPF6_LSTYPE_AS_EXTERNAL),
+ lsa->external_lsa_id, ospf6->router_id,
ospf6->lsdb);
}
@@ -675,20 +701,15 @@ static void ospf6_abr_translate_nssa(struct ospf6_area *area, struct ospf6_lsa *
return;
}
- if (old) {
+ if (old && !OSPF6_LSA_IS_MAXAGE(old)) {
if (IS_OSPF6_DEBUG_NSSA)
zlog_debug(
- "%s : found old translated LSA Id %pI4, refreshing",
+ "%s : found old translated LSA Id %pI4, skip",
__func__, &old->header->id);
- /* refresh */
- new = ospf6_translated_nssa_refresh(area, lsa, old);
- if (!new) {
- if (IS_OSPF6_DEBUG_NSSA)
- zlog_debug(
- "%s : could not refresh translated LSA Id %pI4",
- __func__, &old->header->id);
- }
+ UNSET_FLAG(old->flag, OSPF6_LSA_UNAPPROVED);
+ return;
+
} else {
/* no existing external route for this LSA Id
* originate translated LSA
diff --git a/ospf6d/ospf6_top.c b/ospf6d/ospf6_top.c
index e64bf24b66..2a6d7cd099 100644
--- a/ospf6d/ospf6_top.c
+++ b/ospf6d/ospf6_top.c
@@ -2017,6 +2017,9 @@ ospf6_show_vrf_name(struct vty *vty, struct ospf6 *ospf6,
}
}
+#if CONFDATE > 20230131
+CPP_NOTICE("Remove JSON object commands with keys containing whitespaces")
+#endif
static int
ospf6_show_summary_address(struct vty *vty, struct ospf6 *ospf6,
json_object *json,
@@ -2037,7 +2040,9 @@ ospf6_show_summary_address(struct vty *vty, struct ospf6 *ospf6,
ospf6_show_vrf_name(vty, ospf6, json_vrf);
json_object_int_add(json_vrf, "aggregation delay interval",
- ospf6->aggr_delay_interval);
+ ospf6->aggr_delay_interval);
+ json_object_int_add(json_vrf, "aggregationDelayInterval",
+ ospf6->aggr_delay_interval);
}
@@ -2062,12 +2067,18 @@ ospf6_show_summary_address(struct vty *vty, struct ospf6 *ospf6,
json_object_string_add(json_aggr,
"Summary address",
buf);
+ json_object_string_add(json_aggr, "summaryAddress",
+ buf);
json_object_string_add(
json_aggr, "Metric-type",
(aggr->mtype == DEFAULT_METRIC_TYPE)
? "E2"
: "E1");
+ json_object_string_add(
+ json_aggr, "metricType",
+ (aggr->mtype == DEFAULT_METRIC_TYPE) ? "E2"
+ : "E1");
json_object_int_add(json_aggr, "Metric",
(aggr->metric != -1)
@@ -2080,6 +2091,8 @@ ospf6_show_summary_address(struct vty *vty, struct ospf6 *ospf6,
json_object_int_add(json_aggr,
"External route count",
OSPF6_EXTERNAL_RT_COUNT(aggr));
+ json_object_int_add(json_aggr, "externalRouteCount",
+ OSPF6_EXTERNAL_RT_COUNT(aggr));
if (OSPF6_EXTERNAL_RT_COUNT(aggr) && detail) {
json_object_int_add(json_aggr, "ID",
diff --git a/ospf6d/ospf6_zebra.c b/ospf6d/ospf6_zebra.c
index e279d0411b..18b1959b9b 100644
--- a/ospf6d/ospf6_zebra.c
+++ b/ospf6d/ospf6_zebra.c
@@ -155,8 +155,8 @@ void ospf6_zebra_import_default_route(struct ospf6 *ospf6, bool unreg)
zserv_command_string(command), &prefix,
ospf6->vrf_id);
- if (zclient_send_rnh(zclient, command, &prefix, false, true,
- ospf6->vrf_id)
+ if (zclient_send_rnh(zclient, command, &prefix, SAFI_UNICAST, false,
+ true, ospf6->vrf_id)
== ZCLIENT_SEND_FAILURE)
flog_err(EC_LIB_ZAPI_SOCKET, "%s: zclient_send_rnh() failed",
__func__);
@@ -166,19 +166,20 @@ static int ospf6_zebra_import_check_update(ZAPI_CALLBACK_ARGS)
{
struct ospf6 *ospf6;
struct zapi_route nhr;
+ struct prefix matched;
ospf6 = ospf6_lookup_by_vrf_id(vrf_id);
if (ospf6 == NULL || !IS_OSPF6_ASBR(ospf6))
return 0;
- if (!zapi_nexthop_update_decode(zclient->ibuf, &nhr)) {
+ if (!zapi_nexthop_update_decode(zclient->ibuf, &matched, &nhr)) {
zlog_err("%s[%u]: Failure to decode route", __func__,
ospf6->vrf_id);
return -1;
}
- if (nhr.prefix.family != AF_INET6 || nhr.prefix.prefixlen != 0
- || nhr.type == ZEBRA_ROUTE_OSPF6)
+ if (matched.family != AF_INET6 || matched.prefixlen != 0 ||
+ nhr.type == ZEBRA_ROUTE_OSPF6)
return 0;
ospf6->nssa_default_import_check.status = !!nhr.nexthop_num;
diff --git a/ospfd/ospf_apiserver.c b/ospfd/ospf_apiserver.c
index 93e3b9a8e2..a624f4ce1e 100644
--- a/ospfd/ospf_apiserver.c
+++ b/ospfd/ospf_apiserver.c
@@ -278,7 +278,7 @@ struct ospf_apiserver *ospf_apiserver_new(int fd_sync, int fd_async)
return new;
}
-void ospf_apiserver_event(enum event event, int fd,
+void ospf_apiserver_event(enum ospf_apiserver_event event, int fd,
struct ospf_apiserver *apiserv)
{
switch (event) {
@@ -367,7 +367,7 @@ void ospf_apiserver_read(struct thread *thread)
struct ospf_apiserver *apiserv;
struct msg *msg;
int fd;
- enum event event;
+ enum ospf_apiserver_event event;
apiserv = THREAD_ARG(thread);
fd = THREAD_FD(thread);
@@ -710,7 +710,7 @@ static int ospf_apiserver_send_msg(struct ospf_apiserver *apiserv,
{
struct msg_fifo *fifo;
struct msg *msg2;
- enum event event;
+ enum ospf_apiserver_event event;
int fd;
switch (msg->hdr.msgtype) {
diff --git a/ospfd/ospf_apiserver.h b/ospfd/ospf_apiserver.h
index 3994c8d503..3d57737080 100644
--- a/ospfd/ospf_apiserver.h
+++ b/ospfd/ospf_apiserver.h
@@ -68,7 +68,7 @@ struct ospf_apiserver {
struct thread *t_async_write;
};
-enum event {
+enum ospf_apiserver_event {
OSPF_APISERVER_ACCEPT,
OSPF_APISERVER_SYNC_READ,
#ifdef USE_ASYNC_READ
@@ -88,7 +88,7 @@ extern int ospf_apiserver_init(void);
extern void ospf_apiserver_term(void);
extern struct ospf_apiserver *ospf_apiserver_new(int fd_sync, int fd_async);
extern void ospf_apiserver_free(struct ospf_apiserver *apiserv);
-extern void ospf_apiserver_event(enum event event, int fd,
+extern void ospf_apiserver_event(enum ospf_apiserver_event event, int fd,
struct ospf_apiserver *apiserv);
extern int ospf_apiserver_serv_sock_family(unsigned short port, int family);
extern void ospf_apiserver_accept(struct thread *thread);
diff --git a/ospfd/ospf_gr.c b/ospfd/ospf_gr.c
index ee1ca256e3..2521f2fce0 100644
--- a/ospfd/ospf_gr.c
+++ b/ospfd/ospf_gr.c
@@ -730,7 +730,7 @@ DEFPY(graceful_restart_prepare, graceful_restart_prepare_cmd,
"Graceful Restart commands\n"
"Prepare upcoming graceful restart\n"
IP_STR
- "Prepare to restart the OSPF process")
+ "Prepare to restart the OSPF process\n")
{
struct ospf *ospf;
struct listnode *node;
diff --git a/ospfd/ospf_ldp_sync.c b/ospfd/ospf_ldp_sync.c
index 9b3498bc1e..f6c1b43610 100644
--- a/ospfd/ospf_ldp_sync.c
+++ b/ospfd/ospf_ldp_sync.c
@@ -508,10 +508,17 @@ void ospf_ldp_sync_show_info(struct vty *vty, struct ospf *ospf,
if (CHECK_FLAG(ospf->ldp_sync_cmd.flags, LDP_SYNC_FLAG_ENABLE)) {
if (use_json) {
+#if CONFDATE > 20230131
+CPP_NOTICE("Remove JSON object commands with keys starting with capital")
+#endif
json_object_boolean_true_add(json_vrf,
"MplsLdpIgpSyncEnabled");
+ json_object_boolean_true_add(json_vrf,
+ "mplsLdpIgpSyncEnabled");
json_object_int_add(json_vrf, "MplsLdpIgpSyncHolddown",
ospf->ldp_sync_cmd.holddown);
+ json_object_int_add(json_vrf, "mplsLdpIgpSyncHolddown",
+ ospf->ldp_sync_cmd.holddown);
} else {
vty_out(vty, " MPLS LDP-IGP Sync is enabled\n");
if (ospf->ldp_sync_cmd.holddown == 0)
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index 780521bfe4..d4245bde7f 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -3432,7 +3432,7 @@ DEFUN (show_ip_ospf,
if (uj)
vty_json(vty, json);
else if (!ospf_output)
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty, "%% OSPF is not enabled\n");
return ret;
}
ospf = ospf_lookup_by_inst_name(inst, vrf_name);
@@ -3440,7 +3440,9 @@ DEFUN (show_ip_ospf,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf %s\n",
+ vrf_name);
return CMD_SUCCESS;
}
@@ -3451,7 +3453,8 @@ DEFUN (show_ip_ospf,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf default\n");
return CMD_SUCCESS;
}
@@ -4164,7 +4167,7 @@ DEFUN (show_ip_ospf_interface,
if (uj)
vty_json(vty, json);
else if (!ospf)
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty, "%% OSPF is not enabled\n");
return ret;
}
@@ -4173,7 +4176,9 @@ DEFUN (show_ip_ospf_interface,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf %s\n",
+ vrf_name);
return CMD_SUCCESS;
}
@@ -4187,7 +4192,8 @@ DEFUN (show_ip_ospf_interface,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf default\n");
return CMD_SUCCESS;
}
@@ -4572,7 +4578,7 @@ DEFUN (show_ip_ospf_neighbor,
if (uj)
vty_json(vty, json);
else if (!ospf)
- vty_out(vty, "OSPF instance not found\n");
+ vty_out(vty, "OSPF is not enabled\n");
return ret;
}
@@ -4582,7 +4588,9 @@ DEFUN (show_ip_ospf_neighbor,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf %s\n",
+ vrf_name);
return CMD_SUCCESS;
}
@@ -4593,7 +4601,8 @@ DEFUN (show_ip_ospf_neighbor,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf default\n");
return CMD_SUCCESS;
}
@@ -7123,11 +7132,13 @@ DEFUN (show_ip_ospf_database_max,
}
if (!ospf_output)
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty, "%% OSPF is not enabled\n");
} else {
ospf = ospf_lookup_by_inst_name(inst, vrf_name);
if (ospf == NULL || !ospf->oi_running) {
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf %s\n",
+ vrf_name);
return CMD_SUCCESS;
}
ret = (show_ip_ospf_database_common(
@@ -7138,7 +7149,7 @@ DEFUN (show_ip_ospf_database_max,
/* Display default ospf (instance 0) info */
ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
if (ospf == NULL || !ospf->oi_running) {
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty, "%% OSPF is not enabled in vrf default\n");
return CMD_SUCCESS;
}
@@ -7349,11 +7360,13 @@ DEFUN (show_ip_ospf_database_type_adv_router,
uj);
}
if (!ospf_output)
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty, "%% OSPF is not enabled\n");
} else {
ospf = ospf_lookup_by_inst_name(inst, vrf_name);
if ((ospf == NULL) || !ospf->oi_running) {
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf %s\n",
+ vrf_name);
return CMD_SUCCESS;
}
@@ -7364,7 +7377,7 @@ DEFUN (show_ip_ospf_database_type_adv_router,
/* Display default ospf (instance 0) info */
ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
if (ospf == NULL || !ospf->oi_running) {
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty, "%% OSPF is not enabled on vrf default\n");
return CMD_SUCCESS;
}
@@ -10185,10 +10198,17 @@ static int ospf_show_gr_helper_details(struct vty *vty, struct ospf *ospf,
json_object_int_add(json_vrf, "supportedGracePeriod",
ospf->supported_grace_time);
- if (ospf->last_exit_reason != OSPF_GR_HELPER_EXIT_NONE)
+#if CONFDATE > 20230131
+CPP_NOTICE("Remove JSON object commands with keys starting with capital")
+#endif
+ if (ospf->last_exit_reason != OSPF_GR_HELPER_EXIT_NONE) {
json_object_string_add(
json_vrf, "LastExitReason",
ospf_exit_reason2str(ospf->last_exit_reason));
+ json_object_string_add(
+ json_vrf, "lastExitReason",
+ ospf_exit_reason2str(ospf->last_exit_reason));
+ }
if (ospf->active_restarter_cnt)
json_object_int_add(json_vrf, "activeRestarterCnt",
@@ -10211,12 +10231,17 @@ static int ospf_show_gr_helper_details(struct vty *vty, struct ospf *ospf,
if (uj) {
json_object_object_get_ex(json_vrf, "Neighbors",
&json_neighbors);
+ json_object_object_get_ex(json_vrf, "neighbors",
+ &json_neighbors);
if (!json_neighbors) {
json_neighbors =
json_object_new_object();
json_object_object_add(json_vrf,
"Neighbors",
json_neighbors);
+ json_object_object_add(json_vrf,
+ "neighbors",
+ json_neighbors);
}
}
@@ -10448,7 +10473,9 @@ DEFPY (show_ip_ospf_gr_helper,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf %s\n",
+ vrf_name);
return CMD_SUCCESS;
}
@@ -10462,7 +10489,8 @@ DEFPY (show_ip_ospf_gr_helper,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf default\n");
return CMD_SUCCESS;
}
@@ -10514,6 +10542,9 @@ static void config_write_stub_router(struct vty *vty, struct ospf *ospf)
return;
}
+#if CONFDATE > 20230131
+CPP_NOTICE("Remove JSON object commands with keys containing whitespaces")
+#endif
static void show_ip_ospf_route_network(struct vty *vty, struct ospf *ospf,
struct route_table *rt,
json_object *json)
@@ -10621,6 +10652,12 @@ static void show_ip_ospf_route_network(struct vty *vty, struct ospf *ospf,
ifindex2ifname(
path->ifindex,
ospf->vrf_id));
+ json_object_string_add(
+ json_nexthop,
+ "directlyAttachedTo",
+ ifindex2ifname(
+ path->ifindex,
+ ospf->vrf_id));
} else {
vty_out(vty,
"%24s directly attached to %s\n",
@@ -10706,9 +10743,12 @@ static void show_ip_ospf_route_router(struct vty *vty, struct ospf *ospf,
json_object_string_addf(json_route, "area",
"%pI4",
&or->u.std.area_id);
- if (or->path_type == OSPF_PATH_INTER_AREA)
+ if (or->path_type == OSPF_PATH_INTER_AREA) {
json_object_boolean_true_add(json_route,
"IA");
+ json_object_boolean_true_add(json_route,
+ "ia");
+ }
if (or->u.std.flags & ROUTER_LSA_BORDER)
json_object_string_add(json_route,
"routerType",
@@ -10760,6 +10800,12 @@ static void show_ip_ospf_route_router(struct vty *vty, struct ospf *ospf,
ifindex2ifname(
path->ifindex,
ospf->vrf_id));
+ json_object_string_add(
+ json_nexthop,
+ "directlyAttachedTo",
+ ifindex2ifname(
+ path->ifindex,
+ ospf->vrf_id));
} else {
vty_out(vty,
"%24s directly attached to %s\n",
@@ -10886,6 +10932,12 @@ static void show_ip_ospf_route_external(struct vty *vty, struct ospf *ospf,
ifindex2ifname(
path->ifindex,
ospf->vrf_id));
+ json_object_string_add(
+ json_nexthop,
+ "directlyAttachedTo",
+ ifindex2ifname(
+ path->ifindex,
+ ospf->vrf_id));
} else {
vty_out(vty,
"%24s directly attached to %s\n",
@@ -10983,11 +11035,13 @@ DEFUN (show_ip_ospf_border_routers,
}
if (!ospf_output)
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty, "%% OSPF is not enabled\n");
} else {
ospf = ospf_lookup_by_inst_name(inst, vrf_name);
if (ospf == NULL || !ospf->oi_running) {
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf %s\n",
+ vrf_name);
return CMD_SUCCESS;
}
@@ -10998,7 +11052,7 @@ DEFUN (show_ip_ospf_border_routers,
/* Display default ospf (instance 0) info */
ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
if (ospf == NULL || !ospf->oi_running) {
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty, "%% OSPF is not enabled in vrf default\n");
return CMD_SUCCESS;
}
@@ -11125,7 +11179,7 @@ DEFUN (show_ip_ospf_route,
/* Keep Non-pretty format */
vty_json(vty, json);
} else if (!ospf_output)
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty, "%% OSPF is not enabled\n");
return ret;
}
@@ -11134,7 +11188,9 @@ DEFUN (show_ip_ospf_route,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf %s\n",
+ vrf_name);
return CMD_SUCCESS;
}
@@ -11145,7 +11201,8 @@ DEFUN (show_ip_ospf_route,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf default\n");
return CMD_SUCCESS;
}
@@ -11408,12 +11465,15 @@ static int ospf_show_summary_address(struct vty *vty, struct ospf *ospf,
ospf_show_vrf_name(ospf, vty, json_vrf, use_vrf);
- if (!uj)
+ if (!uj) {
vty_out(vty, "aggregation delay interval :%u(in seconds)\n\n",
ospf->aggr_delay_interval);
- else
+ } else {
json_object_int_add(json_vrf, "aggregation delay interval",
ospf->aggr_delay_interval);
+ json_object_int_add(json_vrf, "aggregationDelayInterval",
+ ospf->aggr_delay_interval);
+ }
for (rn = route_top(ospf->rt_aggr_tbl); rn; rn = route_next(rn))
if (rn->info) {
@@ -11432,21 +11492,37 @@ static int ospf_show_summary_address(struct vty *vty, struct ospf *ospf,
json_object_string_add(json_aggr,
"Summary address", buf);
+ json_object_string_add(json_aggr,
+ "summaryAddress", buf);
json_object_string_add(
json_aggr, "Metric-type",
(mtype == EXTERNAL_METRIC_TYPE_1)
? "E1"
: "E2");
+ json_object_string_add(
+ json_aggr, "metricType",
+ (mtype == EXTERNAL_METRIC_TYPE_1)
+ ? "E1"
+ : "E2");
+#if CONFDATE > 20230131
+CPP_NOTICE("Remove JSON object commands with keys starting with capital")
+#endif
json_object_int_add(json_aggr, "Metric", mval);
+ json_object_int_add(json_aggr, "metric", mval);
json_object_int_add(json_aggr, "Tag",
aggr->tag);
+ json_object_int_add(json_aggr, "tag",
+ aggr->tag);
json_object_int_add(
json_aggr, "External route count",
OSPF_EXTERNAL_RT_COUNT(aggr));
+ json_object_int_add(
+ json_aggr, "externalRouteCount",
+ OSPF_EXTERNAL_RT_COUNT(aggr));
if (OSPF_EXTERNAL_RT_COUNT(aggr) && detail) {
hash_walk(
@@ -11547,7 +11623,9 @@ DEFUN (show_ip_ospf_external_aggregator,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf %s\n",
+ vrf_name);
return CMD_SUCCESS;
}
@@ -11560,7 +11638,8 @@ DEFUN (show_ip_ospf_external_aggregator,
if (uj)
vty_json(vty, json);
else
- vty_out(vty, "%% OSPF instance not found\n");
+ vty_out(vty,
+ "%% OSPF is not enabled in vrf default\n");
return CMD_SUCCESS;
}
diff --git a/pathd/path_cli.c b/pathd/path_cli.c
index bfeea8c3db..4775aa36fb 100644
--- a/pathd/path_cli.c
+++ b/pathd/path_cli.c
@@ -527,7 +527,7 @@ DEFPY(srte_segment_list_segment, srte_segment_list_segment_cmd,
adj_src_ipv4, adj_dst_ipv4,
adj_src_ipv6, adj_dst_ipv6,
adj_src_ipv4_str, adj_dst_ipv4_str,
- adj_dst_ipv6_str, adj_src_ipv6_str);
+ adj_src_ipv6_str, adj_dst_ipv6_str);
if (status != CMD_SUCCESS)
return status;
} else {
diff --git a/pbrd/pbr_zebra.c b/pbrd/pbr_zebra.c
index b480d4072e..da4e3e1bc0 100644
--- a/pbrd/pbr_zebra.c
+++ b/pbrd/pbr_zebra.c
@@ -399,17 +399,19 @@ void route_delete(struct pbr_nexthop_group_cache *pnhgc, afi_t afi)
static int pbr_zebra_nexthop_update(ZAPI_CALLBACK_ARGS)
{
struct zapi_route nhr;
+ struct prefix matched;
uint32_t i;
- if (!zapi_nexthop_update_decode(zclient->ibuf, &nhr)) {
+ if (!zapi_nexthop_update_decode(zclient->ibuf, &matched, &nhr)) {
zlog_err("Failure to decode Nexthop update message");
return 0;
}
if (DEBUG_MODE_CHECK(&pbr_dbg_zebra, DEBUG_MODE_ALL)) {
- DEBUGD(&pbr_dbg_zebra, "%s: Received Nexthop update: %pFX",
- __func__, &nhr.prefix);
+ DEBUGD(&pbr_dbg_zebra,
+ "%s: Received Nexthop update: %pFX against %pFX",
+ __func__, &matched, &nhr.prefix);
DEBUGD(&pbr_dbg_zebra, "%s: (Nexthops(%u)", __func__,
nhr.nexthop_num);
@@ -423,6 +425,7 @@ static int pbr_zebra_nexthop_update(ZAPI_CALLBACK_ARGS)
}
}
+ nhr.prefix = matched;
pbr_nht_nexthop_update(&nhr);
return 1;
}
@@ -482,7 +485,8 @@ void pbr_send_rnh(struct nexthop *nhop, bool reg)
break;
}
- if (zclient_send_rnh(zclient, command, &p, false, false, nhop->vrf_id)
+ if (zclient_send_rnh(zclient, command, &p, SAFI_UNICAST, false, false,
+ nhop->vrf_id)
== ZCLIENT_SEND_FAILURE) {
zlog_warn("%s: Failure to send nexthop to zebra", __func__);
}
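
For context on the pbr_zebra.c hunks above: zapi_nexthop_update_decode() now also returns the prefix that the registration matched, and zclient_send_rnh() takes an explicit SAFI. A minimal client-side sketch built only on those two calls as shown above; the handler, the example_* names and the global zclient pointer are illustrative, not part of the change:

static int example_nexthop_update(ZAPI_CALLBACK_ARGS)
{
	struct zapi_route nhr;
	struct prefix matched;

	/* The decode fills in both the matched registration prefix and the
	 * resolved route. */
	if (!zapi_nexthop_update_decode(zclient->ibuf, &matched, &nhr))
		return 0;

	/* Callers keyed on nhr.prefix keep working by copying the matched
	 * prefix back, exactly as pbr_zebra.c does above. */
	nhr.prefix = matched;
	return 1;
}

static void example_send_rnh(struct prefix *p, vrf_id_t vrf_id, bool reg)
{
	int command = reg ? ZEBRA_NEXTHOP_REGISTER : ZEBRA_NEXTHOP_UNREGISTER;

	/* Registrations now name the SAFI explicitly. */
	if (zclient_send_rnh(zclient, command, p, SAFI_UNICAST, false, false,
			     vrf_id) == ZCLIENT_SEND_FAILURE)
		zlog_warn("%s: Failure to send nexthop to zebra", __func__);
}
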
diff --git a/pimd/pim6_cmd.c b/pimd/pim6_cmd.c
index 06790d3d63..7b3e04fdc0 100644
--- a/pimd/pim6_cmd.c
+++ b/pimd/pim6_cmd.c
@@ -210,6 +210,418 @@ DEFPY (no_ipv6_pim_register_suppress,
return pim_process_no_register_suppress_cmd(vty);
}
+DEFPY (interface_ipv6_pim,
+ interface_ipv6_pim_cmd,
+ "ipv6 pim",
+ IPV6_STR
+ PIM_STR)
+{
+ return pim_process_ip_pim_cmd(vty);
+}
+
+DEFPY (interface_no_ipv6_pim,
+ interface_no_ipv6_pim_cmd,
+ "no ipv6 pim",
+ NO_STR
+ IPV6_STR
+ PIM_STR)
+{
+ return pim_process_no_ip_pim_cmd(vty);
+}
+
+DEFPY (interface_ipv6_pim_drprio,
+ interface_ipv6_pim_drprio_cmd,
+ "ipv6 pim drpriority (1-4294967295)",
+ IPV6_STR
+ PIM_STR
+ "Set the Designated Router Election Priority\n"
+ "Value of the new DR Priority\n")
+{
+ return pim_process_ip_pim_drprio_cmd(vty, drpriority_str);
+}
+
+DEFPY (interface_no_ipv6_pim_drprio,
+ interface_no_ipv6_pim_drprio_cmd,
+ "no ipv6 pim drpriority [(1-4294967295)]",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Revert the Designated Router Priority to default\n"
+ "Old Value of the Priority\n")
+{
+ return pim_process_no_ip_pim_drprio_cmd(vty);
+}
+
+DEFPY (interface_ipv6_pim_hello,
+ interface_ipv6_pim_hello_cmd,
+ "ipv6 pim hello (1-65535) [(1-65535)]$hold",
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_HELLO_STR
+ IFACE_PIM_HELLO_TIME_STR
+ IFACE_PIM_HELLO_HOLD_STR)
+{
+ return pim_process_ip_pim_hello_cmd(vty, hello_str, hold_str);
+}
+
+DEFPY (interface_no_ipv6_pim_hello,
+ interface_no_ipv6_pim_hello_cmd,
+ "no ipv6 pim hello [(1-65535) [(1-65535)]]",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_HELLO_STR
+ IGNORED_IN_NO_STR
+ IGNORED_IN_NO_STR)
+{
+ return pim_process_no_ip_pim_hello_cmd(vty);
+}
+
+DEFPY (interface_ipv6_pim_activeactive,
+ interface_ipv6_pim_activeactive_cmd,
+ "[no] ipv6 pim active-active",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Mark interface as Active-Active for MLAG operations\n")
+{
+ return pim_process_ip_pim_activeactive_cmd(vty, no);
+}
+
+DEFPY_HIDDEN (interface_ipv6_pim_ssm,
+ interface_ipv6_pim_ssm_cmd,
+ "ipv6 pim ssm",
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_STR)
+{
+ int ret;
+
+ ret = pim_process_ip_pim_cmd(vty);
+
+ if (ret != NB_OK)
+ return ret;
+
+ vty_out(vty,
+ "Enabled PIM SM on interface; configure PIM SSM range if needed\n");
+
+ return NB_OK;
+}
+
+DEFPY_HIDDEN (interface_no_ipv6_pim_ssm,
+ interface_no_ipv6_pim_ssm_cmd,
+ "no ipv6 pim ssm",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_STR)
+{
+ return pim_process_no_ip_pim_cmd(vty);
+}
+
+DEFPY_HIDDEN (interface_ipv6_pim_sm,
+ interface_ipv6_pim_sm_cmd,
+ "ipv6 pim sm",
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_SM_STR)
+{
+ return pim_process_ip_pim_cmd(vty);
+}
+
+DEFPY_HIDDEN (interface_no_ipv6_pim_sm,
+ interface_no_ipv6_pim_sm_cmd,
+ "no ipv6 pim sm",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ IFACE_PIM_SM_STR)
+{
+ return pim_process_no_ip_pim_cmd(vty);
+}
+
+/* boundaries */
+DEFPY (interface_ipv6_pim_boundary_oil,
+ interface_ipv6_pim_boundary_oil_cmd,
+ "ipv6 multicast boundary oil WORD",
+ IPV6_STR
+ "Generic multicast configuration options\n"
+ "Define multicast boundary\n"
+ "Filter OIL by group using prefix list\n"
+ "Prefix list to filter OIL with\n")
+{
+ return pim_process_ip_pim_boundary_oil_cmd(vty, oil);
+}
+
+DEFPY (interface_no_ipv6_pim_boundary_oil,
+ interface_no_ipv6_pim_boundary_oil_cmd,
+ "no ipv6 multicast boundary oil [WORD]",
+ NO_STR
+ IPV6_STR
+ "Generic multicast configuration options\n"
+ "Define multicast boundary\n"
+ "Filter OIL by group using prefix list\n"
+ "Prefix list to filter OIL with\n")
+{
+ return pim_process_no_ip_pim_boundary_oil_cmd(vty);
+}
+
+DEFPY (interface_ipv6_mroute,
+ interface_ipv6_mroute_cmd,
+ "ipv6 mroute INTERFACE X:X::X:X$group [X:X::X:X]$source",
+ IPV6_STR
+ "Add multicast route\n"
+ "Outgoing interface name\n"
+ "Group address\n"
+ "Source address\n")
+{
+ return pim_process_ip_mroute_cmd(vty, interface, group_str, source_str);
+}
+
+DEFPY (interface_no_ipv6_mroute,
+ interface_no_ipv6_mroute_cmd,
+ "no ipv6 mroute INTERFACE X:X::X:X$group [X:X::X:X]$source",
+ NO_STR
+ IPV6_STR
+ "Add multicast route\n"
+ "Outgoing interface name\n"
+ "Group Address\n"
+ "Source Address\n")
+{
+ return pim_process_no_ip_mroute_cmd(vty, interface, group_str,
+ source_str);
+}
+
+DEFPY (ipv6_pim_rp,
+ ipv6_pim_rp_cmd,
+ "ipv6 pim rp X:X::X:X$rp [X:X::X:X/M]$gp",
+ IPV6_STR
+ PIM_STR
+ "Rendezvous Point\n"
+ "ipv6 address of RP\n"
+ "Group Address range to cover\n")
+{
+ const char *group_str = (gp_str) ? gp_str : "FF00::0/8";
+
+ return pim_process_rp_cmd(vty, rp_str, group_str);
+}
+
+DEFPY (no_ipv6_pim_rp,
+ no_ipv6_pim_rp_cmd,
+ "no ipv6 pim rp X:X::X:X$rp [X:X::X:X/M]$gp",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Rendezvous Point\n"
+ "ipv6 address of RP\n"
+ "Group Address range to cover\n")
+{
+ const char *group_str = (gp_str) ? gp_str : "FF00::0/8";
+
+ return pim_process_no_rp_cmd(vty, rp_str, group_str);
+}
+
+DEFPY (ipv6_pim_rp_prefix_list,
+ ipv6_pim_rp_prefix_list_cmd,
+ "ipv6 pim rp X:X::X:X$rp prefix-list WORD$plist",
+ IPV6_STR
+ PIM_STR
+ "Rendezvous Point\n"
+ "ipv6 address of RP\n"
+ "group prefix-list filter\n"
+ "Name of a prefix-list\n")
+{
+ return pim_process_rp_plist_cmd(vty, rp_str, plist);
+}
+
+DEFPY (no_ipv6_pim_rp_prefix_list,
+ no_ipv6_pim_rp_prefix_list_cmd,
+ "no ipv6 pim rp X:X::X:X$rp prefix-list WORD$plist",
+ NO_STR
+ IPV6_STR
+ PIM_STR
+ "Rendezvous Point\n"
+ "ipv6 address of RP\n"
+ "group prefix-list filter\n"
+ "Name of a prefix-list\n")
+{
+ return pim_process_no_rp_plist_cmd(vty, rp_str, plist);
+}
+
+DEFPY (interface_ipv6_mld_join,
+ interface_ipv6_mld_join_cmd,
+ "ipv6 mld join X:X::X:X$group [X:X::X:X$source]",
+ IPV6_STR
+ IFACE_MLD_STR
+ "MLD join multicast group\n"
+ "Multicast group address\n"
+ "Source address\n")
+{
+ char xpath[XPATH_MAXLEN];
+
+ if (source_str) {
+ if (IPV6_ADDR_SAME(&source, &in6addr_any)) {
+ vty_out(vty, "Bad source address %s\n", source_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ } else
+ source_str = "::";
+
+ snprintf(xpath, sizeof(xpath), FRR_GMP_JOIN_XPATH, "frr-routing:ipv6",
+ group_str, source_str);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY (interface_no_ipv6_mld_join,
+ interface_no_ipv6_mld_join_cmd,
+ "no ipv6 mld join X:X::X:X$group [X:X::X:X$source]",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ "MLD join multicast group\n"
+ "Multicast group address\n"
+ "Source address\n")
+{
+ char xpath[XPATH_MAXLEN];
+
+ if (source_str) {
+ if (IPV6_ADDR_SAME(&source, &in6addr_any)) {
+ vty_out(vty, "Bad source address %s\n", source_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ } else
+ source_str = "::";
+
+ snprintf(xpath, sizeof(xpath), FRR_GMP_JOIN_XPATH, "frr-routing:ipv6",
+ group_str, source_str);
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY (interface_ipv6_mld,
+ interface_ipv6_mld_cmd,
+ "ipv6 mld",
+ IPV6_STR
+ IFACE_MLD_STR)
+{
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (interface_no_ipv6_mld,
+ interface_no_ipv6_mld_cmd,
+ "no ipv6 mld",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR)
+{
+ const struct lyd_node *pim_enable_dnode;
+ char pim_if_xpath[XPATH_MAXLEN + 64];
+
+ snprintf(pim_if_xpath, sizeof(pim_if_xpath),
+ "%s/frr-pim:pim/address-family[address-family='%s']",
+ VTY_CURR_XPATH, "frr-routing:ipv6");
+
+ pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
+ "frr-routing:ipv6");
+ if (!pim_enable_dnode) {
+ nb_cli_enqueue_change(vty, pim_if_xpath, NB_OP_DESTROY, NULL);
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+ } else {
+ if (!yang_dnode_get_bool(pim_enable_dnode, ".")) {
+ nb_cli_enqueue_change(vty, pim_if_xpath, NB_OP_DESTROY,
+ NULL);
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+ } else
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
+ "false");
+ }
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (interface_ipv6_mld_version,
+ interface_ipv6_mld_version_cmd,
+ "ipv6 mld version (1-2)$version",
+ IPV6_STR
+ IFACE_MLD_STR
+ "MLD version\n"
+ "MLD version number\n")
+{
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true");
+ nb_cli_enqueue_change(vty, "./mld-version", NB_OP_MODIFY, version_str);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (interface_no_ipv6_mld_version,
+ interface_no_ipv6_mld_version_cmd,
+ "no ipv6 mld version [(1-2)]",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ "MLD version\n"
+ "MLD version number\n")
+{
+ nb_cli_enqueue_change(vty, "./mld-version", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (interface_ipv6_mld_query_interval,
+ interface_ipv6_mld_query_interval_cmd,
+ "ipv6 mld query-interval (1-65535)$q_interval",
+ IPV6_STR
+ IFACE_MLD_STR
+ IFACE_MLD_QUERY_INTERVAL_STR
+ "Query interval in seconds\n")
+{
+ const struct lyd_node *pim_enable_dnode;
+
+ pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
+ "frr-routing:ipv6");
+ if (!pim_enable_dnode) {
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true");
+ } else {
+ if (!yang_dnode_get_bool(pim_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
+ "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./query-interval", NB_OP_MODIFY,
+ q_interval_str);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
+DEFPY (interface_no_ipv6_mld_query_interval,
+ interface_no_ipv6_mld_query_interval_cmd,
+ "no ipv6 mld query-interval [(1-65535)]",
+ NO_STR
+ IPV6_STR
+ IFACE_MLD_STR
+ IFACE_MLD_QUERY_INTERVAL_STR
+ IGNORED_IN_NO_STR)
+{
+ nb_cli_enqueue_change(vty, "./query-interval", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
+ "frr-routing:ipv6");
+}
+
void pim_cmd_init(void)
{
if_cmd_init(pim_interface_config_write);
@@ -228,4 +640,38 @@ void pim_cmd_init(void)
install_element(CONFIG_NODE, &no_ipv6_pim_rp_keep_alive_cmd);
install_element(CONFIG_NODE, &ipv6_pim_register_suppress_cmd);
install_element(CONFIG_NODE, &no_ipv6_pim_register_suppress_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_pim_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_drprio_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_pim_drprio_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_hello_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_pim_hello_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_activeactive_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_ssm_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_pim_ssm_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_pim_sm_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_pim_sm_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_ipv6_pim_boundary_oil_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ipv6_pim_boundary_oil_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mroute_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_mroute_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_rp_cmd);
+ install_element(VRF_NODE, &ipv6_pim_rp_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_rp_cmd);
+ install_element(VRF_NODE, &no_ipv6_pim_rp_cmd);
+ install_element(CONFIG_NODE, &ipv6_pim_rp_prefix_list_cmd);
+ install_element(VRF_NODE, &ipv6_pim_rp_prefix_list_cmd);
+ install_element(CONFIG_NODE, &no_ipv6_pim_rp_prefix_list_cmd);
+ install_element(VRF_NODE, &no_ipv6_pim_rp_prefix_list_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mld_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_mld_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mld_join_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_mld_join_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mld_version_cmd);
+ install_element(INTERFACE_NODE, &interface_no_ipv6_mld_version_cmd);
+ install_element(INTERFACE_NODE, &interface_ipv6_mld_query_interval_cmd);
+ install_element(INTERFACE_NODE,
+ &interface_no_ipv6_mld_query_interval_cmd);
}
diff --git a/pimd/pim6_cmd.h b/pimd/pim6_cmd.h
index e49045a1b5..ac5eb3f9bf 100644
--- a/pimd/pim6_cmd.h
+++ b/pimd/pim6_cmd.h
@@ -33,6 +33,11 @@
#define IFACE_MLD_LAST_MEMBER_QUERY_INTERVAL_STR \
"MLD last member query interval\n"
#define IFACE_MLD_LAST_MEMBER_QUERY_COUNT_STR "MLD last member query count\n"
+#define IFACE_PIM_STR "Enable PIM SSM operation\n"
+#define IFACE_PIM_SM_STR "Enable PIM SM operation\n"
+#define IFACE_PIM_HELLO_STR "Hello Interval\n"
+#define IFACE_PIM_HELLO_TIME_STR "Time in seconds for Hello Interval\n"
+#define IFACE_PIM_HELLO_HOLD_STR "Time in seconds for Hold Interval\n"
#define MROUTE_STR "IP multicast routing table\n"
#define DEBUG_MLD_STR "MLD protocol activity\n"
#define DEBUG_MLD_EVENTS_STR "MLD protocol events\n"
diff --git a/pimd/pim6_main.c b/pimd/pim6_main.c
index c9e3463969..ed53924616 100644
--- a/pimd/pim6_main.c
+++ b/pimd/pim6_main.c
@@ -178,26 +178,22 @@ int main(int argc, char **argv, char **envp)
pim_route_map_init();
#endif
- /* pim_init(); */
- pim_cmd_init();
+ pim_init();
/*
* Initialize zclient "update" and "lookup" sockets
*/
pim_iface_init();
- /* TODO PIM6: next line is temporary since pim_cmd_init is disabled */
- if_cmd_init(NULL);
-
pim_zebra_init();
#if 0
pim_bfd_init();
pim_mlag_init();
+#endif
hook_register(routing_conf_event,
routing_control_plane_protocols_name_validate);
routing_control_plane_protocols_register_vrf_dependency();
-#endif
frr_config_fork();
frr_run(router->master);
diff --git a/pimd/pim6_mroute_msg.c b/pimd/pim6_mroute_msg.c
new file mode 100644
index 0000000000..f34fa5965a
--- /dev/null
+++ b/pimd/pim6_mroute_msg.c
@@ -0,0 +1,196 @@
+/*
+ * PIM for Quagga
+ * Copyright (C) 2022 Dell Technologies Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+#include "log.h"
+#include "privs.h"
+#include "if.h"
+#include "prefix.h"
+#include "vty.h"
+#include "plist.h"
+#include "sockopt.h"
+#include "lib_errors.h"
+#include "lib/network.h"
+
+#include "pimd.h"
+#include "pim_mroute.h"
+#include "pim_oil.h"
+#include "pim_str.h"
+#include "pim_time.h"
+#include "pim_iface.h"
+#include "pim_macro.h"
+#include "pim_rp.h"
+#include "pim_oil.h"
+#include "pim_ssm.h"
+#include "pim_sock.h"
+
+int pim_mroute_set(struct pim_instance *pim, int enable)
+{
+ int err;
+ int opt, data;
+ socklen_t data_len = sizeof(data);
+ static const struct sock_filter filter[] = {
+ /* Load the first byte of each message: kernel upcalls (struct mrt6msg)
+  * start with im6_mbz == 0, while forwarded IPv6 packets start with the
+  * non-zero version byte, so the filter accepts only upcalls.
+  */
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 0xffff), /* accept the upcall */
+ BPF_STMT(BPF_RET | BPF_K, 0), /* drop anything else */
+ };
+
+ static const struct sock_fprog bpf = {
+ .len = array_size(filter),
+ .filter = (struct sock_filter *)filter,
+ };
+
+ /*
+ * We need to create the VRF table for the pim mroute_socket
+ */
+ if (pim->vrf->vrf_id != VRF_DEFAULT) {
+ frr_with_privs (&pimd_privs) {
+
+ data = pim->vrf->data.l.table_id;
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO,
+ MRT6_TABLE, &data, data_len);
+ if (err) {
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO, MRT6_TABLE=%d): errno=%d: %s",
+ __FILE__, __func__, pim->mroute_socket,
+ data, errno, safe_strerror(errno));
+ return -1;
+ }
+ }
+ }
+
+ frr_with_privs (&pimd_privs) {
+ opt = enable ? MRT6_INIT : MRT6_DONE;
+ /*
+ * *BSD *cares* about what value we pass down
+ * here
+ */
+ data = 1;
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &data,
+ data_len);
+ if (err) {
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,%s=%d): errno=%d: %s",
+ __FILE__, __func__, pim->mroute_socket,
+ enable ? "MRT6_INIT" : "MRT6_DONE", data, errno,
+ safe_strerror(errno));
+ return -1;
+ }
+ }
+
+ if (enable) {
+ /* Linux and Solaris IPV6_PKTINFO */
+ data = 1;
+ if (setsockopt(pim->mroute_socket, PIM_IPPROTO, IPV6_RECVPKTINFO,
+ &data, data_len)) {
+ zlog_warn(
+ "Could not set IPV6_PKTINFO on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno,
+ safe_strerror(errno));
+ }
+ }
+
+ setsockopt_so_recvbuf(pim->mroute_socket, 1024 * 1024 * 8);
+
+ if (set_nonblocking(pim->mroute_socket) < 0) {
+ zlog_warn(
+ "Could not set non blocking on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno,
+ safe_strerror(errno));
+ }
+
+ if (enable) {
+#if defined linux
+ int upcalls = MRT6MSG_WRMIFWHOLE;
+ opt = MRT6_PIM;
+
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &upcalls,
+ sizeof(upcalls));
+ if (err) {
+ zlog_warn(
+ "Failure to register for WHOLE and WRONGMIF upcalls %d %s",
+ errno, safe_strerror(errno));
+ return -1;
+ }
+#else
+ zlog_warn(
+ "PIM-SM will not work properly on this platform, until it gains the ability to receive the WHOLEPKT upcall");
+#endif
+ if (setsockopt(pim->mroute_socket, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf))) {
+ zlog_warn("Failure to attach SO_ATTACH_FILTER on fd %d: %d %s",
+ pim->mroute_socket, errno, safe_strerror(errno));
+ }
+ }
+
+ return 0;
+}
+
+static const char *const mrt6msgtype2str[MRT6MSG_WRMIFWHOLE + 1] = {
+ "<unknown_upcall?>", "NOCACHE", "WRONGMIF", "WHOLEPKT", "WRMIFWHOLE"};
+
+int pim_mroute_msg(struct pim_instance *pim, const char *buf,
+ size_t buf_size, ifindex_t ifindex)
+{
+ struct interface *ifp;
+ const struct ip6_hdr *ip6_hdr;
+ const struct mrt6msg *msg;
+
+ if (buf_size < (int)sizeof(struct ip6_hdr))
+ return 0;
+
+ ip6_hdr = (const struct ip6_hdr *)buf;
+
+ if ((ip6_hdr->ip6_vfc & 0xf) == 0) {
+ msg = (const struct mrt6msg *)buf;
+
+ ifp = pim_if_find_by_vif_index(pim, msg->im6_mif);
+
+ if (!ifp)
+ return 0;
+ if (PIM_DEBUG_MROUTE) {
+ zlog_debug(
+ "%s: pim kernel upcall %s type=%d ip_p=%d from fd=%d for (S,G)=(%pI6,%pI6) on %s mifi=%d size=%ld",
+ __func__, mrt6msgtype2str[msg->im6_msgtype],
+ msg->im6_msgtype, ip6_hdr->ip6_nxt,
+ pim->mroute_socket, &msg->im6_src,
+ &msg->im6_dst, ifp->name, msg->im6_mif,
+ (long int)buf_size);
+ }
+
+ switch (msg->im6_msgtype) {
+ case MRT6MSG_WRONGMIF:
+ return pim_mroute_msg_wrongvif(pim->mroute_socket, ifp,
+ msg);
+ case MRT6MSG_NOCACHE:
+ return pim_mroute_msg_nocache(pim->mroute_socket, ifp,
+ msg);
+ case MRT6MSG_WHOLEPKT:
+ return pim_mroute_msg_wholepkt(pim->mroute_socket, ifp,
+ (const char *)msg);
+ case MRT6MSG_WRMIFWHOLE:
+ return pim_mroute_msg_wrvifwhole(
+ pim->mroute_socket, ifp, (const char *)msg);
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
diff --git a/pimd/pim6_stubs.c b/pimd/pim6_stubs.c
index 9f68b7be3d..818b137abf 100644
--- a/pimd/pim6_stubs.c
+++ b/pimd/pim6_stubs.c
@@ -29,40 +29,6 @@
/*
* NH lookup / NHT
*/
-void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
- struct pim_nexthop_cache *pnc, int command)
-{
-}
-
-int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
- struct pim_nexthop *nexthop, struct prefix *src,
- struct prefix *grp, int neighbor_needed)
-{
- return 0;
-}
-
-int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
- struct pim_upstream *up, struct rp_info *rp,
- struct pim_nexthop_cache *out_pnc)
-{
- return 0;
-}
-
-void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
- struct pim_upstream *up, struct rp_info *rp)
-{
-}
-
-struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
- struct pim_rpf *rpf)
-{
- return NULL;
-}
-
-void pim_rp_nexthop_del(struct rp_info *rp_info)
-{
-}
-
void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr addr)
{
}
@@ -88,48 +54,67 @@ void zclient_lookup_free(void)
}
/*
- * packet handling
+ * PIM register
*/
-int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
- int pim_msg_size, const char *ifname)
+void pim_register_join(struct pim_upstream *up)
{
- return 0;
}
-int pim_hello_send(struct interface *ifp, uint16_t holdtime)
+void pim_null_register_send(struct pim_upstream *up)
{
- return -1;
}
-void pim_hello_restart_now(struct interface *ifp)
+void pim_reg_del_on_couldreg_fail(struct interface *ifp)
{
}
-void pim_hello_restart_triggered(struct interface *ifp)
+bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
{
+ return false;
}
-int pim_sock_add(struct interface *ifp)
+void pim_bsm_proc_free(struct pim_instance *pim)
{
- return -1;
}
-void pim_sock_delete(struct interface *ifp, const char *delete_message)
+void pim_bsm_proc_init(struct pim_instance *pim)
{
}
-/*
- * PIM register
- */
-void pim_register_join(struct pim_upstream *up)
+struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
+ struct prefix *grp)
{
+ return NULL;
}
-void pim_null_register_send(struct pim_upstream *up)
+void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
{
}
-void pim_reg_del_on_couldreg_fail(struct interface *ifp)
+int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
+ uint32_t buf_size, bool no_fwd)
+{
+ return 0;
+}
+
+void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
+ struct pim_rpf *rpg, int null_register,
+ struct pim_upstream *up)
+{
+}
+
+void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg, pim_addr src,
+ pim_addr originator)
+{
+}
+
+int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
+ pim_addr src_addr, uint8_t *tlv_buf, int tlv_buf_size)
{
+ return 0;
}
+int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
+{
+ return 0;
+}
diff --git a/pimd/pim_addr.h b/pimd/pim_addr.h
index a278a46593..e422a2e2da 100644
--- a/pimd/pim_addr.h
+++ b/pimd/pim_addr.h
@@ -33,7 +33,6 @@ typedef struct in_addr pim_addr;
#define PIM_AFI AFI_IP
#define PIM_MAX_BITLEN IPV4_MAX_BITLEN
#define PIM_AF_NAME "ip"
-#define FRR_PIM_AF_XPATH_VAL "frr-routing:ipv4"
union pimprefixptr {
prefixtype(pimprefixptr, struct prefix, p)
@@ -53,7 +52,6 @@ typedef struct in6_addr pim_addr;
#define PIM_AFI AFI_IP6
#define PIM_MAX_BITLEN IPV6_MAX_BITLEN
#define PIM_AF_NAME "ipv6"
-#define FRR_PIM_AF_XPATH_VAL "frr-routing:ipv6"
union pimprefixptr {
prefixtype(pimprefixptr, struct prefix, p)
diff --git a/pimd/pim_assert.c b/pimd/pim_assert.c
index e7fff4db6f..cbd44388c1 100644
--- a/pimd/pim_assert.c
+++ b/pimd/pim_assert.c
@@ -338,6 +338,7 @@ int pim_assert_build_msg(uint8_t *pim_msg, int buf_size, struct interface *ifp,
uint32_t metric_preference, uint32_t route_metric,
uint32_t rpt_bit_flag)
{
+ struct pim_interface *pim_ifp = ifp->info;
uint8_t *buf_pastend = pim_msg + buf_size;
uint8_t *pim_msg_curr;
int pim_msg_size;
@@ -380,7 +381,9 @@ int pim_assert_build_msg(uint8_t *pim_msg, int buf_size, struct interface *ifp,
Add PIM header
*/
pim_msg_size = pim_msg_curr - pim_msg;
- pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_ASSERT, false);
+ pim_msg_build_header(pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
+ PIM_MSG_TYPE_ASSERT, false);
return pim_msg_size;
}
diff --git a/pimd/pim_br.c b/pimd/pim_br.c
index 3e64296deb..6ec6b11e7b 100644
--- a/pimd/pim_br.c
+++ b/pimd/pim_br.c
@@ -30,14 +30,12 @@
struct pim_br {
pim_sgaddr sg;
- struct in_addr pmbr;
+ pim_addr pmbr;
};
-struct in_addr pim_br_unknown = {.s_addr = 0};
-
static struct list *pim_br_list = NULL;
-struct in_addr pim_br_get_pmbr(pim_sgaddr *sg)
+pim_addr pim_br_get_pmbr(pim_sgaddr *sg)
{
struct listnode *node;
struct pim_br *pim_br;
@@ -47,10 +45,10 @@ struct in_addr pim_br_get_pmbr(pim_sgaddr *sg)
return pim_br->pmbr;
}
- return pim_br_unknown;
+ return PIMADDR_ANY;
}
-void pim_br_set_pmbr(pim_sgaddr *sg, struct in_addr br)
+void pim_br_set_pmbr(pim_sgaddr *sg, pim_addr br)
{
struct listnode *node, *next;
struct pim_br *pim_br;
diff --git a/pimd/pim_br.h b/pimd/pim_br.h
index ef24ef3c19..7b87c0f1fd 100644
--- a/pimd/pim_br.h
+++ b/pimd/pim_br.h
@@ -20,13 +20,11 @@
#ifndef PIM_BR_H
#define PIM_BR_H
-struct in_addr pim_br_get_pmbr(pim_sgaddr *sg);
+pim_addr pim_br_get_pmbr(pim_sgaddr *sg);
-void pim_br_set_pmbr(pim_sgaddr *sg, struct in_addr value);
+void pim_br_set_pmbr(pim_sgaddr *sg, pim_addr value);
void pim_br_clear_pmbr(pim_sgaddr *sg);
void pim_br_init(void);
-extern struct in_addr pim_br_unknown;
-
#endif
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
index c45823cb87..0e91773be7 100644
--- a/pimd/pim_bsm.c
+++ b/pimd/pim_bsm.c
@@ -36,6 +36,7 @@
#include "pim_bsm.h"
#include "pim_time.h"
#include "pim_zebra.h"
+#include "pim_util.h"
/* Functions forward declaration */
static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout);
@@ -284,7 +285,7 @@ static void pim_on_g2rp_timer(struct thread *t)
struct rp_info *rp_info;
struct route_node *rn;
uint16_t elapse;
- struct in_addr bsrp_addr;
+ pim_addr bsrp_addr;
bsrp = THREAD_ARG(t);
THREAD_OFF(bsrp->g2rp_timer);
@@ -324,10 +325,9 @@ static void pim_on_g2rp_timer(struct thread *t)
if (rp_info->rp_src != RP_SRC_STATIC) {
/* If new rp available, change it else delete the existing */
if (bsrp) {
- bsrp_addr = bsrp->rp_address;
pim_g2rp_timer_start(
bsrp, (bsrp->rp_holdtime - bsrp->elapse_time));
- pim_rp_change(pim, bsrp_addr, bsgrp_node->group,
+ pim_rp_change(pim, bsrp->rp_address, bsgrp_node->group,
RP_SRC_BSR);
} else {
pim_rp_del(pim, bsrp_addr, bsgrp_node->group, NULL,
@@ -417,7 +417,7 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
pend = bsm_rpinfos_first(bsgrp_node->partial_bsrp_list);
- if (!str2prefix("224.0.0.0/4", &group_all))
+ if (!pim_get_all_mcast_group(&group_all))
return;
rp_all = pim_rp_find_match_group(pim, &group_all);
@@ -628,7 +628,7 @@ void pim_bsm_clear(struct pim_instance *pim)
pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
- if (!str2prefix("224.0.0.0/4", &g_all))
+ if (!pim_get_all_mcast_group(&g_all))
return;
rp_all = pim_rp_find_match_group(pim, &g_all);
@@ -718,6 +718,7 @@ static bool pim_bsm_send_intf(uint8_t *buf, int len, struct interface *ifp,
static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
uint32_t pim_mtu, pim_addr dst_addr, bool no_fwd)
{
+ struct pim_interface *pim_ifp = ifp->info;
struct bsmmsg_grpinfo *grpinfo, *curgrp;
uint8_t *firstgrp_ptr;
uint8_t *pkt;
@@ -836,9 +837,10 @@ static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
< (PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN))) {
/* No space to fit in more rp, send this pkt */
this_pkt_len = pim_mtu - this_pkt_rem;
- pim_msg_build_header(pak_start, this_pkt_len,
- PIM_MSG_TYPE_BOOTSTRAP,
- no_fwd);
+ pim_msg_build_header(
+ pim_ifp->primary_address, dst_addr,
+ pak_start, this_pkt_len,
+ PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
pim_bsm_send_intf(pak_start, this_pkt_len, ifp,
dst_addr);
@@ -873,7 +875,8 @@ static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
/* Send if we have any unsent packet */
if (pak_pending) {
this_pkt_len = pim_mtu - this_pkt_rem;
- pim_msg_build_header(pak_start, this_pkt_len,
+ pim_msg_build_header(pim_ifp->primary_address, dst_addr,
+ pak_start, this_pkt_len,
PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
pim_bsm_send_intf(pak_start, (pim_mtu - this_pkt_rem), ifp,
dst_addr);
@@ -920,7 +923,8 @@ static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
zlog_debug("%s: pim_bsm_frag_send returned %s",
__func__, ret ? "TRUE" : "FALSE");
} else {
- pim_msg_build_header(buf, len, PIM_MSG_TYPE_BOOTSTRAP,
+ pim_msg_build_header(pim_ifp->primary_address, dst_addr,
+ buf, len, PIM_MSG_TYPE_BOOTSTRAP,
no_fwd);
if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {
if (PIM_DEBUG_BSM)
@@ -999,7 +1003,8 @@ bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
}
} else {
/* Pim header needs to be constructed */
- pim_msg_build_header(bsfrag->data, bsfrag->size,
+ pim_msg_build_header(pim_ifp->primary_address, dst_addr,
+ bsfrag->data, bsfrag->size,
PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
ret = pim_bsm_send_intf(bsfrag->data, bsfrag->size, ifp,
dst_addr);
@@ -1271,7 +1276,7 @@ static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
return true;
}
-int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf,
+int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
uint32_t buf_size, bool no_fwd)
{
struct bsm_hdr *bshdr;
@@ -1371,7 +1376,7 @@ int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf,
}
#if PIM_IPV == 4
- if (ip_hdr->ip_dst.s_addr == qpim_all_pim_routers_addr.s_addr)
+ if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr))
#else
if (0)
#endif
@@ -1380,18 +1385,16 @@ int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf,
* match RPF towards the BSR's IP address, or they have
* no-forward set
*/
- if (!no_fwd
- && !pim_nht_bsr_rpf_check(pim, bshdr->bsr_addr.addr, ifp,
- ip_hdr->ip_src)) {
+ if (!no_fwd && !pim_nht_bsr_rpf_check(pim, bshdr->bsr_addr.addr,
+ ifp, sg->src)) {
if (PIM_DEBUG_BSM)
zlog_debug(
- "BSM check: RPF to BSR %s is not %pI4%%%s",
- bsr_str, &ip_hdr->ip_src, ifp->name);
+ "BSM check: RPF to BSR %s is not %pPA%%%s",
+ bsr_str, &sg->src, ifp->name);
pim->bsm_dropped++;
return -1;
}
- } else if (if_address_is_local(&ip_hdr->ip_dst, AF_INET,
- pim->vrf->vrf_id)) {
+ } else if (if_address_is_local(&sg->grp, PIM_AF, pim->vrf->vrf_id)) {
/* Unicast BSM received - if ucast bsm not enabled on
* the interface, drop it
*/
diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h
index a536b50688..910067109e 100644
--- a/pimd/pim_bsm.h
+++ b/pimd/pim_bsm.h
@@ -115,7 +115,7 @@ struct bsm_rpinfo {
uint32_t elapse_time; /* upd at expiry of elected RP node */
uint16_t rp_prio; /* RP priority */
uint16_t rp_holdtime; /* RP holdtime - g2rp timer value */
- struct in_addr rp_address; /* RP Address */
+ pim_addr rp_address; /* RP Address */
struct bsgrp_node *bsgrp_node; /* Back ptr to bsgrp_node */
struct thread *g2rp_timer; /* Run only for elected RP node */
};
@@ -207,11 +207,8 @@ void pim_bsm_proc_init(struct pim_instance *pim);
void pim_bsm_proc_free(struct pim_instance *pim);
void pim_bsm_clear(struct pim_instance *pim);
void pim_bsm_write_config(struct vty *vty, struct interface *ifp);
-int pim_bsm_process(struct interface *ifp,
- struct ip *ip_hdr,
- uint8_t *buf,
- uint32_t buf_size,
- bool no_fwd);
+int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf,
+ uint32_t buf_size, bool no_fwd);
bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp);
struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
struct prefix *grp);
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index 76f90cdba7..86d179fe39 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -455,8 +455,7 @@ static void pim_show_membership(struct pim_instance *pim, struct vty *vty,
json_object_free(json);
}
-static void pim_print_ifp_flags(struct vty *vty, struct interface *ifp,
- int mloop)
+static void pim_print_ifp_flags(struct vty *vty, struct interface *ifp)
{
vty_out(vty, "Flags\n");
vty_out(vty, "-----\n");
@@ -469,7 +468,6 @@ static void pim_print_ifp_flags(struct vty *vty, struct interface *ifp,
vty_out(vty, "Interface Index : %d\n", ifp->ifindex);
vty_out(vty, "Multicast : %s\n",
if_is_multicast(ifp) ? "yes" : "no");
- vty_out(vty, "Multicast Loop : %d\n", mloop);
vty_out(vty, "Promiscuous : %s\n",
(ifp->flags & IFF_PROMISC) ? "yes" : "no");
vty_out(vty, "\n");
@@ -576,7 +574,6 @@ static void igmp_show_interfaces_single(struct pim_instance *pim,
char other_hhmmss[10];
int found_ifname = 0;
int sqi;
- int mloop = 0;
long gmi_msec; /* Group Membership Interval */
long lmqt_msec;
long ohpi_msec;
@@ -639,11 +636,6 @@ static void igmp_show_interfaces_single(struct pim_instance *pim,
qri_msec =
pim_ifp->gm_query_max_response_time_dsec * 100;
- if (pim_ifp->pim_sock_fd >= 0)
- mloop = pim_socket_mcastloop_get(
- pim_ifp->pim_sock_fd);
- else
- mloop = 0;
lmqc = pim_ifp->gm_last_member_query_count;
if (uj) {
@@ -776,7 +768,7 @@ static void igmp_show_interfaces_single(struct pim_instance *pim,
vty_out(vty, "\n");
vty_out(vty, "\n");
- pim_print_ifp_flags(vty, ifp, mloop);
+ pim_print_ifp_flags(vty, ifp);
}
}
}
@@ -903,7 +895,6 @@ static void pim_show_interfaces_single(struct pim_instance *pim,
char src_str[INET_ADDRSTRLEN];
char stat_uptime[10];
char uptime[10];
- int mloop = 0;
int found_ifname = 0;
int print_header;
json_object *json = NULL;
@@ -945,10 +936,6 @@ static void pim_show_interfaces_single(struct pim_instance *pim,
pim_ifp->pim_hello_period);
pim_time_uptime(stat_uptime, sizeof(stat_uptime),
now - pim_ifp->pim_ifstat_start);
- if (pim_ifp->pim_sock_fd >= 0)
- mloop = pim_socket_mcastloop_get(pim_ifp->pim_sock_fd);
- else
- mloop = 0;
if (uj) {
char pbuf[PREFIX2STR_BUFFER];
@@ -1096,8 +1083,6 @@ static void pim_show_interfaces_single(struct pim_instance *pim,
pim_ifp->pim_ifstat_hello_sendfail);
json_object_int_add(json_row, "helloGenerationId",
pim_ifp->pim_generation_id);
- json_object_int_add(json_row, "flagMulticastLoop",
- mloop);
json_object_int_add(
json_row, "effectivePropagationDelay",
@@ -1250,7 +1235,7 @@ static void pim_show_interfaces_single(struct pim_instance *pim,
vty_out(vty, "\n");
vty_out(vty, "\n");
- pim_print_ifp_flags(vty, ifp, mloop);
+ pim_print_ifp_flags(vty, ifp);
vty_out(vty, "Join Prune Interval\n");
vty_out(vty, "-------------------\n");
@@ -1299,14 +1284,21 @@ static void igmp_show_statistics(struct pim_instance *pim, struct vty *vty,
const char *ifname, bool uj)
{
struct interface *ifp;
- struct igmp_stats rx_stats;
+ struct igmp_stats igmp_stats;
+ bool found_ifname = false;
+ json_object *json = NULL;
- igmp_stats_init(&rx_stats);
+ igmp_stats_init(&igmp_stats);
+
+ if (uj)
+ json = json_object_new_object();
FOR_ALL_INTERFACES (pim->vrf, ifp) {
struct pim_interface *pim_ifp;
- struct listnode *sock_node;
+ struct listnode *sock_node, *source_node, *group_node;
struct gm_sock *igmp;
+ struct gm_group *group;
+ struct gm_source *src;
pim_ifp = ifp->info;
@@ -1316,50 +1308,119 @@ static void igmp_show_statistics(struct pim_instance *pim, struct vty *vty,
if (ifname && strcmp(ifname, ifp->name))
continue;
+ found_ifname = true;
+
+ igmp_stats.joins_failed += pim_ifp->igmp_ifstat_joins_failed;
+ igmp_stats.joins_sent += pim_ifp->igmp_ifstat_joins_sent;
+ igmp_stats.total_groups +=
+ pim_ifp->gm_group_list
+ ? listcount(pim_ifp->gm_group_list)
+ : 0;
+ igmp_stats.peak_groups += pim_ifp->igmp_peak_group_count;
+
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, group_node,
+ group)) {
+ for (ALL_LIST_ELEMENTS_RO(group->group_source_list,
+ source_node, src)) {
+ if (pim_addr_is_any(src->source_addr))
+ continue;
+
+ igmp_stats.total_source_groups++;
+ }
+ }
+
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node,
igmp)) {
- igmp_stats_add(&rx_stats, &igmp->rx_stats);
+ igmp_stats_add(&igmp_stats, &igmp->igmp_stats);
}
}
- if (uj) {
- json_object *json = NULL;
- json_object *json_row = NULL;
- json = json_object_new_object();
- json_row = json_object_new_object();
+ if (!found_ifname) {
+ if (uj)
+ vty_json(vty, json);
+ else
+ vty_out(vty, "%% No such interface\n");
+ return;
+ }
- json_object_string_add(json_row, "name", ifname ? ifname :
- "global");
- json_object_int_add(json_row, "queryV1", rx_stats.query_v1);
- json_object_int_add(json_row, "queryV2", rx_stats.query_v2);
- json_object_int_add(json_row, "queryV3", rx_stats.query_v3);
- json_object_int_add(json_row, "leaveV3", rx_stats.leave_v2);
- json_object_int_add(json_row, "reportV1", rx_stats.report_v1);
- json_object_int_add(json_row, "reportV2", rx_stats.report_v2);
- json_object_int_add(json_row, "reportV3", rx_stats.report_v3);
+ if (uj) {
+ json_object *json_row = json_object_new_object();
+
+ json_object_string_add(json_row, "name",
+ ifname ? ifname : "global");
+ json_object_int_add(json_row, "queryV1", igmp_stats.query_v1);
+ json_object_int_add(json_row, "queryV2", igmp_stats.query_v2);
+ json_object_int_add(json_row, "queryV3", igmp_stats.query_v3);
+ json_object_int_add(json_row, "leaveV2", igmp_stats.leave_v2);
+ json_object_int_add(json_row, "reportV1", igmp_stats.report_v1);
+ json_object_int_add(json_row, "reportV2", igmp_stats.report_v2);
+ json_object_int_add(json_row, "reportV3", igmp_stats.report_v3);
json_object_int_add(json_row, "mtraceResponse",
- rx_stats.mtrace_rsp);
+ igmp_stats.mtrace_rsp);
json_object_int_add(json_row, "mtraceRequest",
- rx_stats.mtrace_req);
+ igmp_stats.mtrace_req);
json_object_int_add(json_row, "unsupported",
- rx_stats.unsupported);
+ igmp_stats.unsupported);
+ json_object_int_add(json_row, "totalReceivedMessages",
+ igmp_stats.total_recv_messages);
+ json_object_int_add(json_row, "peakGroups",
+ igmp_stats.peak_groups);
+ json_object_int_add(json_row, "totalGroups",
+ igmp_stats.total_groups);
+ json_object_int_add(json_row, "totalSourceGroups",
+ igmp_stats.total_source_groups);
+ json_object_int_add(json_row, "joinsFailed",
+ igmp_stats.joins_failed);
+ json_object_int_add(json_row, "joinsSent",
+ igmp_stats.joins_sent);
+ json_object_int_add(json_row, "generalQueriesSent",
+ igmp_stats.general_queries_sent);
+ json_object_int_add(json_row, "groupQueriesSent",
+ igmp_stats.group_queries_sent);
json_object_object_add(json, ifname ? ifname : "global",
json_row);
vty_json(vty, json);
} else {
- vty_out(vty, "IGMP RX statistics\n");
- vty_out(vty, "Interface : %s\n",
+ vty_out(vty, "IGMP statistics\n");
+ vty_out(vty, "Interface : %s\n",
ifname ? ifname : "global");
- vty_out(vty, "V1 query : %u\n", rx_stats.query_v1);
- vty_out(vty, "V2 query : %u\n", rx_stats.query_v2);
- vty_out(vty, "V3 query : %u\n", rx_stats.query_v3);
- vty_out(vty, "V2 leave : %u\n", rx_stats.leave_v2);
- vty_out(vty, "V1 report : %u\n", rx_stats.report_v1);
- vty_out(vty, "V2 report : %u\n", rx_stats.report_v2);
- vty_out(vty, "V3 report : %u\n", rx_stats.report_v3);
- vty_out(vty, "mtrace response : %u\n", rx_stats.mtrace_rsp);
- vty_out(vty, "mtrace request : %u\n", rx_stats.mtrace_req);
- vty_out(vty, "unsupported : %u\n", rx_stats.unsupported);
+ vty_out(vty, "V1 query : %u\n",
+ igmp_stats.query_v1);
+ vty_out(vty, "V2 query : %u\n",
+ igmp_stats.query_v2);
+ vty_out(vty, "V3 query : %u\n",
+ igmp_stats.query_v3);
+ vty_out(vty, "V2 leave : %u\n",
+ igmp_stats.leave_v2);
+ vty_out(vty, "V1 report : %u\n",
+ igmp_stats.report_v1);
+ vty_out(vty, "V2 report : %u\n",
+ igmp_stats.report_v2);
+ vty_out(vty, "V3 report : %u\n",
+ igmp_stats.report_v3);
+ vty_out(vty, "mtrace response : %u\n",
+ igmp_stats.mtrace_rsp);
+ vty_out(vty, "mtrace request : %u\n",
+ igmp_stats.mtrace_req);
+ vty_out(vty, "unsupported : %u\n",
+ igmp_stats.unsupported);
+ vty_out(vty, "total received messages : %u\n",
+ igmp_stats.total_recv_messages);
+ vty_out(vty, "joins failed : %u\n",
+ igmp_stats.joins_failed);
+ vty_out(vty, "joins sent : %u\n",
+ igmp_stats.joins_sent);
+ vty_out(vty, "general queries sent : %u\n",
+ igmp_stats.general_queries_sent);
+ vty_out(vty, "group queries sent : %u\n",
+ igmp_stats.group_queries_sent);
+ vty_out(vty, "peak groups : %u\n",
+ igmp_stats.peak_groups);
+ vty_out(vty, "total groups : %u\n",
+ igmp_stats.total_groups);
+ vty_out(vty, "total source groups : %u\n",
+ igmp_stats.total_source_groups);
}
}
@@ -1594,6 +1655,10 @@ static void pim_show_interface_traffic_single(struct pim_instance *pim,
pim_ifp->pim_ifstat_join_recv);
json_object_int_add(json_row, "joinTx",
pim_ifp->pim_ifstat_join_send);
+ json_object_int_add(json_row, "pruneRx",
+ pim_ifp->pim_ifstat_prune_recv);
+ json_object_int_add(json_row, "pruneTx",
+ pim_ifp->pim_ifstat_prune_send);
json_object_int_add(json_row, "registerRx",
pim_ifp->pim_ifstat_reg_recv);
json_object_int_add(json_row, "registerTx",
@@ -1686,8 +1751,13 @@ static void pim_show_join_helper(struct vty *vty, struct pim_interface *pim_ifp,
json_object_string_add(
json_row, "channelJoinName",
pim_ifchannel_ifjoin_name(ch->ifjoin_state, ch->flags));
- if (PIM_IF_FLAG_TEST_S_G_RPT(ch->flags))
+ if (PIM_IF_FLAG_TEST_S_G_RPT(ch->flags)) {
+#if CONFDATE > 20230131
+CPP_NOTICE("Remove JSON object commands with keys starting with capital")
+#endif
json_object_int_add(json_row, "SGRpt", 1);
+ json_object_int_add(json_row, "sgRpt", 1);
+ }
if (PIM_IF_FLAG_TEST_PROTO_PIM(ch->flags))
json_object_int_add(json_row, "protocolPim", 1);
if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags))
@@ -2037,6 +2107,8 @@ static void pim_show_state(struct pim_instance *pim, struct vty *vty,
json_ifp_in);
json_object_int_add(json_source, "Installed",
c_oil->installed);
+ json_object_int_add(json_source, "installed",
+ c_oil->installed);
if (isRpt)
json_object_boolean_true_add(
json_source, "isRpt");
@@ -2045,20 +2117,36 @@ static void pim_show_state(struct pim_instance *pim, struct vty *vty,
json_source, "isRpt");
json_object_int_add(json_source, "RefCount",
c_oil->oil_ref_count);
+ json_object_int_add(json_source, "refCount",
+ c_oil->oil_ref_count);
json_object_int_add(json_source, "OilListSize",
c_oil->oil_size);
+ json_object_int_add(json_source, "oilListSize",
+ c_oil->oil_size);
json_object_int_add(
json_source, "OilRescan",
c_oil->oil_inherited_rescan);
+ json_object_int_add(
+ json_source, "oilRescan",
+ c_oil->oil_inherited_rescan);
json_object_int_add(json_source, "LastUsed",
c_oil->cc.lastused);
+ json_object_int_add(json_source, "lastUsed",
+ c_oil->cc.lastused);
json_object_int_add(json_source, "PacketCount",
c_oil->cc.pktcnt);
+ json_object_int_add(json_source, "packetCount",
+ c_oil->cc.pktcnt);
json_object_int_add(json_source, "ByteCount",
c_oil->cc.bytecnt);
+ json_object_int_add(json_source, "byteCount",
+ c_oil->cc.bytecnt);
json_object_int_add(json_source,
"WrongInterface",
c_oil->cc.wrong_if);
+ json_object_int_add(json_source,
+ "wrongInterface",
+ c_oil->cc.wrong_if);
}
} else {
vty_out(vty, "%-6d %-15s %-15s %-3s %-16s ",
@@ -3983,6 +4071,9 @@ DEFUN (clear_ip_pim_interface_traffic,
pim_ifp->pim_ifstat_assert_send = 0;
pim_ifp->pim_ifstat_bsm_rx = 0;
pim_ifp->pim_ifstat_bsm_tx = 0;
+ pim_ifp->igmp_ifstat_joins_sent = 0;
+ pim_ifp->igmp_ifstat_joins_failed = 0;
+ pim_ifp->igmp_peak_group_count = 0;
}
return CMD_SUCCESS;
@@ -5313,39 +5404,56 @@ DEFUN (show_ip_pim_upstream_rpf,
DEFUN (show_ip_pim_rp,
show_ip_pim_rp_cmd,
- "show ip pim [vrf NAME] rp-info [json]",
+ "show ip pim [vrf NAME] rp-info [A.B.C.D/M] [json]",
SHOW_STR
IP_STR
PIM_STR
VRF_CMD_HELP_STR
"PIM RP information\n"
+ "Multicast Group range\n"
JSON_STR)
{
int idx = 2;
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
bool uj = use_json(argc, argv);
+ struct prefix *range = NULL;
if (!vrf)
return CMD_WARNING;
- pim_rp_show_information(vrf->info, vty, uj);
+ if (argv_find(argv, argc, "A.B.C.D/M", &idx)) {
+ range = prefix_new();
+ (void)str2prefix(argv[idx]->arg, range);
+ apply_mask(range);
+ }
+
+ pim_rp_show_information(vrf->info, range, vty, uj);
return CMD_SUCCESS;
}
DEFUN (show_ip_pim_rp_vrf_all,
show_ip_pim_rp_vrf_all_cmd,
- "show ip pim vrf all rp-info [json]",
+ "show ip pim vrf all rp-info [A.B.C.D/M] [json]",
SHOW_STR
IP_STR
PIM_STR
VRF_CMD_HELP_STR
"PIM RP information\n"
+ "Multicast Group range\n"
JSON_STR)
{
+ int idx = 0;
bool uj = use_json(argc, argv);
struct vrf *vrf;
bool first = true;
+ struct prefix *range = NULL;
+
+ if (argv_find(argv, argc, "A.B.C.D/M", &idx)) {
+ range = prefix_new();
+ (void)str2prefix(argv[idx]->arg, range);
+ apply_mask(range);
+ }
if (uj)
vty_out(vty, "{ ");
@@ -5357,7 +5465,7 @@ DEFUN (show_ip_pim_rp_vrf_all,
first = false;
} else
vty_out(vty, "VRF: %s\n", vrf->name);
- pim_rp_show_information(vrf->info, vty, uj);
+ pim_rp_show_information(vrf->info, range, vty, uj);
}
if (uj)
vty_out(vty, "}\n");
@@ -5952,6 +6060,10 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty,
/* Find the inbound interface nested under the source,
* create it if it doesn't exist */
+ json_object_string_add(json_source, "source",
+ src_str);
+ json_object_string_add(json_source, "group",
+ grp_str);
json_object_int_add(json_source, "installed",
c_oil->installed);
json_object_int_add(json_source, "refCount",
@@ -5960,6 +6072,8 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty,
c_oil->oil_size);
json_object_int_add(json_source, "OilInheritedRescan",
c_oil->oil_inherited_rescan);
+ json_object_int_add(json_source, "oilInheritedRescan",
+ c_oil->oil_inherited_rescan);
json_object_string_add(json_source, "iif", in_ifname);
json_object_string_add(json_source, "upTime",
mroute_uptime);
@@ -6997,98 +7111,36 @@ DEFUN (no_ip_pim_v6_secondary,
return nb_cli_apply_changes(vty, NULL);
}
-DEFUN (ip_pim_rp,
+DEFPY (ip_pim_rp,
ip_pim_rp_cmd,
- "ip pim rp A.B.C.D [A.B.C.D/M]",
+ "ip pim rp A.B.C.D$rp [A.B.C.D/M]$gp",
IP_STR
"pim multicast routing\n"
"Rendevous Point\n"
"ip address of RP\n"
"Group Address range to cover\n")
{
- const char *vrfname;
- int idx_rp = 3, idx_group = 4;
- char rp_group_xpath[XPATH_MAXLEN];
- int result = 0;
- struct prefix group;
- struct in_addr rp_addr;
- const char *group_str =
- (argc == 5) ? argv[idx_group]->arg : "224.0.0.0/4";
-
- result = str2prefix(group_str, &group);
- if (result) {
- struct prefix temp;
-
- prefix_copy(&temp, &group);
- apply_mask(&temp);
- if (!prefix_same(&group, &temp)) {
- vty_out(vty, "%% Inconsistent address and mask: %s\n",
- group_str);
- return CMD_WARNING_CONFIG_FAILED;
- }
- }
-
- if (!result) {
- vty_out(vty, "%% Bad group address specified: %s\n",
- group_str);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- result = inet_pton(AF_INET, argv[idx_rp]->arg, &rp_addr);
- if (result <= 0) {
- vty_out(vty, "%% Bad RP address specified: %s\n",
- argv[idx_rp]->arg);
- return CMD_WARNING_CONFIG_FAILED;
- }
+ const char *group_str = (gp_str) ? gp_str : "224.0.0.0/4";
- vrfname = pim_cli_get_vrf_name(vty);
- if (vrfname == NULL)
- return CMD_WARNING_CONFIG_FAILED;
-
- snprintf(rp_group_xpath, sizeof(rp_group_xpath),
- FRR_PIM_STATIC_RP_XPATH,
- "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4",
- argv[idx_rp]->arg);
- strlcat(rp_group_xpath, "/group-list", sizeof(rp_group_xpath));
-
- nb_cli_enqueue_change(vty, rp_group_xpath, NB_OP_CREATE, group_str);
-
- return nb_cli_apply_changes(vty, NULL);
+ return pim_process_rp_cmd(vty, rp_str, group_str);
}
-DEFUN (ip_pim_rp_prefix_list,
+DEFPY (ip_pim_rp_prefix_list,
ip_pim_rp_prefix_list_cmd,
- "ip pim rp A.B.C.D prefix-list WORD",
+ "ip pim rp A.B.C.D$rp prefix-list WORD$plist",
IP_STR
"pim multicast routing\n"
- "Rendevous Point\n"
+ "Rendezvous Point\n"
"ip address of RP\n"
"group prefix-list filter\n"
"Name of a prefix-list\n")
{
- int idx_rp = 3, idx_plist = 5;
- const char *vrfname;
- char rp_plist_xpath[XPATH_MAXLEN];
-
- vrfname = pim_cli_get_vrf_name(vty);
- if (vrfname == NULL)
- return CMD_WARNING_CONFIG_FAILED;
-
- snprintf(rp_plist_xpath, sizeof(rp_plist_xpath),
- FRR_PIM_STATIC_RP_XPATH,
- "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4",
- argv[idx_rp]->arg);
- strlcat(rp_plist_xpath, "/prefix-list", sizeof(rp_plist_xpath));
-
- nb_cli_enqueue_change(vty, rp_plist_xpath, NB_OP_MODIFY,
- argv[idx_plist]->arg);
-
- return nb_cli_apply_changes(vty, NULL);
+ return pim_process_rp_plist_cmd(vty, rp_str, plist);
}
-DEFUN (no_ip_pim_rp,
+DEFPY (no_ip_pim_rp,
no_ip_pim_rp_cmd,
- "no ip pim rp A.B.C.D [A.B.C.D/M]",
+ "no ip pim rp A.B.C.D$rp [A.B.C.D/M]$gp",
NO_STR
IP_STR
"pim multicast routing\n"
@@ -7096,106 +7148,23 @@ DEFUN (no_ip_pim_rp,
"ip address of RP\n"
"Group Address range to cover\n")
{
- int idx_rp = 4, idx_group = 5;
- const char *group_str =
- (argc == 6) ? argv[idx_group]->arg : "224.0.0.0/4";
- char group_list_xpath[XPATH_MAXLEN];
- char group_xpath[XPATH_MAXLEN];
- char rp_xpath[XPATH_MAXLEN];
- int printed;
- const char *vrfname;
- const struct lyd_node *group_dnode;
-
- vrfname = pim_cli_get_vrf_name(vty);
- if (vrfname == NULL)
- return CMD_WARNING_CONFIG_FAILED;
-
- snprintf(rp_xpath, sizeof(rp_xpath), FRR_PIM_STATIC_RP_XPATH,
- "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4",
- argv[idx_rp]->arg);
-
-
- printed = snprintf(group_list_xpath, sizeof(group_list_xpath),
- "%s/group-list", rp_xpath);
+ const char *group_str = (gp_str) ? gp_str : "224.0.0.0/4";
- if (printed >= (int)(sizeof(group_list_xpath))) {
- vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
- XPATH_MAXLEN);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- printed = snprintf(group_xpath, sizeof(group_xpath), "%s[.='%s']",
- group_list_xpath, group_str);
-
- if (printed >= (int)(sizeof(group_xpath))) {
- vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
- XPATH_MAXLEN);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- if (!yang_dnode_exists(vty->candidate_config->dnode, group_xpath)) {
- vty_out(vty, "%% Unable to find specified RP\n");
- return NB_OK;
- }
-
- group_dnode = yang_dnode_get(vty->candidate_config->dnode, group_xpath);
-
- if (yang_is_last_list_dnode(group_dnode))
- nb_cli_enqueue_change(vty, rp_xpath, NB_OP_DESTROY, NULL);
- else
- nb_cli_enqueue_change(vty, group_list_xpath, NB_OP_DESTROY,
- group_str);
-
- return nb_cli_apply_changes(vty, NULL);
+ return pim_process_no_rp_cmd(vty, rp_str, group_str);
}
-DEFUN (no_ip_pim_rp_prefix_list,
+DEFPY (no_ip_pim_rp_prefix_list,
no_ip_pim_rp_prefix_list_cmd,
- "no ip pim rp A.B.C.D prefix-list WORD",
+ "no ip pim rp A.B.C.D$rp prefix-list WORD$plist",
NO_STR
IP_STR
"pim multicast routing\n"
- "Rendevous Point\n"
+ "Rendezvous Point\n"
"ip address of RP\n"
"group prefix-list filter\n"
"Name of a prefix-list\n")
{
- int idx_rp = 4;
- int idx_plist = 6;
- char rp_xpath[XPATH_MAXLEN];
- char plist_xpath[XPATH_MAXLEN];
- const char *vrfname;
- const struct lyd_node *plist_dnode;
- const char *plist;
-
- vrfname = pim_cli_get_vrf_name(vty);
- if (vrfname == NULL)
- return CMD_WARNING_CONFIG_FAILED;
-
- snprintf(rp_xpath, sizeof(rp_xpath), FRR_PIM_STATIC_RP_XPATH,
- "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4",
- argv[idx_rp]->arg);
-
- snprintf(plist_xpath, sizeof(plist_xpath), FRR_PIM_STATIC_RP_XPATH,
- "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4",
- argv[idx_rp]->arg);
- strlcat(plist_xpath, "/prefix-list", sizeof(plist_xpath));
-
- plist_dnode = yang_dnode_get(vty->candidate_config->dnode, plist_xpath);
- if (!plist_dnode) {
- vty_out(vty, "%% Unable to find specified RP\n");
- return NB_OK;
- }
-
- plist = yang_dnode_get_string(plist_dnode, plist_xpath);
- if (strcmp(argv[idx_plist]->arg, plist)) {
- vty_out(vty, "%% Unable to find specified RP\n");
- return NB_OK;
- }
-
- nb_cli_enqueue_change(vty, rp_xpath, NB_OP_DESTROY, NULL);
-
- return nb_cli_apply_changes(vty, NULL);
+ return pim_process_no_rp_plist_cmd(vty, rp_str, plist);
}
DEFUN (ip_pim_ssm_prefix_list,
@@ -7959,11 +7928,7 @@ DEFUN (interface_ip_pim_drprio,
{
int idx_number = 3;
- nb_cli_enqueue_change(vty, "./dr-priority", NB_OP_MODIFY,
- argv[idx_number]->arg);
-
- return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ return pim_process_ip_pim_drprio_cmd(vty, argv[idx_number]->arg);
}
DEFUN (interface_no_ip_pim_drprio,
@@ -7975,10 +7940,7 @@ DEFUN (interface_no_ip_pim_drprio,
"Revert the Designated Router Priority to default\n"
"Old Value of the Priority\n")
{
- nb_cli_enqueue_change(vty, "./dr-priority", NB_OP_DESTROY, NULL);
-
- return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ return pim_process_no_ip_pim_drprio_cmd(vty);
}
DEFPY_HIDDEN (interface_ip_igmp_query_generate,
@@ -8069,20 +8031,7 @@ DEFPY (interface_ip_pim_activeactive,
PIM_STR
"Mark interface as Active-Active for MLAG operations, Hidden because not finished yet\n")
{
- if (no)
- nb_cli_enqueue_change(vty, "./active-active", NB_OP_MODIFY,
- "false");
- else {
- nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
- "true");
-
- nb_cli_enqueue_change(vty, "./active-active", NB_OP_MODIFY,
- "true");
- }
-
- return nb_cli_apply_changes(vty,
- FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ return pim_process_ip_pim_activeactive_cmd(vty, no);
}
DEFUN_HIDDEN (interface_ip_pim_ssm,
@@ -8094,11 +8043,7 @@ DEFUN_HIDDEN (interface_ip_pim_ssm,
{
int ret;
- nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY, "true");
-
- ret = nb_cli_apply_changes(vty,
- FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ ret = pim_process_ip_pim_cmd(vty);
if (ret != NB_OK)
return ret;
@@ -8116,11 +8061,7 @@ DEFUN_HIDDEN (interface_ip_pim_sm,
PIM_STR
IFACE_PIM_SM_STR)
{
- nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY, "true");
-
- return nb_cli_apply_changes(vty,
- FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ return pim_process_ip_pim_cmd(vty);
}
DEFUN (interface_ip_pim,
@@ -8129,12 +8070,7 @@ DEFUN (interface_ip_pim,
IP_STR
PIM_STR)
{
- nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY, "true");
-
- return nb_cli_apply_changes(vty,
- FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
-
+ return pim_process_ip_pim_cmd(vty);
}
DEFUN_HIDDEN (interface_no_ip_pim_ssm,
@@ -8145,39 +8081,7 @@ DEFUN_HIDDEN (interface_no_ip_pim_ssm,
PIM_STR
IFACE_PIM_STR)
{
- const struct lyd_node *igmp_enable_dnode;
- char igmp_if_xpath[XPATH_MAXLEN];
-
- int printed =
- snprintf(igmp_if_xpath, sizeof(igmp_if_xpath),
- "%s/frr-gmp:gmp/address-family[address-family='%s']",
- VTY_CURR_XPATH, "frr-routing:ipv4");
-
- if (printed >= (int)(sizeof(igmp_if_xpath))) {
- vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
- XPATH_MAXLEN);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- igmp_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
- FRR_GMP_ENABLE_XPATH,
- VTY_CURR_XPATH,
- "frr-routing:ipv4");
- if (!igmp_enable_dnode) {
- nb_cli_enqueue_change(vty, igmp_if_xpath, NB_OP_DESTROY, NULL);
- nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- } else {
- if (!yang_dnode_get_bool(igmp_enable_dnode, ".")) {
- nb_cli_enqueue_change(vty, igmp_if_xpath, NB_OP_DESTROY,
- NULL);
- nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- } else
- nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
- "false");
- }
-
- return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ return pim_process_no_ip_pim_cmd(vty);
}
DEFUN_HIDDEN (interface_no_ip_pim_sm,
@@ -8188,41 +8092,7 @@ DEFUN_HIDDEN (interface_no_ip_pim_sm,
PIM_STR
IFACE_PIM_SM_STR)
{
- const struct lyd_node *igmp_enable_dnode;
- char igmp_if_xpath[XPATH_MAXLEN];
-
- int printed =
- snprintf(igmp_if_xpath, sizeof(igmp_if_xpath),
- "%s/frr-gmp:gmp/address-family[address-family='%s']",
- VTY_CURR_XPATH, "frr-routing:ipv4");
-
- if (printed >= (int)(sizeof(igmp_if_xpath))) {
- vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
- XPATH_MAXLEN);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- igmp_enable_dnode =
- yang_dnode_getf(vty->candidate_config->dnode,
- FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
- "frr-routing:ipv4");
-
- if (!igmp_enable_dnode) {
- nb_cli_enqueue_change(vty, igmp_if_xpath, NB_OP_DESTROY, NULL);
- nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- } else {
- if (!yang_dnode_get_bool(igmp_enable_dnode, ".")) {
- nb_cli_enqueue_change(vty, igmp_if_xpath, NB_OP_DESTROY,
- NULL);
- nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- } else
- nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
- "false");
- }
-
- return nb_cli_apply_changes(vty,
- FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ return pim_process_no_ip_pim_cmd(vty);
}
DEFUN (interface_no_ip_pim,
@@ -8232,41 +8102,7 @@ DEFUN (interface_no_ip_pim,
IP_STR
PIM_STR)
{
- const struct lyd_node *igmp_enable_dnode;
- char igmp_if_xpath[XPATH_MAXLEN];
-
- int printed =
- snprintf(igmp_if_xpath, sizeof(igmp_if_xpath),
- "%s/frr-gmp:gmp/address-family[address-family='%s']",
- VTY_CURR_XPATH, "frr-routing:ipv4");
-
- if (printed >= (int)(sizeof(igmp_if_xpath))) {
- vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
- XPATH_MAXLEN);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- igmp_enable_dnode =
- yang_dnode_getf(vty->candidate_config->dnode,
- FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
- "frr-routing:ipv4");
-
- if (!igmp_enable_dnode) {
- nb_cli_enqueue_change(vty, igmp_if_xpath, NB_OP_DESTROY, NULL);
- nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- } else {
- if (!yang_dnode_get_bool(igmp_enable_dnode, ".")) {
- nb_cli_enqueue_change(vty, igmp_if_xpath, NB_OP_DESTROY,
- NULL);
- nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- } else
- nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
- "false");
- }
-
- return nb_cli_apply_changes(vty,
- FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ return pim_process_no_ip_pim_cmd(vty);
}
/* boundaries */
@@ -8279,13 +8115,7 @@ DEFUN(interface_ip_pim_boundary_oil,
"Filter OIL by group using prefix list\n"
"Prefix list to filter OIL with\n")
{
- nb_cli_enqueue_change(vty, "./multicast-boundary-oil", NB_OP_MODIFY,
- argv[4]->arg);
-
- return nb_cli_apply_changes(vty,
- FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
-
+ return pim_process_ip_pim_boundary_oil_cmd(vty, argv[4]->arg);
}
DEFUN(interface_no_ip_pim_boundary_oil,
@@ -8298,12 +8128,7 @@ DEFUN(interface_no_ip_pim_boundary_oil,
"Filter OIL by group using prefix list\n"
"Prefix list to filter OIL with\n")
{
- nb_cli_enqueue_change(vty, "./multicast-boundary-oil", NB_OP_DESTROY,
- NULL);
-
- return nb_cli_apply_changes(vty,
- FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ return pim_process_no_ip_pim_boundary_oil_cmd(vty);
}
DEFUN (interface_ip_mroute,
@@ -8324,13 +8149,8 @@ DEFUN (interface_ip_mroute,
else
source_str = argv[idx_ipv4 + 1]->arg;
- nb_cli_enqueue_change(vty, "./oif", NB_OP_MODIFY,
- argv[idx_interface]->arg);
-
- return nb_cli_apply_changes(vty,
- FRR_PIM_MROUTE_XPATH,
- "frr-routing:ipv4", source_str,
- argv[idx_ipv4]->arg);
+ return pim_process_ip_mroute_cmd(vty, argv[idx_interface]->arg,
+ argv[idx_ipv4]->arg, source_str);
}
DEFUN (interface_no_ip_mroute,
@@ -8343,6 +8163,7 @@ DEFUN (interface_no_ip_mroute,
"Group Address\n"
"Source Address\n")
{
+ int idx_interface = 3;
int idx_ipv4 = 4;
const char *source_str;
@@ -8351,12 +8172,8 @@ DEFUN (interface_no_ip_mroute,
else
source_str = argv[idx_ipv4 + 1]->arg;
- nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
-
- return nb_cli_apply_changes(vty,
- FRR_PIM_MROUTE_XPATH,
- "frr-routing:ipv4", source_str,
- argv[idx_ipv4]->arg);
+ return pim_process_no_ip_mroute_cmd(vty, argv[idx_interface]->arg,
+ argv[idx_ipv4]->arg, source_str);
}
DEFUN (interface_ip_pim_hello,
@@ -8370,31 +8187,14 @@ DEFUN (interface_ip_pim_hello,
{
int idx_time = 3;
int idx_hold = 4;
- const struct lyd_node *igmp_enable_dnode;
-
- igmp_enable_dnode =
- yang_dnode_getf(vty->candidate_config->dnode,
- FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
- "frr-routing:ipv4");
- if (!igmp_enable_dnode) {
- nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
- "true");
- } else {
- if (!yang_dnode_get_bool(igmp_enable_dnode, "."))
- nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
- "true");
- }
-
- nb_cli_enqueue_change(vty, "./hello-interval", NB_OP_MODIFY,
- argv[idx_time]->arg);
if (argc == idx_hold + 1)
- nb_cli_enqueue_change(vty, "./hello-holdtime", NB_OP_MODIFY,
- argv[idx_hold]->arg);
+ return pim_process_ip_pim_hello_cmd(vty, argv[idx_time]->arg,
+ argv[idx_hold]->arg);
- return nb_cli_apply_changes(vty,
- FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ else
+ return pim_process_ip_pim_hello_cmd(vty, argv[idx_time]->arg,
+ NULL);
}
DEFUN (interface_no_ip_pim_hello,
@@ -8407,12 +8207,7 @@ DEFUN (interface_no_ip_pim_hello,
IGNORED_IN_NO_STR
IGNORED_IN_NO_STR)
{
- nb_cli_enqueue_change(vty, "./hello-interval", NB_OP_DESTROY, NULL);
- nb_cli_enqueue_change(vty, "./hello-holdtime", NB_OP_DESTROY, NULL);
-
- return nb_cli_apply_changes(vty,
- FRR_PIM_INTERFACE_XPATH,
- "frr-routing:ipv4");
+ return pim_process_no_ip_pim_hello_cmd(vty);
}
DEFUN (debug_igmp,
@@ -8593,6 +8388,7 @@ DEFUN (debug_pim,
PIM_DO_DEBUG_MSDP_EVENTS;
PIM_DO_DEBUG_MSDP_PACKETS;
PIM_DO_DEBUG_BSM;
+ PIM_DO_DEBUG_VXLAN;
return CMD_SUCCESS;
}
@@ -8612,6 +8408,7 @@ DEFUN (no_debug_pim,
PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
PIM_DONT_DEBUG_BSM;
+ PIM_DONT_DEBUG_VXLAN;
return CMD_SUCCESS;
}
@@ -8639,6 +8436,31 @@ DEFUN (no_debug_pim_nht,
return CMD_SUCCESS;
}
+DEFUN (debug_pim_nht_det,
+ debug_pim_nht_det_cmd,
+ "debug pim nht detail",
+ DEBUG_STR
+ DEBUG_PIM_STR
+ "Nexthop Tracking\n"
+ "Detailed Information\n")
+{
+ PIM_DO_DEBUG_PIM_NHT_DETAIL;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_pim_nht_det,
+ no_debug_pim_nht_det_cmd,
+ "no debug pim nht detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ "Nexthop Tracking\n"
+ "Detailed Information\n")
+{
+ PIM_DONT_DEBUG_PIM_NHT_DETAIL;
+ return CMD_SUCCESS;
+}
+
DEFUN (debug_pim_nht_rp,
debug_pim_nht_rp_cmd,
"debug pim nht rp",
@@ -10905,6 +10727,8 @@ void pim_cmd_init(void)
install_element(ENABLE_NODE, &no_debug_pim_cmd);
install_element(ENABLE_NODE, &debug_pim_nht_cmd);
install_element(ENABLE_NODE, &no_debug_pim_nht_cmd);
+ install_element(ENABLE_NODE, &debug_pim_nht_det_cmd);
+ install_element(ENABLE_NODE, &no_debug_pim_nht_det_cmd);
install_element(ENABLE_NODE, &debug_pim_nht_rp_cmd);
install_element(ENABLE_NODE, &no_debug_pim_nht_rp_cmd);
install_element(ENABLE_NODE, &debug_pim_events_cmd);
@@ -10956,6 +10780,8 @@ void pim_cmd_init(void)
install_element(CONFIG_NODE, &no_debug_pim_cmd);
install_element(CONFIG_NODE, &debug_pim_nht_cmd);
install_element(CONFIG_NODE, &no_debug_pim_nht_cmd);
+ install_element(CONFIG_NODE, &debug_pim_nht_det_cmd);
+ install_element(CONFIG_NODE, &no_debug_pim_nht_det_cmd);
install_element(CONFIG_NODE, &debug_pim_nht_rp_cmd);
install_element(CONFIG_NODE, &no_debug_pim_nht_rp_cmd);
install_element(CONFIG_NODE, &debug_pim_events_cmd);
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index 6adea54a61..c5d89f8065 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -30,6 +30,7 @@
#include "nexthop.h"
#include "vrf.h"
#include "ferr.h"
+#include "lib/srcdest_table.h"
#include "pimd.h"
#include "pim_vty.h"
@@ -326,3 +327,329 @@ int pim_process_no_register_suppress_cmd(struct vty *vty)
return nb_cli_apply_changes(vty, NULL);
}
+
+int pim_process_ip_pim_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+int pim_process_no_ip_pim_cmd(struct vty *vty)
+{
+ const struct lyd_node *mld_enable_dnode;
+ char mld_if_xpath[XPATH_MAXLEN];
+
+ int printed =
+ snprintf(mld_if_xpath, sizeof(mld_if_xpath),
+ "%s/frr-gmp:gmp/address-family[address-family='%s']",
+ VTY_CURR_XPATH, FRR_PIM_AF_XPATH_VAL);
+
+ if (printed >= (int)(sizeof(mld_if_xpath))) {
+ vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
+ XPATH_MAXLEN);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ mld_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+
+ if (!mld_enable_dnode) {
+ nb_cli_enqueue_change(vty, mld_if_xpath, NB_OP_DESTROY, NULL);
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+ } else {
+ if (!yang_dnode_get_bool(mld_enable_dnode, ".")) {
+ nb_cli_enqueue_change(vty, mld_if_xpath, NB_OP_DESTROY,
+ NULL);
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+ } else
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "false");
+ }
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
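
/*
 * Illustrative sketch (not part of the patch): the truncation check in the
 * helper above relies on snprintf() returning the length it *would* have
 * written.  Standalone demonstration of the same pattern; the function and
 * buffer names here are made up:
 */
#include <stdio.h>

static int build_xpath(char *buf, size_t bufsz, const char *base,
		       const char *af)
{
	int printed = snprintf(buf, bufsz,
			       "%s/frr-gmp:gmp/address-family[address-family='%s']",
			       base, af);

	/* snprintf reports the untruncated length; a value >= bufsz means
	 * the result did not fit and buf holds a truncated string. */
	if (printed < 0 || printed >= (int)bufsz)
		return -1;
	return printed;
}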
+
+int pim_process_ip_pim_drprio_cmd(struct vty *vty, const char *drpriority_str)
+{
+ nb_cli_enqueue_change(vty, "./dr-priority", NB_OP_MODIFY,
+ drpriority_str);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+int pim_process_no_ip_pim_drprio_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./dr-priority", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+int pim_process_ip_pim_hello_cmd(struct vty *vty, const char *hello_str,
+ const char *hold_str)
+{
+ const struct lyd_node *mld_enable_dnode;
+
+ mld_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
+ FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+
+ if (!mld_enable_dnode) {
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ } else {
+ if (!yang_dnode_get_bool(mld_enable_dnode, "."))
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+ }
+
+ nb_cli_enqueue_change(vty, "./hello-interval", NB_OP_MODIFY, hello_str);
+
+ if (hold_str)
+ nb_cli_enqueue_change(vty, "./hello-holdtime", NB_OP_MODIFY,
+ hold_str);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+int pim_process_no_ip_pim_hello_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./hello-interval", NB_OP_DESTROY, NULL);
+ nb_cli_enqueue_change(vty, "./hello-holdtime", NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+int pim_process_ip_pim_activeactive_cmd(struct vty *vty, const char *no)
+{
+ if (no)
+ nb_cli_enqueue_change(vty, "./active-active", NB_OP_MODIFY,
+ "false");
+ else {
+ nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
+ "true");
+
+ nb_cli_enqueue_change(vty, "./active-active", NB_OP_MODIFY,
+ "true");
+ }
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+int pim_process_ip_pim_boundary_oil_cmd(struct vty *vty, const char *oil)
+{
+ nb_cli_enqueue_change(vty, "./multicast-boundary-oil", NB_OP_MODIFY,
+ oil);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+int pim_process_no_ip_pim_boundary_oil_cmd(struct vty *vty)
+{
+ nb_cli_enqueue_change(vty, "./multicast-boundary-oil", NB_OP_DESTROY,
+ NULL);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
+int pim_process_ip_mroute_cmd(struct vty *vty, const char *interface,
+ const char *group_str, const char *source_str)
+{
+ nb_cli_enqueue_change(vty, "./oif", NB_OP_MODIFY, interface);
+
+ if (!source_str) {
+ char buf[SRCDEST2STR_BUFFER];
+
+ inet_ntop(AF_INET6, &in6addr_any, buf, sizeof(buf));
+ return nb_cli_apply_changes(vty, FRR_PIM_MROUTE_XPATH,
+ FRR_PIM_AF_XPATH_VAL, buf,
+ group_str);
+ }
+
+ return nb_cli_apply_changes(vty, FRR_PIM_MROUTE_XPATH,
+ FRR_PIM_AF_XPATH_VAL, source_str,
+ group_str);
+}
+
+int pim_process_no_ip_mroute_cmd(struct vty *vty, const char *interface,
+ const char *group_str, const char *source_str)
+{
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
+
+ if (!source_str) {
+ char buf[SRCDEST2STR_BUFFER];
+
+ inet_ntop(AF_INET6, &in6addr_any, buf, sizeof(buf));
+ return nb_cli_apply_changes(vty, FRR_PIM_MROUTE_XPATH,
+ FRR_PIM_AF_XPATH_VAL, buf,
+ group_str);
+ }
+
+ return nb_cli_apply_changes(vty, FRR_PIM_MROUTE_XPATH,
+ FRR_PIM_AF_XPATH_VAL, source_str,
+ group_str);
+}
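
/*
 * Sketch only: when no source is given, the two mroute helpers above key the
 * change on the unspecified address, rendered with inet_ntop().  For the
 * IPv6 build that string is "::".  Minimal standalone version (buffer size
 * differs from the SRCDEST2STR_BUFFER used above):
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	char buf[INET6_ADDRSTRLEN];

	inet_ntop(AF_INET6, &in6addr_any, buf, sizeof(buf));
	printf("wildcard source key: %s\n", buf);	/* prints "::" */
	return 0;
}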
+
+int pim_process_rp_cmd(struct vty *vty, const char *rp_str,
+ const char *group_str)
+{
+ const char *vrfname;
+ char rp_group_xpath[XPATH_MAXLEN];
+ int result = 0;
+ struct prefix group;
+ pim_addr rp_addr;
+
+ result = str2prefix(group_str, &group);
+ if (result) {
+ struct prefix temp;
+
+ prefix_copy(&temp, &group);
+ apply_mask(&temp);
+ if (!prefix_same(&group, &temp)) {
+ vty_out(vty, "%% Inconsistent address and mask: %s\n",
+ group_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ }
+
+ if (!result) {
+ vty_out(vty, "%% Bad group address specified: %s\n", group_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ result = inet_pton(PIM_AF, rp_str, &rp_addr);
+ if (result <= 0) {
+ vty_out(vty, "%% Bad RP address specified: %s\n", rp_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(rp_group_xpath, sizeof(rp_group_xpath),
+ FRR_PIM_STATIC_RP_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL, rp_str);
+ strlcat(rp_group_xpath, "/group-list", sizeof(rp_group_xpath));
+
+ nb_cli_enqueue_change(vty, rp_group_xpath, NB_OP_CREATE, group_str);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
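
/*
 * Sketch of the "inconsistent address and mask" check above (apply_mask() +
 * prefix_same() in FRR terms): a group prefix is rejected when bits beyond
 * the prefix length are set, e.g. 225.1.1.1/24.  Plain-C IPv4 equivalent;
 * names are ad hoc, not FRR APIs:
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

static bool group_prefix_is_consistent(const char *addr, int plen)
{
	struct in_addr a;

	if (inet_pton(AF_INET, addr, &a) != 1 || plen < 0 || plen > 32)
		return false;

	uint32_t host = ntohl(a.s_addr);
	uint32_t mask = (plen == 0) ? 0 : 0xffffffffu << (32 - plen);

	return (host & ~mask) == 0;	/* no host bits set past the mask */
}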
+
+int pim_process_no_rp_cmd(struct vty *vty, const char *rp_str,
+ const char *group_str)
+{
+ char group_list_xpath[XPATH_MAXLEN];
+ char group_xpath[XPATH_MAXLEN];
+ char rp_xpath[XPATH_MAXLEN];
+ int printed;
+ const char *vrfname;
+ const struct lyd_node *group_dnode;
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(rp_xpath, sizeof(rp_xpath), FRR_PIM_STATIC_RP_XPATH,
+ "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL, rp_str);
+
+ printed = snprintf(group_list_xpath, sizeof(group_list_xpath),
+ "%s/group-list", rp_xpath);
+
+ if (printed >= (int)(sizeof(group_list_xpath))) {
+ vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
+ XPATH_MAXLEN);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ printed = snprintf(group_xpath, sizeof(group_xpath), "%s[.='%s']",
+ group_list_xpath, group_str);
+
+ if (printed >= (int)(sizeof(group_xpath))) {
+ vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
+ XPATH_MAXLEN);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ group_dnode = yang_dnode_get(vty->candidate_config->dnode, group_xpath);
+ if (!group_dnode) {
+ vty_out(vty, "%% Unable to find specified RP\n");
+ return NB_OK;
+ }
+
+ if (yang_is_last_list_dnode(group_dnode))
+ nb_cli_enqueue_change(vty, rp_xpath, NB_OP_DESTROY, NULL);
+ else
+ nb_cli_enqueue_change(vty, group_list_xpath, NB_OP_DESTROY,
+ group_str);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+int pim_process_rp_plist_cmd(struct vty *vty, const char *rp_str,
+ const char *prefix_list)
+{
+ const char *vrfname;
+ char rp_plist_xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(rp_plist_xpath, sizeof(rp_plist_xpath),
+ FRR_PIM_STATIC_RP_XPATH, "frr-pim:pimd", "pim", vrfname,
+ FRR_PIM_AF_XPATH_VAL, rp_str);
+ strlcat(rp_plist_xpath, "/prefix-list", sizeof(rp_plist_xpath));
+
+ nb_cli_enqueue_change(vty, rp_plist_xpath, NB_OP_MODIFY, prefix_list);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+int pim_process_no_rp_plist_cmd(struct vty *vty, const char *rp_str,
+ const char *prefix_list)
+{
+ char rp_xpath[XPATH_MAXLEN];
+ char plist_xpath[XPATH_MAXLEN];
+ const char *vrfname;
+ const struct lyd_node *plist_dnode;
+ const char *plist;
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(rp_xpath, sizeof(rp_xpath), FRR_PIM_STATIC_RP_XPATH,
+ "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL, rp_str);
+
+ snprintf(plist_xpath, sizeof(plist_xpath), FRR_PIM_STATIC_RP_XPATH,
+ "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL, rp_str);
+ strlcat(plist_xpath, "/prefix-list", sizeof(plist_xpath));
+
+ plist_dnode = yang_dnode_get(vty->candidate_config->dnode, plist_xpath);
+ if (!plist_dnode) {
+ vty_out(vty, "%% Unable to find specified RP\n");
+ return NB_OK;
+ }
+
+ plist = yang_dnode_get_string(plist_dnode, plist_xpath);
+ if (strcmp(prefix_list, plist)) {
+ vty_out(vty, "%% Unable to find specified RP\n");
+ return NB_OK;
+ }
+
+ nb_cli_enqueue_change(vty, rp_xpath, NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
diff --git a/pimd/pim_cmd_common.h b/pimd/pim_cmd_common.h
index 49fc6bcbeb..b7e6b6ac80 100644
--- a/pimd/pim_cmd_common.h
+++ b/pimd/pim_cmd_common.h
@@ -35,5 +35,28 @@ int pim_process_rp_kat_cmd(struct vty *vty, const char *rpkat);
int pim_process_no_rp_kat_cmd(struct vty *vty);
int pim_process_register_suppress_cmd(struct vty *vty, const char *rst);
int pim_process_no_register_suppress_cmd(struct vty *vty);
+int pim_process_rp_cmd(struct vty *vty, const char *rp_str,
+ const char *group_str);
+int pim_process_no_rp_cmd(struct vty *vty, const char *rp_str,
+ const char *group_str);
+int pim_process_rp_plist_cmd(struct vty *vty, const char *rp_str,
+ const char *prefix_list);
+int pim_process_no_rp_plist_cmd(struct vty *vty, const char *rp_str,
+ const char *prefix_list);
+
+int pim_process_ip_pim_cmd(struct vty *vty);
+int pim_process_no_ip_pim_cmd(struct vty *vty);
+int pim_process_ip_pim_drprio_cmd(struct vty *vty, const char *drpriority_str);
+int pim_process_no_ip_pim_drprio_cmd(struct vty *vty);
+int pim_process_ip_pim_hello_cmd(struct vty *vty, const char *hello_str,
+ const char *hold_str);
+int pim_process_no_ip_pim_hello_cmd(struct vty *vty);
+int pim_process_ip_pim_activeactive_cmd(struct vty *vty, const char *no);
+int pim_process_ip_pim_boundary_oil_cmd(struct vty *vty, const char *oil);
+int pim_process_no_ip_pim_boundary_oil_cmd(struct vty *vty);
+int pim_process_ip_mroute_cmd(struct vty *vty, const char *interface,
+ const char *group_str, const char *source_str);
+int pim_process_no_ip_mroute_cmd(struct vty *vty, const char *interface,
+ const char *group_str, const char *src_str);
#endif /* PIM_CMD_COMMON_H */
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 5425aec233..4470d05663 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -53,8 +53,8 @@
#if PIM_IPV == 4
static void pim_if_igmp_join_del_all(struct interface *ifp);
static int igmp_join_sock(const char *ifname, ifindex_t ifindex,
- struct in_addr group_addr,
- struct in_addr source_addr);
+ struct in_addr group_addr, struct in_addr source_addr,
+ struct pim_interface *pim_ifp);
#endif
void pim_if_init(struct pim_instance *pim)
@@ -127,7 +127,6 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
pim_ifp->pim = ifp->vrf->info;
pim_ifp->mroute_vif_index = -1;
-#if PIM_IPV == 4
pim_ifp->igmp_version = IGMP_DEFAULT_VERSION;
pim_ifp->gm_default_robustness_variable =
IGMP_DEFAULT_ROBUSTNESS_VARIABLE;
@@ -153,10 +152,12 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
if (pim)
PIM_IF_DO_PIM(pim_ifp->options);
+#if PIM_IPV == 4
if (igmp)
PIM_IF_DO_IGMP(pim_ifp->options);
PIM_IF_DO_IGMP_LISTEN_ALLROUTERS(pim_ifp->options);
+#endif
pim_ifp->gm_join_list = NULL;
pim_ifp->pim_neighbor_list = NULL;
@@ -189,7 +190,6 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
pim_sock_reset(ifp);
pim_if_add_vif(ifp, ispimreg, is_vxlan_term);
-#endif
pim_ifp->pim->mcast_if_count++;
return pim_ifp;
@@ -208,9 +208,12 @@ void pim_if_delete(struct interface *ifp)
if (pim_ifp->gm_join_list) {
pim_if_igmp_join_del_all(ifp);
}
+#endif
pim_ifchannel_delete_all(ifp);
+#if PIM_IPV == 4
igmp_sock_delete_all(ifp);
+#endif
pim_neighbor_delete_all(ifp, "Interface removed from configuration");
@@ -224,7 +227,6 @@ void pim_if_delete(struct interface *ifp)
XFREE(MTYPE_PIM_INTERFACE, pim_ifp->boundary_oil_plist);
XFREE(MTYPE_PIM_INTERFACE, pim_ifp);
-#endif
ifp->info = NULL;
}
@@ -512,6 +514,26 @@ void pim_if_addr_add(struct connected *ifc)
CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY)
? "secondary"
: "primary");
+#if PIM_IPV != 4
+ if (IN6_IS_ADDR_LINKLOCAL(&ifc->address->u.prefix6) ||
+ IN6_IS_ADDR_LOOPBACK(&ifc->address->u.prefix6)) {
+ if (IN6_IS_ADDR_UNSPECIFIED(&pim_ifp->ll_lowest))
+ pim_ifp->ll_lowest = ifc->address->u.prefix6;
+ else if (IPV6_ADDR_CMP(&ifc->address->u.prefix6,
+ &pim_ifp->ll_lowest) < 0)
+ pim_ifp->ll_lowest = ifc->address->u.prefix6;
+
+ if (IPV6_ADDR_CMP(&ifc->address->u.prefix6,
+ &pim_ifp->ll_highest) > 0)
+ pim_ifp->ll_highest = ifc->address->u.prefix6;
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug(
+ "%s: new link-local %pI6, lowest now %pI6, highest %pI6",
+ ifc->ifp->name, &ifc->address->u.prefix6,
+ &pim_ifp->ll_lowest, &pim_ifp->ll_highest);
+ }
+#endif
detect_address_change(ifp, 0, __func__);
@@ -552,7 +574,7 @@ void pim_if_addr_add(struct connected *ifc)
close(ij->sock_fd);
join_fd = igmp_join_sock(
ifp->name, ifp->ifindex, ij->group_addr,
- ij->source_addr);
+ ij->source_addr, pim_ifp);
if (join_fd < 0) {
char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN];
@@ -711,6 +733,43 @@ void pim_if_addr_del(struct connected *ifc, int force_prim_as_any)
? "secondary"
: "primary");
+#if PIM_IPV == 6
+ struct pim_interface *pim_ifp = ifc->ifp->info;
+
+ if (pim_ifp &&
+ (!IPV6_ADDR_CMP(&ifc->address->u.prefix6, &pim_ifp->ll_lowest) ||
+ !IPV6_ADDR_CMP(&ifc->address->u.prefix6, &pim_ifp->ll_highest))) {
+ struct listnode *cnode;
+ struct connected *cc;
+
+ memset(&pim_ifp->ll_lowest, 0xff, sizeof(pim_ifp->ll_lowest));
+ memset(&pim_ifp->ll_highest, 0, sizeof(pim_ifp->ll_highest));
+
+ for (ALL_LIST_ELEMENTS_RO(ifc->ifp->connected, cnode, cc)) {
+ if (!IN6_IS_ADDR_LINKLOCAL(&cc->address->u.prefix6) &&
+ !IN6_IS_ADDR_LOOPBACK(&cc->address->u.prefix6))
+ continue;
+
+ if (IPV6_ADDR_CMP(&cc->address->u.prefix6,
+ &pim_ifp->ll_lowest) < 0)
+ pim_ifp->ll_lowest = cc->address->u.prefix6;
+ if (IPV6_ADDR_CMP(&cc->address->u.prefix6,
+ &pim_ifp->ll_highest) > 0)
+ pim_ifp->ll_highest = cc->address->u.prefix6;
+ }
+
+ if (pim_ifp->ll_lowest.s6_addr[0] == 0xff)
+ memset(&pim_ifp->ll_lowest, 0,
+ sizeof(pim_ifp->ll_lowest));
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug(
+ "%s: removed link-local %pI6, lowest now %pI6, highest %pI6",
+ ifc->ifp->name, &ifc->address->u.prefix6,
+ &pim_ifp->ll_lowest, &pim_ifp->ll_highest);
+ }
+#endif
+
detect_address_change(ifp, force_prim_as_any, __func__);
pim_if_addr_del_igmp(ifc);
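
/*
 * Sketch of the ll_lowest/ll_highest bookkeeping in the two hunks above,
 * assuming IPV6_ADDR_CMP() behaves like a byte-wise memcmp().  Hypothetical
 * standalone version, not FRR code:
 */
#include <netinet/in.h>
#include <string.h>

struct ll_range {
	struct in6_addr lowest;		/* initialised to an all-0xff sentinel */
	struct in6_addr highest;	/* initialised to all-zero */
};

static void ll_range_update(struct ll_range *r, const struct in6_addr *a)
{
	if (!IN6_IS_ADDR_LINKLOCAL(a) && !IN6_IS_ADDR_LOOPBACK(a))
		return;
	if (memcmp(a, &r->lowest, sizeof(*a)) < 0)
		r->lowest = *a;
	if (memcmp(a, &r->highest, sizeof(*a)) > 0)
		r->highest = *a;
}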
@@ -825,17 +884,36 @@ pim_addr pim_find_primary_addr(struct interface *ifp)
{
struct connected *ifc;
struct listnode *node;
- int v4_addrs = 0;
- int v6_addrs = 0;
struct pim_interface *pim_ifp = ifp->info;
- if (pim_ifp && !pim_addr_is_any(pim_ifp->update_source)) {
+ if (pim_ifp && !pim_addr_is_any(pim_ifp->update_source))
return pim_ifp->update_source;
- }
+
+#if PIM_IPV == 6
+ if (pim_ifp)
+ return pim_ifp->ll_highest;
+
+ pim_addr best_addr = PIMADDR_ANY;
for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) {
pim_addr addr;
+ if (ifc->address->family != AF_INET6)
+ continue;
+
+ addr = pim_addr_from_prefix(ifc->address);
+ if (!IN6_IS_ADDR_LINKLOCAL(&addr))
+ continue;
+ if (pim_addr_cmp(addr, best_addr) > 0)
+ best_addr = addr;
+ }
+
+ return best_addr;
+#else
+ int v4_addrs = 0;
+ int v6_addrs = 0;
+
+ for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) {
switch (ifc->address->family) {
case AF_INET:
v4_addrs++;
@@ -853,16 +931,9 @@ pim_addr pim_find_primary_addr(struct interface *ifp)
if (ifc->address->family != PIM_AF)
continue;
- addr = pim_addr_from_prefix(ifc->address);
-
-#if PIM_IPV == 6
- if (!IN6_IS_ADDR_LINKLOCAL(&addr))
- continue;
-#endif
- return addr;
+ return pim_addr_from_prefix(ifc->address);
}
-#if PIM_IPV == 4
/*
* If we have no v4_addrs and v6 is configured
* We probably are using unnumbered
@@ -882,8 +953,8 @@ pim_addr pim_find_primary_addr(struct interface *ifp)
if (lo_ifp && (lo_ifp != ifp))
return pim_find_primary_addr(lo_ifp);
}
-#endif
return PIMADDR_ANY;
+#endif
}
static int pim_iface_next_vif_index(struct interface *ifp)
@@ -1175,12 +1246,16 @@ static struct gm_join *igmp_join_find(struct list *join_list,
}
static int igmp_join_sock(const char *ifname, ifindex_t ifindex,
- struct in_addr group_addr, struct in_addr source_addr)
+ struct in_addr group_addr, struct in_addr source_addr,
+ struct pim_interface *pim_ifp)
{
int join_fd;
+ pim_ifp->igmp_ifstat_joins_sent++;
+
join_fd = pim_socket_raw(IPPROTO_IGMP);
if (join_fd < 0) {
+ pim_ifp->igmp_ifstat_joins_failed++;
return -1;
}
@@ -1196,6 +1271,8 @@ static int igmp_join_sock(const char *ifname, ifindex_t ifindex,
__func__, join_fd, group_str, source_str, ifindex,
ifname, errno, safe_strerror(errno));
+ pim_ifp->igmp_ifstat_joins_failed++;
+
close(join_fd);
return -2;
}
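
/*
 * Sketch of the per-interface join accounting added in the hunks above,
 * shown against the plain BSD-sockets any-source join rather than FRR's
 * pim_socket internals.  Counter and struct names are illustrative only:
 */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

struct join_stats {
	unsigned int joins_sent;
	unsigned int joins_failed;
};

static int join_group(int fd, struct in_addr group, struct in_addr ifaddr,
		      struct join_stats *st)
{
	struct ip_mreq mreq;

	st->joins_sent++;		/* count the attempt up front */

	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr = group;
	mreq.imr_interface = ifaddr;

	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq,
		       sizeof(mreq)) < 0) {
		st->joins_failed++;	/* count the failure as well */
		return -1;
	}
	return 0;
}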
@@ -1203,6 +1280,7 @@ static int igmp_join_sock(const char *ifname, ifindex_t ifindex,
return join_fd;
}
+#if PIM_IPV == 4
static struct gm_join *igmp_join_new(struct interface *ifp,
struct in_addr group_addr,
struct in_addr source_addr)
@@ -1215,7 +1293,7 @@ static struct gm_join *igmp_join_new(struct interface *ifp,
assert(pim_ifp);
join_fd = igmp_join_sock(ifp->name, ifp->ifindex, group_addr,
- source_addr);
+ source_addr, pim_ifp);
if (join_fd < 0) {
char group_str[INET_ADDRSTRLEN];
char source_str[INET_ADDRSTRLEN];
@@ -1241,7 +1319,9 @@ static struct gm_join *igmp_join_new(struct interface *ifp,
return ij;
}
+#endif /* PIM_IPV == 4 */
+#if PIM_IPV == 4
ferr_r pim_if_igmp_join_add(struct interface *ifp, struct in_addr group_addr,
struct in_addr source_addr)
{
@@ -1283,7 +1363,7 @@ ferr_r pim_if_igmp_join_add(struct interface *ifp, struct in_addr group_addr,
return ferr_ok();
}
-
+#endif /* PIM_IPV == 4 */
int pim_if_igmp_join_del(struct interface *ifp, struct in_addr group_addr,
struct in_addr source_addr)
@@ -1549,7 +1629,6 @@ static int pim_ifp_create(struct interface *ifp)
*/
if (pim_ifp)
pim_ifp->pim = pim;
-#if PIM_IPV == 4
pim_if_addr_add_all(ifp);
/*
@@ -1561,7 +1640,6 @@ static int pim_ifp_create(struct interface *ifp)
* this is a no-op if it's already been done.
*/
pim_if_create_pimreg(pim);
-#endif
}
#if PIM_IPV == 4
@@ -1599,6 +1677,7 @@ static int pim_ifp_create(struct interface *ifp)
static int pim_ifp_up(struct interface *ifp)
{
+ uint32_t table_id;
struct pim_interface *pim_ifp;
struct pim_instance *pim;
@@ -1621,9 +1700,6 @@ static int pim_ifp_up(struct interface *ifp)
if (pim_ifp)
pim_ifp->pim = pim;
-#if PIM_IPV == 4
- uint32_t table_id;
-
/*
pim_if_addr_add_all() suffices for bringing up both IGMP and
PIM
@@ -1652,7 +1728,6 @@ static int pim_ifp_up(struct interface *ifp)
}
}
}
-#endif
return 0;
}
@@ -1666,7 +1741,6 @@ static int pim_ifp_down(struct interface *ifp)
ifp->mtu, if_is_operative(ifp));
}
-#if PIM_IPV == 4
if (!if_is_operative(ifp)) {
pim_ifchannel_delete_all(ifp);
/*
@@ -1687,9 +1761,10 @@ static int pim_ifp_down(struct interface *ifp)
if (ifp->info) {
pim_if_del_vif(ifp);
+#if PIM_IPV == 4
pim_ifstat_reset(ifp);
- }
#endif
+ }
return 0;
}
@@ -1704,12 +1779,12 @@ static int pim_ifp_destroy(struct interface *ifp)
ifp->mtu, if_is_operative(ifp));
}
-#if PIM_IPV == 4
- struct pim_instance *pim;
-
if (!if_is_operative(ifp))
pim_if_addr_del_all(ifp);
+#if PIM_IPV == 4
+ struct pim_instance *pim;
+
pim = ifp->vrf->info;
if (pim && pim->vxlan.term_if == ifp)
pim_vxlan_del_term_dev(pim);
diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h
index 00ec8e7427..bab73eae86 100644
--- a/pimd/pim_iface.h
+++ b/pimd/pim_iface.h
@@ -96,6 +96,13 @@ struct pim_interface {
uint32_t options; /* bit vector */
ifindex_t mroute_vif_index;
struct pim_instance *pim;
+
+#if PIM_IPV == 6
+ /* link-locals: MLD uses lowest addr, PIM uses highest... */
+ pim_addr ll_lowest;
+ pim_addr ll_highest;
+#endif
+
pim_addr primary_address; /* remember addr to detect change */
struct list *sec_addr_list; /* list of struct pim_secondary_addr */
pim_addr update_source; /* user can statically set the primary
@@ -182,6 +189,10 @@ struct pim_interface {
bool bsm_enable; /* bsm processing enable */
bool ucast_bsm_accept; /* ucast bsm processing */
+ uint32_t igmp_ifstat_joins_sent;
+ uint32_t igmp_ifstat_joins_failed;
+ uint32_t igmp_peak_group_count;
+
struct {
bool enabled;
uint32_t min_rx;
diff --git a/pimd/pim_ifchannel.c b/pimd/pim_ifchannel.c
index 956ab0d67c..f9fb8cf094 100644
--- a/pimd/pim_ifchannel.c
+++ b/pimd/pim_ifchannel.c
@@ -872,7 +872,7 @@ void pim_ifchannel_join_add(struct interface *ifp, pim_addr neigh_addr,
address of the join message is our primary address.
*/
if (ch->ifassert_state == PIM_IFASSERT_I_AM_LOSER) {
- zlog_warn("%s: Assert Loser recv Join%s from %pI4 on %s",
+ zlog_warn("%s: Assert Loser recv Join%s from %pPA on %s",
__func__, ch->sg_str, &neigh_addr, ifp->name);
assert_action_a5(ch);
diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c
index 5cdefd2828..57c4cdc470 100644
--- a/pimd/pim_igmp.c
+++ b/pimd/pim_igmp.c
@@ -37,11 +37,195 @@
#include "pim_str.h"
#include "pim_util.h"
#include "pim_time.h"
-#include "pim_zebra.h"
+#include "pim_ssm.h"
+#include "pim_tib.h"
static void group_timer_off(struct gm_group *group);
static void pim_igmp_general_query(struct thread *t);
+void igmp_anysource_forward_start(struct pim_instance *pim,
+ struct gm_group *group)
+{
+ struct gm_source *source;
+ struct in_addr src_addr = {.s_addr = 0};
+ /* Any source (*,G) is forwarded only if mode is EXCLUDE {empty} */
+ assert(group->group_filtermode_isexcl);
+ assert(listcount(group->group_source_list) < 1);
+
+ source = igmp_get_source_by_addr(group, src_addr, NULL);
+ if (!source) {
+ zlog_warn("%s: Failure to create * source", __func__);
+ return;
+ }
+
+ igmp_source_forward_start(pim, source);
+}
+
+void igmp_anysource_forward_stop(struct gm_group *group)
+{
+ struct gm_source *source;
+ struct in_addr star = {.s_addr = 0};
+
+ source = igmp_find_source_by_addr(group, star);
+ if (source)
+ igmp_source_forward_stop(source);
+}
+
+static void igmp_source_forward_reevaluate_one(struct pim_instance *pim,
+ struct gm_source *source)
+{
+ pim_sgaddr sg;
+ struct gm_group *group = source->source_group;
+ struct pim_ifchannel *ch;
+
+ if ((source->source_addr.s_addr != INADDR_ANY) ||
+ !IGMP_SOURCE_TEST_FORWARDING(source->source_flags))
+ return;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = source->source_addr;
+ sg.grp = group->group_addr;
+
+ ch = pim_ifchannel_find(group->interface, &sg);
+ if (pim_is_grp_ssm(pim, group->group_addr)) {
+ /* If SSM group withdraw local membership */
+ if (ch &&
+ (ch->local_ifmembership == PIM_IFMEMBERSHIP_INCLUDE)) {
+ if (PIM_DEBUG_PIM_EVENTS)
+ zlog_debug(
+ "local membership del for %pSG as G is now SSM",
+ &sg);
+ pim_ifchannel_local_membership_del(group->interface,
+ &sg);
+ }
+ } else {
+ /* If ASM group add local membership */
+ if (!ch ||
+ (ch->local_ifmembership == PIM_IFMEMBERSHIP_NOINFO)) {
+ if (PIM_DEBUG_PIM_EVENTS)
+ zlog_debug(
+ "local membership add for %pSG as G is now ASM",
+ &sg);
+ pim_ifchannel_local_membership_add(
+ group->interface, &sg, false /*is_vxlan*/);
+ }
+ }
+}
+
+void igmp_source_forward_reevaluate_all(struct pim_instance *pim)
+{
+ struct interface *ifp;
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+ struct listnode *grpnode;
+ struct gm_group *grp;
+ struct pim_ifchannel *ch, *ch_temp;
+
+ if (!pim_ifp)
+ continue;
+
+ /* scan igmp groups */
+ for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
+ grp)) {
+ struct listnode *srcnode;
+ struct gm_source *src;
+
+ /*
+ * RFC 4604
+ * section 2.2.1
+ * EXCLUDE mode does not apply to SSM addresses,
+ * and an SSM-aware router will ignore
+ * MODE_IS_EXCLUDE and CHANGE_TO_EXCLUDE_MODE
+ * requests in the SSM range.
+ */
+ if (pim_is_grp_ssm(pim, grp->group_addr) &&
+ grp->group_filtermode_isexcl) {
+ igmp_group_delete(grp);
+ } else {
+ /* scan group sources */
+ for (ALL_LIST_ELEMENTS_RO(
+ grp->group_source_list, srcnode,
+ src)) {
+ igmp_source_forward_reevaluate_one(pim,
+ src);
+ } /* scan group sources */
+ }
+ } /* scan igmp groups */
+
+ RB_FOREACH_SAFE (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb,
+ ch_temp) {
+ if (pim_is_grp_ssm(pim, ch->sg.grp)) {
+ if (pim_addr_is_any(ch->sg.src))
+ pim_ifchannel_delete(ch);
+ }
+ }
+ } /* scan interfaces */
+}
+
+void igmp_source_forward_start(struct pim_instance *pim,
+ struct gm_source *source)
+{
+ struct gm_group *group;
+ pim_sgaddr sg;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = source->source_addr;
+ sg.grp = source->source_group->group_addr;
+
+ if (PIM_DEBUG_IGMP_TRACE) {
+ zlog_debug("%s: (S,G)=%pSG oif=%s fwd=%d", __func__, &sg,
+ source->source_group->interface->name,
+ IGMP_SOURCE_TEST_FORWARDING(source->source_flags));
+ }
+
+ /* Prevent IGMP interface from installing multicast route multiple
+ times */
+ if (IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) {
+ return;
+ }
+
+ group = source->source_group;
+
+ if (tib_sg_gm_join(pim, sg, group->interface,
+ &source->source_channel_oil))
+ IGMP_SOURCE_DO_FORWARDING(source->source_flags);
+}
+
+/*
+ igmp_source_forward_stop: stop forwarding, but keep the source
+ igmp_source_delete: stop forwarding, and delete the source
+ */
+void igmp_source_forward_stop(struct gm_source *source)
+{
+ struct pim_interface *pim_oif;
+ struct gm_group *group;
+ pim_sgaddr sg;
+
+ memset(&sg, 0, sizeof(sg));
+ sg.src = source->source_addr;
+ sg.grp = source->source_group->group_addr;
+
+ if (PIM_DEBUG_IGMP_TRACE) {
+ zlog_debug("%s: (S,G)=%pSG oif=%s fwd=%d", __func__, &sg,
+ source->source_group->interface->name,
+ IGMP_SOURCE_TEST_FORWARDING(source->source_flags));
+ }
+
+ /* Prevent IGMP interface from removing multicast route multiple
+ times */
+ if (!IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) {
+ return;
+ }
+
+ group = source->source_group;
+ pim_oif = group->interface->info;
+
+ tib_sg_gm_prune(pim_oif->pim, sg, group->interface,
+ &source->source_channel_oil);
+ IGMP_SOURCE_DONT_FORWARDING(source->source_flags);
+}
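
/*
 * Sketch of the FORWARDING-flag guard used by the two functions above: start
 * and stop are made idempotent by testing the flag before touching the TIB.
 * The macro and function names below are hypothetical, not the FRR ones:
 */
#include <stdbool.h>

#define SRC_FLAG_FORWARDING 0x1

static bool fwd_start(unsigned int *flags)
{
	if (*flags & SRC_FLAG_FORWARDING)
		return false;		/* already installed, nothing to do */
	*flags |= SRC_FLAG_FORWARDING;	/* ...install the mroute here... */
	return true;
}

static bool fwd_stop(unsigned int *flags)
{
	if (!(*flags & SRC_FLAG_FORWARDING))
		return false;		/* already removed */
	*flags &= ~SRC_FLAG_FORWARDING;	/* ...remove the mroute here... */
	return true;
}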
+
/* This socket is used for TXing IGMP packets only, IGMP RX happens
* in pim_mroute_msg()
*/
@@ -51,6 +235,7 @@ static int igmp_sock_open(struct in_addr ifaddr, struct interface *ifp,
int fd;
int join = 0;
struct in_addr group;
+ struct pim_interface *pim_ifp = ifp->info;
fd = pim_socket_mcast(IPPROTO_IGMP, ifaddr, ifp, 1);
@@ -59,7 +244,8 @@ static int igmp_sock_open(struct in_addr ifaddr, struct interface *ifp,
if (PIM_IF_TEST_IGMP_LISTEN_ALLROUTERS(pim_options)) {
if (inet_aton(PIM_ALL_ROUTERS, &group)) {
- if (!pim_socket_join(fd, group, ifaddr, ifp->ifindex))
+ if (!pim_socket_join(fd, group, ifaddr, ifp->ifindex,
+ pim_ifp))
++join;
} else {
zlog_warn(
@@ -75,7 +261,7 @@ static int igmp_sock_open(struct in_addr ifaddr, struct interface *ifp,
IGMP routers must receive general queries for querier election.
*/
if (inet_aton(PIM_ALL_SYSTEMS, &group)) {
- if (!pim_socket_join(fd, group, ifaddr, ifp->ifindex))
+ if (!pim_socket_join(fd, group, ifaddr, ifp->ifindex, pim_ifp))
++join;
} else {
zlog_warn(
@@ -85,7 +271,8 @@ static int igmp_sock_open(struct in_addr ifaddr, struct interface *ifp,
}
if (inet_aton(PIM_ALL_IGMP_ROUTERS, &group)) {
- if (!pim_socket_join(fd, group, ifaddr, ifp->ifindex)) {
+ if (!pim_socket_join(fd, group, ifaddr, ifp->ifindex,
+ pim_ifp)) {
++join;
}
} else {
@@ -320,16 +507,16 @@ static int igmp_recv_query(struct gm_sock *igmp, int query_version,
/* Collecting IGMP Rx stats */
switch (query_version) {
case 1:
- igmp->rx_stats.query_v1++;
+ igmp->igmp_stats.query_v1++;
break;
case 2:
- igmp->rx_stats.query_v2++;
+ igmp->igmp_stats.query_v2++;
break;
case 3:
- igmp->rx_stats.query_v3++;
+ igmp->igmp_stats.query_v3++;
break;
default:
- igmp->rx_stats.unsupported++;
+ igmp->igmp_stats.unsupported++;
}
/*
@@ -461,7 +648,7 @@ static int igmp_v1_recv_report(struct gm_sock *igmp, struct in_addr from,
}
/* Collecting IGMP Rx stats */
- igmp->rx_stats.report_v1++;
+ igmp->igmp_stats.report_v1++;
if (PIM_DEBUG_IGMP_TRACE) {
zlog_warn("%s %s: FIXME WRITEME", __FILE__, __func__);
@@ -613,7 +800,7 @@ int pim_igmp_packet(struct gm_sock *igmp, char *buf, size_t len)
zlog_warn("Ignoring unsupported IGMP message type: %d", msg_type);
/* Collecting IGMP Rx stats */
- igmp->rx_stats.unsupported++;
+ igmp->igmp_stats.unsupported++;
return -1;
}
@@ -737,13 +924,10 @@ static void pim_igmp_general_query(struct thread *t)
querier_str, dst_str, igmp->interface->name);
}
- igmp_send_query(pim_ifp->igmp_version, 0 /* igmp_group */, igmp->fd,
- igmp->interface->name, query_buf, sizeof(query_buf),
- 0 /* num_sources */, dst_addr, group_addr,
- pim_ifp->gm_query_max_response_time_dsec,
- 1 /* s_flag: always set for general queries */,
- igmp->querier_robustness_variable,
- igmp->querier_query_interval);
+ igmp_send_query(pim_ifp->igmp_version, 0 /* igmp_group */, query_buf,
+ sizeof(query_buf), 0 /* num_sources */, dst_addr,
+ group_addr, pim_ifp->gm_query_max_response_time_dsec,
+ 1 /* s_flag: always set for general queries */, igmp);
pim_igmp_general_query_on(igmp);
}
@@ -808,6 +992,8 @@ static void igmp_group_free(struct gm_group *group)
static void igmp_group_count_incr(struct pim_interface *pim_ifp)
{
+ uint32_t group_count = listcount(pim_ifp->gm_group_list);
+
++pim_ifp->pim->igmp_group_count;
if (pim_ifp->pim->igmp_group_count
== pim_ifp->pim->igmp_watermark_limit) {
@@ -816,6 +1002,9 @@ static void igmp_group_count_incr(struct pim_interface *pim_ifp)
pim_ifp->pim->igmp_group_count,
VRF_LOGNAME(pim_ifp->pim->vrf));
}
+
+ if (pim_ifp->igmp_peak_group_count < group_count)
+ pim_ifp->igmp_peak_group_count = group_count;
}
static void igmp_group_count_decr(struct pim_interface *pim_ifp)
@@ -990,7 +1179,7 @@ static struct gm_sock *igmp_sock_new(int fd, struct in_addr ifaddr,
pim_ifp->gm_default_robustness_variable;
igmp->sock_creation = pim_time_monotonic_sec();
- igmp_stats_init(&igmp->rx_stats);
+ igmp_stats_init(&igmp->igmp_stats);
if (mtrace_only) {
igmp->mtrace_only = mtrace_only;
@@ -1016,8 +1205,8 @@ static void pim_igmp_read(struct thread *t)
{
uint8_t buf[10000];
struct gm_sock *igmp = (struct gm_sock *)THREAD_ARG(t);
- struct sockaddr_in from;
- struct sockaddr_in to;
+ struct sockaddr_storage from;
+ struct sockaddr_storage to;
socklen_t fromlen = sizeof(from);
socklen_t tolen = sizeof(to);
ifindex_t ifindex = -1;
@@ -1282,23 +1471,29 @@ struct gm_group *igmp_add_group_by_addr(struct gm_sock *igmp,
return group;
}
-void igmp_send_query(int igmp_version, struct gm_group *group, int fd,
- const char *ifname, char *query_buf, int query_buf_size,
- int num_sources, struct in_addr dst_addr,
- struct in_addr group_addr,
+void igmp_send_query(int igmp_version, struct gm_group *group, char *query_buf,
+ int query_buf_size, int num_sources,
+ struct in_addr dst_addr, struct in_addr group_addr,
int query_max_response_time_dsec, uint8_t s_flag,
- uint8_t querier_robustness_variable,
- uint16_t querier_query_interval)
+ struct gm_sock *igmp)
{
+ if (pim_addr_is_any(group_addr) &&
+ ntohl(dst_addr.s_addr) == INADDR_ALLHOSTS_GROUP)
+ igmp->igmp_stats.general_queries_sent++;
+ else if (group)
+ igmp->igmp_stats.group_queries_sent++;
+
if (igmp_version == 3) {
- igmp_v3_send_query(group, fd, ifname, query_buf, query_buf_size,
- num_sources, dst_addr, group_addr,
+ igmp_v3_send_query(group, igmp->fd, igmp->interface->name,
+ query_buf, query_buf_size, num_sources,
+ dst_addr, group_addr,
query_max_response_time_dsec, s_flag,
- querier_robustness_variable,
- querier_query_interval);
+ igmp->querier_robustness_variable,
+ igmp->querier_query_interval);
} else if (igmp_version == 2) {
- igmp_v2_send_query(group, fd, ifname, query_buf, dst_addr,
- group_addr, query_max_response_time_dsec);
+ igmp_v2_send_query(group, igmp->fd, igmp->interface->name,
+ query_buf, dst_addr, group_addr,
+ query_max_response_time_dsec);
}
}
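
/*
 * Sketch of the query classification added above: a query is counted as
 * "general" when the group field is unspecified and the destination is the
 * all-hosts group (224.0.0.1); otherwise it is group-specific.  Standalone
 * illustration with an ad hoc helper name:
 */
#include <netinet/in.h>
#include <stdbool.h>

static bool is_general_query(struct in_addr group, struct in_addr dst)
{
	return group.s_addr == INADDR_ANY &&
	       ntohl(dst.s_addr) == INADDR_ALLHOSTS_GROUP;
}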
@@ -1329,13 +1524,10 @@ void igmp_send_query_on_intf(struct interface *ifp, int igmp_ver)
char query_buf[query_buf_size];
- igmp_send_query(igmp_ver, 0 /* igmp_group */, igmp->fd,
- igmp->interface->name, query_buf,
- sizeof(query_buf), 0 /* num_sources */,
- dst_addr, group_addr,
- pim_ifp->gm_query_max_response_time_dsec,
- 1 /* s_flag: always set for general queries */,
- igmp->querier_robustness_variable,
- igmp->querier_query_interval);
+ igmp_send_query(
+ igmp_ver, 0 /* igmp_group */, query_buf,
+ sizeof(query_buf), 0 /* num_sources */, dst_addr,
+ group_addr, pim_ifp->gm_query_max_response_time_dsec,
+ 1 /* s_flag: always set for general queries */, igmp);
}
}
diff --git a/pimd/pim_igmp.h b/pimd/pim_igmp.h
index 5c35996634..a642469f27 100644
--- a/pimd/pim_igmp.h
+++ b/pimd/pim_igmp.h
@@ -100,7 +100,7 @@ struct gm_sock {
bool mtrace_only;
- struct igmp_stats rx_stats;
+ struct igmp_stats igmp_stats;
};
struct pim_interface;
@@ -128,6 +128,15 @@ void pim_igmp_other_querier_timer_off(struct gm_sock *igmp);
int igmp_validate_checksum(char *igmp_msg, int igmp_msg_len);
#else /* PIM_IPV != 4 */
+static inline void pim_igmp_if_init(struct pim_interface *pim_ifp,
+ struct interface *ifp)
+{
+}
+
+static inline void pim_igmp_if_fini(struct pim_interface *pim_ifp)
+{
+}
+
static inline void pim_igmp_general_query_on(struct gm_sock *igmp)
{
}
@@ -204,6 +213,17 @@ struct gm_group {
};
#if PIM_IPV == 4
+struct pim_instance;
+
+void igmp_anysource_forward_start(struct pim_instance *pim,
+ struct gm_group *group);
+void igmp_anysource_forward_stop(struct gm_group *group);
+
+void igmp_source_forward_start(struct pim_instance *pim,
+ struct gm_source *source);
+void igmp_source_forward_stop(struct gm_source *source);
+void igmp_source_forward_reevaluate_all(struct pim_instance *pim);
+
struct gm_group *find_group_by_addr(struct gm_sock *igmp,
struct in_addr group_addr);
struct gm_group *igmp_add_group_by_addr(struct gm_sock *igmp,
@@ -220,13 +240,11 @@ void igmp_startup_mode_on(struct gm_sock *igmp);
void igmp_group_timer_on(struct gm_group *group, long interval_msec,
const char *ifname);
-void igmp_send_query(int igmp_version, struct gm_group *group, int fd,
- const char *ifname, char *query_buf, int query_buf_size,
- int num_sources, struct in_addr dst_addr,
- struct in_addr group_addr,
+void igmp_send_query(int igmp_version, struct gm_group *group, char *query_buf,
+ int query_buf_size, int num_sources,
+ struct in_addr dst_addr, struct in_addr group_addr,
int query_max_response_time_dsec, uint8_t s_flag,
- uint8_t querier_robustness_variable,
- uint16_t querier_query_interval);
+ struct gm_sock *igmp);
void igmp_group_delete(struct gm_group *group);
void igmp_send_query_on_intf(struct interface *ifp, int igmp_ver);
diff --git a/pimd/pim_igmp_mtrace.c b/pimd/pim_igmp_mtrace.c
index d8210168e2..11bb2db7eb 100644
--- a/pimd/pim_igmp_mtrace.c
+++ b/pimd/pim_igmp_mtrace.c
@@ -626,7 +626,7 @@ int igmp_mtrace_recv_qry_req(struct gm_sock *igmp, struct ip *ip_hdr,
}
/* Collecting IGMP Rx stats */
- igmp->rx_stats.mtrace_req++;
+ igmp->igmp_stats.mtrace_req++;
if (PIM_DEBUG_MTRACE)
mtrace_debug(pim_ifp, mtracep, igmp_msg_len);
@@ -843,7 +843,7 @@ int igmp_mtrace_recv_response(struct gm_sock *igmp, struct ip *ip_hdr,
mtracep->checksum = checksum;
/* Collecting IGMP Rx stats */
- igmp->rx_stats.mtrace_rsp++;
+ igmp->igmp_stats.mtrace_rsp++;
if (PIM_DEBUG_MTRACE)
mtrace_debug(pim_ifp, mtracep, igmp_msg_len);
diff --git a/pimd/pim_igmp_stats.c b/pimd/pim_igmp_stats.c
index 40851a4529..1d51104687 100644
--- a/pimd/pim_igmp_stats.c
+++ b/pimd/pim_igmp_stats.c
@@ -43,4 +43,14 @@ void igmp_stats_add(struct igmp_stats *a, struct igmp_stats *b)
a->mtrace_rsp += b->mtrace_rsp;
a->mtrace_req += b->mtrace_req;
a->unsupported += b->unsupported;
+ a->peak_groups += b->peak_groups;
+ a->total_groups += b->total_groups;
+ a->total_source_groups += b->total_source_groups;
+ a->joins_sent += b->joins_sent;
+ a->joins_failed += b->joins_failed;
+ a->general_queries_sent += b->general_queries_sent;
+ a->group_queries_sent += b->group_queries_sent;
+ a->total_recv_messages += b->query_v1 + b->query_v2 + b->query_v3 +
+ b->report_v1 + b->report_v2 + b->report_v3 +
+ b->leave_v2 + b->mtrace_rsp + b->mtrace_req;
}
diff --git a/pimd/pim_igmp_stats.h b/pimd/pim_igmp_stats.h
index a70a433557..8c66986e03 100644
--- a/pimd/pim_igmp_stats.h
+++ b/pimd/pim_igmp_stats.h
@@ -23,16 +23,24 @@
#include <zebra.h>
struct igmp_stats {
- uint32_t query_v1;
- uint32_t query_v2;
- uint32_t query_v3;
- uint32_t report_v1;
- uint32_t report_v2;
- uint32_t report_v3;
- uint32_t leave_v2;
- uint32_t mtrace_rsp;
- uint32_t mtrace_req;
- uint32_t unsupported;
+ uint32_t query_v1;
+ uint32_t query_v2;
+ uint32_t query_v3;
+ uint32_t report_v1;
+ uint32_t report_v2;
+ uint32_t report_v3;
+ uint32_t leave_v2;
+ uint32_t mtrace_rsp;
+ uint32_t mtrace_req;
+ uint32_t unsupported;
+ uint32_t peak_groups;
+ uint32_t total_groups;
+ uint32_t total_source_groups;
+ uint32_t joins_sent;
+ uint32_t joins_failed;
+ uint32_t general_queries_sent;
+ uint32_t group_queries_sent;
+ uint32_t total_recv_messages;
};
#if PIM_IPV == 4
diff --git a/pimd/pim_igmpv2.c b/pimd/pim_igmpv2.c
index a7c7c99ebf..a949d2b126 100644
--- a/pimd/pim_igmpv2.c
+++ b/pimd/pim_igmpv2.c
@@ -24,6 +24,7 @@
#include "pim_igmp.h"
#include "pim_igmpv2.h"
#include "pim_igmpv3.h"
+#include "pim_ssm.h"
#include "pim_str.h"
#include "pim_time.h"
#include "pim_util.h"
@@ -53,7 +54,8 @@ void igmp_v2_send_query(struct gm_group *group, int fd, const char *ifname,
/* max_resp_code must be non-zero else this will look like an IGMP v1
* query */
- max_resp_code = igmp_msg_encode16to8(query_max_response_time_dsec);
+ /* RFC 2236 section 2.2: in IGMPv2 the Max Resp Code equals the time in deciseconds */
+ max_resp_code = query_max_response_time_dsec;
assert(max_resp_code > 0);
query_buf[0] = PIM_IGMP_MEMBERSHIP_QUERY;
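
/*
 * Sketch of the distinction behind the change above: IGMPv2 (RFC 2236) puts
 * the maximum response time, in deciseconds, directly into the Max Resp Code
 * field, while IGMPv3 (RFC 3376, 4.1.1) switches to a floating-point style
 * encoding for values of 128 and above.  The encoder below illustrates the
 * v3 scheme only; it is an assumption that igmp_msg_encode16to8() does
 * something equivalent:
 */
#include <stdint.h>

static uint8_t igmp_v3_encode_max_resp(uint16_t dsec)
{
	uint8_t exp = 0;
	uint16_t mant;

	if (dsec < 128)
		return (uint8_t)dsec;	/* small values are used as-is */

	if (dsec > (0x1f << 10))	/* clamp to the largest encodable value */
		dsec = 0x1f << 10;

	mant = dsec >> 3;		/* code decodes as (mant | 0x10) << (exp + 3) */
	while (mant > 0x1f) {
		mant >>= 1;
		exp++;
	}
	return 0x80 | (exp << 4) | (mant & 0x0f);
}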
@@ -107,10 +109,13 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from,
{
struct interface *ifp = igmp->interface;
struct in_addr group_addr;
+ struct pim_interface *pim_ifp;
char group_str[INET_ADDRSTRLEN];
on_trace(__func__, igmp->interface, from);
+ pim_ifp = ifp->info;
+
if (igmp->mtrace_only)
return 0;
@@ -130,7 +135,7 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from,
}
/* Collecting IGMP Rx stats */
- igmp->rx_stats.report_v2++;
+ igmp->igmp_stats.report_v2++;
memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
@@ -142,6 +147,23 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from,
}
/*
+ * RFC 4604
+ * section 2.2.1
+ * EXCLUDE mode does not apply to SSM addresses, and an SSM-aware router
+ * will ignore MODE_IS_EXCLUDE and CHANGE_TO_EXCLUDE_MODE requests in
+ * the SSM range.
+ */
+ if (pim_is_grp_ssm(pim_ifp->pim, group_addr)) {
+ if (PIM_DEBUG_IGMP_PACKETS) {
+ zlog_debug(
+ "Ignoring IGMPv2 group record %pI4 from %s on %s exclude mode in SSM range",
+ &group_addr.s_addr, from_str, ifp->name);
+ }
+ return -1;
+ }
+
+
+ /*
* RFC 3376
* 7.3.2. In the Presence of Older Version Group Members
*
@@ -221,7 +243,7 @@ int igmp_v2_recv_leave(struct gm_sock *igmp, struct ip *ip_hdr,
}
/* Collecting IGMP Rx stats */
- igmp->rx_stats.leave_v2++;
+ igmp->igmp_stats.leave_v2++;
/*
* RFC 3376
diff --git a/pimd/pim_igmpv3.c b/pimd/pim_igmpv3.c
index 87554bc8ba..b6114f9ead 100644
--- a/pimd/pim_igmpv3.c
+++ b/pimd/pim_igmpv3.c
@@ -32,6 +32,7 @@
#include "pim_time.h"
#include "pim_zebra.h"
#include "pim_oil.h"
+#include "pim_ssm.h"
static void group_retransmit_timer_on(struct gm_group *group);
static long igmp_group_timer_remain_msec(struct gm_group *group);
@@ -984,12 +985,10 @@ static void igmp_send_query_group(struct gm_group *group, char *query_buf,
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node, igmp)) {
igmp_send_query(
- pim_ifp->igmp_version, group, igmp->fd, ifp->name,
- query_buf, query_buf_size, num_sources,
- group->group_addr, group->group_addr,
+ pim_ifp->igmp_version, group, query_buf, query_buf_size,
+ num_sources, group->group_addr, group->group_addr,
pim_ifp->gm_specific_query_max_response_time_dsec,
- s_flag, igmp->querier_robustness_variable,
- igmp->querier_query_interval);
+ s_flag, igmp);
}
}
@@ -1822,6 +1821,64 @@ void igmp_v3_recv_query(struct gm_sock *igmp, const char *from_str,
} /* s_flag is clear: timer updates */
}
+static bool igmp_pkt_grp_addr_ok(struct interface *ifp, const char *from_str,
+ struct in_addr grp, int rec_type)
+{
+ struct pim_interface *pim_ifp;
+ struct in_addr grp_addr;
+
+ pim_ifp = ifp->info;
+
+ /* determine filtering status for group */
+ if (pim_is_group_filtered(pim_ifp, &grp)) {
+ if (PIM_DEBUG_IGMP_PACKETS) {
+ zlog_debug(
+ "Filtering IGMPv3 group record %pI4 from %s on %s per prefix-list %s",
+ &grp.s_addr, from_str, ifp->name,
+ pim_ifp->boundary_oil_plist);
+ }
+ return false;
+ }
+
+ /*
+ * If we receive an IGMP report with the group in 224.0.0.0/24
+ * then we should ignore it
+ */
+
+ grp_addr.s_addr = ntohl(grp.s_addr);
+
+ if (pim_is_group_224_0_0_0_24(grp_addr)) {
+ if (PIM_DEBUG_IGMP_PACKETS) {
+ zlog_debug(
+ "Ignoring IGMPv3 group record %pI4 from %s on %s group range falls in 224.0.0.0/24",
+ &grp.s_addr, from_str, ifp->name);
+ }
+ return false;
+ }
+
+ /*
+ * RFC 4604
+ * section 2.2.1
+ * EXCLUDE mode does not apply to SSM addresses, and an SSM-aware router
+ * will ignore MODE_IS_EXCLUDE and CHANGE_TO_EXCLUDE_MODE requests in
+ * the SSM range.
+ */
+ if (pim_is_grp_ssm(pim_ifp->pim, grp)) {
+ switch (rec_type) {
+ case IGMP_GRP_REC_TYPE_MODE_IS_EXCLUDE:
+ case IGMP_GRP_REC_TYPE_CHANGE_TO_EXCLUDE_MODE:
+ if (PIM_DEBUG_IGMP_PACKETS) {
+ zlog_debug(
+ "Ignoring IGMPv3 group record %pI4 from %s on %s exclude mode in SSM range",
+ &grp.s_addr, from_str, ifp->name);
+ }
+ return false;
+ }
+ }
+
+ return true;
+}
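
/*
 * Sketch of the 224.0.0.0/24 test folded into igmp_pkt_grp_addr_ok() above
 * (pim_is_group_224_0_0_0_24() in FRR): groups in the link-local control
 * block are never processed as membership reports.  Standalone equivalent,
 * doing the byte-order conversion inline:
 */
#include <netinet/in.h>
#include <stdbool.h>

static bool group_is_link_local_224(struct in_addr group)
{
	/* compare in host byte order against 224.0.0.0/24 */
	return (ntohl(group.s_addr) & 0xffffff00u) == 0xe0000000u;
}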
+
int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
const char *from_str, char *igmp_msg, int igmp_msg_len)
{
@@ -1830,14 +1887,10 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
uint8_t *report_pastend = (uint8_t *)igmp_msg + igmp_msg_len;
struct interface *ifp = igmp->interface;
int i;
- int local_ncb = 0;
- struct pim_interface *pim_ifp;
if (igmp->mtrace_only)
return 0;
- pim_ifp = igmp->interface->info;
-
if (igmp_msg_len < IGMP_V3_MSG_MIN_SIZE) {
zlog_warn(
"Recv IGMP report v3 from %s on %s: size=%d shorter than minimum=%d",
@@ -1854,7 +1907,7 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
}
/* Collecting IGMP Rx stats */
- igmp->rx_stats.report_v3++;
+ igmp->igmp_stats.report_v3++;
num_groups = ntohs(
*(uint16_t *)(igmp_msg + IGMP_V3_REPORT_NUMGROUPS_OFFSET));
@@ -1882,9 +1935,6 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
int rec_auxdatalen;
int rec_num_sources;
int j;
- struct prefix lncb;
- struct prefix g;
- bool filtered = false;
if ((group_record + IGMP_V3_GROUP_RECORD_MIN_SIZE)
> report_pastend) {
@@ -1942,31 +1992,7 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
} /* for (sources) */
- lncb.family = AF_INET;
- lncb.u.prefix4.s_addr = 0x000000E0;
- lncb.prefixlen = 24;
-
- g.family = AF_INET;
- g.u.prefix4 = rec_group;
- g.prefixlen = IPV4_MAX_BITLEN;
-
- /* determine filtering status for group */
- filtered = pim_is_group_filtered(ifp->info, &rec_group);
-
- if (PIM_DEBUG_IGMP_PACKETS && filtered)
- zlog_debug(
- "Filtering IGMPv3 group record %pI4 from %s on %s per prefix-list %s",
- &rec_group, from_str, ifp->name,
- pim_ifp->boundary_oil_plist);
-
- /*
- * If we receive a igmp report with the group in 224.0.0.0/24
- * then we should ignore it
- */
- if (prefix_match(&lncb, &g))
- local_ncb = 1;
-
- if (!local_ncb && !filtered)
+ if (igmp_pkt_grp_addr_ok(ifp, from_str, rec_group, rec_type))
switch (rec_type) {
case IGMP_GRP_REC_TYPE_MODE_IS_INCLUDE:
igmpv3_report_isin(igmp, from, rec_group,
@@ -2006,7 +2032,6 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
group_record +=
8 + (rec_num_sources << 2) + (rec_auxdatalen << 2);
- local_ncb = 0;
} /* for (group records) */
diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h
index 68c5b9167b..f8323deda0 100644
--- a/pimd/pim_instance.h
+++ b/pimd/pim_instance.h
@@ -30,18 +30,7 @@
#include "pim_vxlan_instance.h"
#include "pim_oil.h"
#include "pim_upstream.h"
-
-#if defined(HAVE_LINUX_MROUTE_H)
-#include <linux/mroute.h>
-#else
-/*
- Below: from <linux/mroute.h>
-*/
-
-#ifndef MAXVIFS
-#define MAXVIFS (256)
-#endif
-#endif
+#include "pim_mroute.h"
enum pim_spt_switchover {
PIM_SPT_IMMEDIATE,
@@ -176,7 +165,7 @@ struct pim_instance {
struct pim_vxlan_instance vxlan;
struct list *ssmpingd_list;
- struct in_addr ssmpingd_group_addr;
+ pim_addr ssmpingd_group_addr;
unsigned int igmp_group_count;
unsigned int igmp_watermark_limit;
diff --git a/pimd/pim_join.c b/pimd/pim_join.c
index 929beea26b..88078dd366 100644
--- a/pimd/pim_join.c
+++ b/pimd/pim_join.c
@@ -43,14 +43,10 @@
#include "pim_util.h"
#include "pim_ssm.h"
-static void on_trace(const char *label, struct interface *ifp,
- struct in_addr src)
+static void on_trace(const char *label, struct interface *ifp, pim_addr src)
{
- if (PIM_DEBUG_PIM_TRACE) {
- char src_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", src, src_str, sizeof(src_str));
- zlog_debug("%s: from %s on %s", label, src_str, ifp->name);
- }
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug("%s: from %pPA on %s", label, &src, ifp->name);
}
static void recv_join(struct interface *ifp, struct pim_neighbor *neigh,
@@ -422,6 +418,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
size_t packet_left = 0;
size_t packet_size = 0;
size_t group_size = 0;
+ pim_addr rpf_addr;
if (rpf->source_nexthop.interface)
pim_ifp = rpf->source_nexthop.interface->info;
@@ -430,8 +427,9 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
return -1;
}
- on_trace(__func__, rpf->source_nexthop.interface,
- rpf->rpf_addr.u.prefix4);
+ rpf_addr = pim_addr_from_prefix(&rpf->rpf_addr);
+
+ on_trace(__func__, rpf->source_nexthop.interface, rpf_addr);
if (!pim_ifp) {
zlog_warn("%s: multicast not enabled on interface %s", __func__,
@@ -439,15 +437,12 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
return -1;
}
- if (rpf->rpf_addr.u.prefix4.s_addr == INADDR_ANY) {
- if (PIM_DEBUG_PIM_J_P) {
- char dst_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<dst?>", rpf->rpf_addr.u.prefix4,
- dst_str, sizeof(dst_str));
- zlog_debug("%s: upstream=%s is myself on interface %s",
- __func__, dst_str,
- rpf->source_nexthop.interface->name);
- }
+ if (pim_addr_is_any(rpf_addr)) {
+ if (PIM_DEBUG_PIM_J_P)
+ zlog_debug(
+ "%s: upstream=%pPA is myself on interface %s",
+ __func__, &rpf_addr,
+ rpf->source_nexthop.interface->name);
return 0;
}
@@ -468,8 +463,8 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
memset(msg, 0, sizeof(*msg));
- pim_msg_addr_encode_ipv4_ucast((uint8_t *)&msg->addr,
- rpf->rpf_addr.u.prefix4);
+ pim_msg_addr_encode_ucast((uint8_t *)&msg->addr,
+ rpf_addr);
msg->reserved = 0;
msg->holdtime = htons(PIM_JP_HOLDTIME);
@@ -485,19 +480,17 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
packet_left = rpf->source_nexthop.interface->mtu - 24;
packet_left -= packet_size;
}
- if (PIM_DEBUG_PIM_J_P) {
- char dst_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<dst?>", rpf->rpf_addr.u.prefix4,
- dst_str, sizeof(dst_str));
+ if (PIM_DEBUG_PIM_J_P)
zlog_debug(
- "%s: sending (G)=%pPAs to upstream=%s on interface %s",
- __func__, &group->group, dst_str,
+ "%s: sending (G)=%pPAs to upstream=%pPA on interface %s",
+ __func__, &group->group, &rpf_addr,
rpf->source_nexthop.interface->name);
- }
group_size = pim_msg_get_jp_group_size(group->sources);
if (group_size > packet_left) {
- pim_msg_build_header(pim_msg, packet_size,
+ pim_msg_build_header(pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg,
+ packet_size,
PIM_MSG_TYPE_JOIN_PRUNE, false);
if (pim_msg_send(pim_ifp->pim_sock_fd,
pim_ifp->primary_address,
@@ -513,8 +506,8 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
msg = (struct pim_jp *)pim_msg;
memset(msg, 0, sizeof(*msg));
- pim_msg_addr_encode_ipv4_ucast((uint8_t *)&msg->addr,
- rpf->rpf_addr.u.prefix4);
+ pim_msg_addr_encode_ucast((uint8_t *)&msg->addr,
+ rpf_addr);
msg->reserved = 0;
msg->holdtime = htons(PIM_JP_HOLDTIME);
@@ -553,7 +546,9 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
grp = (struct pim_jp_groups *)curr_ptr;
if (packet_left < sizeof(struct pim_jp_groups)
|| msg->num_groups == 255) {
- pim_msg_build_header(pim_msg, packet_size,
+ pim_msg_build_header(pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg,
+ packet_size,
PIM_MSG_TYPE_JOIN_PRUNE, false);
if (pim_msg_send(pim_ifp->pim_sock_fd,
pim_ifp->primary_address,
@@ -573,8 +568,9 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
if (!new_packet) {
// msg->num_groups = htons (msg->num_groups);
- pim_msg_build_header(pim_msg, packet_size,
- PIM_MSG_TYPE_JOIN_PRUNE, false);
+ pim_msg_build_header(
+ pim_ifp->primary_address, qpim_all_pim_routers_addr,
+ pim_msg, packet_size, PIM_MSG_TYPE_JOIN_PRUNE, false);
if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg,
packet_size,
diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c
index f381a764cc..7fc4f12d27 100644
--- a/pimd/pim_mroute.c
+++ b/pimd/pim_mroute.c
@@ -43,121 +43,19 @@
#include "pim_ssm.h"
#include "pim_sock.h"
#include "pim_vxlan.h"
+#include "pim_msg.h"
static void mroute_read_on(struct pim_instance *pim);
-static int pim_mroute_set(struct pim_instance *pim, int enable)
-{
- int err;
- int opt, data;
- socklen_t data_len = sizeof(data);
- long flags;
-
- /*
- * We need to create the VRF table for the pim mroute_socket
- */
- if (pim->vrf->vrf_id != VRF_DEFAULT) {
- frr_with_privs(&pimd_privs) {
-
- data = pim->vrf->data.l.table_id;
- err = setsockopt(pim->mroute_socket, IPPROTO_IP,
- MRT_TABLE,
- &data, data_len);
- if (err) {
- zlog_warn(
- "%s %s: failure: setsockopt(fd=%d,IPPROTO_IP, MRT_TABLE=%d): errno=%d: %s",
- __FILE__, __func__, pim->mroute_socket,
- data, errno, safe_strerror(errno));
- return -1;
- }
- }
- }
-
- frr_with_privs(&pimd_privs) {
- opt = enable ? MRT_INIT : MRT_DONE;
- /*
- * *BSD *cares* about what value we pass down
- * here
- */
- data = 1;
- err = setsockopt(pim->mroute_socket, IPPROTO_IP,
- opt, &data, data_len);
- if (err) {
- zlog_warn(
- "%s %s: failure: setsockopt(fd=%d,IPPROTO_IP,%s=%d): errno=%d: %s",
- __FILE__, __func__, pim->mroute_socket,
- enable ? "MRT_INIT" : "MRT_DONE", data, errno,
- safe_strerror(errno));
- return -1;
- }
- }
-
-#if defined(HAVE_IP_PKTINFO)
- if (enable) {
- /* Linux and Solaris IP_PKTINFO */
- data = 1;
- if (setsockopt(pim->mroute_socket, IPPROTO_IP, IP_PKTINFO,
- &data, data_len)) {
- zlog_warn(
- "Could not set IP_PKTINFO on socket fd=%d: errno=%d: %s",
- pim->mroute_socket, errno,
- safe_strerror(errno));
- }
- }
-#endif
-
- setsockopt_so_recvbuf(pim->mroute_socket, 1024 * 1024 * 8);
-
- flags = fcntl(pim->mroute_socket, F_GETFL, 0);
- if (flags < 0) {
- zlog_warn("Could not get flags on socket fd:%d %d %s",
- pim->mroute_socket, errno, safe_strerror(errno));
- close(pim->mroute_socket);
- return -1;
- }
- if (fcntl(pim->mroute_socket, F_SETFL, flags | O_NONBLOCK)) {
- zlog_warn("Could not set O_NONBLOCK on socket fd:%d %d %s",
- pim->mroute_socket, errno, safe_strerror(errno));
- close(pim->mroute_socket);
- return -1;
- }
-
- if (enable) {
-#if defined linux
- int upcalls = IGMPMSG_WRVIFWHOLE;
- opt = MRT_PIM;
-
- err = setsockopt(pim->mroute_socket, IPPROTO_IP, opt, &upcalls,
- sizeof(upcalls));
- if (err) {
- zlog_warn(
- "Failure to register for VIFWHOLE and WRONGVIF upcalls %d %s",
- errno, safe_strerror(errno));
- return -1;
- }
-#else
- zlog_warn(
- "PIM-SM will not work properly on this platform, until the ability to receive the WRVIFWHOLE upcall");
-#endif
- }
-
- return 0;
-}
-
-#if PIM_IPV == 4
-static const char *const igmpmsgtype2str[IGMPMSG_WRVIFWHOLE + 1] = {
- "<unknown_upcall?>", "NOCACHE", "WRONGVIF", "WHOLEPKT", "WRVIFWHOLE"};
-
-static int pim_mroute_msg_nocache(int fd, struct interface *ifp,
- const struct igmpmsg *msg)
+int pim_mroute_msg_nocache(int fd, struct interface *ifp, const kernmsg *msg)
{
struct pim_interface *pim_ifp = ifp->info;
struct pim_upstream *up;
struct pim_rpf *rpg;
pim_sgaddr sg;
- rpg = pim_ifp ? RP(pim_ifp->pim, msg->im_dst) : NULL;
+ rpg = pim_ifp ? RP(pim_ifp->pim, msg->msg_im_dst) : NULL;
/*
* If the incoming interface is unknown OR
* the Interface type is SSM we don't need to
@@ -176,7 +74,7 @@ static int pim_mroute_msg_nocache(int fd, struct interface *ifp,
* If we've received a multicast packet that isn't connected to
* us
*/
- if (!pim_if_connected_to_source(ifp, msg->im_src)) {
+ if (!pim_if_connected_to_source(ifp, msg->msg_im_src)) {
if (PIM_DEBUG_MROUTE_DETAIL)
zlog_debug(
"%s: Received incoming packet that doesn't originate on our seg",
@@ -185,13 +83,14 @@ static int pim_mroute_msg_nocache(int fd, struct interface *ifp,
}
memset(&sg, 0, sizeof(sg));
- sg.src = msg->im_src;
- sg.grp = msg->im_dst;
+ sg.src = msg->msg_im_src;
+ sg.grp = msg->msg_im_dst;
if (!(PIM_I_am_DR(pim_ifp))) {
if (PIM_DEBUG_MROUTE_DETAIL)
- zlog_debug("%s: Interface is not the DR blackholing incoming traffic for %pSG",
- __func__, &sg);
+ zlog_debug(
+ "%s: Interface is not the DR blackholing incoming traffic for %pSG",
+ __func__, &sg);
/*
* We are not the DR, but we are still receiving packets
@@ -238,22 +137,21 @@ static int pim_mroute_msg_nocache(int fd, struct interface *ifp,
return 0;
}
-static int pim_mroute_msg_wholepkt(int fd, struct interface *ifp,
- const char *buf)
+int pim_mroute_msg_wholepkt(int fd, struct interface *ifp, const char *buf)
{
struct pim_interface *pim_ifp;
pim_sgaddr sg;
struct pim_rpf *rpg;
- const struct ip *ip_hdr;
+ const ipv_hdr *ip_hdr;
struct pim_upstream *up;
pim_ifp = ifp->info;
- ip_hdr = (const struct ip *)buf;
+ ip_hdr = (const ipv_hdr *)buf;
memset(&sg, 0, sizeof(sg));
- sg.src = ip_hdr->ip_src;
- sg.grp = ip_hdr->ip_dst;
+ sg.src = IPV_SRC(ip_hdr);
+ sg.grp = IPV_DST(ip_hdr);
up = pim_upstream_find(pim_ifp->pim, &sg);
if (!up) {
@@ -268,8 +166,9 @@ static int pim_mroute_msg_wholepkt(int fd, struct interface *ifp,
__func__, NULL);
if (!up) {
if (PIM_DEBUG_MROUTE)
- zlog_debug("%s: Unable to create upstream information for %pSG",
- __func__, &sg);
+ zlog_debug(
+ "%s: Unable to create upstream information for %pSG",
+ __func__, &sg);
return 0;
}
pim_upstream_keep_alive_timer_start(
@@ -283,8 +182,9 @@ static int pim_mroute_msg_wholepkt(int fd, struct interface *ifp,
return 0;
}
if (PIM_DEBUG_MROUTE_DETAIL) {
- zlog_debug("%s: Unable to find upstream channel WHOLEPKT%pSG",
- __func__, &sg);
+ zlog_debug(
+ "%s: Unable to find upstream channel WHOLEPKT%pSG",
+ __func__, &sg);
}
return 0;
}
@@ -314,8 +214,9 @@ static int pim_mroute_msg_wholepkt(int fd, struct interface *ifp,
if (!up->t_rs_timer) {
if (pim_is_grp_ssm(pim_ifp->pim, sg.grp)) {
if (PIM_DEBUG_PIM_REG)
- zlog_debug("%pSG register forward skipped as group is SSM",
- &sg);
+ zlog_debug(
+ "%pSG register forward skipped as group is SSM",
+ &sg);
return 0;
}
@@ -327,23 +228,22 @@ static int pim_mroute_msg_wholepkt(int fd, struct interface *ifp,
return 0;
}
- pim_register_send((uint8_t *)buf + sizeof(struct ip),
- ntohs(ip_hdr->ip_len) - sizeof(struct ip),
+ pim_register_send((uint8_t *)buf + sizeof(ipv_hdr),
+ ntohs(IPV_LEN(ip_hdr)) - sizeof(ipv_hdr),
pim_ifp->primary_address, rpg, 0, up);
}
return 0;
}
-static int pim_mroute_msg_wrongvif(int fd, struct interface *ifp,
- const struct igmpmsg *msg)
+int pim_mroute_msg_wrongvif(int fd, struct interface *ifp, const kernmsg *msg)
{
struct pim_ifchannel *ch;
struct pim_interface *pim_ifp;
pim_sgaddr sg;
memset(&sg, 0, sizeof(sg));
- sg.src = msg->im_src;
- sg.grp = msg->im_dst;
+ sg.src = msg->msg_im_src;
+ sg.grp = msg->msg_im_dst;
/*
Send Assert(S,G) on iif as response to WRONGVIF kernel upcall.
@@ -358,16 +258,18 @@ static int pim_mroute_msg_wrongvif(int fd, struct interface *ifp,
if (!ifp) {
if (PIM_DEBUG_MROUTE)
- zlog_debug("%s: WRONGVIF (S,G)=%pSG could not find input interface for input_vif_index=%d",
- __func__, &sg, msg->im_vif);
+ zlog_debug(
+ "%s: WRONGVIF (S,G)=%pSG could not find input interface for input_vif_index=%d",
+ __func__, &sg, msg->msg_im_vif);
return -1;
}
pim_ifp = ifp->info;
if (!pim_ifp) {
if (PIM_DEBUG_MROUTE)
- zlog_debug("%s: WRONGVIF (S,G)=%pSG multicast not enabled on interface %s",
- __func__, &sg, ifp->name);
+ zlog_debug(
+ "%s: WRONGVIF (S,G)=%pSG multicast not enabled on interface %s",
+ __func__, &sg, ifp->name);
return -2;
}
@@ -375,16 +277,17 @@ static int pim_mroute_msg_wrongvif(int fd, struct interface *ifp,
if (!ch) {
pim_sgaddr star_g = sg;
if (PIM_DEBUG_MROUTE)
- zlog_debug("%s: WRONGVIF (S,G)=%pSG could not find channel on interface %s",
- __func__, &sg, ifp->name);
+ zlog_debug(
+ "%s: WRONGVIF (S,G)=%pSG could not find channel on interface %s",
+ __func__, &sg, ifp->name);
star_g.src = PIMADDR_ANY;
ch = pim_ifchannel_find(ifp, &star_g);
if (!ch) {
if (PIM_DEBUG_MROUTE)
- zlog_debug("%s: WRONGVIF (*,G)=%pSG could not find channel on interface %s",
- __func__, &star_g,
- ifp->name);
+ zlog_debug(
+ "%s: WRONGVIF (*,G)=%pSG could not find channel on interface %s",
+ __func__, &star_g, ifp->name);
return -3;
}
}
@@ -433,10 +336,9 @@ static int pim_mroute_msg_wrongvif(int fd, struct interface *ifp,
return 0;
}
-static int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp,
- const char *buf)
+int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf)
{
- const struct ip *ip_hdr = (const struct ip *)buf;
+ const ipv_hdr *ip_hdr = (const ipv_hdr *)buf;
struct pim_interface *pim_ifp;
struct pim_instance *pim;
struct pim_ifchannel *ch;
@@ -447,8 +349,8 @@ static int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp,
pim_ifp = ifp->info;
memset(&sg, 0, sizeof(sg));
- sg.src = ip_hdr->ip_src;
- sg.grp = ip_hdr->ip_dst;
+ sg.src = IPV_SRC(ip_hdr);
+ sg.grp = IPV_DST(ip_hdr);
ch = pim_ifchannel_find(ifp, &sg);
if (ch) {
@@ -481,8 +383,8 @@ static int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp,
struct pim_rpf *rpf = RP(pim_ifp->pim, sg.grp);
/* No RPF or No RPF interface or No mcast on RPF interface */
- if (!rpf || !rpf->source_nexthop.interface
- || !rpf->source_nexthop.interface->info)
+ if (!rpf || !rpf->source_nexthop.interface ||
+ !rpf->source_nexthop.interface->info)
return 0;
/*
@@ -512,7 +414,7 @@ static int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp,
* bow out of doing a nexthop lookup and
* setting the SPTBIT to true
*/
- if (up->upstream_register.s_addr != INADDR_ANY &&
+ if (!(pim_addr_is_any(up->upstream_register)) &&
pim_nexthop_lookup(pim_ifp->pim, &source,
up->upstream_register, 0)) {
pim_register_stop_send(source.interface, &sg,
@@ -551,8 +453,9 @@ static int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp,
NULL);
if (!up) {
if (PIM_DEBUG_MROUTE)
- zlog_debug("%pSG: WRONGVIF%s unable to create upstream on interface",
- &sg, ifp->name);
+			zlog_debug(
+				"%pSG: WRONGVIF unable to create upstream on interface %s",
+				&sg, ifp->name);
return -2;
}
PIM_UPSTREAM_FLAG_SET_SRC_STREAM(up->flags);
@@ -577,117 +480,6 @@ static int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp,
return 0;
}
-static int pim_mroute_msg(struct pim_instance *pim, const char *buf,
- int buf_size, ifindex_t ifindex)
-{
- struct interface *ifp;
- const struct ip *ip_hdr;
- const struct igmpmsg *msg;
-
- if (buf_size < (int)sizeof(struct ip))
- return 0;
-
- ip_hdr = (const struct ip *)buf;
-
- if (ip_hdr->ip_p == IPPROTO_IGMP) {
-#if PIM_IPV == 4
- struct pim_interface *pim_ifp;
- struct in_addr ifaddr;
- struct gm_sock *igmp;
- const struct prefix *connected_src;
-
- /* We have the IP packet but we do not know which interface this
- * packet was
- * received on. Find the interface that is on the same subnet as
- * the source
- * of the IP packet.
- */
- ifp = if_lookup_by_index(ifindex, pim->vrf->vrf_id);
-
- if (!ifp || !ifp->info)
- return 0;
-
- connected_src = pim_if_connected_to_source(ifp, ip_hdr->ip_src);
-
- if (!connected_src) {
- if (PIM_DEBUG_IGMP_PACKETS) {
- zlog_debug("Recv IGMP packet on interface: %s from a non-connected source: %pI4",
- ifp->name, &ip_hdr->ip_src);
- }
- return 0;
- }
-
- pim_ifp = ifp->info;
- ifaddr = connected_src->u.prefix4;
- igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list,
- ifaddr);
-
- if (PIM_DEBUG_IGMP_PACKETS) {
- zlog_debug(
- "%s(%s): igmp kernel upcall on %s(%p) for %pI4 -> %pI4",
- __func__, pim->vrf->name, ifp->name, igmp,
- &ip_hdr->ip_src, &ip_hdr->ip_dst);
- }
- if (igmp)
- pim_igmp_packet(igmp, (char *)buf, buf_size);
- else if (PIM_DEBUG_IGMP_PACKETS) {
- zlog_debug("No IGMP socket on interface: %s with connected source: %pFX",
- ifp->name, connected_src);
- }
-#endif
- } else if (ip_hdr->ip_p) {
- if (PIM_DEBUG_MROUTE_DETAIL) {
- zlog_debug(
- "%s: no kernel upcall proto=%d src: %pI4 dst: %pI4 msg_size=%d",
- __func__, ip_hdr->ip_p, &ip_hdr->ip_src, &ip_hdr->ip_dst,
- buf_size);
- }
-
- } else {
- msg = (const struct igmpmsg *)buf;
-
- ifp = pim_if_find_by_vif_index(pim, msg->im_vif);
-
- if (!ifp)
- return 0;
- if (PIM_DEBUG_MROUTE) {
- zlog_debug(
- "%s: pim kernel upcall %s type=%d ip_p=%d from fd=%d for (S,G)=(%pI4,%pI4) on %s vifi=%d size=%d",
- __func__, igmpmsgtype2str[msg->im_msgtype],
- msg->im_msgtype, ip_hdr->ip_p,
- pim->mroute_socket, &msg->im_src, &msg->im_dst, ifp->name,
- msg->im_vif, buf_size);
- }
-
- switch (msg->im_msgtype) {
- case IGMPMSG_WRONGVIF:
- return pim_mroute_msg_wrongvif(pim->mroute_socket, ifp,
- msg);
- case IGMPMSG_NOCACHE:
- return pim_mroute_msg_nocache(pim->mroute_socket, ifp,
- msg);
- case IGMPMSG_WHOLEPKT:
- return pim_mroute_msg_wholepkt(pim->mroute_socket, ifp,
- (const char *)msg);
- case IGMPMSG_WRVIFWHOLE:
- return pim_mroute_msg_wrvifwhole(
- pim->mroute_socket, ifp, (const char *)msg);
- default:
- break;
- }
- }
-
- return 0;
-}
-#else /* PIM_IPV != 4 */
-
-static int pim_mroute_msg(struct pim_instance *pim, const char *buf,
- int buf_size, ifindex_t ifindex)
-{
- return 0;
-}
-#endif /* PIM_IPV != 4 */
-
static void mroute_read(struct thread *t)
{
struct pim_instance *pim;
@@ -724,6 +516,8 @@ static void mroute_read(struct thread *t)
/* Keep reading */
done:
mroute_read_on(pim);
+
+ return;
}
static void mroute_read_on(struct pim_instance *pim)
@@ -743,8 +537,11 @@ int pim_mroute_socket_enable(struct pim_instance *pim)
frr_with_privs(&pimd_privs) {
+#if PIM_IPV == 4
fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
-
+#else
+ fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
+#endif
if (fd < 0) {
zlog_warn("Could not create mroute socket: errno=%d: %s",
errno,
@@ -812,7 +609,7 @@ int pim_mroute_add_vif(struct interface *ifp, pim_addr ifaddr,
unsigned char flags)
{
struct pim_interface *pim_ifp = ifp->info;
- struct vifctl vc;
+ pim_vifctl vc;
int err;
if (PIM_DEBUG_MROUTE)
@@ -821,9 +618,10 @@ int pim_mroute_add_vif(struct interface *ifp, pim_addr ifaddr,
pim_ifp->pim->vrf->name);
memset(&vc, 0, sizeof(vc));
- vc.vifc_vifi = pim_ifp->mroute_vif_index;
+ vc.vc_vifi = pim_ifp->mroute_vif_index;
+#if PIM_IPV == 4
#ifdef VIFF_USE_IFINDEX
- vc.vifc_lcl_ifindex = ifp->ifindex;
+ vc.vc_lcl_ifindex = ifp->ifindex;
#else
if (ifaddr.s_addr == INADDR_ANY) {
zlog_warn(
@@ -831,24 +629,29 @@ int pim_mroute_add_vif(struct interface *ifp, pim_addr ifaddr,
__func__);
return -1;
}
- memcpy(&vc.vifc_lcl_addr, &ifaddr, sizeof(vc.vifc_lcl_addr));
+ memcpy(&vc.vc_lcl_addr, &ifaddr, sizeof(vc.vc_lcl_addr));
+#endif
+#else
+ vc.vc_pifi = ifp->ifindex;
#endif
- vc.vifc_flags = flags;
- vc.vifc_threshold = PIM_MROUTE_MIN_TTL;
- vc.vifc_rate_limit = 0;
+ vc.vc_flags = flags;
+ vc.vc_threshold = PIM_MROUTE_MIN_TTL;
+ vc.vc_rate_limit = 0;
+#if PIM_IPV == 4
#ifdef PIM_DVMRP_TUNNEL
- if (vc.vifc_flags & VIFF_TUNNEL) {
- memcpy(&vc.vifc_rmt_addr, &vif_remote_addr,
- sizeof(vc.vifc_rmt_addr));
+ if (vc.vc_flags & VIFF_TUNNEL) {
+ memcpy(&vc.vc_rmt_addr, &vif_remote_addr,
+ sizeof(vc.vc_rmt_addr));
}
#endif
+#endif
- err = setsockopt(pim_ifp->pim->mroute_socket, IPPROTO_IP, MRT_ADD_VIF,
+ err = setsockopt(pim_ifp->pim->mroute_socket, PIM_IPPROTO, MRT_ADD_VIF,
(void *)&vc, sizeof(vc));
if (err) {
zlog_warn(
- "%s: failure: setsockopt(fd=%d,IPPROTO_IP,MRT_ADD_VIF,vif_index=%d,ifaddr=%pPAs,flag=%d): errno=%d: %s",
+ "%s: failure: setsockopt(fd=%d,PIM_IPPROTO,MRT_ADD_VIF,vif_index=%d,ifaddr=%pPAs,flag=%d): errno=%d: %s",
__func__, pim_ifp->pim->mroute_socket, ifp->ifindex,
&ifaddr, flags, errno, safe_strerror(errno));
return -2;
@@ -860,7 +663,7 @@ int pim_mroute_add_vif(struct interface *ifp, pim_addr ifaddr,
int pim_mroute_del_vif(struct interface *ifp)
{
struct pim_interface *pim_ifp = ifp->info;
- struct vifctl vc;
+ pim_vifctl vc;
int err;
if (PIM_DEBUG_MROUTE)
@@ -869,13 +672,13 @@ int pim_mroute_del_vif(struct interface *ifp)
pim_ifp->pim->vrf->name);
memset(&vc, 0, sizeof(vc));
- vc.vifc_vifi = pim_ifp->mroute_vif_index;
+ vc.vc_vifi = pim_ifp->mroute_vif_index;
- err = setsockopt(pim_ifp->pim->mroute_socket, IPPROTO_IP, MRT_DEL_VIF,
+ err = setsockopt(pim_ifp->pim->mroute_socket, PIM_IPPROTO, MRT_DEL_VIF,
(void *)&vc, sizeof(vc));
if (err) {
zlog_warn(
- "%s %s: failure: setsockopt(fd=%d,IPPROTO_IP,MRT_DEL_VIF,vif_index=%d): errno=%d: %s",
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,MRT_DEL_VIF,vif_index=%d): errno=%d: %s",
__FILE__, __func__, pim_ifp->pim->mroute_socket,
pim_ifp->mroute_vif_index, errno, safe_strerror(errno));
return -2;
@@ -983,20 +786,21 @@ static int pim_mroute_add(struct channel_oil *c_oil, const char *name)
&& *oil_parent(c_oil) != 0) {
*oil_parent(tmp_oil) = 0;
}
- err = setsockopt(pim->mroute_socket, IPPROTO_IP, MRT_ADD_MFC,
+ /* For IPv6 MRT_ADD_MFC is defined to MRT6_ADD_MFC */
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, MRT_ADD_MFC,
&tmp_oil->oil, sizeof(tmp_oil->oil));
if (!err && !c_oil->installed
&& !pim_addr_is_any(*oil_origin(c_oil))
&& *oil_parent(c_oil) != 0) {
*oil_parent(tmp_oil) = *oil_parent(c_oil);
- err = setsockopt(pim->mroute_socket, IPPROTO_IP, MRT_ADD_MFC,
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, MRT_ADD_MFC,
&tmp_oil->oil, sizeof(tmp_oil->oil));
}
if (err) {
zlog_warn(
- "%s %s: failure: setsockopt(fd=%d,IPPROTO_IP,MRT_ADD_MFC): errno=%d: %s",
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,MRT_ADD_MFC): errno=%d: %s",
__FILE__, __func__, pim->mroute_socket, errno,
safe_strerror(errno));
return -2;
@@ -1172,12 +976,12 @@ int pim_mroute_del(struct channel_oil *c_oil, const char *name)
return -2;
}
- err = setsockopt(pim->mroute_socket, IPPROTO_IP, MRT_DEL_MFC,
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, MRT_DEL_MFC,
&c_oil->oil, sizeof(c_oil->oil));
if (err) {
if (PIM_DEBUG_MROUTE)
zlog_warn(
- "%s %s: failure: setsockopt(fd=%d,IPPROTO_IP,MRT_DEL_MFC): errno=%d: %s",
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,MRT_DEL_MFC): errno=%d: %s",
__FILE__, __func__, pim->mroute_socket, errno,
safe_strerror(errno));
return -2;
@@ -1199,6 +1003,7 @@ int pim_mroute_del(struct channel_oil *c_oil, const char *name)
void pim_mroute_update_counters(struct channel_oil *c_oil)
{
struct pim_instance *pim = c_oil->pim;
+ pim_sioc_sg_req sgreq;
c_oil->cc.oldpktcnt = c_oil->cc.pktcnt;
c_oil->cc.oldbytecnt = c_oil->cc.bytecnt;
@@ -1217,29 +1022,33 @@ void pim_mroute_update_counters(struct channel_oil *c_oil)
return;
}
-#if PIM_IPV == 4
- struct sioc_sg_req sgreq;
memset(&sgreq, 0, sizeof(sgreq));
+
+#if PIM_IPV == 4
sgreq.src = *oil_origin(c_oil);
sgreq.grp = *oil_mcastgrp(c_oil);
-
pim_zlookup_sg_statistics(c_oil);
- if (ioctl(pim->mroute_socket, SIOCGETSGCNT, &sgreq)) {
+#else
+ sgreq.src = c_oil->oil.mf6cc_origin;
+ sgreq.grp = c_oil->oil.mf6cc_mcastgrp;
+ /* TODO Zlookup_sg_statistics for V6 to be added */
+#endif
+ if (ioctl(pim->mroute_socket, PIM_SIOCGETSGCNT, &sgreq)) {
pim_sgaddr sg;
sg.src = *oil_origin(c_oil);
sg.grp = *oil_mcastgrp(c_oil);
- zlog_warn("ioctl(SIOCGETSGCNT=%lu) failure for (S,G)=%pSG: errno=%d: %s",
- (unsigned long)SIOCGETSGCNT, &sg,
- errno, safe_strerror(errno));
+ zlog_warn(
+ "ioctl(PIM_SIOCGETSGCNT=%lu) failure for (S,G)=%pSG: errno=%d: %s",
+ (unsigned long)PIM_SIOCGETSGCNT, &sg, errno,
+ safe_strerror(errno));
return;
}
c_oil->cc.pktcnt = sgreq.pktcnt;
c_oil->cc.bytecnt = sgreq.bytecnt;
c_oil->cc.wrong_if = sgreq.wrong_if;
-#endif
return;
}
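
The pim_mroute.c changes above route every kernel interaction through the family-neutral names introduced in pim_mroute.h below (PIM_IPPROTO, PIM_SIOCGETSGCNT, pim_vifctl, pim_sioc_sg_req), leaving only the (S,G) field assignments conditional. A condensed sketch of the counter read as it now stands, assuming those aliases plus <string.h> and <sys/ioctl.h>:

/* Sketch: one code path reads per-(S,G) counters for both families. */
static int read_sg_counters_sketch(int mroute_fd, struct channel_oil *c_oil)
{
	pim_sioc_sg_req sgreq;

	memset(&sgreq, 0, sizeof(sgreq));
#if PIM_IPV == 4
	sgreq.src = *oil_origin(c_oil);
	sgreq.grp = *oil_mcastgrp(c_oil);
#else
	sgreq.src = c_oil->oil.mf6cc_origin;
	sgreq.grp = c_oil->oil.mf6cc_mcastgrp;
#endif
	/* PIM_SIOCGETSGCNT maps to SIOCGETSGCNT or SIOCGETSGCNT_IN6 */
	if (ioctl(mroute_fd, PIM_SIOCGETSGCNT, &sgreq))
		return -1;

	c_oil->cc.pktcnt = sgreq.pktcnt;
	c_oil->cc.bytecnt = sgreq.bytecnt;
	c_oil->cc.wrong_if = sgreq.wrong_if;
	return 0;
}
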
diff --git a/pimd/pim_mroute.h b/pimd/pim_mroute.h
index 14b0a8ccaf..35ba60bf35 100644
--- a/pimd/pim_mroute.h
+++ b/pimd/pim_mroute.h
@@ -30,153 +30,107 @@
#define __EXTENSIONS__
#endif
-#include <netinet/in.h>
-#ifdef HAVE_NETINET_IP_MROUTE_H
-#include <netinet/ip_mroute.h>
-#endif
#define PIM_MROUTE_MIN_TTL (1)
#if PIM_IPV == 4
+
+#include <netinet/in.h>
#if defined(HAVE_LINUX_MROUTE_H)
#include <linux/mroute.h>
#else
-/*
- Below: from <linux/mroute.h>
-*/
-
-#ifndef MAXVIFS
-#define MAXVIFS (256)
+#ifndef VTYSH_EXTRACT_PL
+#include "linux/mroute.h"
#endif
-
-#ifndef SIOCGETVIFCNT
-#define SIOCGETVIFCNT SIOCPROTOPRIVATE /* IP protocol privates */
-#define SIOCGETSGCNT (SIOCPROTOPRIVATE+1)
-#define SIOCGETRPF (SIOCPROTOPRIVATE+2)
#endif
-#ifndef MRT_INIT
-#define MRT_BASE 200
-#define MRT_INIT (MRT_BASE) /* Activate the kernel mroute code */
-#define MRT_DONE (MRT_BASE+1) /* Shutdown the kernel mroute */
-#define MRT_ADD_VIF (MRT_BASE+2) /* Add a virtual interface */
-#define MRT_DEL_VIF (MRT_BASE+3) /* Delete a virtual interface */
-#define MRT_ADD_MFC (MRT_BASE+4) /* Add a multicast forwarding entry */
-#define MRT_DEL_MFC (MRT_BASE+5) /* Delete a multicast forwarding entry */
-#define MRT_VERSION (MRT_BASE+6) /* Get the kernel multicast version */
-#define MRT_ASSERT (MRT_BASE+7) /* Activate PIM assert mode */
-#define MRT_PIM (MRT_BASE+8) /* enable PIM code */
-#endif
+typedef struct vifctl pim_vifctl;
+typedef struct igmpmsg kernmsg;
+typedef struct sioc_sg_req pim_sioc_sg_req;
-#ifndef MRT_TABLE
-#define MRT_TABLE (209) /* Specify mroute table ID */
-#endif
+#define vc_vifi vifc_vifi
+#define vc_flags vifc_flags
+#define vc_threshold vifc_threshold
+#define vc_rate_limit vifc_rate_limit
+#define vc_lcl_addr vifc_lcl_addr
+#define vc_lcl_ifindex vifc_lcl_ifindex
+#define vc_rmt_addr vifc_rmt_addr
-#ifndef HAVE_VIFI_T
-typedef unsigned short vifi_t;
-#endif
+#define msg_im_vif im_vif
+#define msg_im_src im_src
+#define msg_im_dst im_dst
-#ifndef HAVE_STRUCT_VIFCTL
-struct vifctl {
- vifi_t vifc_vifi; /* Index of VIF */
- unsigned char vifc_flags; /* VIFF_ flags */
- unsigned char vifc_threshold; /* ttl limit */
- unsigned int vifc_rate_limit; /* Rate limiter values (NI) */
- struct in_addr vifc_lcl_addr; /* Our address */
- struct in_addr vifc_rmt_addr; /* IPIP tunnel addr */
-};
+#ifndef IGMPMSG_WRVIFWHOLE
+#define IGMPMSG_WRVIFWHOLE 4 /* For PIM processing */
#endif
-#ifndef HAVE_STRUCT_MFCCTL
-struct mfcctl {
- struct in_addr mfcc_origin; /* Origin of mcast */
- struct in_addr mfcc_mcastgrp; /* Group in question */
- vifi_t mfcc_parent; /* Where it arrived */
- unsigned char mfcc_ttls[MAXVIFS]; /* Where it is going */
- unsigned int mfcc_pkt_cnt; /* pkt count for src-grp */
- unsigned int mfcc_byte_cnt;
- unsigned int mfcc_wrong_if;
- int mfcc_expire;
-};
+#ifndef PIM_IPPROTO
+#define PIM_IPPROTO IPPROTO_IP
+#endif
+#ifndef PIM_SIOCGETSGCNT
+#define PIM_SIOCGETSGCNT SIOCGETSGCNT
#endif
-/*
- * Group count retrieval for mrouted
- */
-/*
- struct sioc_sg_req sgreq;
- memset(&sgreq, 0, sizeof(sgreq));
- memcpy(&sgreq.src, &source_addr, sizeof(sgreq.src));
- memcpy(&sgreq.grp, &group_addr, sizeof(sgreq.grp));
- ioctl(mrouter_s4, SIOCGETSGCNT, &sgreq);
- */
-#ifndef HAVE_STRUCT_SIOC_SG_REQ
-struct sioc_sg_req {
- struct in_addr src;
- struct in_addr grp;
- unsigned long pktcnt;
- unsigned long bytecnt;
- unsigned long wrong_if;
-};
+#else /* PIM_IPV != 4 */
+
+#include <netinet/ip6.h>
+
+#if defined(HAVE_LINUX_MROUTE6_H)
+#include <linux/mroute6.h>
+#else
+#ifndef VTYSH_EXTRACT_PL
+#include "linux/mroute6.h"
+#endif
#endif
-/*
- * To get vif packet counts
- */
-/*
- struct sioc_vif_req vreq;
- memset(&vreq, 0, sizeof(vreq));
- vreq.vifi = vif_index;
- ioctl(mrouter_s4, SIOCGETVIFCNT, &vreq);
- */
-#ifndef HAVE_STRUCT_SIOC_VIF_REQ
-struct sioc_vif_req {
- vifi_t vifi; /* Which iface */
- unsigned long icount; /* In packets */
- unsigned long ocount; /* Out packets */
- unsigned long ibytes; /* In bytes */
- unsigned long obytes; /* Out bytes */
-};
+#ifndef MRT_INIT
+#define MRT_BASE MRT6_BASE
+#define MRT_INIT MRT6_INIT
+#define MRT_DONE MRT6_DONE
+#define MRT_ADD_VIF MRT6_ADD_MIF
+#define MRT_DEL_VIF MRT6_DEL_MIF
+#define MRT_ADD_MFC MRT6_ADD_MFC
+#define MRT_DEL_MFC MRT6_DEL_MFC
+#define MRT_VERSION MRT6_VERSION
+#define MRT_ASSERT MRT6_ASSERT
+#define MRT_PIM MRT6_PIM
#endif
-/*
- * Pseudo messages used by mrouted
- */
-#ifndef IGMPMSG_NOCACHE
-#define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */
-#define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */
-#define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */
+#ifndef PIM_IPPROTO
+#define PIM_IPPROTO IPPROTO_IPV6
#endif
-#ifndef HAVE_STRUCT_IGMPMSG
-struct igmpmsg {
- uint32_t unused1, unused2;
- unsigned char im_msgtype; /* What is this */
- unsigned char im_mbz; /* Must be zero */
- unsigned char im_vif; /* Interface (this ought to be a vifi_t!) */
- unsigned char unused3;
- struct in_addr im_src, im_dst;
-};
+#ifndef PIM_SIOCGETSGCNT
+#define PIM_SIOCGETSGCNT SIOCGETSGCNT_IN6
#endif
-#endif /* HAVE_LINUX_MROUTE_H */
+#ifndef MRT6MSG_WRMIFWHOLE
+#define MRT6MSG_WRMIFWHOLE 4 /* For PIM processing */
+#endif
-typedef struct mfcctl pim_mfcctl;
+typedef struct mif6ctl pim_vifctl;
+typedef struct mrt6msg kernmsg;
+typedef mifi_t vifi_t;
+typedef struct sioc_sg_req6 pim_sioc_sg_req;
-#else /* PIM_IPV != 4 */
-#if defined(HAVE_LINUX_MROUTE6_H)
-#include <linux/mroute6.h>
-#endif
+#define vc_vifi mif6c_mifi
+#define vc_flags mif6c_flags
+#define vc_threshold vifc_threshold
+#define vc_pifi mif6c_pifi
+#define vc_rate_limit vifc_rate_limit
-typedef struct mf6cctl pim_mfcctl;
+#define msg_im_vif im6_mif
+#define msg_im_src im6_src
+#define msg_im_dst im6_dst
+#ifndef MAXVIFS
#define MAXVIFS IF_SETSIZE
#endif
-#ifndef IGMPMSG_WRVIFWHOLE
-#define IGMPMSG_WRVIFWHOLE 4 /* For PIM processing */
+#define VIFF_REGISTER MIFF_REGISTER
#endif
+
/*
Above: from <linux/mroute.h>
*/
@@ -201,4 +155,11 @@ int pim_mroute_del(struct channel_oil *c_oil, const char *name);
void pim_mroute_update_counters(struct channel_oil *c_oil);
bool pim_mroute_allow_iif_in_oil(struct channel_oil *c_oil,
int oif_index);
+int pim_mroute_msg(struct pim_instance *pim, const char *buf, size_t buf_size,
+ ifindex_t ifindex);
+int pim_mroute_msg_nocache(int fd, struct interface *ifp, const kernmsg *msg);
+int pim_mroute_msg_wholepkt(int fd, struct interface *ifp, const char *buf);
+int pim_mroute_msg_wrongvif(int fd, struct interface *ifp, const kernmsg *msg);
+int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf);
+int pim_mroute_set(struct pim_instance *pim, int enable);
#endif /* PIM_MROUTE_H */
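
The rewritten pim_mroute.h above is the pivot of the series: instead of carrying fallback copies of the Linux mroute API, it includes the kernel (or bundled) headers and maps both families onto one vocabulary, namely the pim_vifctl/kernmsg/pim_sioc_sg_req typedefs, the vc_* and msg_im_* field aliases, PIM_IPPROTO, PIM_SIOCGETSGCNT and the MRT_* to MRT6_* defines. An illustrative sketch (not the exact FRR code) of how a VIF/MIF add can now be written once:

/* Sketch: pim_vifctl and the vc_* names expand to vifctl or mif6ctl members
 * depending on PIM_IPV, so one code path drives MRT_ADD_VIF and MRT6_ADD_MIF. */
static int add_vif_sketch(int mroute_fd, vifi_t vif_index, ifindex_t ifindex)
{
	pim_vifctl vc;

	memset(&vc, 0, sizeof(vc));
	vc.vc_vifi = vif_index;
#if PIM_IPV == 4
	vc.vc_lcl_ifindex = ifindex;	/* assumes VIFF_USE_IFINDEX, as above */
#else
	vc.vc_pifi = ifindex;
#endif
	vc.vc_threshold = PIM_MROUTE_MIN_TTL;

	return setsockopt(mroute_fd, PIM_IPPROTO, MRT_ADD_VIF, &vc, sizeof(vc));
}
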
diff --git a/pimd/pim_mroute_msg.c b/pimd/pim_mroute_msg.c
new file mode 100644
index 0000000000..7d80488c68
--- /dev/null
+++ b/pimd/pim_mroute_msg.c
@@ -0,0 +1,239 @@
+/*
+ * PIM for Quagga
+ * Copyright (C) 2022 Dell Technologies Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+#include "log.h"
+#include "privs.h"
+#include "if.h"
+#include "prefix.h"
+#include "vty.h"
+#include "plist.h"
+#include "sockopt.h"
+#include "lib_errors.h"
+#include "lib/network.h"
+
+#include "pimd.h"
+#include "pim_mroute.h"
+#include "pim_oil.h"
+#include "pim_str.h"
+#include "pim_iface.h"
+#include "pim_macro.h"
+#include "pim_rp.h"
+#include "pim_oil.h"
+#include "pim_msg.h"
+#include "pim_sock.h"
+
+
+int pim_mroute_set(struct pim_instance *pim, int enable)
+{
+ int err;
+ int opt, data;
+ socklen_t data_len = sizeof(data);
+
+ /*
+ * We need to create the VRF table for the pim mroute_socket
+ */
+ if (pim->vrf->vrf_id != VRF_DEFAULT) {
+ frr_with_privs(&pimd_privs) {
+
+ data = pim->vrf->data.l.table_id;
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO,
+ MRT_TABLE, &data, data_len);
+ if (err) {
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO, MRT_TABLE=%d): errno=%d: %s",
+ __FILE__, __func__, pim->mroute_socket,
+ data, errno, safe_strerror(errno));
+ return -1;
+ }
+
+ }
+ }
+
+ frr_with_privs(&pimd_privs) {
+ opt = enable ? MRT_INIT : MRT_DONE;
+ /*
+ * *BSD *cares* about what value we pass down
+ * here
+ */
+ data = 1;
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &data,
+ data_len);
+ if (err) {
+ zlog_warn(
+ "%s %s: failure: setsockopt(fd=%d,PIM_IPPROTO,%s=%d): errno=%d: %s",
+ __FILE__, __func__, pim->mroute_socket,
+ enable ? "MRT_INIT" : "MRT_DONE", data, errno,
+ safe_strerror(errno));
+ return -1;
+ }
+ }
+
+#if defined(HAVE_IP_PKTINFO)
+ if (enable) {
+ /* Linux and Solaris IP_PKTINFO */
+ data = 1;
+ if (setsockopt(pim->mroute_socket, PIM_IPPROTO, IP_PKTINFO,
+ &data, data_len)) {
+ zlog_warn(
+ "Could not set IP_PKTINFO on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno,
+ safe_strerror(errno));
+ }
+ }
+#endif
+
+ setsockopt_so_recvbuf(pim->mroute_socket, 1024 * 1024 * 8);
+
+	if (set_nonblocking(pim->mroute_socket) < 0) {
+ zlog_warn(
+ "Could not set non blocking on socket fd=%d: errno=%d: %s",
+ pim->mroute_socket, errno,
+ safe_strerror(errno));
+ }
+
+ if (enable) {
+#if defined linux
+ int upcalls = IGMPMSG_WRVIFWHOLE;
+ opt = MRT_PIM;
+
+ err = setsockopt(pim->mroute_socket, PIM_IPPROTO, opt, &upcalls,
+ sizeof(upcalls));
+ if (err) {
+ zlog_warn(
+ "Failure to register for VIFWHOLE and WRONGVIF upcalls %d %s",
+ errno, safe_strerror(errno));
+ return -1;
+ }
+#else
+		zlog_warn(
+			"PIM-SM will not work properly on this platform, until the ability to receive the WRVIFWHOLE upcall is added");
+#endif
+
+ }
+
+ return 0;
+}
+
+static const char *const igmpmsgtype2str[IGMPMSG_WRVIFWHOLE + 1] = {
+ "<unknown_upcall?>", "NOCACHE", "WRONGVIF", "WHOLEPKT", "WRVIFWHOLE"};
+
+
+int pim_mroute_msg(struct pim_instance *pim, const char *buf,
+ size_t buf_size, ifindex_t ifindex)
+{
+ struct interface *ifp;
+ const struct ip *ip_hdr;
+ const struct igmpmsg *msg;
+
+ if (buf_size < (int)sizeof(struct ip))
+ return 0;
+
+ ip_hdr = (const struct ip *)buf;
+
+ if (ip_hdr->ip_p == IPPROTO_IGMP) {
+ struct pim_interface *pim_ifp;
+ struct in_addr ifaddr;
+ struct gm_sock *igmp;
+ const struct prefix *connected_src;
+
+ /* We have the IP packet but we do not know which interface this
+ * packet was
+ * received on. Find the interface that is on the same subnet as
+ * the source
+ * of the IP packet.
+ */
+ ifp = if_lookup_by_index(ifindex, pim->vrf->vrf_id);
+
+ if (!ifp || !ifp->info)
+ return 0;
+
+ connected_src = pim_if_connected_to_source(ifp, ip_hdr->ip_src);
+
+ if (!connected_src) {
+ if (PIM_DEBUG_IGMP_PACKETS) {
+ zlog_debug(
+ "Recv IGMP packet on interface: %s from a non-connected source: %pI4",
+ ifp->name, &ip_hdr->ip_src);
+ }
+ return 0;
+ }
+
+ pim_ifp = ifp->info;
+ ifaddr = connected_src->u.prefix4;
+ igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list,
+ ifaddr);
+
+ if (PIM_DEBUG_IGMP_PACKETS) {
+ zlog_debug(
+ "%s(%s): igmp kernel upcall on %s(%p) for %pI4 -> %pI4",
+ __func__, pim->vrf->name, ifp->name, igmp,
+ &ip_hdr->ip_src, &ip_hdr->ip_dst);
+ }
+ if (igmp)
+ pim_igmp_packet(igmp, (char *)buf, buf_size);
+ else if (PIM_DEBUG_IGMP_PACKETS) {
+ zlog_debug(
+ "No IGMP socket on interface: %s with connected source: %pFX",
+ ifp->name, connected_src);
+ }
+ } else if (ip_hdr->ip_p) {
+ if (PIM_DEBUG_MROUTE_DETAIL) {
+ zlog_debug(
+ "%s: no kernel upcall proto=%d src: %pI4 dst: %pI4 msg_size=%ld",
+ __func__, ip_hdr->ip_p, &ip_hdr->ip_src,
+ &ip_hdr->ip_dst, (long int)buf_size);
+ }
+
+ } else {
+ msg = (const struct igmpmsg *)buf;
+
+ ifp = pim_if_find_by_vif_index(pim, msg->im_vif);
+
+ if (!ifp)
+ return 0;
+ if (PIM_DEBUG_MROUTE) {
+ zlog_debug(
+ "%s: pim kernel upcall %s type=%d ip_p=%d from fd=%d for (S,G)=(%pI4,%pI4) on %s vifi=%d size=%ld",
+ __func__, igmpmsgtype2str[msg->im_msgtype],
+ msg->im_msgtype, ip_hdr->ip_p,
+ pim->mroute_socket, &msg->im_src, &msg->im_dst,
+ ifp->name, msg->im_vif, (long int)buf_size);
+ }
+
+ switch (msg->im_msgtype) {
+ case IGMPMSG_WRONGVIF:
+ return pim_mroute_msg_wrongvif(pim->mroute_socket, ifp,
+ msg);
+ case IGMPMSG_NOCACHE:
+ return pim_mroute_msg_nocache(pim->mroute_socket, ifp,
+ msg);
+ case IGMPMSG_WHOLEPKT:
+ return pim_mroute_msg_wholepkt(pim->mroute_socket, ifp,
+ (const char *)msg);
+ case IGMPMSG_WRVIFWHOLE:
+ return pim_mroute_msg_wrvifwhole(
+ pim->mroute_socket, ifp, (const char *)msg);
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
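
pim_mroute_msg.c is new but almost entirely moved code: pim_mroute_set() and the IPv4 IGMPMSG_* upcall demultiplexer leave pim_mroute.c so that file can be built per address family, while this translation unit stays IPv4-only for now. A minimal sketch of the resulting split, using the pim_mroute_msg() prototype added to pim_mroute.h (the real read loop also handles errors and message sizing):

/* Sketch: the generic read loop hands each raw upcall buffer to
 * pim_mroute_msg(), which performs all IGMPMSG_* decoding in this file. */
static void mroute_read_sketch(struct pim_instance *pim, char *buf, ssize_t rd,
			       ifindex_t ifindex)
{
	if (rd > 0)
		pim_mroute_msg(pim, buf, (size_t)rd, ifindex);
}
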
diff --git a/pimd/pim_msdp_packet.c b/pimd/pim_msdp_packet.c
index 03284ffa56..4adaca4e78 100644
--- a/pimd/pim_msdp_packet.c
+++ b/pimd/pim_msdp_packet.c
@@ -214,8 +214,6 @@ void pim_msdp_write(struct thread *thread)
return;
}
- sockopt_cork(mp->fd, 1);
-
/* Nonblocking write until TCP output buffer is full */
do {
int writenum;
@@ -280,8 +278,6 @@ void pim_msdp_write(struct thread *thread)
} while ((s = stream_fifo_head(mp->obuf)) != NULL);
pim_msdp_write_proceed_actions(mp);
- sockopt_cork(mp->fd, 0);
-
if (PIM_DEBUG_MSDP_INTERNAL) {
zlog_debug("MSDP peer %s pim_msdp_write wrote %d packets",
mp->key_str, work_cnt);
diff --git a/pimd/pim_msg.c b/pimd/pim_msg.c
index a0653e1a57..1eda51417f 100644
--- a/pimd/pim_msg.c
+++ b/pimd/pim_msg.c
@@ -38,10 +38,36 @@
#include "pim_jp_agg.h"
#include "pim_oil.h"
-void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
- uint8_t pim_msg_type, bool no_fwd)
+void pim_msg_build_header(pim_addr src, pim_addr dst, uint8_t *pim_msg,
+ size_t pim_msg_size, uint8_t pim_msg_type,
+ bool no_fwd)
{
struct pim_msg_header *header = (struct pim_msg_header *)pim_msg;
+ struct iovec iov[2], *iovp = iov;
+
+ /*
+ * The checksum for Registers is done only on the first 8 bytes of the
+ * packet, including the PIM header and the next 4 bytes, excluding the
+ * data packet portion
+ *
+ * for IPv6, the pseudoheader upper-level protocol length is also
+ * truncated, so let's just set it here before everything else.
+ */
+ if (pim_msg_type == PIM_MSG_TYPE_REGISTER)
+ pim_msg_size = PIM_MSG_REGISTER_LEN;
+
+#if PIM_IPV == 6
+ struct ipv6_ph phdr = {
+ .src = src,
+ .dst = dst,
+ .ulpl = htonl(pim_msg_size),
+ .next_hdr = IPPROTO_PIM,
+ };
+
+ iovp->iov_base = &phdr;
+ iovp->iov_len = sizeof(phdr);
+ iovp++;
+#endif
/*
* Write header
@@ -51,18 +77,12 @@ void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
header->Nbit = no_fwd;
header->reserved = 0;
-
header->checksum = 0;
- /*
- * The checksum for Registers is done only on the first 8 bytes of the
- * packet,
- * including the PIM header and the next 4 bytes, excluding the data
- * packet portion
- */
- if (pim_msg_type == PIM_MSG_TYPE_REGISTER)
- header->checksum = in_cksum(pim_msg, PIM_MSG_REGISTER_LEN);
- else
- header->checksum = in_cksum(pim_msg, pim_msg_size);
+ iovp->iov_base = header;
+ iovp->iov_len = pim_msg_size;
+ iovp++;
+
+ header->checksum = in_cksumv(iov, iovp - iov);
}
uint8_t *pim_msg_addr_encode_ipv4_ucast(uint8_t *buf, struct in_addr addr)
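
The pim_msg.c change above replaces the two in_cksum() calls with a single vectored checksum: the covered length is clamped to PIM_MSG_REGISTER_LEN for Registers first, and in the IPv6 build an upper-layer pseudo-header is prepended as an extra iovec entry so the message buffer itself is never modified. A sketch of the IPv6 case, assuming struct ipv6_ph and in_cksumv() behave exactly as they are used in the hunk:

/* Sketch of the IPv6 checksum layout (PIM_IPV == 6 build assumed). */
static uint16_t pim6_cksum_sketch(pim_addr src, pim_addr dst, uint8_t *pim_msg,
				  size_t pim_msg_size)
{
	struct ipv6_ph phdr = {
		.src = src,
		.dst = dst,
		.ulpl = htonl(pim_msg_size),
		.next_hdr = IPPROTO_PIM,
	};
	struct iovec iov[2] = {
		{ .iov_base = &phdr, .iov_len = sizeof(phdr) },
		{ .iov_base = pim_msg, .iov_len = pim_msg_size },
	};

	return in_cksumv(iov, 2);
}
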
diff --git a/pimd/pim_msg.h b/pimd/pim_msg.h
index 522e94504a..733210af3a 100644
--- a/pimd/pim_msg.h
+++ b/pimd/pim_msg.h
@@ -21,6 +21,9 @@
#define PIM_MSG_H
#include <netinet/in.h>
+#if PIM_IPV == 6
+#include <netinet/ip6.h>
+#endif
#include "pim_jp_agg.h"
@@ -158,10 +161,18 @@ struct pim_encoded_source_ipv6 {
typedef struct pim_encoded_ipv4_unicast pim_encoded_unicast;
typedef struct pim_encoded_group_ipv4 pim_encoded_group;
typedef struct pim_encoded_source_ipv4 pim_encoded_source;
+typedef struct ip ipv_hdr;
+#define IPV_SRC(ip_hdr) ((ip_hdr))->ip_src
+#define IPV_DST(ip_hdr) ((ip_hdr))->ip_dst
+#define IPV_LEN(ip_hdr) ((ip_hdr))->ip_len
#else
typedef struct pim_encoded_ipv6_unicast pim_encoded_unicast;
typedef struct pim_encoded_group_ipv6 pim_encoded_group;
typedef struct pim_encoded_source_ipv6 pim_encoded_source;
+typedef struct ip6_hdr ipv_hdr;
+#define IPV_SRC(ip_hdr) ((ip_hdr))->ip6_src
+#define IPV_DST(ip_hdr) ((ip_hdr))->ip6_dst
+#define IPV_LEN(ip_hdr) ((ip_hdr))->ip6_plen
#endif
/* clang-format on */
@@ -181,8 +192,33 @@ struct pim_jp {
struct pim_jp_groups groups[1];
} __attribute__((packed));
-void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
- uint8_t pim_msg_type, bool no_fwd);
+#if PIM_IPV == 4
+static inline pim_sgaddr pim_sgaddr_from_iphdr(const void *iphdr)
+{
+ const struct ip *ipv4_hdr = iphdr;
+ pim_sgaddr sg;
+
+ sg.src = ipv4_hdr->ip_src;
+ sg.grp = ipv4_hdr->ip_dst;
+
+ return sg;
+}
+#else
+static inline pim_sgaddr pim_sgaddr_from_iphdr(const void *iphdr)
+{
+ const struct ip6_hdr *ipv6_hdr = iphdr;
+ pim_sgaddr sg;
+
+ sg.src = ipv6_hdr->ip6_src;
+ sg.grp = ipv6_hdr->ip6_dst;
+
+ return sg;
+}
+#endif
+
+void pim_msg_build_header(pim_addr src, pim_addr dst, uint8_t *pim_msg,
+ size_t pim_msg_size, uint8_t pim_msg_type,
+ bool no_fwd);
uint8_t *pim_msg_addr_encode_ipv4_ucast(uint8_t *buf, struct in_addr addr);
uint8_t *pim_msg_addr_encode_ipv4_group(uint8_t *buf, struct in_addr addr);
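
pim_msg.h above adds the ipv_hdr typedef with the IPV_SRC/IPV_DST/IPV_LEN accessors (used by the WHOLEPKT and WRVIFWHOLE handlers earlier in the patch) and the inline pim_sgaddr_from_iphdr() helper (used by pim_pim.c further down), giving one spelling for reading a raw IP header in either family. Tiny usage sketch, illustrative only:

/* Sketch: extract the (S,G) key from a kernel upcall buffer with no
 * per-family #ifdef in the caller. */
static pim_sgaddr upcall_sg_sketch(const char *buf)
{
	const ipv_hdr *ip_hdr = (const ipv_hdr *)buf;

	return pim_sgaddr_from_iphdr(ip_hdr);
}
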
diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h
index 72c96d7d73..273c7e8a61 100644
--- a/pimd/pim_nb.h
+++ b/pimd/pim_nb.h
@@ -198,6 +198,12 @@ int lib_interface_gmp_address_family_static_group_destroy(
int routing_control_plane_protocols_name_validate(
struct nb_cb_create_args *args);
+#if PIM_IPV == 4
+#define FRR_PIM_AF_XPATH_VAL "frr-routing:ipv4"
+#else
+#define FRR_PIM_AF_XPATH_VAL "frr-routing:ipv6"
+#endif
+
#define FRR_PIM_VRF_XPATH \
"/frr-routing:routing/control-plane-protocols/" \
"control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 27cac0c1a7..7fe7c0395f 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -34,6 +34,7 @@
#include "pim_util.h"
#include "log.h"
#include "lib_errors.h"
+#include "pim_util.h"
#if PIM_IPV == 6
#define pim6_msdp_err(funcname, argtype) \
@@ -256,21 +257,17 @@ static int pim_ssm_cmd_worker(struct pim_instance *pim, const char *plist,
return ret;
}
-static int pim_rp_cmd_worker(struct pim_instance *pim,
- struct in_addr rp_addr,
- struct prefix group, const char *plist,
- char *errmsg, size_t errmsg_len)
+static int pim_rp_cmd_worker(struct pim_instance *pim, pim_addr rp_addr,
+ struct prefix group, const char *plist,
+ char *errmsg, size_t errmsg_len)
{
- char rp_str[INET_ADDRSTRLEN];
int result;
- inet_ntop(AF_INET, &rp_addr, rp_str, sizeof(rp_str));
-
result = pim_rp_new(pim, rp_addr, group, plist, RP_SRC_STATIC);
if (result == PIM_RP_NO_PATH) {
- snprintf(errmsg, errmsg_len,
- "No Path to RP address specified: %s", rp_str);
+ snprintfrr(errmsg, errmsg_len,
+ "No Path to RP address specified: %pPA", &rp_addr);
return NB_ERR_INCONSISTENCY;
}
@@ -295,16 +292,13 @@ static int pim_rp_cmd_worker(struct pim_instance *pim,
return NB_OK;
}
-static int pim_no_rp_cmd_worker(struct pim_instance *pim,
- struct in_addr rp_addr, struct prefix group,
- const char *plist,
+static int pim_no_rp_cmd_worker(struct pim_instance *pim, pim_addr rp_addr,
+ struct prefix group, const char *plist,
char *errmsg, size_t errmsg_len)
{
- char rp_str[INET_ADDRSTRLEN];
char group_str[PREFIX2STR_BUFFER];
int result;
- inet_ntop(AF_INET, &rp_addr, rp_str, sizeof(rp_str));
prefix2str(&group, group_str, sizeof(group_str));
result = pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
@@ -316,8 +310,8 @@ static int pim_no_rp_cmd_worker(struct pim_instance *pim,
}
if (result == PIM_RP_BAD_ADDRESS) {
- snprintf(errmsg, errmsg_len,
- "Bad RP address specified: %s", rp_str);
+ snprintfrr(errmsg, errmsg_len, "Bad RP address specified: %pPA",
+ &rp_addr);
return NB_ERR_INCONSISTENCY;
}
@@ -354,6 +348,7 @@ static bool is_pim_interface(const struct lyd_node *dnode)
return false;
}
+#if PIM_IPV == 4
static int pim_cmd_igmp_start(struct interface *ifp)
{
struct pim_interface *pim_ifp;
@@ -382,12 +377,14 @@ static int pim_cmd_igmp_start(struct interface *ifp)
return NB_OK;
}
+#endif /* PIM_IPV == 4 */
/*
* CLI reconfiguration affects the interface level (struct pim_interface).
* This function propagates the reconfiguration to every active socket
* for that interface.
*/
+#if PIM_IPV == 4
static void igmp_sock_query_interval_reconfig(struct gm_sock *igmp)
{
struct interface *ifp;
@@ -412,6 +409,7 @@ static void igmp_sock_query_interval_reconfig(struct gm_sock *igmp)
*/
igmp_startup_mode_on(igmp);
}
+#endif
static void igmp_sock_query_reschedule(struct gm_sock *igmp)
{
@@ -442,6 +440,7 @@ static void igmp_sock_query_reschedule(struct gm_sock *igmp)
}
}
+#if PIM_IPV == 4
static void change_query_interval(struct pim_interface *pim_ifp,
int query_interval)
{
@@ -455,6 +454,7 @@ static void change_query_interval(struct pim_interface *pim_ifp,
igmp_sock_query_reschedule(igmp);
}
}
+#endif
static void change_query_max_response_time(struct pim_interface *pim_ifp,
int query_max_response_time_dsec)
@@ -936,7 +936,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ss
struct vrf *vrf;
struct pim_instance *pim;
int result;
- struct ipaddr source_addr;
+ pim_addr source_addr;
switch (args->event) {
case NB_EV_VALIDATE:
@@ -946,16 +946,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ss
case NB_EV_APPLY:
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
- yang_dnode_get_ip(&source_addr, args->dnode, NULL);
- result = pim_ssmpingd_start(pim, source_addr.ip._v4_addr);
+ yang_dnode_get_pimaddr(&source_addr, args->dnode,
+ "./source-addr");
+ result = pim_ssmpingd_start(pim, source_addr);
if (result) {
- char source_str[INET_ADDRSTRLEN];
-
- ipaddr2str(&source_addr, source_str,
- sizeof(source_str));
- snprintf(args->errmsg, args->errmsg_len,
- "%% Failure starting ssmpingd for source %s: %d",
- source_str, result);
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "%% Failure starting ssmpingd for source %pPA: %d",
+ &source_addr, result);
return NB_ERR_INCONSISTENCY;
}
}
@@ -969,7 +967,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ss
struct vrf *vrf;
struct pim_instance *pim;
int result;
- struct ipaddr source_addr;
+ pim_addr source_addr;
switch (args->event) {
case NB_EV_VALIDATE:
@@ -979,16 +977,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ss
case NB_EV_APPLY:
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
- yang_dnode_get_ip(&source_addr, args->dnode, NULL);
- result = pim_ssmpingd_stop(pim, source_addr.ip._v4_addr);
+ yang_dnode_get_pimaddr(&source_addr, args->dnode,
+ "./source-addr");
+ result = pim_ssmpingd_stop(pim, source_addr);
if (result) {
- char source_str[INET_ADDRSTRLEN];
-
- ipaddr2str(&source_addr, source_str,
- sizeof(source_str));
- snprintf(args->errmsg, args->errmsg_len,
- "%% Failure stopping ssmpingd for source %s: %d",
- source_str, result);
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "%% Failure stopping ssmpingd for source %pPA: %d",
+ &source_addr, result);
return NB_ERR_INCONSISTENCY;
}
@@ -2340,7 +2336,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
struct vrf *vrf;
struct pim_instance *pim;
struct prefix group;
- struct ipaddr rp_addr;
+ pim_addr rp_addr;
const char *plist;
int result = 0;
@@ -2352,31 +2348,30 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
case NB_EV_APPLY:
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
- yang_dnode_get_ip(&rp_addr, args->dnode, "./rp-address");
+ yang_dnode_get_pimaddr(&rp_addr, args->dnode, "./rp-address");
if (yang_dnode_get(args->dnode, "./group-list")) {
- yang_dnode_get_ipv4p(&group, args->dnode,
- "./group-list");
- apply_mask_ipv4((struct prefix_ipv4 *)&group);
- result = pim_no_rp_cmd_worker(pim, rp_addr.ip._v4_addr,
- group, NULL, args->errmsg,
- args->errmsg_len);
+ yang_dnode_get_prefix(&group, args->dnode,
+ "./group-list");
+ apply_mask(&group);
+ result = pim_no_rp_cmd_worker(pim, rp_addr, group, NULL,
+ args->errmsg,
+ args->errmsg_len);
}
else if (yang_dnode_get(args->dnode, "./prefix-list")) {
plist = yang_dnode_get_string(args->dnode,
"./prefix-list");
- if (!str2prefix("224.0.0.0/4", &group)) {
+ if (!pim_get_all_mcast_group(&group)) {
flog_err(
EC_LIB_DEVELOPMENT,
"Unable to convert 224.0.0.0/4 to prefix");
return NB_ERR_INCONSISTENCY;
}
- result = pim_no_rp_cmd_worker(pim, rp_addr.ip._v4_addr,
- group, plist,
- args->errmsg,
- args->errmsg_len);
+ result = pim_no_rp_cmd_worker(pim, rp_addr, group,
+ plist, args->errmsg,
+ args->errmsg_len);
}
if (result)
@@ -2396,7 +2391,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
struct vrf *vrf;
struct pim_instance *pim;
struct prefix group;
- struct ipaddr rp_addr;
+ pim_addr rp_addr;
switch (args->event) {
case NB_EV_VALIDATE:
@@ -2406,12 +2401,11 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
case NB_EV_APPLY:
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
- yang_dnode_get_ip(&rp_addr, args->dnode, "../rp-address");
- yang_dnode_get_ipv4p(&group, args->dnode, NULL);
- apply_mask_ipv4((struct prefix_ipv4 *)&group);
-
- return pim_rp_cmd_worker(pim, rp_addr.ip._v4_addr, group,
- NULL, args->errmsg, args->errmsg_len);
+ yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
+ yang_dnode_get_prefix(&group, args->dnode, NULL);
+ apply_mask(&group);
+ return pim_rp_cmd_worker(pim, rp_addr, group, NULL,
+ args->errmsg, args->errmsg_len);
}
return NB_OK;
@@ -2423,7 +2417,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
struct vrf *vrf;
struct pim_instance *pim;
struct prefix group;
- struct ipaddr rp_addr;
+ pim_addr rp_addr;
switch (args->event) {
case NB_EV_VALIDATE:
@@ -2433,13 +2427,12 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
case NB_EV_APPLY:
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
- yang_dnode_get_ip(&rp_addr, args->dnode, "../rp-address");
- yang_dnode_get_ipv4p(&group, args->dnode, NULL);
- apply_mask_ipv4((struct prefix_ipv4 *)&group);
+ yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
+ yang_dnode_get_prefix(&group, args->dnode, NULL);
+ apply_mask(&group);
- return pim_no_rp_cmd_worker(pim, rp_addr.ip._v4_addr, group,
- NULL, args->errmsg,
- args->errmsg_len);
+ return pim_no_rp_cmd_worker(pim, rp_addr, group, NULL,
+ args->errmsg, args->errmsg_len);
}
return NB_OK;
@@ -2454,7 +2447,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
struct vrf *vrf;
struct pim_instance *pim;
struct prefix group;
- struct ipaddr rp_addr;
+ pim_addr rp_addr;
const char *plist;
switch (args->event) {
@@ -2466,14 +2459,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
plist = yang_dnode_get_string(args->dnode, NULL);
- yang_dnode_get_ip(&rp_addr, args->dnode, "../rp-address");
- if (!str2prefix("224.0.0.0/4", &group)) {
+ yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
+ if (!pim_get_all_mcast_group(&group)) {
flog_err(EC_LIB_DEVELOPMENT,
"Unable to convert 224.0.0.0/4 to prefix");
return NB_ERR_INCONSISTENCY;
}
- return pim_rp_cmd_worker(pim, rp_addr.ip._v4_addr, group,
- plist, args->errmsg, args->errmsg_len);
+ return pim_rp_cmd_worker(pim, rp_addr, group, plist,
+ args->errmsg, args->errmsg_len);
}
return NB_OK;
@@ -2485,7 +2478,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
struct vrf *vrf;
struct pim_instance *pim;
struct prefix group;
- struct ipaddr rp_addr;
+ pim_addr rp_addr;
const char *plist;
switch (args->event) {
@@ -2496,16 +2489,15 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
case NB_EV_APPLY:
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
- yang_dnode_get_ip(&rp_addr, args->dnode, "../rp-address");
+ yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
plist = yang_dnode_get_string(args->dnode, NULL);
- if (!str2prefix("224.0.0.0/4", &group)) {
+ if (!pim_get_all_mcast_group(&group)) {
flog_err(EC_LIB_DEVELOPMENT,
"Unable to convert 224.0.0.0/4 to prefix");
return NB_ERR_INCONSISTENCY;
}
- return pim_no_rp_cmd_worker(pim, rp_addr.ip._v4_addr, group,
- plist, args->errmsg,
- args->errmsg_len);
+ return pim_no_rp_cmd_worker(pim, rp_addr, group, plist,
+ args->errmsg, args->errmsg_len);
break;
}
@@ -2564,6 +2556,7 @@ int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args)
int lib_interface_gmp_address_family_enable_modify(
struct nb_cb_modify_args *args)
{
+#if PIM_IPV == 4
struct interface *ifp;
bool igmp_enable;
struct pim_interface *pim_ifp;
@@ -2611,7 +2604,9 @@ int lib_interface_gmp_address_family_enable_modify(
pim_if_delete(ifp);
}
}
-
+#else
+ /* TBD Depends on MLD data structure changes */
+#endif /* PIM_IPV == 4 */
return NB_OK;
}
@@ -2685,6 +2680,7 @@ int lib_interface_gmp_address_family_mld_version_modify(
case NB_EV_PREPARE:
case NB_EV_ABORT:
case NB_EV_APPLY:
+ /* TBD depends on MLD data structure changes */
break;
}
@@ -2711,6 +2707,7 @@ int lib_interface_gmp_address_family_mld_version_destroy(
int lib_interface_gmp_address_family_query_interval_modify(
struct nb_cb_modify_args *args)
{
+#if PIM_IPV == 4
struct interface *ifp;
int query_interval;
@@ -2724,7 +2721,9 @@ int lib_interface_gmp_address_family_query_interval_modify(
query_interval = yang_dnode_get_uint16(args->dnode, NULL);
change_query_interval(ifp->info, query_interval);
}
-
+#else
+ /* TBD Depends on MLD data structure changes */
+#endif
return NB_OK;
}
@@ -2816,6 +2815,7 @@ int lib_interface_gmp_address_family_robustness_variable_modify(
int lib_interface_gmp_address_family_static_group_create(
struct nb_cb_create_args *args)
{
+#if PIM_IPV == 4
struct interface *ifp;
struct ipaddr source_addr;
struct ipaddr group_addr;
@@ -2858,7 +2858,9 @@ int lib_interface_gmp_address_family_static_group_create(
return NB_ERR_INCONSISTENCY;
}
}
-
+#else
+ /* TBD Depends on MLD data structure changes */
+#endif /* PIM_IPV == 4 */
return NB_OK;
}
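
The pim_nb_config.c hunks above convert the RP and ssmpingd northbound workers from struct ipaddr/struct in_addr to pim_addr, format errors with %pPA via snprintfrr(), and replace the hard-coded str2prefix("224.0.0.0/4") with pim_get_all_mcast_group(), which fills in the whole multicast range for the family the daemon is built for (presumably ff00::/8 in the IPv6 build). A condensed sketch of the prefix-list flavour of the call, using only names from these hunks:

/* Sketch only: the same worker now serves both families because the group
 * prefix comes from pim_get_all_mcast_group() rather than a v4 literal. */
static int rp_plist_sketch(struct pim_instance *pim, pim_addr rp_addr,
			   const char *plist, char *errmsg, size_t errmsg_len)
{
	struct prefix group;

	if (!pim_get_all_mcast_group(&group))
		return NB_ERR_INCONSISTENCY;

	return pim_rp_cmd_worker(pim, rp_addr, group, plist, errmsg,
				 errmsg_len);
}
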
diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c
index 48dd565b25..94dcfb8265 100644
--- a/pimd/pim_nht.c
+++ b/pimd/pim_nht.c
@@ -55,7 +55,7 @@ void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
int ret;
p = &(pnc->rpf.rpf_addr);
- ret = zclient_send_rnh(zclient, command, p, false, false,
+ ret = zclient_send_rnh(zclient, command, p, SAFI_UNICAST, false, false,
pim->vrf->vrf_id);
if (ret == ZCLIENT_SEND_FAILURE)
zlog_warn("sendmsg_nexthop: zclient_send_message() failed");
@@ -121,7 +121,7 @@ static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
pnc = pim_nexthop_cache_add(pim, &rpf);
pim_sendmsg_zebra_rnh(pim, zclient, pnc,
ZEBRA_NEXTHOP_REGISTER);
- if (PIM_DEBUG_PIM_NHT)
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug(
"%s: NHT cache and zebra notification added for %pFX(%s)",
__func__, addr, pim->vrf->name);
@@ -162,6 +162,7 @@ int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
return 0;
}
+#if PIM_IPV == 4
void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr addr)
{
struct pim_nexthop_cache *pnc;
@@ -175,6 +176,7 @@ void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr addr)
pnc->bsr_count++;
}
+#endif /* PIM_IPV == 4 */
static void pim_nht_drop_maybe(struct pim_instance *pim,
struct pim_nexthop_cache *pnc)
@@ -244,6 +246,7 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
pim_nht_drop_maybe(pim, pnc);
}
+#if PIM_IPV == 4
void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr)
{
struct pim_nexthop_cache *pnc = NULL;
@@ -276,7 +279,7 @@ void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr addr)
}
bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
- struct interface *src_ifp, struct in_addr src_ip)
+ struct interface *src_ifp, pim_addr src_ip)
{
struct pim_nexthop_cache *pnc = NULL;
struct pim_nexthop_cache lookup;
@@ -398,6 +401,7 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
}
return false;
}
+#endif /* PIM_IPV == 4 */
void pim_rp_nexthop_del(struct rp_info *rp_info)
{
@@ -482,23 +486,13 @@ static int pim_update_upstream_nh(struct pim_instance *pim,
uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
{
uint32_t hash_val;
- uint32_t s = 0, g = 0;
- if ((!src))
+ if (!src)
return 0;
- switch (src->family) {
- case AF_INET: {
- s = src->u.prefix4.s_addr;
- s = s == 0 ? 1 : s;
- if (grp)
- g = grp->u.prefix4.s_addr;
- } break;
- default:
- break;
- }
-
- hash_val = jhash_2words(g, s, 101);
+ hash_val = prefix_hash_key(src);
+ if (grp)
+ hash_val ^= prefix_hash_key(grp);
return hash_val;
}
@@ -549,9 +543,9 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
break;
}
- if (curr_route_valid
- && !pim_if_connected_to_source(nexthop->interface,
- src->u.prefix4)) {
+ if (curr_route_valid &&
+ !pim_if_connected_to_source(nexthop->interface,
+ src_addr)) {
nbr = pim_neighbor_find_prefix(
nexthop->interface,
&nexthop->mrib_nexthop_addr);
@@ -668,7 +662,7 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
nh_node->gate.ipv4;
#else
nexthop->mrib_nexthop_addr.u.prefix6 =
- nh_node->gate->ipv6;
+ nh_node->gate.ipv6;
#endif
nexthop->mrib_metric_preference = pnc->distance;
nexthop->mrib_route_metric = pnc->metric;
@@ -708,19 +702,20 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)
struct vrf *vrf = vrf_lookup_by_id(vrf_id);
struct pim_instance *pim;
struct zapi_route nhr;
+ struct prefix match;
if (!vrf)
return 0;
pim = vrf->info;
- if (!zapi_nexthop_update_decode(zclient->ibuf, &nhr)) {
+ if (!zapi_nexthop_update_decode(zclient->ibuf, &match, &nhr)) {
zlog_err("%s: Decode of nexthop update from zebra failed",
__func__);
return 0;
}
if (cmd == ZEBRA_NEXTHOP_UPDATE) {
- prefix_copy(&rpf.rpf_addr, &nhr.prefix);
+ prefix_copy(&rpf.rpf_addr, &match);
pnc = pim_nexthop_cache_find(pim, &rpf);
if (!pnc) {
if (PIM_DEBUG_PIM_NHT)
@@ -812,9 +807,9 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s: NHT addr %pFX(%s) %d-nhop via %pI4(%s) type %d distance:%u metric:%u ",
- __func__, &nhr.prefix, pim->vrf->name,
- i + 1, &nexthop->gate.ipv4,
- ifp->name, nexthop->type, nhr.distance,
+ __func__, &match, pim->vrf->name, i + 1,
+ &nexthop->gate.ipv4, ifp->name,
+ nexthop->type, nhr.distance,
nhr.metric);
if (!ifp->info) {
@@ -868,7 +863,7 @@ int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS)
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s: NHT Update for %pFX(%s) num_nh %d num_pim_nh %d vrf:%u up %ld rp %d",
- __func__, &nhr.prefix, pim->vrf->name, nhr.nexthop_num,
+ __func__, &match, pim->vrf->name, nhr.nexthop_num,
pnc->nexthop_num, vrf_id, pnc->upstream_hash->count,
listcount(pnc->rp_list));
@@ -899,7 +894,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
uint32_t num_nbrs = 0;
pim_addr src_addr = pim_addr_from_prefix(src);
- if (PIM_DEBUG_PIM_NHT)
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld",
__func__, &src_addr, pim->vrf->name,
nexthop->last_lookup_time);
@@ -998,12 +993,14 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
if (!nbr && !if_is_loopback(ifp)) {
if (i == mod_val)
mod_val++;
- i++;
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
- "%s: NBR not found on input interface %s(%s) (RPF for source %pPA)",
- __func__, ifp->name,
- pim->vrf->name, &src_addr);
+ "%s: NBR (%pFXh) not found on input interface %s(%s) (RPF for source %pPA)",
+ __func__,
+ &nexthop_tab[i].nexthop_addr,
+ ifp->name, pim->vrf->name,
+ &src_addr);
+ i++;
continue;
}
}
@@ -1052,7 +1049,7 @@ int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
ifindex_t ifindex;
pim_addr src_addr;
- if (PIM_DEBUG_PIM_NHT) {
+ if (PIM_DEBUG_PIM_NHT_DETAIL) {
src_addr = pim_addr_from_prefix(src);
}
diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h
index 568c2eb232..d51f622ece 100644
--- a/pimd/pim_nht.h
+++ b/pimd/pim_nht.h
@@ -76,6 +76,6 @@ void pim_nht_bsr_add(struct pim_instance *pim, struct in_addr bsr_addr);
void pim_nht_bsr_del(struct pim_instance *pim, struct in_addr bsr_addr);
/* RPF(bsr_addr) == src_ip%src_ifp? */
bool pim_nht_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr_addr,
- struct interface *src_ifp, struct in_addr src_ip);
+ struct interface *src_ifp, pim_addr src_ip);
#endif
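
In pim_nht.c above, the ECMP hash is no longer assembled from raw IPv4 words: prefix_hash_key() already mixes the family, prefix length and address bits, so the v4-only jhash_2words() construction goes away and IPv6 sources hash correctly. An equivalent sketch of the new computation:

/* Sketch of the family-independent ECMP hash used above. */
static uint32_t ecmp_hash_sketch(struct prefix *src, struct prefix *grp)
{
	uint32_t hash_val;

	if (!src)
		return 0;

	hash_val = prefix_hash_key(src);
	if (grp)
		hash_val ^= prefix_hash_key(grp);

	return hash_val;
}
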
diff --git a/pimd/pim_oil.c b/pimd/pim_oil.c
index a499c884b4..5b5cc2c103 100644
--- a/pimd/pim_oil.c
+++ b/pimd/pim_oil.c
@@ -216,6 +216,10 @@ int pim_channel_del_oif(struct channel_oil *channel_oil, struct interface *oif,
pim_ifp = oif->info;
+ assertf(pim_ifp->mroute_vif_index >= 0,
+ "trying to del OIF %s with VIF (%d)", oif->name,
+ pim_ifp->mroute_vif_index);
+
/*
* Don't do anything if we've been asked to remove a source
* that is not actually on it.
@@ -418,6 +422,10 @@ int pim_channel_add_oif(struct channel_oil *channel_oil, struct interface *oif,
pim_ifp = oif->info;
+ assertf(pim_ifp->mroute_vif_index >= 0,
+ "trying to add OIF %s with VIF (%d)", oif->name,
+ pim_ifp->mroute_vif_index);
+
/* Prevent single protocol from subscribing same interface to
channel (S,G) multiple times */
if (channel_oil->oif_flags[pim_ifp->mroute_vif_index] & proto_mask) {
@@ -536,20 +544,27 @@ int pim_channel_add_oif(struct channel_oil *channel_oil, struct interface *oif,
int pim_channel_oil_empty(struct channel_oil *c_oil)
{
+#if PIM_IPV == 4
+ static struct mfcctl null_oil;
+#else
+ static struct mf6cctl null_oil;
+#endif
+
if (!c_oil)
return 1;
+
/* exclude pimreg from the OIL when checking if the inherited_oil is
* non-NULL.
* pimreg device (in all vrfs) uses a vifi of
 * 0 (PIM_OIF_PIM_REGISTER_VIF) so we simply ignore mfcc_ttls[0] */
+ if (oil_if_has(c_oil, 0)) {
#if PIM_IPV == 4
- static pim_mfcctl null_oil;
-
- return !memcmp(&c_oil->oil.mfcc_ttls[1], &null_oil.mfcc_ttls[1],
- sizeof(null_oil.mfcc_ttls) - sizeof(null_oil.mfcc_ttls[0]));
+ null_oil.mfcc_ttls[0] = 1;
#else
- CPP_NOTICE("FIXME STUB");
- return false;
+ IF_SET(0, &null_oil.mf6cc_ifset);
#endif
+ }
+
+ return !oil_if_cmp(&c_oil->oil, &null_oil);
}
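The rewritten pim_channel_oil_empty() answers "does this OIL contain anything besides the pimreg interface?" by memcmp-ing the whole OIL against a static null template, after marking slot 0 (PIM_OIF_PIM_REGISTER_VIF) in the template whenever the channel has it set, so the register vif alone never makes the OIL look non-empty. A simplified, self-contained model of the same check over a plain ttl array instead of struct mfcctl / mf6cctl; it mirrors the channel's actual slot-0 value, whereas the code above writes the constant 1 (or sets the ifset bit) that pimd uses for the register vif:

#include <stdbool.h>
#include <string.h>

#define SKETCH_MAXVIFS 32

/* simplified outgoing interface list: one ttl per vif, 0 = not a member */
struct oil_sketch {
	unsigned char ttls[SKETCH_MAXVIFS];
};

/* true if no interface other than the register vif (slot 0) is present */
static bool oil_empty_except_pimreg(const struct oil_sketch *oil)
{
	struct oil_sketch ref = { 0 };	/* all-zero template */

	/* mirror slot 0 so the register vif never counts as "non-empty" */
	ref.ttls[0] = oil->ttls[0];

	return memcmp(oil->ttls, ref.ttls, sizeof(oil->ttls)) == 0;
}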
diff --git a/pimd/pim_oil.h b/pimd/pim_oil.h
index a52e23351e..68b5ef474e 100644
--- a/pimd/pim_oil.h
+++ b/pimd/pim_oil.h
@@ -98,7 +98,11 @@ struct channel_oil {
struct rb_pim_oil_item oil_rb;
- pim_mfcctl oil;
+#if PIM_IPV == 4
+ struct mfcctl oil;
+#else
+ struct mf6cctl oil;
+#endif
int installed;
int oil_inherited_rescan;
int oil_size;
@@ -135,6 +139,12 @@ static inline void oil_if_set(struct channel_oil *c_oil, vifi_t ifi, uint8_t set
{
c_oil->oil.mfcc_ttls[ifi] = set;
}
+
+static inline int oil_if_cmp(struct mfcctl *oil1, struct mfcctl *oil2)
+{
+ return memcmp(&oil1->mfcc_ttls[0], &oil2->mfcc_ttls[0],
+ sizeof(oil1->mfcc_ttls));
+}
#else
static inline pim_addr *oil_origin(struct channel_oil *c_oil)
{
@@ -163,6 +173,12 @@ static inline void oil_if_set(struct channel_oil *c_oil, mifi_t ifi, bool set)
else
IF_CLR(ifi, &c_oil->oil.mf6cc_ifset);
}
+
+static inline int oil_if_cmp(struct mf6cctl *oil1, struct mf6cctl *oil2)
+{
+ return memcmp(&oil1->mf6cc_ifset, &oil2->mf6cc_ifset,
+ sizeof(oil1->mf6cc_ifset));
+}
#endif
extern int pim_channel_oil_compare(const struct channel_oil *c1,
diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c
index 5cc0d63e31..50bbc0fe18 100644
--- a/pimd/pim_pim.c
+++ b/pimd/pim_pim.c
@@ -138,31 +138,34 @@ void pim_sock_delete(struct interface *ifp, const char *delete_message)
}
/* For now check that the dst address for hello, assert and join/prune is the all-pim-routers address */
-static bool pim_pkt_dst_addr_ok(enum pim_msg_type type, in_addr_t addr)
+static bool pim_pkt_dst_addr_ok(enum pim_msg_type type, pim_addr addr)
{
if ((type == PIM_MSG_TYPE_HELLO) || (type == PIM_MSG_TYPE_ASSERT)
|| (type == PIM_MSG_TYPE_JOIN_PRUNE)) {
- if (addr != qpim_all_pim_routers_addr.s_addr)
+ if (pim_addr_cmp(addr, qpim_all_pim_routers_addr))
return false;
}
return true;
}
-int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
+int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,
+ pim_sgaddr sg)
{
- struct ip *ip_hdr;
+ struct iovec iov[2], *iovp = iov;
+#if PIM_IPV == 4
+ struct ip *ip_hdr = (struct ip *)buf;
size_t ip_hlen; /* ip header length in bytes */
- char src_str[INET_ADDRSTRLEN];
- char dst_str[INET_ADDRSTRLEN];
+#endif
uint8_t *pim_msg;
- int pim_msg_len;
+ uint32_t pim_msg_len = 0;
uint16_t pim_checksum; /* received checksum */
uint16_t checksum; /* computed checksum */
struct pim_neighbor *neigh;
struct pim_msg_header *header;
bool no_fwd;
+#if PIM_IPV == 4
if (len < sizeof(*ip_hdr)) {
if (PIM_DEBUG_PIM_PACKETS)
zlog_debug(
@@ -171,11 +174,31 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
return -1;
}
- ip_hdr = (struct ip *)buf;
ip_hlen = ip_hdr->ip_hl << 2; /* ip_hl gives length in 4-byte words */
+ sg = pim_sgaddr_from_iphdr(ip_hdr);
pim_msg = buf + ip_hlen;
pim_msg_len = len - ip_hlen;
+#else
+ struct ipv6_ph phdr = {
+ .src = sg.src,
+ .dst = sg.grp,
+ .ulpl = htonl(len),
+ .next_hdr = IPPROTO_PIM,
+ };
+
+ iovp->iov_base = &phdr;
+ iovp->iov_len = sizeof(phdr);
+ iovp++;
+
+ /* NB: header is not included in IPv6 RX */
+ pim_msg = buf;
+ pim_msg_len = len;
+#endif
+
+ iovp->iov_base = pim_msg;
+ iovp->iov_len = pim_msg_len;
+ iovp++;
header = (struct pim_msg_header *)pim_msg;
if (pim_msg_len < PIM_PIM_MIN_LEN) {
@@ -208,10 +231,21 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
pim_msg_len, PIM_MSG_REGISTER_LEN);
return -1;
}
+
+#if PIM_IPV == 6
+ phdr.ulpl = htonl(PIM_MSG_REGISTER_LEN);
+#endif
/* First 8 byte header checksum */
- checksum = in_cksum(pim_msg, PIM_MSG_REGISTER_LEN);
+ iovp[-1].iov_len = PIM_MSG_REGISTER_LEN;
+ checksum = in_cksumv(iov, iovp - iov);
+
if (checksum != pim_checksum) {
- checksum = in_cksum(pim_msg, pim_msg_len);
+#if PIM_IPV == 6
+ phdr.ulpl = htonl(pim_msg_len);
+#endif
+ iovp[-1].iov_len = pim_msg_len;
+
+ checksum = in_cksumv(iov, iovp - iov);
if (checksum != pim_checksum) {
if (PIM_DEBUG_PIM_PACKETS)
zlog_debug(
@@ -223,7 +257,7 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
}
}
} else {
- checksum = in_cksum(pim_msg, pim_msg_len);
+ checksum = in_cksumv(iov, iovp - iov);
if (checksum != pim_checksum) {
if (PIM_DEBUG_PIM_PACKETS)
zlog_debug(
@@ -235,43 +269,29 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
}
if (PIM_DEBUG_PIM_PACKETS) {
- pim_inet4_dump("<src?>", ip_hdr->ip_src, src_str,
- sizeof(src_str));
- pim_inet4_dump("<dst?>", ip_hdr->ip_dst, dst_str,
- sizeof(dst_str));
zlog_debug(
- "Recv PIM %s packet from %s to %s on %s: ttl=%d pim_version=%d pim_msg_size=%d checksum=%x",
- pim_pim_msgtype2str(header->type), src_str, dst_str,
- ifp->name, ip_hdr->ip_ttl, header->ver, pim_msg_len,
- checksum);
- if (PIM_DEBUG_PIM_PACKETDUMP_RECV) {
+ "Recv PIM %s packet from %pPA to %pPA on %s: pim_version=%d pim_msg_size=%d checksum=%x",
+ pim_pim_msgtype2str(header->type), &sg.src, &sg.grp,
+ ifp->name, header->ver, pim_msg_len, checksum);
+ if (PIM_DEBUG_PIM_PACKETDUMP_RECV)
pim_pkt_dump(__func__, pim_msg, pim_msg_len);
- }
}
- if (!pim_pkt_dst_addr_ok(header->type, ip_hdr->ip_dst.s_addr)) {
- char dst_str[INET_ADDRSTRLEN];
- char src_str[INET_ADDRSTRLEN];
-
- pim_inet4_dump("<dst?>", ip_hdr->ip_dst, dst_str,
- sizeof(dst_str));
- pim_inet4_dump("<src?>", ip_hdr->ip_src, src_str,
- sizeof(src_str));
+ if (!pim_pkt_dst_addr_ok(header->type, sg.grp)) {
zlog_warn(
- "%s: Ignoring Pkt. Unexpected IP destination %s for %s (Expected: all_pim_routers_addr) from %s",
- __func__, dst_str, pim_pim_msgtype2str(header->type),
- src_str);
+ "%s: Ignoring Pkt. Unexpected IP destination %pPA for %s (Expected: all_pim_routers_addr) from %pPA",
+ __func__, &sg.grp, pim_pim_msgtype2str(header->type),
+ &sg.src);
return -1;
}
switch (header->type) {
case PIM_MSG_TYPE_HELLO:
- return pim_hello_recv(ifp, ip_hdr->ip_src,
- pim_msg + PIM_MSG_HEADER_LEN,
+ return pim_hello_recv(ifp, sg.src, pim_msg + PIM_MSG_HEADER_LEN,
pim_msg_len - PIM_MSG_HEADER_LEN);
break;
case PIM_MSG_TYPE_REGISTER:
- return pim_register_recv(ifp, ip_hdr->ip_dst, ip_hdr->ip_src,
+ return pim_register_recv(ifp, sg.grp, sg.src,
pim_msg + PIM_MSG_HEADER_LEN,
pim_msg_len - PIM_MSG_HEADER_LEN);
break;
@@ -280,38 +300,37 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
pim_msg_len - PIM_MSG_HEADER_LEN);
break;
case PIM_MSG_TYPE_JOIN_PRUNE:
- neigh = pim_neighbor_find(ifp, ip_hdr->ip_src);
+ neigh = pim_neighbor_find(ifp, sg.src);
if (!neigh) {
if (PIM_DEBUG_PIM_PACKETS)
zlog_debug(
- "%s %s: non-hello PIM message type=%d from non-neighbor %s on %s",
+ "%s %s: non-hello PIM message type=%d from non-neighbor %pPA on %s",
__FILE__, __func__, header->type,
- src_str, ifp->name);
+ &sg.src, ifp->name);
return -1;
}
pim_neighbor_timer_reset(neigh, neigh->holdtime);
- return pim_joinprune_recv(ifp, neigh, ip_hdr->ip_src,
+ return pim_joinprune_recv(ifp, neigh, sg.src,
pim_msg + PIM_MSG_HEADER_LEN,
pim_msg_len - PIM_MSG_HEADER_LEN);
break;
case PIM_MSG_TYPE_ASSERT:
- neigh = pim_neighbor_find(ifp, ip_hdr->ip_src);
+ neigh = pim_neighbor_find(ifp, sg.src);
if (!neigh) {
if (PIM_DEBUG_PIM_PACKETS)
zlog_debug(
- "%s %s: non-hello PIM message type=%d from non-neighbor %s on %s",
+ "%s %s: non-hello PIM message type=%d from non-neighbor %pPA on %s",
__FILE__, __func__, header->type,
- src_str, ifp->name);
+ &sg.src, ifp->name);
return -1;
}
pim_neighbor_timer_reset(neigh, neigh->holdtime);
- return pim_assert_recv(ifp, neigh, ip_hdr->ip_src,
+ return pim_assert_recv(ifp, neigh, sg.src,
pim_msg + PIM_MSG_HEADER_LEN,
pim_msg_len - PIM_MSG_HEADER_LEN);
break;
case PIM_MSG_TYPE_BOOTSTRAP:
- return pim_bsm_process(ifp, ip_hdr, pim_msg, pim_msg_len,
- no_fwd);
+ return pim_bsm_process(ifp, &sg, pim_msg, pim_msg_len, no_fwd);
break;
default:
@@ -331,8 +350,8 @@ static void pim_sock_read(struct thread *t)
struct interface *ifp, *orig_ifp;
struct pim_interface *pim_ifp;
int fd;
- struct sockaddr_in from;
- struct sockaddr_in to;
+ struct sockaddr_storage from;
+ struct sockaddr_storage to;
socklen_t fromlen = sizeof(from);
socklen_t tolen = sizeof(to);
uint8_t buf[PIM_PIM_BUFSIZE_READ];
@@ -348,6 +367,8 @@ static void pim_sock_read(struct thread *t)
pim_ifp = ifp->info;
while (cont) {
+ pim_sgaddr sg;
+
len = pim_socket_recvfromto(fd, buf, sizeof(buf), &from,
&fromlen, &to, &tolen, &ifindex);
if (len < 0) {
@@ -377,7 +398,15 @@ static void pim_sock_read(struct thread *t)
ifindex);
goto done;
}
- int fail = pim_pim_packet(ifp, buf, len);
+#if PIM_IPV == 4
+ sg.src = ((struct sockaddr_in *)&from)->sin_addr;
+ sg.grp = ((struct sockaddr_in *)&to)->sin_addr;
+#else
+ sg.src = ((struct sockaddr_in6 *)&from)->sin6_addr;
+ sg.grp = ((struct sockaddr_in6 *)&to)->sin6_addr;
+#endif
+
+ int fail = pim_pim_packet(ifp, buf, len, sg);
if (fail) {
if (PIM_DEBUG_PIM_PACKETS)
zlog_debug("%s: pim_pim_packet() return=%d",
@@ -428,7 +457,7 @@ static int pim_sock_open(struct interface *ifp)
return -1;
if (pim_socket_join(fd, qpim_all_pim_routers_addr,
- pim_ifp->primary_address, ifp->ifindex)) {
+ pim_ifp->primary_address, ifp->ifindex, pim_ifp)) {
close(fd);
return -2;
}
@@ -467,6 +496,9 @@ void pim_ifstat_reset(struct interface *ifp)
pim_ifp->pim_ifstat_bsm_cfg_miss = 0;
pim_ifp->pim_ifstat_ucast_bsm_cfg_miss = 0;
pim_ifp->pim_ifstat_bsm_invalid_sz = 0;
+ pim_ifp->igmp_ifstat_joins_sent = 0;
+ pim_ifp->igmp_ifstat_joins_failed = 0;
+ pim_ifp->igmp_peak_group_count = 0;
}
void pim_sock_reset(struct interface *ifp)
@@ -517,73 +549,62 @@ void pim_sock_reset(struct interface *ifp)
pim_ifstat_reset(ifp);
}
+#if PIM_IPV == 4
static uint16_t ip_id = 0;
-
+#endif
static int pim_msg_send_frame(int fd, char *buf, size_t len,
- struct sockaddr *dst, size_t salen)
+ struct sockaddr *dst, size_t salen,
+ const char *ifname)
{
- struct ip *ip = (struct ip *)buf;
-
- if (sendto(fd, buf, len, MSG_DONTWAIT, dst, salen) < 0) {
- char dst_str[INET_ADDRSTRLEN];
-
- switch (errno) {
- case EMSGSIZE: {
- size_t hdrsize = sizeof(struct ip);
- size_t newlen1 = ((len - hdrsize) / 2) & 0xFFF8;
- size_t sendlen = newlen1 + hdrsize;
- size_t offset = ntohs(ip->ip_off);
-
- ip->ip_len = htons(sendlen);
- ip->ip_off = htons(offset | IP_MF);
- if (pim_msg_send_frame(fd, buf, sendlen, dst, salen)
- == 0) {
- struct ip *ip2 = (struct ip *)(buf + newlen1);
- size_t newlen2 = len - sendlen;
- sendlen = newlen2 + hdrsize;
-
- memcpy(ip2, ip, hdrsize);
- ip2->ip_len = htons(sendlen);
- ip2->ip_off = htons(offset + (newlen1 >> 3));
- return pim_msg_send_frame(fd, (char *)ip2,
- sendlen, dst, salen);
- }
- }
+ if (sendto(fd, buf, len, MSG_DONTWAIT, dst, salen) >= 0)
+ return 0;
- return -1;
- default:
- if (PIM_DEBUG_PIM_PACKETS) {
- pim_inet4_dump("<dst?>", ip->ip_dst, dst_str,
- sizeof(dst_str));
- zlog_warn(
- "%s: sendto() failure to %s: fd=%d msg_size=%zd: errno=%d: %s",
- __func__, dst_str, fd, len, errno,
- safe_strerror(errno));
- }
- return -1;
- }
+#if PIM_IPV == 4
+ if (errno == EMSGSIZE) {
+ struct ip *ip = (struct ip *)buf;
+ size_t hdrsize = sizeof(struct ip);
+ size_t newlen1 = ((len - hdrsize) / 2) & 0xFFF8;
+ size_t sendlen = newlen1 + hdrsize;
+ size_t offset = ntohs(ip->ip_off);
+ int ret;
+
+ ip->ip_len = htons(sendlen);
+ ip->ip_off = htons(offset | IP_MF);
+
+ ret = pim_msg_send_frame(fd, buf, sendlen, dst, salen, ifname);
+ if (ret)
+ return ret;
+
+ struct ip *ip2 = (struct ip *)(buf + newlen1);
+ size_t newlen2 = len - sendlen;
+
+ sendlen = newlen2 + hdrsize;
+
+ memcpy(ip2, ip, hdrsize);
+ ip2->ip_len = htons(sendlen);
+ ip2->ip_off = htons(offset + (newlen1 >> 3));
+ return pim_msg_send_frame(fd, (char *)ip2, sendlen, dst, salen,
+ ifname);
}
+#endif
- return 0;
+ zlog_warn(
+ "%s: sendto() failure to %pSU: iface=%s fd=%d msg_size=%zd: %m",
+ __func__, dst, ifname, fd, len);
+ return -1;
}
int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
int pim_msg_size, const char *ifname)
{
- struct sockaddr_in to;
socklen_t tolen;
unsigned char buffer[10000];
unsigned char *msg_start;
uint8_t ttl;
struct pim_msg_header *header;
- struct ip *ip;
memset(buffer, 0, 10000);
- int sendlen = sizeof(struct ip) + pim_msg_size;
-
- msg_start = buffer + sizeof(struct ip);
- memcpy(msg_start, pim_msg, pim_msg_size);
header = (struct pim_msg_header *)pim_msg;
/*
@@ -613,7 +634,11 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
break;
}
- ip = (struct ip *)buffer;
+#if PIM_IPV == 4
+ struct ip *ip = (struct ip *)buffer;
+ struct sockaddr_in to = {};
+ int sendlen = sizeof(*ip) + pim_msg_size;
+
ip->ip_id = htons(++ip_id);
ip->ip_hl = 5;
ip->ip_v = 4;
@@ -624,24 +649,41 @@ int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
ip->ip_ttl = ttl;
ip->ip_len = htons(sendlen);
- if (PIM_DEBUG_PIM_PACKETS) {
- char dst_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<dst?>", dst, dst_str, sizeof(dst_str));
- zlog_debug("%s: to %s on %s: msg_size=%d checksum=%x", __func__,
- dst_str, ifname, pim_msg_size, header->checksum);
- }
-
- memset(&to, 0, sizeof(to));
to.sin_family = AF_INET;
to.sin_addr = dst;
tolen = sizeof(to);
+#else
+ struct ip6_hdr *ip = (struct ip6_hdr *)buffer;
+ struct sockaddr_in6 to = {};
+ int sendlen = sizeof(*ip) + pim_msg_size;
+
+ ip->ip6_flow = 0;
+ ip->ip6_vfc = (6 << 4) | (IPTOS_PREC_INTERNETCONTROL >> 4);
+ ip->ip6_plen = htons(pim_msg_size);
+ ip->ip6_nxt = PIM_IP_PROTO_PIM;
+ ip->ip6_hlim = ttl;
+ ip->ip6_src = src;
+ ip->ip6_dst = dst;
+
+ to.sin6_family = AF_INET6;
+ to.sin6_addr = dst;
+ tolen = sizeof(to);
+#endif
+
+ msg_start = buffer + sizeof(*ip);
+ memcpy(msg_start, pim_msg, pim_msg_size);
+
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug("%s: to %pPA on %s: msg_size=%d checksum=%x",
+ __func__, &dst, ifname, pim_msg_size,
+ header->checksum);
if (PIM_DEBUG_PIM_PACKETDUMP_SEND) {
pim_pkt_dump(__func__, pim_msg, pim_msg_size);
}
pim_msg_send_frame(fd, (char *)buffer, sendlen, (struct sockaddr *)&to,
- tolen);
+ tolen, ifname);
return 0;
}
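On the send side, the EMSGSIZE branch of pim_msg_send_frame() above now fragments in place only for IPv4: the payload is split roughly in half, rounded down to a multiple of 8 bytes because the IPv4 fragment offset field counts 8-byte units, the first half goes out with IP_MF set, and the function recurses so either half can be split again if still too large. The length/offset arithmetic, pulled out into a throwaway helper (plan_fragments() is an invented name, not part of pimd):

#include <stddef.h>
#include <stdio.h>

struct frag_plan {
	size_t first_payload;		/* sent with IP_MF set */
	size_t second_payload;		/* remainder */
	unsigned int offset_units;	/* added to ip_off, in 8-byte units */
};

/* payload_len is the IP payload size, i.e. total length minus the
 * IPv4 header that both fragments carry a copy of */
static struct frag_plan plan_fragments(size_t payload_len)
{
	struct frag_plan p;

	p.first_payload = (payload_len / 2) & ~(size_t)7;
	p.second_payload = payload_len - p.first_payload;
	p.offset_units = (unsigned int)(p.first_payload >> 3);
	return p;
}

int main(void)
{
	struct frag_plan p = plan_fragments(3000);

	printf("first=%zu second=%zu ip_off+=%u\n",
	       p.first_payload, p.second_payload, p.offset_units);
	return 0;
}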
@@ -654,20 +696,16 @@ static int hello_send(struct interface *ifp, uint16_t holdtime)
pim_ifp = ifp->info;
- if (PIM_DEBUG_PIM_HELLO) {
- char dst_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<dst?>", qpim_all_pim_routers_addr, dst_str,
- sizeof(dst_str));
+ if (PIM_DEBUG_PIM_HELLO)
zlog_debug(
- "%s: to %s on %s: holdt=%u prop_d=%u overr_i=%u dis_join_supp=%d dr_prio=%u gen_id=%08x addrs=%d",
- __func__, dst_str, ifp->name, holdtime,
- pim_ifp->pim_propagation_delay_msec,
+ "%s: to %pPA on %s: holdt=%u prop_d=%u overr_i=%u dis_join_supp=%d dr_prio=%u gen_id=%08x addrs=%d",
+ __func__, &qpim_all_pim_routers_addr, ifp->name,
+ holdtime, pim_ifp->pim_propagation_delay_msec,
pim_ifp->pim_override_interval_msec,
PIM_IF_TEST_PIM_CAN_DISABLE_JOIN_SUPPRESSION(
pim_ifp->options),
pim_ifp->pim_dr_priority, pim_ifp->pim_generation_id,
listcount(ifp->connected));
- }
pim_tlv_size = pim_hello_build_tlv(
ifp, pim_msg + PIM_PIM_MIN_LEN,
@@ -685,7 +723,9 @@ static int hello_send(struct interface *ifp, uint16_t holdtime)
assert(pim_msg_size >= PIM_PIM_MIN_LEN);
assert(pim_msg_size <= PIM_PIM_BUFSIZE_WRITE);
- pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_HELLO, false);
+ pim_msg_build_header(pim_ifp->primary_address,
+ qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
+ PIM_MSG_TYPE_HELLO, false);
if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
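On receive, pim_pim_packet() now feeds in_cksumv() an iovec instead of a flat buffer, so for IPv6 a pseudo-header (source, destination, upper-layer length, next header) can be prepended to the PIM message without copying; for IPv4 the iovec simply holds the message alone. That is presumably also why pim_msg_build_header() gained src/dst parameters on the send path. A sketch of a scatter-gather Internet checksum of that shape; cksum_iov() is an invented stand-in, not FRR's in_cksumv(), and per the usual PIM procedure the checksum field is zeroed before recomputing:

#include <stddef.h>
#include <stdint.h>
#include <sys/uio.h>

/*
 * One's-complement Internet checksum over a scatter/gather list.
 * Assumes every segment except possibly the last has an even length,
 * which holds for a "pseudo-header + PIM message" layout.  The result
 * is in host order; htons() it before writing it into a packet.
 */
static uint16_t cksum_iov(const struct iovec *iov, int iovcnt)
{
	uint32_t sum = 0;

	for (int i = 0; i < iovcnt; i++) {
		const uint8_t *p = iov[i].iov_base;
		size_t len = iov[i].iov_len;

		while (len > 1) {
			sum += (uint32_t)((p[0] << 8) | p[1]);
			p += 2;
			len -= 2;
		}
		if (len)	/* odd trailing byte, padded with zero */
			sum += (uint32_t)p[0] << 8;
	}
	while (sum >> 16)	/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}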
diff --git a/pimd/pim_pim.h b/pimd/pim_pim.h
index 1931e8cee8..822d8a18fa 100644
--- a/pimd/pim_pim.h
+++ b/pimd/pim_pim.h
@@ -54,7 +54,8 @@ void pim_sock_delete(struct interface *ifp, const char *delete_message);
void pim_hello_restart_now(struct interface *ifp);
void pim_hello_restart_triggered(struct interface *ifp);
-int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len);
+int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len,
+ pim_sgaddr sg);
int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
int pim_msg_size, const char *ifname);
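The extra pim_sgaddr argument in the prototype above exists because an IPv6 raw socket does not deliver the IP header with the payload, so pim_sock_read() recovers source and group from the addresses returned by pim_socket_recvfromto() and passes them down (for IPv4 the header is still parsed and sg is overwritten from it). A small per-family extraction sketch; sketch_sg stands in for the real pim_sgaddr from pim_addr.h, and PIM_IPV defaults to 4 only to keep the sketch self-contained:

#include <netinet/in.h>
#include <sys/socket.h>

#ifndef PIM_IPV
#define PIM_IPV 4	/* default for the standalone sketch only */
#endif

#if PIM_IPV == 4
typedef struct in_addr sketch_addr;
#else
typedef struct in6_addr sketch_addr;
#endif

struct sketch_sg {
	sketch_addr src;
	sketch_addr grp;
};

/* Fill (S,G) from the from/to addresses of a received packet;
 * sockaddr_storage keeps the receive path family-agnostic. */
static void sg_from_sockaddrs(struct sketch_sg *sg,
			      const struct sockaddr_storage *from,
			      const struct sockaddr_storage *to)
{
#if PIM_IPV == 4
	sg->src = ((const struct sockaddr_in *)from)->sin_addr;
	sg->grp = ((const struct sockaddr_in *)to)->sin_addr;
#else
	sg->src = ((const struct sockaddr_in6 *)from)->sin6_addr;
	sg->grp = ((const struct sockaddr_in6 *)to)->sin6_addr;
#endif
}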
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
index 2cc80f957c..45bcad3c26 100644
--- a/pimd/pim_register.c
+++ b/pimd/pim_register.c
@@ -61,7 +61,7 @@ void pim_register_join(struct pim_upstream *up)
pim_channel_add_oif(up->channel_oil, pim->regiface,
PIM_OIF_FLAG_PROTO_PIM, __func__);
up->reg_state = PIM_REG_JOIN;
- pim_vxlan_update_sg_reg_state(pim, up, true /*reg_join*/);
+ pim_vxlan_update_sg_reg_state(pim, up, true);
}
void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg,
@@ -88,7 +88,8 @@ void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg,
length = pim_encode_addr_ucast(b1, sg->src);
b1length += length;
- pim_msg_build_header(buffer, b1length + PIM_MSG_REGISTER_STOP_LEN,
+ pim_msg_build_header(src, originator, buffer,
+ b1length + PIM_MSG_REGISTER_STOP_LEN,
PIM_MSG_TYPE_REG_STOP, false);
pinfo = (struct pim_interface *)ifp->info;
@@ -108,13 +109,39 @@ void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg,
++pinfo->pim_ifstat_reg_stop_send;
}
+static void pim_reg_stop_upstream(struct pim_instance *pim,
+ struct pim_upstream *up)
+{
+ switch (up->reg_state) {
+ case PIM_REG_NOINFO:
+ case PIM_REG_PRUNE:
+ return;
+ case PIM_REG_JOIN:
+ up->reg_state = PIM_REG_PRUNE;
+ pim_channel_del_oif(up->channel_oil, pim->regiface,
+ PIM_OIF_FLAG_PROTO_PIM, __func__);
+ pim_upstream_start_register_stop_timer(up, 0);
+ pim_vxlan_update_sg_reg_state(pim, up, false);
+ break;
+ case PIM_REG_JOIN_PENDING:
+ up->reg_state = PIM_REG_PRUNE;
+ pim_upstream_start_register_stop_timer(up, 0);
+ return;
+ }
+}
+
int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
{
struct pim_interface *pim_ifp = ifp->info;
struct pim_instance *pim = pim_ifp->pim;
- struct pim_upstream *upstream = NULL;
+ struct pim_upstream *up = NULL;
+ struct pim_rpf *rp;
+ pim_addr rpf_addr;
pim_sgaddr sg;
+ struct listnode *up_node;
+ struct pim_upstream *child;
bool wrong_af = false;
+ bool handling_star = false;
int l;
++pim_ifp->pim_ifstat_reg_stop_recv;
@@ -127,33 +154,65 @@ int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
if (wrong_af) {
zlog_err("invalid AF in Register-Stop on %s", ifp->name);
- return 0;
+ return -1;
}
- upstream = pim_upstream_find(pim, &sg);
- if (!upstream) {
- return 0;
- }
if (PIM_DEBUG_PIM_REG)
- zlog_debug("Received Register stop for %s", upstream->sg_str);
+ zlog_debug("Received Register stop for %pSG", &sg);
+
+ rp = RP(pim_ifp->pim, sg.grp);
+ if (rp) {
+ rpf_addr = pim_addr_from_prefix(&rp->rpf_addr);
+ if (pim_addr_cmp(sg.src, rpf_addr) == 0) {
+ handling_star = true;
+ sg.src = PIMADDR_ANY;
+ }
+ }
- switch (upstream->reg_state) {
- case PIM_REG_NOINFO:
- case PIM_REG_PRUNE:
- return 0;
- case PIM_REG_JOIN:
- upstream->reg_state = PIM_REG_PRUNE;
- pim_channel_del_oif(upstream->channel_oil, pim->regiface,
- PIM_OIF_FLAG_PROTO_PIM, __func__);
- pim_upstream_start_register_stop_timer(upstream, 0);
- pim_vxlan_update_sg_reg_state(pim, upstream,
- false/*reg_join*/);
- break;
- case PIM_REG_JOIN_PENDING:
- upstream->reg_state = PIM_REG_PRUNE;
- pim_upstream_start_register_stop_timer(upstream, 0);
- return 0;
+ /*
+ * RFC 7761 Sec 4.4.1
+ * Handling Register-Stop(*,G) Messages at the DR:
+ * A Register-Stop(*,G) should be treated as a
+ * Register-Stop(S,G) for all (S,G) Register state
+ * machines that are not in the NoInfo state.
+ */
+ up = pim_upstream_find(pim, &sg);
+ if (up) {
+ /*
+ * If the upstream find actually found a particular
+ * S,G then we *know* that the following for loop
+ * is not going to execute and this is ok
+ */
+ for (ALL_LIST_ELEMENTS_RO(up->sources, up_node, child)) {
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug("Executing Reg stop for %s",
+ child->sg_str);
+
+ pim_reg_stop_upstream(pim, child);
+ }
+
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug("Executing Reg stop for %s", up->sg_str);
+ pim_reg_stop_upstream(pim, up);
+ } else {
+ if (!handling_star)
+ return 0;
+ /*
+ * Unfortunately pim was unable to find a *,G
+ * but pim may still actually have individual
+ * S,G's that need to be processed. In that
+ * case pim must do the expensive walk to find
+	 * and stop them.
+ */
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ if (pim_addr_cmp(up->sg.grp, sg.grp) == 0) {
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug("Executing Reg stop for %s",
+ up->sg_str);
+ pim_reg_stop_upstream(pim, up);
+ }
+ }
}
return 0;
@@ -203,7 +262,8 @@ void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src,
memcpy(b1, (const unsigned char *)buf, buf_size);
- pim_msg_build_header(buffer, buf_size + PIM_MSG_REGISTER_LEN,
+ pim_msg_build_header(src, rpg->rpf_addr.u.prefix4, buffer,
+ buf_size + PIM_MSG_REGISTER_LEN,
PIM_MSG_TYPE_REGISTER, false);
++pinfo->pim_ifstat_reg_send;
@@ -311,30 +371,26 @@ void pim_null_register_send(struct pim_upstream *up)
* }
* }
*/
-int pim_register_recv(struct interface *ifp, struct in_addr dest_addr,
- struct in_addr src_addr, uint8_t *tlv_buf,
- int tlv_buf_size)
+int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
+ pim_addr src_addr, uint8_t *tlv_buf, int tlv_buf_size)
{
int sentRegisterStop = 0;
- struct ip *ip_hdr;
+ const void *ip_hdr;
pim_sgaddr sg;
uint32_t *bits;
int i_am_rp = 0;
struct pim_interface *pim_ifp = ifp->info;
struct pim_instance *pim = pim_ifp->pim;
+ pim_addr rp_addr;
#define PIM_MSG_REGISTER_BIT_RESERVED_LEN 4
- ip_hdr = (struct ip *)(tlv_buf + PIM_MSG_REGISTER_BIT_RESERVED_LEN);
-
- if (!if_address_is_local(&dest_addr, AF_INET, pim->vrf->vrf_id)) {
- if (PIM_DEBUG_PIM_REG) {
- char dest[INET_ADDRSTRLEN];
+ ip_hdr = (tlv_buf + PIM_MSG_REGISTER_BIT_RESERVED_LEN);
- pim_inet4_dump("<dst?>", dest_addr, dest, sizeof(dest));
+ if (!if_address_is_local(&dest_addr, PIM_AF, pim->vrf->vrf_id)) {
+ if (PIM_DEBUG_PIM_REG)
zlog_debug(
- "%s: Received Register message for destination address: %s that I do not own",
- __func__, dest);
- }
+ "%s: Received Register message for destination address: %pPA that I do not own",
+ __func__, &dest_addr);
return 0;
}
@@ -367,18 +423,14 @@ int pim_register_recv(struct interface *ifp, struct in_addr dest_addr,
* start of the actual Encapsulated data.
*/
memset(&sg, 0, sizeof(sg));
- sg.src = ip_hdr->ip_src;
- sg.grp = ip_hdr->ip_dst;
+ sg = pim_sgaddr_from_iphdr(ip_hdr);
i_am_rp = I_am_RP(pim, sg.grp);
- if (PIM_DEBUG_PIM_REG) {
- char src_str[INET_ADDRSTRLEN];
-
- pim_inet4_dump("<src?>", src_addr, src_str, sizeof(src_str));
- zlog_debug("Received Register message%pSG from %s on %s, rp: %d",
- &sg, src_str, ifp->name, i_am_rp);
- }
+ if (PIM_DEBUG_PIM_REG)
+ zlog_debug(
+ "Received Register message%pSG from %pPA on %s, rp: %d",
+ &sg, &src_addr, ifp->name, i_am_rp);
if (pim_is_grp_ssm(pim_ifp->pim, sg.grp)) {
if (pim_addr_is_any(sg.src)) {
@@ -390,9 +442,8 @@ int pim_register_recv(struct interface *ifp, struct in_addr dest_addr,
}
}
- if (i_am_rp
- && (dest_addr.s_addr
- == ((RP(pim, sg.grp))->rpf_addr.u.prefix4.s_addr))) {
+ rp_addr = pim_addr_from_prefix(&(RP(pim, sg.grp))->rpf_addr);
+ if (i_am_rp && (!pim_addr_cmp(dest_addr, rp_addr))) {
sentRegisterStop = 0;
if (pim->register_plist) {
@@ -407,31 +458,25 @@ int pim_register_recv(struct interface *ifp, struct in_addr dest_addr,
if (prefix_list_apply(plist, &src) == PREFIX_DENY) {
pim_register_stop_send(ifp, &sg, dest_addr,
src_addr);
- if (PIM_DEBUG_PIM_PACKETS) {
- char src_str[INET_ADDRSTRLEN];
-
- pim_inet4_dump("<src?>", src_addr,
- src_str,
- sizeof(src_str));
+ if (PIM_DEBUG_PIM_PACKETS)
zlog_debug(
- "%s: Sending register-stop to %s for %pSG due to prefix-list denial, dropping packet",
- __func__, src_str, &sg);
- }
+ "%s: Sending register-stop to %pPA for %pSG due to prefix-list denial, dropping packet",
+ __func__, &src_addr, &sg);
return 0;
}
}
if (*bits & PIM_REGISTER_BORDER_BIT) {
- struct in_addr pimbr = pim_br_get_pmbr(&sg);
+ pim_addr pimbr = pim_br_get_pmbr(&sg);
if (PIM_DEBUG_PIM_PACKETS)
zlog_debug(
"%s: Received Register message with Border bit set",
__func__);
- if (pimbr.s_addr == pim_br_unknown.s_addr)
+ if (pim_addr_is_any(pimbr))
pim_br_set_pmbr(&sg, src_addr);
- else if (src_addr.s_addr != pimbr.s_addr) {
+ else if (pim_addr_cmp(src_addr, pimbr)) {
pim_register_stop_send(ifp, &sg, dest_addr,
src_addr);
if (PIM_DEBUG_PIM_PACKETS)
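Per the RFC 7761 §4.4.1 comment added above, a Register-Stop whose encoded source equals RP(G) is treated as Register-Stop(*,G) and applied to every (S,G) register state machine for that group; otherwise only the matching (S,G) is touched. A compressed model of that dispatch over a flat table, with invented names (reg_entry, apply_register_stop); it also collapses the Join and Join-Pending cases, which pim_reg_stop_upstream() above keeps separate because only the Join case has to remove the pimreg OIF and update VxLAN state:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

enum reg_state { REG_NOINFO, REG_JOIN, REG_JOIN_PENDING, REG_PRUNE };

struct reg_entry {
	char src[40];	/* "*" marks the wildcard (*,G) entry */
	char grp[40];
	enum reg_state state;
};

/* one (S,G) register state machine reacting to a Register-Stop */
static void reg_stop_one(struct reg_entry *e)
{
	switch (e->state) {
	case REG_NOINFO:
	case REG_PRUNE:
		return;			/* nothing to do */
	case REG_JOIN:
	case REG_JOIN_PENDING:
		e->state = REG_PRUNE;	/* and arm the register-stop timer */
		return;
	}
}

/* Register-Stop(S,G): hit the matching entry only.
 * Register-Stop(*,G): hit every entry for the group. */
static void apply_register_stop(struct reg_entry *tab, size_t n,
				const char *src, const char *grp)
{
	bool wildcard = strcmp(src, "*") == 0;

	for (size_t i = 0; i < n; i++) {
		if (strcmp(tab[i].grp, grp) != 0)
			continue;
		if (wildcard || strcmp(tab[i].src, src) == 0)
			reg_stop_one(&tab[i]);
	}
}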
diff --git a/pimd/pim_register.h b/pimd/pim_register.h
index fd4284b802..79c64d995f 100644
--- a/pimd/pim_register.h
+++ b/pimd/pim_register.h
@@ -32,15 +32,14 @@
int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size);
-int pim_register_recv(struct interface *ifp, struct in_addr dest_addr,
- struct in_addr src_addr, uint8_t *tlv_buf,
- int tlv_buf_size);
+int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
+ pim_addr src_addr, uint8_t *tlv_buf, int tlv_buf_size);
-void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src,
+void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
struct pim_rpf *rpg, int null_register,
struct pim_upstream *up);
-void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg,
- struct in_addr src, struct in_addr originator);
+void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg, pim_addr src,
+ pim_addr originator);
void pim_register_join(struct pim_upstream *up);
void pim_null_register_send(struct pim_upstream *up);
void pim_reg_del_on_couldreg_fail(struct interface *ifp);
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
index 06b2216072..a552e77823 100644
--- a/pimd/pim_rp.c
+++ b/pimd/pim_rp.c
@@ -48,6 +48,8 @@
#include "pim_oil.h"
#include "pim_zebra.h"
#include "pim_bsm.h"
+#include "pim_util.h"
+#include "pim_ssm.h"
/* Cleanup pim->rpf_hash each node data */
void pim_rp_list_hash_clean(void *data)
@@ -76,26 +78,21 @@ int pim_rp_list_cmp(void *v1, void *v2)
{
struct rp_info *rp1 = (struct rp_info *)v1;
struct rp_info *rp2 = (struct rp_info *)v2;
+ int ret;
/*
* Sort by RP IP address
*/
- if (rp1->rp.rpf_addr.u.prefix4.s_addr
- < rp2->rp.rpf_addr.u.prefix4.s_addr)
- return -1;
-
- if (rp1->rp.rpf_addr.u.prefix4.s_addr
- > rp2->rp.rpf_addr.u.prefix4.s_addr)
- return 1;
+ ret = prefix_cmp(&rp1->rp.rpf_addr, &rp2->rp.rpf_addr);
+ if (ret)
+ return ret;
/*
* Sort by group IP address
*/
- if (rp1->group.u.prefix4.s_addr < rp2->group.u.prefix4.s_addr)
- return -1;
-
- if (rp1->group.u.prefix4.s_addr > rp2->group.u.prefix4.s_addr)
- return 1;
+ ret = prefix_cmp(&rp1->group, &rp2->group);
+ if (ret)
+ return ret;
return 0;
}
@@ -113,15 +110,14 @@ void pim_rp_init(struct pim_instance *pim)
rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
- if (!str2prefix("224.0.0.0/4", &rp_info->group)) {
+ if (!pim_get_all_mcast_group(&rp_info->group)) {
flog_err(EC_LIB_DEVELOPMENT,
- "Unable to convert 224.0.0.0/4 to prefix");
+ "Unable to convert all-multicast prefix");
list_delete(&pim->rp_list);
route_table_finish(pim->rp_table);
XFREE(MTYPE_PIM_RP, rp_info);
return;
}
- rp_info->group.family = AF_INET;
pim_addr_to_prefix(&rp_info->rp.rpf_addr, PIMADDR_ANY);
listnode_add(pim->rp_list, rp_info);
@@ -129,9 +125,9 @@ void pim_rp_init(struct pim_instance *pim)
rn = route_node_get(pim->rp_table, &rp_info->group);
rn->info = rp_info;
if (PIM_DEBUG_PIM_TRACE)
- zlog_debug(
- "Allocated: %p for rp_info: %p(224.0.0.0/4) Lock: %d",
- rn, rp_info, route_node_get_lock_count(rn));
+ zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
+ rp_info, &rp_info->group,
+ route_node_get_lock_count(rn));
}
void pim_rp_free(struct pim_instance *pim)
@@ -148,15 +144,17 @@ void pim_rp_free(struct pim_instance *pim)
* Given an RP's prefix-list, return the RP's rp_info for that prefix-list
*/
static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
- struct in_addr rp,
- const char *plist)
+ pim_addr rp, const char *plist)
{
struct listnode *node;
struct rp_info *rp_info;
+ struct prefix rp_prefix;
+
+ pim_addr_to_prefix(&rp_prefix, rp);
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
- if (rp.s_addr == rp_info->rp.rpf_addr.u.prefix4.s_addr
- && rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
+ if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
+ rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
return rp_info;
}
}
@@ -185,16 +183,17 @@ static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
* Given an RP's address, return the RP's rp_info that is an exact match for
* 'group'
*/
-static struct rp_info *pim_rp_find_exact(struct pim_instance *pim,
- struct in_addr rp,
+static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
const struct prefix *group)
{
struct listnode *node;
struct rp_info *rp_info;
+ struct prefix rp_prefix;
+ pim_addr_to_prefix(&rp_prefix, rp);
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
- if (rp.s_addr == rp_info->rp.rpf_addr.u.prefix4.s_addr
- && prefix_same(&rp_info->group, group))
+ if (prefix_same(&rp_prefix, &rp_info->rp.rpf_addr) &&
+ prefix_same(&rp_info->group, group))
return rp_info;
}
@@ -238,7 +237,7 @@ struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
bp = NULL;
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
if (rp_info->plist) {
- plist = prefix_list_lookup(AFI_IP, rp_info->plist);
+ plist = prefix_list_lookup(PIM_AFI, rp_info->plist);
if (prefix_list_apply_ext(plist, &entry, group, true)
== PREFIX_DENY || !entry)
@@ -371,7 +370,7 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
up->sg.grp);
if (PIM_DEBUG_PIM_TRACE)
- zlog_debug("%s: pim upstream update for old upstream %pI4",
+ zlog_debug("%s: pim upstream update for old upstream %pPA",
__func__, &old_upstream_addr);
if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
@@ -412,11 +411,10 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
}
-int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr, struct prefix group,
+int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
const char *plist, enum rp_source rp_src_flag)
{
int result = 0;
- char rp[INET_ADDRSTRLEN];
struct rp_info *rp_info;
struct rp_info *rp_all;
struct prefix group_all;
@@ -428,25 +426,20 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr, struct prefix g
struct pim_upstream *up;
bool upstream_updated = false;
- if (rp_addr.s_addr == INADDR_ANY)
+ if (pim_addr_is_any(rp_addr))
return PIM_RP_BAD_ADDRESS;
rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
- rp_info->rp.rpf_addr.family = AF_INET;
- rp_info->rp.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
- rp_info->rp.rpf_addr.u.prefix4 = rp_addr;
+ pim_addr_to_prefix(&rp_info->rp.rpf_addr, rp_addr);
prefix_copy(&rp_info->group, &group);
rp_info->rp_src = rp_src_flag;
- inet_ntop(AF_INET, &rp_info->rp.rpf_addr.u.prefix4, rp, sizeof(rp));
-
if (plist) {
/*
* Return if the prefix-list is already configured for this RP
*/
- if (pim_rp_find_prefix_list(pim, rp_info->rp.rpf_addr.u.prefix4,
- plist)) {
+ if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
XFREE(MTYPE_PIM_RP, rp_info);
return PIM_SUCCESS;
}
@@ -464,14 +457,14 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr, struct prefix g
*/
for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
tmp_rp_info)) {
- if (rp_info->rp.rpf_addr.u.prefix4.s_addr
- == tmp_rp_info->rp.rpf_addr.u.prefix4.s_addr) {
+ if (prefix_same(&rp_info->rp.rpf_addr,
+ &tmp_rp_info->rp.rpf_addr)) {
if (tmp_rp_info->plist)
- pim_rp_del_config(pim, rp, NULL,
+ pim_rp_del_config(pim, rp_addr, NULL,
tmp_rp_info->plist);
else
pim_rp_del_config(
- pim, rp,
+ pim, rp_addr,
prefix2str(&tmp_rp_info->group,
buffer, BUFSIZ),
NULL);
@@ -481,7 +474,7 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr, struct prefix g
rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
} else {
- if (!str2prefix("224.0.0.0/4", &group_all)) {
+ if (!pim_get_all_mcast_group(&group_all)) {
XFREE(MTYPE_PIM_RP, rp_info);
return PIM_GROUP_BAD_ADDRESS;
}
@@ -500,11 +493,10 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr, struct prefix g
*/
for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
tmp_rp_info)) {
- if (tmp_rp_info->plist
- && rp_info->rp.rpf_addr.u.prefix4.s_addr
- == tmp_rp_info->rp.rpf_addr.u.prefix4
- .s_addr) {
- pim_rp_del_config(pim, rp, NULL,
+ if (tmp_rp_info->plist &&
+ prefix_same(&rp_info->rp.rpf_addr,
+ &tmp_rp_info->rp.rpf_addr)) {
+ pim_rp_del_config(pim, rp_addr, NULL,
tmp_rp_info->plist);
}
}
@@ -519,10 +511,7 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr, struct prefix g
XFREE(MTYPE_PIM_RP, rp_info);
/* Register addr with Zebra NHT */
- nht_p.family = AF_INET;
- nht_p.prefixlen = IPV4_MAX_BITLEN;
- nht_p.u.prefix4 =
- rp_all->rp.rpf_addr.u.prefix4; // RP address
+ nht_p = rp_all->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug(
"%s: NHT Register rp_all addr %pFX grp %pFX ",
@@ -564,8 +553,7 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr, struct prefix g
/*
* Return if the group is already configured for this RP
*/
- tmp_rp_info = pim_rp_find_exact(
- pim, rp_info->rp.rpf_addr.u.prefix4, &rp_info->group);
+ tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
if (tmp_rp_info) {
if ((tmp_rp_info->rp_src != rp_src_flag)
&& (rp_src_flag == RP_SRC_STATIC))
@@ -601,8 +589,7 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr, struct prefix g
}
result = pim_rp_change(
- pim,
- rp_info->rp.rpf_addr.u.prefix4,
+ pim, rp_addr,
tmp_rp_info->group,
rp_src_flag);
XFREE(MTYPE_PIM_RP, rp_info);
@@ -643,9 +630,7 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr, struct prefix g
pim_rp_refresh_group_to_rp_mapping(pim);
/* Register addr with Zebra NHT */
- nht_p.family = AF_INET;
- nht_p.prefixlen = IPV4_MAX_BITLEN;
- nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+ nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
__func__, &nht_p, &rp_info->group);
@@ -657,32 +642,30 @@ int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr, struct prefix g
return PIM_SUCCESS;
}
-int pim_rp_del_config(struct pim_instance *pim, const char *rp,
- const char *group_range, const char *plist)
+void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
+ const char *group_range, const char *plist)
{
struct prefix group;
- struct in_addr rp_addr;
int result;
if (group_range == NULL)
- result = str2prefix("224.0.0.0/4", &group);
+ result = pim_get_all_mcast_group(&group);
else
result = str2prefix(group_range, &group);
- if (!result)
- return PIM_GROUP_BAD_ADDRESS;
-
- result = inet_pton(AF_INET, rp, &rp_addr);
- if (result <= 0)
- return PIM_RP_BAD_ADDRESS;
+ if (!result) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "%s: String to prefix failed for %pPAs group",
+ __func__, &rp_addr);
+ return;
+ }
- result = pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
- return result;
+ pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
}
-int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
- struct prefix group, const char *plist,
- enum rp_source rp_src_flag)
+int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
+ const char *plist, enum rp_source rp_src_flag)
{
struct prefix g_all;
struct rp_info *rp_info;
@@ -694,12 +677,8 @@ int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
struct pim_upstream *up;
struct bsgrp_node *bsgrp = NULL;
struct bsm_rpinfo *bsrp = NULL;
- char rp_str[INET_ADDRSTRLEN];
bool upstream_updated = false;
- if (!inet_ntop(AF_INET, &rp_addr, rp_str, sizeof(rp_str)))
- snprintf(rp_str, sizeof(rp_str), "<rp?>");
-
if (plist)
rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
else
@@ -714,8 +693,8 @@ int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
}
if (PIM_DEBUG_PIM_TRACE)
- zlog_debug("%s: Delete RP %s for the group %pFX", __func__,
- rp_str, &group);
+ zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
+ &rp_addr, &group);
/* While static RP is getting deleted, we need to check if dynamic RP
* present for the same group in BSM RP table, then install the dynamic
@@ -727,19 +706,11 @@ int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
if (bsgrp) {
bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
if (bsrp) {
- if (PIM_DEBUG_PIM_TRACE) {
- char bsrp_str[INET_ADDRSTRLEN];
-
- if (!inet_ntop(AF_INET, bsrp, bsrp_str,
- sizeof(bsrp_str)))
- snprintf(bsrp_str,
- sizeof(bsrp_str),
- "<bsrp?>");
-
+ if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
- "%s: BSM RP %s found for the group %pFX",
- __func__, bsrp_str, &group);
- }
+ "%s: BSM RP %pPA found for the group %pFX",
+ __func__, &bsrp->rp_address,
+ &group);
return pim_rp_change(pim, bsrp->rp_address,
group, RP_SRC_BSR);
}
@@ -752,15 +723,13 @@ int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
}
/* Deregister addr with Zebra NHT */
- nht_p.family = AF_INET;
- nht_p.prefixlen = IPV4_MAX_BITLEN;
- nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+ nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: Deregister RP addr %pFX with Zebra ", __func__,
&nht_p);
pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
- if (!str2prefix("224.0.0.0/4", &g_all))
+ if (!pim_get_all_mcast_group(&g_all))
return PIM_RP_BAD_ADDRESS;
rp_all = pim_rp_find_match_group(pim, &g_all);
@@ -851,7 +820,7 @@ int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
return PIM_SUCCESS;
}
-int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
+int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
struct prefix group, enum rp_source rp_src_flag)
{
struct prefix nht_p;
@@ -860,6 +829,7 @@ int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
struct rp_info *rp_info = NULL;
struct pim_upstream *up;
bool upstream_updated = false;
+ pim_addr old_rp_addr;
rn = route_node_lookup(pim->rp_table, &group);
if (!rn) {
@@ -875,7 +845,8 @@ int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
return result;
}
- if (rp_info->rp.rpf_addr.u.prefix4.s_addr == new_rp_addr.s_addr) {
+ old_rp_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+ if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
if (rp_info->rp_src != rp_src_flag) {
rp_info->rp_src = rp_src_flag;
route_unlock_node(rn);
@@ -883,12 +854,13 @@ int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
}
}
- nht_p.family = AF_INET;
- nht_p.prefixlen = IPV4_MAX_BITLEN;
+ nht_p.family = PIM_AF;
+ nht_p.prefixlen = PIM_MAX_BITLEN;
/* Deregister old RP addr with Zebra NHT */
- if (rp_info->rp.rpf_addr.u.prefix4.s_addr != INADDR_ANY) {
- nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+
+ if (!pim_addr_is_any(old_rp_addr)) {
+ nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
__func__, &nht_p);
@@ -898,7 +870,8 @@ int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
pim_rp_nexthop_del(rp_info);
listnode_delete(pim->rp_list, rp_info);
/* Update the new RP address*/
- rp_info->rp.rpf_addr.u.prefix4 = new_rp_addr;
+
+ pim_addr_to_prefix(&rp_info->rp.rpf_addr, new_rp_addr);
rp_info->rp_src = rp_src_flag;
rp_info->i_am_rp = 0;
@@ -923,7 +896,7 @@ int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
pim_zebra_update_all_interfaces(pim);
/* Register new RP addr with Zebra NHT */
- nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+ nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: NHT Register RP addr %pFX grp %pFX with Zebra ",
__func__, &nht_p, &rp_info->group);
@@ -954,9 +927,7 @@ void pim_rp_setup(struct pim_instance *pim)
if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
continue;
- nht_p.family = AF_INET;
- nht_p.prefixlen = IPV4_MAX_BITLEN;
- nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+ nht_p = rp_info->rp.rpf_addr;
pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
@@ -1053,7 +1024,6 @@ void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
}
}
-#if PIM_IPV == 4
/*
* I_am_RP(G) is true if the group-to-RP mapping indicates that
* this router is the RP for the group.
@@ -1066,10 +1036,7 @@ int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
struct rp_info *rp_info;
memset(&g, 0, sizeof(g));
- g.family = AF_INET;
- g.prefixlen = IPV4_MAX_BITLEN;
- g.u.prefix4 = group;
-
+ pim_addr_to_prefix(&g, group);
rp_info = pim_rp_find_match_group(pim, &g);
if (rp_info)
@@ -1088,9 +1055,7 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
struct rp_info *rp_info;
memset(&g, 0, sizeof(g));
- g.family = AF_INET;
- g.prefixlen = IPV4_MAX_BITLEN;
- g.u.prefix4 = group;
+ pim_addr_to_prefix(&g, group);
rp_info = pim_rp_find_match_group(pim, &g);
@@ -1098,9 +1063,7 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
struct prefix nht_p;
/* Register addr with Zebra NHT */
- nht_p.family = AF_INET;
- nht_p.prefixlen = IPV4_MAX_BITLEN;
- nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+ nht_p = rp_info->rp.rpf_addr;
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug(
"%s: NHT Register RP addr %pFX grp %pFX with Zebra",
@@ -1131,53 +1094,35 @@ int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
struct prefix g;
memset(&g, 0, sizeof(g));
- g.family = AF_INET;
- g.prefixlen = IPV4_MAX_BITLEN;
- g.u.prefix4 = group;
+
+ pim_addr_to_prefix(&g, group);
rp_info = pim_rp_find_match_group(pim, &g);
if (!rp_info || ((pim_rpf_addr_is_inaddr_any(&rp_info->rp)) &&
- (source.s_addr == INADDR_ANY))) {
+ (pim_addr_is_any(source)))) {
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: Received a (*,G) with no RP configured",
__func__);
- up->s_addr = INADDR_ANY;
+ *up = PIMADDR_ANY;
return 0;
}
- *up = (source.s_addr == INADDR_ANY) ? rp_info->rp.rpf_addr.u.prefix4
- : source;
+ if (pim_addr_is_any(source))
+ *up = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
+ else
+ *up = source;
return 1;
}
-#else
-CPP_NOTICE("functions stubbed out for IPv6");
-
-int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
-{
- return 0;
-}
-
-struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
-{
- return NULL;
-}
-
-int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
- pim_addr source, pim_addr group)
-{
- return 0;
-}
-#endif
int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
const char *spaces)
{
struct listnode *node;
struct rp_info *rp_info;
- char rp_buffer[32];
int count = 0;
+ pim_addr rp_addr;
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
@@ -1186,31 +1131,28 @@ int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
if (rp_info->rp_src == RP_SRC_BSR)
continue;
+ rp_addr = pim_addr_from_prefix(&rp_info->rp.rpf_addr);
if (rp_info->plist)
- vty_out(vty, "%sip pim rp %s prefix-list %s\n", spaces,
- inet_ntop(AF_INET,
- &rp_info->rp.rpf_addr.u.prefix4,
- rp_buffer, 32),
- rp_info->plist);
+ vty_out(vty,
+ "%s" PIM_AF_NAME
+ " pim rp %pPA prefix-list %s\n",
+ spaces, &rp_addr, rp_info->plist);
else
- vty_out(vty, "%sip pim rp %s %pFX\n", spaces,
- inet_ntop(AF_INET,
- &rp_info->rp.rpf_addr.u.prefix4,
- rp_buffer, 32),
- &rp_info->group);
+ vty_out(vty, "%s" PIM_AF_NAME " pim rp %pPA %pFX\n",
+ spaces, &rp_addr, &rp_info->group);
count++;
}
return count;
}
-void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj)
+void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
+ struct vty *vty, bool uj)
{
struct rp_info *rp_info;
struct rp_info *prev_rp_info = NULL;
struct listnode *node;
char source[7];
- char buf[PREFIX_STRLEN];
json_object *json = NULL;
json_object *json_rp_rows = NULL;
@@ -1220,112 +1162,105 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj)
json = json_object_new_object();
else
vty_out(vty,
- "RP address group/prefix-list OIF I am RP Source\n");
+ "RP address group/prefix-list OIF I am RP Source Group-Type\n");
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
- if (!pim_rpf_addr_is_inaddr_any(&rp_info->rp)) {
- char buf[48];
+ if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
+ continue;
- if (rp_info->rp_src == RP_SRC_STATIC)
- strlcpy(source, "Static", sizeof(source));
- else if (rp_info->rp_src == RP_SRC_BSR)
- strlcpy(source, "BSR", sizeof(source));
- else
- strlcpy(source, "None", sizeof(source));
- if (uj) {
- /*
- * If we have moved on to a new RP then add the
- * entry for the previous RP
- */
- if (prev_rp_info
- && prev_rp_info->rp.rpf_addr.u.prefix4
- .s_addr
- != rp_info->rp.rpf_addr.u.prefix4
- .s_addr) {
- json_object_object_add(
- json,
- inet_ntop(AF_INET,
- &prev_rp_info->rp
- .rpf_addr.u
- .prefix4,
- buf, sizeof(buf)),
- json_rp_rows);
- json_rp_rows = NULL;
- }
+#if PIM_IPV == 4
+ pim_addr group = rp_info->group.u.prefix4;
+#else
+ pim_addr group = rp_info->group.u.prefix6;
+#endif
+ const char *group_type =
+ pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";
- if (!json_rp_rows)
- json_rp_rows = json_object_new_array();
-
- json_row = json_object_new_object();
- json_object_string_addf(
- json_row, "rpAddress", "%pI4",
- &rp_info->rp.rpf_addr.u.prefix4);
- if (rp_info->rp.source_nexthop.interface)
- json_object_string_add(
- json_row, "outboundInterface",
- rp_info->rp.source_nexthop
- .interface->name);
- else
- json_object_string_add(
- json_row, "outboundInterface",
- "Unknown");
- if (rp_info->i_am_rp)
- json_object_boolean_true_add(json_row,
- "iAmRP");
- else
- json_object_boolean_false_add(json_row,
- "iAmRP");
+ if (range && !prefix_match(&rp_info->group, range))
+ continue;
- if (rp_info->plist)
- json_object_string_add(json_row,
- "prefixList",
- rp_info->plist);
- else
- json_object_string_addf(
- json_row, "group", "%pFX",
- &rp_info->group);
- json_object_string_add(json_row, "source",
- source);
+ if (rp_info->rp_src == RP_SRC_STATIC)
+ strlcpy(source, "Static", sizeof(source));
+ else if (rp_info->rp_src == RP_SRC_BSR)
+ strlcpy(source, "BSR", sizeof(source));
+ else
+ strlcpy(source, "None", sizeof(source));
+ if (uj) {
+ /*
+ * If we have moved on to a new RP then add the
+ * entry for the previous RP
+ */
+ if (prev_rp_info &&
+ prefix_cmp(&prev_rp_info->rp.rpf_addr,
+ &rp_info->rp.rpf_addr)) {
+ json_object_object_addf(
+ json, json_rp_rows, "%pFXh",
+ &prev_rp_info->rp.rpf_addr);
+ json_rp_rows = NULL;
+ }
- json_object_array_add(json_rp_rows, json_row);
- } else {
- vty_out(vty, "%-15s ",
- inet_ntop(AF_INET,
- &rp_info->rp.rpf_addr.u
- .prefix4,
- buf, sizeof(buf)));
-
- if (rp_info->plist)
- vty_out(vty, "%-18s ", rp_info->plist);
- else
- vty_out(vty, "%-18pFX ",
- &rp_info->group);
+ if (!json_rp_rows)
+ json_rp_rows = json_object_new_array();
+
+ json_row = json_object_new_object();
+ json_object_string_addf(json_row, "rpAddress", "%pFXh",
+ &rp_info->rp.rpf_addr);
+ if (rp_info->rp.source_nexthop.interface)
+ json_object_string_add(
+ json_row, "outboundInterface",
+ rp_info->rp.source_nexthop
+ .interface->name);
+ else
+ json_object_string_add(json_row,
+ "outboundInterface",
+ "Unknown");
+ if (rp_info->i_am_rp)
+ json_object_boolean_true_add(json_row, "iAmRP");
+ else
+ json_object_boolean_false_add(json_row,
+ "iAmRP");
- if (rp_info->rp.source_nexthop.interface)
- vty_out(vty, "%-16s ",
- rp_info->rp.source_nexthop
- .interface->name);
- else
- vty_out(vty, "%-16s ", "(Unknown)");
+ if (rp_info->plist)
+ json_object_string_add(json_row, "prefixList",
+ rp_info->plist);
+ else
+ json_object_string_addf(json_row, "group",
+ "%pFX",
+ &rp_info->group);
+ json_object_string_add(json_row, "source", source);
+ json_object_string_add(json_row, "groupType",
+ group_type);
+
+ json_object_array_add(json_rp_rows, json_row);
+ } else {
+ vty_out(vty, "%-15pFXh ", &rp_info->rp.rpf_addr);
- if (rp_info->i_am_rp)
- vty_out(vty, "yes");
- else
- vty_out(vty, "no");
+ if (rp_info->plist)
+ vty_out(vty, "%-18s ", rp_info->plist);
+ else
+ vty_out(vty, "%-18pFX ", &rp_info->group);
- vty_out(vty, "%14s\n", source);
- }
- prev_rp_info = rp_info;
+ if (rp_info->rp.source_nexthop.interface)
+ vty_out(vty, "%-16s ",
+ rp_info->rp.source_nexthop
+ .interface->name);
+ else
+ vty_out(vty, "%-16s ", "(Unknown)");
+
+ if (rp_info->i_am_rp)
+ vty_out(vty, "yes");
+ else
+ vty_out(vty, "no");
+
+ vty_out(vty, "%14s", source);
+ vty_out(vty, "%6s\n", group_type);
}
+ prev_rp_info = rp_info;
}
if (uj) {
if (prev_rp_info && json_rp_rows)
- json_object_object_add(
- json,
- inet_ntop(AF_INET,
- &prev_rp_info->rp.rpf_addr.u.prefix4,
- buf, sizeof(buf)),
- json_rp_rows);
+ json_object_object_addf(json, json_rp_rows, "%pFXh",
+ &prev_rp_info->rp.rpf_addr);
vty_json(vty, json);
}
@@ -1343,17 +1278,20 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
continue;
- nht_p.family = AF_INET;
- nht_p.prefixlen = IPV4_MAX_BITLEN;
- nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+ nht_p = rp_info->rp.rpf_addr;
memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
if (!pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info,
&pnc))
continue;
for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
- if (nh_node->gate.ipv4.s_addr != INADDR_ANY)
+#if PIM_IPV == 4
+ if (!pim_addr_is_any(nh_node->gate.ipv4))
+ continue;
+#else
+ if (!pim_addr_is_any(nh_node->gate.ipv6))
continue;
+#endif
struct interface *ifp1 = if_lookup_by_index(
nh_node->ifindex, pim->vrf->vrf_id);
@@ -1366,15 +1304,11 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
#else
nh_node->gate.ipv6 = nbr->source_addr;
#endif
- if (PIM_DEBUG_PIM_NHT_RP) {
- char str[PREFIX_STRLEN];
- pim_addr_dump("<nht_addr?>", &nht_p, str,
- sizeof(str));
+ if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug(
- "%s: addr %s new nexthop addr %pPAs interface %s",
- __func__, str, &nbr->source_addr,
+ "%s: addr %pFXh new nexthop addr %pPAs interface %s",
+ __func__, &nht_p, &nbr->source_addr,
ifp1->name);
- }
}
}
}
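pim_rp_list_cmp() above now orders the RP list by prefix_cmp() on the RP address first and on the group prefix second, rather than comparing raw IPv4 words, so the same comparator also covers IPv6 prefixes. The general two-key pattern, as a standalone qsort()-style comparator (struct rp_entry and its fields are invented for the sketch; memcmp over network-order bytes stands in for prefix_cmp()):

#include <string.h>

struct rp_entry {
	unsigned char rp[16];	/* RP address bytes, network order */
	unsigned char grp[16];	/* group address bytes */
};

/* primary key: RP address; secondary key: group */
static int rp_entry_cmp(const void *a, const void *b)
{
	const struct rp_entry *r1 = a;
	const struct rp_entry *r2 = b;
	int ret;

	ret = memcmp(r1->rp, r2->rp, sizeof(r1->rp));
	if (ret)
		return ret;

	return memcmp(r1->grp, r2->grp, sizeof(r1->grp));
}

Any consistent negative/zero/positive result keeps the sorted list insertion stable, which is all the rp_list comparator needs.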
diff --git a/pimd/pim_rp.h b/pimd/pim_rp.h
index c223402ddd..04faeb5f26 100644
--- a/pimd/pim_rp.h
+++ b/pimd/pim_rp.h
@@ -47,15 +47,13 @@ void pim_rp_free(struct pim_instance *pim);
void pim_rp_list_hash_clean(void *data);
-int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
- struct prefix group, const char *plist,
- enum rp_source rp_src_flag);
-int pim_rp_del_config(struct pim_instance *pim, const char *rp,
- const char *group, const char *plist);
-int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
- struct prefix group, const char *plist,
- enum rp_source rp_src_flag);
-int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
+int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
+ const char *plist, enum rp_source rp_src_flag);
+void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
+ const char *group, const char *plist);
+int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
+ const char *plist, enum rp_source rp_src_flag);
+int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
struct prefix group, enum rp_source rp_src_flag);
void pim_rp_prefix_list_update(struct pim_instance *pim,
struct prefix_list *plist);
@@ -80,8 +78,8 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group);
#define I_am_RP(P, G) pim_rp_i_am_rp ((P), (G))
#define RP(P, G) pim_rp_g ((P), (G))
-void pim_rp_show_information(struct pim_instance *pim, struct vty *vty,
- bool uj);
+void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
+ struct vty *vty, bool uj);
void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr);
int pim_rp_list_cmp(void *v1, void *v2);
struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c
index a99f5536b7..cee542aa13 100644
--- a/pimd/pim_rpf.c
+++ b/pimd/pim_rpf.c
@@ -71,17 +71,15 @@ bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
return false;
#endif
- if (!pim_addr_cmp(nexthop->last_lookup, addr)
- && (nexthop->last_lookup_time > pim->last_route_change_time)) {
- if (PIM_DEBUG_PIM_NHT) {
- char nexthop_str[PREFIX_STRLEN];
- pim_addr_dump("<nexthop?>", &nexthop->mrib_nexthop_addr,
- nexthop_str, sizeof(nexthop_str));
+ if ((!pim_addr_cmp(nexthop->last_lookup, addr)) &&
+ (nexthop->last_lookup_time > pim->last_route_change_time)) {
+ if (PIM_DEBUG_PIM_NHT)
zlog_debug(
- "%s: Using last lookup for %pPAs at %lld, %" PRId64" addr %s",
+ "%s: Using last lookup for %pPAs at %lld, %" PRId64
+ " addr %pFX",
__func__, &addr, nexthop->last_lookup_time,
- pim->last_route_change_time, nexthop_str);
- }
+ pim->last_route_change_time,
+ &nexthop->mrib_nexthop_addr);
pim->nexthop_lookups_avoided++;
return true;
} else {
@@ -140,18 +138,13 @@ bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
}
if (found) {
- if (PIM_DEBUG_ZEBRA) {
- char nexthop_str[PREFIX_STRLEN];
- pim_addr_dump("<nexthop?>",
- &nexthop_tab[i].nexthop_addr, nexthop_str,
- sizeof(nexthop_str));
+ if (PIM_DEBUG_ZEBRA)
zlog_debug(
- "%s %s: found nexthop %s for address %pPAs: interface %s ifindex=%d metric=%d pref=%d",
- __FILE__, __func__, nexthop_str, &addr,
- ifp->name, first_ifindex,
- nexthop_tab[i].route_metric,
+ "%s %s: found nexthop %pFX for address %pPAs: interface %s ifindex=%d metric=%d pref=%d",
+ __FILE__, __func__,
+ &nexthop_tab[i].nexthop_addr, &addr, ifp->name,
+ first_ifindex, nexthop_tab[i].route_metric,
nexthop_tab[i].protocol_distance);
- }
/* update nexthop data */
nexthop->interface = ifp;
nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr;
@@ -215,7 +208,6 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
bool neigh_needed = true;
uint32_t saved_mrib_route_metric;
pim_addr rpf_addr;
- pim_addr saved_rpf_addr;
if (PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags))
return PIM_RPF_OK;
@@ -264,19 +256,14 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
/* detect change in pim_nexthop */
if (nexthop_mismatch(&rpf->source_nexthop, &saved.source_nexthop)) {
- if (PIM_DEBUG_ZEBRA) {
- char nhaddr_str[PREFIX_STRLEN];
- pim_addr_dump("<addr?>",
- &rpf->source_nexthop.mrib_nexthop_addr,
- nhaddr_str, sizeof(nhaddr_str));
- zlog_debug("%s(%s): (S,G)=%s source nexthop now is: interface=%s address=%s pref=%d metric=%d",
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s(%s): (S,G)=%s source nexthop now is: interface=%s address=%pFX pref=%d metric=%d",
__func__, caller,
up->sg_str,
rpf->source_nexthop.interface ? rpf->source_nexthop.interface->name : "<ifname?>",
- nhaddr_str,
+ &rpf->source_nexthop.mrib_nexthop_addr,
rpf->source_nexthop.mrib_metric_preference,
rpf->source_nexthop.mrib_route_metric);
- }
pim_upstream_update_join_desired(pim, up);
pim_upstream_update_could_assert(up);
@@ -300,10 +287,7 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
}
/* detect change in RPF'(S,G) */
-
- saved_rpf_addr = pim_addr_from_prefix(&saved.rpf_addr);
-
- if (pim_addr_cmp(saved_rpf_addr, rpf_addr) ||
+ if (!prefix_same(&saved.rpf_addr, &rpf->rpf_addr) ||
saved.source_nexthop.interface != rpf->source_nexthop.interface) {
pim_rpf_cost_change(pim, up, saved_mrib_route_metric);
return PIM_RPF_CHANGED;
@@ -418,7 +402,7 @@ unsigned int pim_rpf_hash_key(const void *arg)
{
const struct pim_nexthop_cache *r = arg;
-#if PIM_IPV == 4 || !defined(PIM_V6_TEMP_BREAK)
+#if PIM_IPV == 4
return jhash_1word(r->rpf.rpf_addr.u.prefix4.s_addr, 0);
#else
return jhash2(r->rpf.rpf_addr.u.prefix6.s6_addr32,
diff --git a/pimd/pim_sock.c b/pimd/pim_sock.c
index 05b0f92a4b..8619cc3f83 100644
--- a/pimd/pim_sock.c
+++ b/pimd/pim_sock.c
@@ -34,22 +34,28 @@
#include "vrf.h"
#include "sockopt.h"
#include "lib_errors.h"
+#include "network.h"
#include "pimd.h"
#include "pim_mroute.h"
+#include "pim_iface.h"
#include "pim_sock.h"
#include "pim_str.h"
-/* GLOBAL VARS */
+#if PIM_IPV == 4
+#define setsockopt_iptos setsockopt_ipv4_tos
+#define setsockopt_multicast_loop setsockopt_ipv4_multicast_loop
+#else
+#define setsockopt_iptos setsockopt_ipv6_tclass
+#define setsockopt_multicast_loop setsockopt_ipv6_multicast_loop
+#endif
int pim_socket_raw(int protocol)
{
int fd;
frr_with_privs(&pimd_privs) {
-
- fd = socket(AF_INET, SOCK_RAW, protocol);
-
+ fd = socket(PIM_AF, SOCK_RAW, protocol);
}
if (fd < 0) {
@@ -66,10 +72,16 @@ void pim_socket_ip_hdr(int fd)
const int on = 1;
frr_with_privs(&pimd_privs) {
-
+#if PIM_IPV == 4
if (setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)))
- zlog_err("%s: Could not turn on IP_HDRINCL option: %s",
- __func__, safe_strerror(errno));
+ zlog_err("%s: Could not turn on IP_HDRINCL option: %m",
+ __func__);
+#else
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_HDRINCL, &on, sizeof(on)))
+ zlog_err(
+ "%s: Could not turn on IPV6_HDRINCL option: %m",
+ __func__);
+#endif
}
}
@@ -80,248 +92,256 @@ void pim_socket_ip_hdr(int fd)
int pim_socket_bind(int fd, struct interface *ifp)
{
int ret = 0;
-#ifdef SO_BINDTODEVICE
+#ifdef SO_BINDTODEVICE
frr_with_privs(&pimd_privs) {
-
ret = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifp->name,
strlen(ifp->name));
-
}
-
#endif
return ret;
}
-int pim_socket_mcast(int protocol, struct in_addr ifaddr, struct interface *ifp,
- uint8_t loop)
+#if PIM_IPV == 4
+static inline int pim_setsockopt(int protocol, int fd, struct interface *ifp)
{
- int rcvbuf = 1024 * 1024 * 8;
-#ifdef HAVE_STRUCT_IP_MREQN_IMR_IFINDEX
- struct ip_mreqn mreq;
-#else
- struct ip_mreq mreq;
-#endif
- int fd;
-
- fd = pim_socket_raw(protocol);
- if (fd < 0) {
- zlog_warn("Could not create multicast socket: errno=%d: %s",
- errno, safe_strerror(errno));
- return PIM_SOCK_ERR_SOCKET;
- }
-
-#ifdef SO_BINDTODEVICE
- int ret;
-
- ret = pim_socket_bind(fd, ifp);
- if (ret) {
- close(fd);
- zlog_warn(
- "Could not set fd: %d for interface: %s to device",
- fd, ifp->name);
- return PIM_SOCK_ERR_BIND;
- }
-#else
-/* XXX: use IP_PKTINFO / IP_RECVIF to emulate behaviour? Or change to
- * only use 1 socket for all interfaces? */
-#endif
+ int one = 1;
+ int ttl = 1;
- /* Needed to obtain destination address from recvmsg() */
- {
#if defined(HAVE_IP_PKTINFO)
- /* Linux and Solaris IP_PKTINFO */
- int opt = 1;
- if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &opt, sizeof(opt))) {
- zlog_warn(
- "Could not set IP_PKTINFO on socket fd=%d: errno=%d: %s",
- fd, errno, safe_strerror(errno));
- }
+ /* Linux and Solaris IP_PKTINFO */
+ if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one)))
+ zlog_warn("Could not set PKTINFO on socket fd=%d: %m", fd);
#elif defined(HAVE_IP_RECVDSTADDR)
- /* BSD IP_RECVDSTADDR */
- int opt = 1;
- if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &opt,
- sizeof(opt))) {
- zlog_warn(
- "Could not set IP_RECVDSTADDR on socket fd=%d: errno=%d: %s",
- fd, errno, safe_strerror(errno));
- }
+ /* BSD IP_RECVDSTADDR */
+ if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &one, sizeof(one)))
+ zlog_warn("Could not set IP_RECVDSTADDR on socket fd=%d: %m",
+ fd);
#else
- flog_err(
- EC_LIB_DEVELOPMENT,
- "%s %s: Missing IP_PKTINFO and IP_RECVDSTADDR: unable to get dst addr from recvmsg()",
- __FILE__, __func__);
- close(fd);
- return PIM_SOCK_ERR_DSTADDR;
+ flog_err(
+ EC_LIB_DEVELOPMENT,
+ "Missing IP_PKTINFO and IP_RECVDSTADDR: unable to get dst addr from recvmsg()");
+ close(fd);
+ return PIM_SOCK_ERR_DSTADDR;
#endif
- }
-
- /* Set router alert (RFC 2113) for all IGMP messages (RFC 3376 4.
- * Message Formats)*/
+ /* Set router alert (RFC 2113) for all IGMP messages (RFC
+ * 3376 4. Message Formats) */
if (protocol == IPPROTO_IGMP) {
uint8_t ra[4];
+
ra[0] = 148;
ra[1] = 4;
ra[2] = 0;
ra[3] = 0;
if (setsockopt(fd, IPPROTO_IP, IP_OPTIONS, ra, 4)) {
zlog_warn(
- "Could not set Router Alert Option on socket fd=%d: errno=%d: %s",
- fd, errno, safe_strerror(errno));
+ "Could not set Router Alert Option on socket fd=%d: %m",
+ fd);
close(fd);
return PIM_SOCK_ERR_RA;
}
}
- {
- int reuse = 1;
- if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&reuse,
- sizeof(reuse))) {
- zlog_warn(
- "Could not set Reuse Address Option on socket fd=%d: errno=%d: %s",
- fd, errno, safe_strerror(errno));
- close(fd);
- return PIM_SOCK_ERR_REUSE;
- }
- }
-
- {
- const int MTTL = 1;
- int ttl = MTTL;
- if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, (void *)&ttl,
- sizeof(ttl))) {
- zlog_warn(
- "Could not set multicast TTL=%d on socket fd=%d: errno=%d: %s",
- MTTL, fd, errno, safe_strerror(errno));
- close(fd);
- return PIM_SOCK_ERR_TTL;
- }
+ if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl))) {
+ zlog_warn("Could not set multicast TTL=%d on socket fd=%d: %m",
+ ttl, fd);
+ close(fd);
+ return PIM_SOCK_ERR_TTL;
}
- if (setsockopt_ipv4_multicast_loop(fd, loop)) {
+ if (setsockopt_ipv4_multicast_if(fd, PIMADDR_ANY, ifp->ifindex)) {
zlog_warn(
- "Could not %s Multicast Loopback Option on socket fd=%d: errno=%d: %s",
- loop ? "enable" : "disable", fd, errno,
- safe_strerror(errno));
+ "Could not set Outgoing Interface Option on socket fd=%d: %m",
+ fd);
close(fd);
- return PIM_SOCK_ERR_LOOP;
+ return PIM_SOCK_ERR_IFACE;
}
- memset(&mreq, 0, sizeof(mreq));
-#ifdef HAVE_STRUCT_IP_MREQN_IMR_IFINDEX
- mreq.imr_ifindex = ifp->ifindex;
-#else
-/*
- * I am not sure what to do here yet for *BSD
- */
-// mreq.imr_interface = ifindex;
-#endif
+ return 0;
+}
+#else /* PIM_IPV != 4 */
+static inline int pim_setsockopt(int protocol, int fd, struct interface *ifp)
+{
+ int ttl = 1;
+ struct ipv6_mreq mreq = {};
- if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, (void *)&mreq,
+ setsockopt_ipv6_pktinfo(fd, 1);
+ setsockopt_ipv6_multicast_hops(fd, ttl);
+
+ mreq.ipv6mr_interface = ifp->ifindex;
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_IF, &mreq,
sizeof(mreq))) {
zlog_warn(
- "Could not set Outgoing Interface Option on socket fd=%d: errno=%d: %s",
- fd, errno, safe_strerror(errno));
+ "Could not set Outgoing Interface Option on socket fd=%d: %m",
+ fd);
close(fd);
return PIM_SOCK_ERR_IFACE;
}
- if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf)))
- zlog_warn("%s: Failure to set buffer size to %d", __func__,
- rcvbuf);
+ return 0;
+}
+#endif
- {
- long flags;
+int pim_socket_mcast(int protocol, pim_addr ifaddr, struct interface *ifp,
+ uint8_t loop)
+{
+ int fd;
+ int ret;
- flags = fcntl(fd, F_GETFL, 0);
- if (flags < 0) {
- zlog_warn(
- "Could not get fcntl(F_GETFL,O_NONBLOCK) on socket fd=%d: errno=%d: %s",
- fd, errno, safe_strerror(errno));
- close(fd);
- return PIM_SOCK_ERR_NONBLOCK_GETFL;
- }
+ fd = pim_socket_raw(protocol);
+ if (fd < 0) {
+ zlog_warn("Could not create multicast socket: errno=%d: %s",
+ errno, safe_strerror(errno));
+ return PIM_SOCK_ERR_SOCKET;
+ }
- if (fcntl(fd, F_SETFL, flags | O_NONBLOCK)) {
- zlog_warn(
- "Could not set fcntl(F_SETFL,O_NONBLOCK) on socket fd=%d: errno=%d: %s",
- fd, errno, safe_strerror(errno));
- close(fd);
- return PIM_SOCK_ERR_NONBLOCK_SETFL;
- }
+ /* XXX: if SO_BINDTODEVICE isn't available, use IP_PKTINFO / IP_RECVIF
+ * to emulate behaviour? Or change to only use 1 socket for all
+ * interfaces? */
+ ret = pim_socket_bind(fd, ifp);
+ if (ret) {
+ close(fd);
+ zlog_warn("Could not set fd: %d for interface: %s to device",
+ fd, ifp->name);
+ return PIM_SOCK_ERR_BIND;
}
- /* Set Tx socket DSCP byte */
- if (setsockopt_ipv4_tos(fd, IPTOS_PREC_INTERNETCONTROL)) {
- zlog_warn("can't set sockopt IP_TOS to PIM/IGMP socket %d: %s",
- fd, safe_strerror(errno));
+ set_nonblocking(fd);
+ sockopt_reuseaddr(fd);
+ setsockopt_so_recvbuf(fd, 8 * 1024 * 1024);
+
+ ret = pim_setsockopt(protocol, fd, ifp);
+ if (ret) {
+ zlog_warn("pim_setsockopt failed for interface: %s to device ",
+ ifp->name);
+ return ret;
+ }
+
+ /* leftover common sockopts */
+ if (setsockopt_multicast_loop(fd, loop)) {
+ zlog_warn(
+ "Could not %s Multicast Loopback Option on socket fd=%d: %m",
+ loop ? "enable" : "disable", fd);
+ close(fd);
+ return PIM_SOCK_ERR_LOOP;
}
+ /* Set Tx socket DSCP byte */
+ if (setsockopt_iptos(fd, IPTOS_PREC_INTERNETCONTROL))
+ zlog_warn("can't set sockopt IP[V6]_TOS to socket %d: %m", fd);
+
return fd;
}
-int pim_socket_join(int fd, struct in_addr group, struct in_addr ifaddr,
- ifindex_t ifindex)
+int pim_socket_join(int fd, pim_addr group, pim_addr ifaddr, ifindex_t ifindex,
+ struct pim_interface *pim_ifp)
{
int ret;
-#ifdef HAVE_STRUCT_IP_MREQN_IMR_IFINDEX
- struct ip_mreqn opt;
+#if PIM_IPV == 4
+ ret = setsockopt_ipv4_multicast(fd, IP_ADD_MEMBERSHIP, ifaddr,
+ group.s_addr, ifindex);
#else
- struct ip_mreq opt;
-#endif
+ struct ipv6_mreq opt;
- opt.imr_multiaddr = group;
-
-#ifdef HAVE_STRUCT_IP_MREQN_IMR_IFINDEX
- opt.imr_address = ifaddr;
- opt.imr_ifindex = ifindex;
-#else
- opt.imr_interface = ifaddr;
+ memcpy(&opt.ipv6mr_multiaddr, &group, 16);
+ opt.ipv6mr_interface = ifindex;
+ ret = setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &opt, sizeof(opt));
#endif
- ret = setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &opt, sizeof(opt));
- if (ret) {
- char group_str[INET_ADDRSTRLEN];
- char ifaddr_str[INET_ADDRSTRLEN];
- if (!inet_ntop(AF_INET, &group, group_str, sizeof(group_str)))
- snprintf(group_str, sizeof(group_str), "<group?>");
- if (!inet_ntop(AF_INET, &ifaddr, ifaddr_str,
- sizeof(ifaddr_str)))
- snprintf(ifaddr_str, sizeof(ifaddr_str), "<ifaddr?>");
+ pim_ifp->igmp_ifstat_joins_sent++;
+ if (ret) {
flog_err(
EC_LIB_SOCKET,
- "Failure socket joining fd=%d group %s on interface address %s: errno=%d: %s",
- fd, group_str, ifaddr_str, errno, safe_strerror(errno));
+ "Failure socket joining fd=%d group %pPAs on interface address %pPAs: %m",
+ fd, &group, &ifaddr);
+ pim_ifp->igmp_ifstat_joins_failed++;
return ret;
}
- if (PIM_DEBUG_TRACE) {
- char group_str[INET_ADDRSTRLEN];
- char ifaddr_str[INET_ADDRSTRLEN];
- if (!inet_ntop(AF_INET, &group, group_str, sizeof(group_str)))
- snprintf(group_str, sizeof(group_str), "<group?>");
- if (!inet_ntop(AF_INET, &ifaddr, ifaddr_str,
- sizeof(ifaddr_str)))
- snprintf(ifaddr_str, sizeof(ifaddr_str), "<ifaddr?>");
-
+ if (PIM_DEBUG_TRACE)
zlog_debug(
- "Socket fd=%d joined group %s on interface address %s",
- fd, group_str, ifaddr_str);
+ "Socket fd=%d joined group %pPAs on interface address %pPAs",
+ fd, &group, &ifaddr);
+ return ret;
+}
+
+#if PIM_IPV == 4
+static void cmsg_getdstaddr(struct msghdr *mh, struct sockaddr_storage *dst,
+ ifindex_t *ifindex)
+{
+ struct cmsghdr *cmsg;
+ struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
+
+ for (cmsg = CMSG_FIRSTHDR(mh); cmsg != NULL;
+ cmsg = CMSG_NXTHDR(mh, cmsg)) {
+#ifdef HAVE_IP_PKTINFO
+ if ((cmsg->cmsg_level == IPPROTO_IP) &&
+ (cmsg->cmsg_type == IP_PKTINFO)) {
+ struct in_pktinfo *i;
+
+ i = (struct in_pktinfo *)CMSG_DATA(cmsg);
+ if (dst4)
+ dst4->sin_addr = i->ipi_addr;
+ if (ifindex)
+ *ifindex = i->ipi_ifindex;
+
+ break;
+ }
+#endif
+
+#ifdef HAVE_IP_RECVDSTADDR
+ if ((cmsg->cmsg_level == IPPROTO_IP) &&
+ (cmsg->cmsg_type == IP_RECVDSTADDR)) {
+ struct in_addr *i = (struct in_addr *)CMSG_DATA(cmsg);
+
+ if (dst4)
+ dst4->sin_addr = *i;
+
+ break;
+ }
+#endif
+
+#if defined(HAVE_IP_RECVIF) && defined(CMSG_IFINDEX)
+ if (cmsg->cmsg_type == IP_RECVIF)
+ if (ifindex)
+ *ifindex = CMSG_IFINDEX(cmsg);
+#endif
}
+}
+#else /* PIM_IPV != 4 */
+static void cmsg_getdstaddr(struct msghdr *mh, struct sockaddr_storage *dst,
+ ifindex_t *ifindex)
+{
+ struct cmsghdr *cmsg;
+ struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
- return ret;
+ for (cmsg = CMSG_FIRSTHDR(mh); cmsg != NULL;
+ cmsg = CMSG_NXTHDR(mh, cmsg)) {
+ if ((cmsg->cmsg_level == IPPROTO_IPV6) &&
+ (cmsg->cmsg_type == IPV6_PKTINFO)) {
+ struct in6_pktinfo *i;
+
+ i = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+
+ if (dst6)
+ dst6->sin6_addr = i->ipi6_addr;
+ if (ifindex)
+ *ifindex = i->ipi6_ifindex;
+ break;
+ }
+ }
}
+#endif /* PIM_IPV != 4 */
int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len,
- struct sockaddr_in *from, socklen_t *fromlen,
- struct sockaddr_in *to, socklen_t *tolen,
+ struct sockaddr_storage *from, socklen_t *fromlen,
+ struct sockaddr_storage *to, socklen_t *tolen,
ifindex_t *ifindex)
{
struct msghdr msgh;
- struct cmsghdr *cmsg;
struct iovec iov;
char cbuf[1000];
int err;
@@ -331,19 +351,12 @@ int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len,
* Use getsockname() to get sin_port.
*/
if (to) {
- struct sockaddr_in si;
- socklen_t si_len = sizeof(si);
-
- memset(&si, 0, sizeof(si));
- to->sin_family = AF_INET;
-
- pim_socket_getsockname(fd, (struct sockaddr *)&si, &si_len);
+ socklen_t to_len = sizeof(*to);
- to->sin_port = si.sin_port;
- to->sin_addr = si.sin_addr;
+ pim_socket_getsockname(fd, (struct sockaddr *)to, &to_len);
if (tolen)
- *tolen = sizeof(si);
+ *tolen = sizeof(*to);
}
memset(&msgh, 0, sizeof(struct msghdr));
@@ -364,66 +377,11 @@ int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len,
if (fromlen)
*fromlen = msgh.msg_namelen;
- for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
- cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
-
-#ifdef HAVE_IP_PKTINFO
- if ((cmsg->cmsg_level == IPPROTO_IP)
- && (cmsg->cmsg_type == IP_PKTINFO)) {
- struct in_pktinfo *i =
- (struct in_pktinfo *)CMSG_DATA(cmsg);
- if (to)
- to->sin_addr = i->ipi_addr;
- if (tolen)
- *tolen = sizeof(struct sockaddr_in);
- if (ifindex)
- *ifindex = i->ipi_ifindex;
-
- break;
- }
-#endif
-
-#ifdef HAVE_IP_RECVDSTADDR
- if ((cmsg->cmsg_level == IPPROTO_IP)
- && (cmsg->cmsg_type == IP_RECVDSTADDR)) {
- struct in_addr *i = (struct in_addr *)CMSG_DATA(cmsg);
- if (to)
- to->sin_addr = *i;
- if (tolen)
- *tolen = sizeof(struct sockaddr_in);
-
- break;
- }
-#endif
-
-#if defined(HAVE_IP_RECVIF) && defined(CMSG_IFINDEX)
- if (cmsg->cmsg_type == IP_RECVIF)
- if (ifindex)
- *ifindex = CMSG_IFINDEX(cmsg);
-#endif
-
- } /* for (cmsg) */
+ cmsg_getdstaddr(&msgh, to, ifindex);
return err; /* len */
}
-int pim_socket_mcastloop_get(int fd)
-{
- int loop;
- socklen_t loop_len = sizeof(loop);
-
- if (getsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &loop, &loop_len)) {
- int e = errno;
- zlog_warn(
- "Could not get Multicast Loopback Option on socket fd=%d: errno=%d: %s",
- fd, errno, safe_strerror(errno));
- errno = e;
- return PIM_SOCK_ERR_LOOP;
- }
-
- return loop;
-}
-
int pim_socket_getsockname(int fd, struct sockaddr *name, socklen_t *namelen)
{
if (getsockname(fd, name, namelen)) {
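The per-family ancillary-data walk that used to live inline in pim_socket_recvfromto() is now factored into cmsg_getdstaddr(). For reference, a standalone sketch of the same IP_PKTINFO technique on Linux; the helper name is illustrative and the socket is assumed to already have IP_PKTINFO enabled (this is not FRR code):

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Read one datagram and recover its destination address and incoming
 * ifindex from IP_PKTINFO ancillary data. */
static ssize_t recv_with_dst(int fd, void *buf, size_t len,
			     struct in_addr *dst, int *ifindex)
{
	char cbuf[256];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr mh = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t rv = recvmsg(fd, &mh, 0);

	if (rv < 0)
		return rv;
	for (cmsg = CMSG_FIRSTHDR(&mh); cmsg; cmsg = CMSG_NXTHDR(&mh, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IP &&
		    cmsg->cmsg_type == IP_PKTINFO) {
			struct in_pktinfo pi;

			memcpy(&pi, CMSG_DATA(cmsg), sizeof(pi));
			*dst = pi.ipi_addr;
			*ifindex = pi.ipi_ifindex;
			break;
		}
	}
	return rv;
}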
diff --git a/pimd/pim_sock.h b/pimd/pim_sock.h
index 08b0099321..2e9c043e84 100644
--- a/pimd/pim_sock.h
+++ b/pimd/pim_sock.h
@@ -38,17 +38,15 @@
int pim_socket_bind(int fd, struct interface *ifp);
void pim_socket_ip_hdr(int fd);
int pim_socket_raw(int protocol);
-int pim_socket_mcast(int protocol, struct in_addr ifaddr, struct interface *ifp,
+int pim_socket_mcast(int protocol, pim_addr ifaddr, struct interface *ifp,
uint8_t loop);
-int pim_socket_join(int fd, struct in_addr group, struct in_addr ifaddr,
- ifindex_t ifindex);
+int pim_socket_join(int fd, pim_addr group, pim_addr ifaddr, ifindex_t ifindex,
+ struct pim_interface *pim_ifp);
int pim_socket_recvfromto(int fd, uint8_t *buf, size_t len,
- struct sockaddr_in *from, socklen_t *fromlen,
- struct sockaddr_in *to, socklen_t *tolen,
+ struct sockaddr_storage *from, socklen_t *fromlen,
+ struct sockaddr_storage *to, socklen_t *tolen,
ifindex_t *ifindex);
-int pim_socket_mcastloop_get(int fd);
-
int pim_socket_getsockname(int fd, struct sockaddr *name, socklen_t *namelen);
#endif /* PIM_SOCK_H */
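With the prototypes widened from struct sockaddr_in to struct sockaddr_storage, callers now pick the family at run time from ss_family. A small illustrative helper showing that pattern; it is an assumption for demonstration only, not part of this patch:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void print_sockaddr(const struct sockaddr_storage *ss)
{
	char buf[INET6_ADDRSTRLEN];

	if (ss->ss_family == AF_INET) {
		const struct sockaddr_in *s4 = (const void *)ss;

		inet_ntop(AF_INET, &s4->sin_addr, buf, sizeof(buf));
		printf("%s port %u\n", buf, ntohs(s4->sin_port));
	} else if (ss->ss_family == AF_INET6) {
		const struct sockaddr_in6 *s6 = (const void *)ss;

		inet_ntop(AF_INET6, &s6->sin6_addr, buf, sizeof(buf));
		printf("%s port %u\n", buf, ntohs(s6->sin6_port));
	}
}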
diff --git a/pimd/pim_ssm.c b/pimd/pim_ssm.c
index 688d38c84c..74310474d4 100644
--- a/pimd/pim_ssm.c
+++ b/pimd/pim_ssm.c
@@ -28,7 +28,7 @@
#include "pimd.h"
#include "pim_ssm.h"
-#include "pim_zebra.h"
+#include "pim_igmp.h"
static void pim_ssm_range_reevaluate(struct pim_instance *pim)
{
diff --git a/pimd/pim_ssm.h b/pimd/pim_ssm.h
index 117713b866..c6b6978218 100644
--- a/pimd/pim_ssm.h
+++ b/pimd/pim_ssm.h
@@ -34,7 +34,7 @@ struct pim_ssm {
void pim_ssm_prefix_list_update(struct pim_instance *pim,
struct prefix_list *plist);
-int pim_is_grp_ssm(struct pim_instance *pim, pim_addr group_addr);
+extern int pim_is_grp_ssm(struct pim_instance *pim, pim_addr group_addr);
int pim_ssm_range_set(struct pim_instance *pim, vrf_id_t vrf_id,
const char *plist_name);
void *pim_ssm_init(void);
diff --git a/pimd/pim_ssmpingd.c b/pimd/pim_ssmpingd.c
index 596b06cb38..afa7e37da1 100644
--- a/pimd/pim_ssmpingd.c
+++ b/pimd/pim_ssmpingd.c
@@ -30,8 +30,13 @@
#include "pim_ssmpingd.h"
#include "pim_time.h"
#include "pim_sock.h"
+#include "network.h"
+#if PIM_IPV == 4
static const char *const PIM_SSMPINGD_REPLY_GROUP = "232.43.211.234";
+#else
+static const char *const PIM_SSMPINGD_REPLY_GROUP = "ff3e::4321:1234";
+#endif
enum { PIM_SSMPINGD_REQUEST = 'Q', PIM_SSMPINGD_REPLY = 'A' };
@@ -43,7 +48,7 @@ void pim_ssmpingd_init(struct pim_instance *pim)
assert(!pim->ssmpingd_list);
- result = inet_pton(AF_INET, PIM_SSMPINGD_REPLY_GROUP,
+ result = inet_pton(PIM_AF, PIM_SSMPINGD_REPLY_GROUP,
&pim->ssmpingd_group_addr);
assert(result > 0);
@@ -56,7 +61,7 @@ void pim_ssmpingd_destroy(struct pim_instance *pim)
}
static struct ssmpingd_sock *ssmpingd_find(struct pim_instance *pim,
- struct in_addr source_addr)
+ pim_addr source_addr)
{
struct listnode *node;
struct ssmpingd_sock *ss;
@@ -65,7 +70,7 @@ static struct ssmpingd_sock *ssmpingd_find(struct pim_instance *pim,
return 0;
for (ALL_LIST_ELEMENTS_RO(pim->ssmpingd_list, node, ss))
- if (source_addr.s_addr == ss->source_addr.s_addr)
+ if (!pim_addr_cmp(source_addr, ss->source_addr))
return ss;
return 0;
@@ -76,73 +81,50 @@ static void ssmpingd_free(struct ssmpingd_sock *ss)
XFREE(MTYPE_PIM_SSMPINGD, ss);
}
-static int ssmpingd_socket(struct in_addr addr, int port, int mttl)
+#if PIM_IPV == 4
+static inline int ssmpingd_setsockopt(int fd, pim_addr addr, int mttl)
{
- struct sockaddr_in sockaddr;
- int fd;
-
- fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
- if (fd < 0) {
- flog_err_sys(EC_LIB_SOCKET,
- "%s: could not create socket: errno=%d: %s",
- __func__, errno, safe_strerror(errno));
- return -1;
+ /* Needed to obtain destination address from recvmsg() */
+#if defined(HAVE_IP_PKTINFO)
+ /* Linux and Solaris IP_PKTINFO */
+ int opt = 1;
+ if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &opt, sizeof(opt))) {
+ zlog_warn(
+ "%s: could not set IP_PKTINFO on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
}
+#elif defined(HAVE_IP_RECVDSTADDR)
+ /* BSD IP_RECVDSTADDR */
+ int opt = 1;
+ if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &opt, sizeof(opt))) {
+ zlog_warn(
+ "%s: could not set IP_RECVDSTADDR on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
+ }
+#else
+ flog_err(
+ EC_LIB_DEVELOPMENT,
+ "%s %s: missing IP_PKTINFO and IP_RECVDSTADDR: unable to get dst addr from recvmsg()",
+ __FILE__, __func__);
+ close(fd);
+ return -1;
+#endif
- sockaddr.sin_family = AF_INET;
- sockaddr.sin_addr = addr;
- sockaddr.sin_port = htons(port);
-
- if (bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr))) {
- char addr_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<addr?>", addr, addr_str, sizeof(addr_str));
+ if (setsockopt_ipv4_multicast_loop(fd, 0)) {
zlog_warn(
- "%s: bind(fd=%d,addr=%s,port=%d,len=%zu) failure: errno=%d: %s",
- __func__, fd, addr_str, port, sizeof(sockaddr), errno,
- safe_strerror(errno));
+ "%s: could not disable Multicast Loopback Option on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
close(fd);
- return -1;
+ return PIM_SOCK_ERR_LOOP;
}
- /* Needed to obtain destination address from recvmsg() */
- {
-#if defined(HAVE_IP_PKTINFO)
- /* Linux and Solaris IP_PKTINFO */
- int opt = 1;
- if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &opt, sizeof(opt))) {
- zlog_warn(
- "%s: could not set IP_PKTINFO on socket fd=%d: errno=%d: %s",
- __func__, fd, errno, safe_strerror(errno));
- }
-#elif defined(HAVE_IP_RECVDSTADDR)
- /* BSD IP_RECVDSTADDR */
- int opt = 1;
- if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &opt,
- sizeof(opt))) {
- zlog_warn(
- "%s: could not set IP_RECVDSTADDR on socket fd=%d: errno=%d: %s",
- __func__, fd, errno, safe_strerror(errno));
- }
-#else
- flog_err(
- EC_LIB_DEVELOPMENT,
- "%s %s: missing IP_PKTINFO and IP_RECVDSTADDR: unable to get dst addr from recvmsg()",
- __FILE__, __func__);
+ if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, (void *)&addr,
+ sizeof(addr))) {
+ zlog_warn(
+ "%s: could not set Outgoing Interface Option on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
close(fd);
return -1;
-#endif
- }
-
- {
- int reuse = 1;
- if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&reuse,
- sizeof(reuse))) {
- zlog_warn(
- "%s: could not set Reuse Address Option on socket fd=%d: errno=%d: %s",
- __func__, fd, errno, safe_strerror(errno));
- close(fd);
- return -1;
- }
}
if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, (void *)&mttl,
@@ -154,7 +136,15 @@ static int ssmpingd_socket(struct in_addr addr, int port, int mttl)
return -1;
}
- if (setsockopt_ipv4_multicast_loop(fd, 0)) {
+ return 0;
+}
+#else
+static inline int ssmpingd_setsockopt(int fd, pim_addr addr, int mttl)
+{
+ setsockopt_ipv6_pktinfo(fd, 1);
+ setsockopt_ipv6_multicast_hops(fd, mttl);
+
+ if (setsockopt_ipv6_multicast_loop(fd, 0)) {
zlog_warn(
"%s: could not disable Multicast Loopback Option on socket fd=%d: errno=%d: %s",
__func__, fd, errno, safe_strerror(errno));
@@ -162,7 +152,7 @@ static int ssmpingd_socket(struct in_addr addr, int port, int mttl)
return PIM_SOCK_ERR_LOOP;
}
- if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, (void *)&addr,
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_IF, (void *)&addr,
sizeof(addr))) {
zlog_warn(
"%s: could not set Outgoing Interface Option on socket fd=%d: errno=%d: %s",
@@ -170,26 +160,45 @@ static int ssmpingd_socket(struct in_addr addr, int port, int mttl)
close(fd);
return -1;
}
+ return 0;
+}
+#endif
- {
- long flags;
- flags = fcntl(fd, F_GETFL, 0);
- if (flags < 0) {
- zlog_warn(
- "%s: could not get fcntl(F_GETFL,O_NONBLOCK) on socket fd=%d: errno=%d: %s",
- __func__, fd, errno, safe_strerror(errno));
- close(fd);
- return -1;
- }
+static int ssmpingd_socket(pim_addr addr, int port, int mttl)
+{
+ struct sockaddr_storage sockaddr;
+ int fd;
+ int ret;
+ socklen_t len = sizeof(sockaddr);
- if (fcntl(fd, F_SETFL, flags | O_NONBLOCK)) {
- zlog_warn(
- "%s: could not set fcntl(F_SETFL,O_NONBLOCK) on socket fd=%d: errno=%d: %s",
- __func__, fd, errno, safe_strerror(errno));
- close(fd);
- return -1;
- }
+ fd = socket(PIM_AF, SOCK_DGRAM, IPPROTO_UDP);
+ if (fd < 0) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "%s: could not create socket: errno=%d: %s",
+ __func__, errno, safe_strerror(errno));
+ return -1;
+ }
+
+ pim_socket_getsockname(fd, (struct sockaddr *)&sockaddr, &len);
+
+ if (bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr))) {
+ zlog_warn(
+ "%s: bind(fd=%d,addr=%pSUp,port=%d,len=%zu) failure: errno=%d: %s",
+ __func__, fd, &sockaddr, port, sizeof(sockaddr), errno,
+ safe_strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ set_nonblocking(fd);
+ sockopt_reuseaddr(fd);
+
+ ret = ssmpingd_setsockopt(fd, addr, mttl);
+ if (ret) {
+ zlog_warn("ssmpingd_setsockopt failed");
+ close(fd);
+ return -1;
}
return fd;
@@ -202,12 +211,9 @@ static void ssmpingd_delete(struct ssmpingd_sock *ss)
THREAD_OFF(ss->t_sock_read);
if (close(ss->sock_fd)) {
- char source_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", ss->source_addr, source_str,
- sizeof(source_str));
zlog_warn(
- "%s: failure closing ssmpingd sock_fd=%d for source %s: errno=%d: %s",
- __func__, ss->sock_fd, source_str, errno,
+ "%s: failure closing ssmpingd sock_fd=%d for source %pPA: errno=%d: %s",
+ __func__, ss->sock_fd, &ss->source_addr, errno,
safe_strerror(errno));
/* warning only */
}
@@ -217,7 +223,7 @@ static void ssmpingd_delete(struct ssmpingd_sock *ss)
}
static void ssmpingd_sendto(struct ssmpingd_sock *ss, const uint8_t *buf,
- int len, struct sockaddr_in to)
+ int len, struct sockaddr_storage to)
{
socklen_t tolen = sizeof(to);
int sent;
@@ -225,18 +231,15 @@ static void ssmpingd_sendto(struct ssmpingd_sock *ss, const uint8_t *buf,
sent = sendto(ss->sock_fd, buf, len, MSG_DONTWAIT,
(struct sockaddr *)&to, tolen);
if (sent != len) {
- char to_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<to?>", to.sin_addr, to_str, sizeof(to_str));
if (sent < 0) {
zlog_warn(
- "%s: sendto() failure to %s,%d: fd=%d len=%d: errno=%d: %s",
- __func__, to_str, ntohs(to.sin_port),
- ss->sock_fd, len, errno, safe_strerror(errno));
+ "%s: sendto() failure to %pSUp,fd=%d len=%d: errno=%d: %s",
+ __func__, &to, ss->sock_fd, len, errno,
+ safe_strerror(errno));
} else {
zlog_warn(
- "%s: sendto() partial to %s,%d: fd=%d len=%d: sent=%d",
- __func__, to_str, ntohs(to.sin_port),
- ss->sock_fd, len, sent);
+ "%s: sendto() partial to %pSUp, fd=%d len=%d: sent=%d",
+ __func__, &to, ss->sock_fd, len, sent);
}
}
}
@@ -244,8 +247,8 @@ static void ssmpingd_sendto(struct ssmpingd_sock *ss, const uint8_t *buf,
static int ssmpingd_read_msg(struct ssmpingd_sock *ss)
{
struct interface *ifp;
- struct sockaddr_in from;
- struct sockaddr_in to;
+ struct sockaddr_storage from;
+ struct sockaddr_storage to;
socklen_t fromlen = sizeof(from);
socklen_t tolen = sizeof(to);
ifindex_t ifindex = -1;
@@ -256,13 +259,11 @@ static int ssmpingd_read_msg(struct ssmpingd_sock *ss)
len = pim_socket_recvfromto(ss->sock_fd, buf, sizeof(buf), &from,
&fromlen, &to, &tolen, &ifindex);
+
if (len < 0) {
- char source_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", ss->source_addr, source_str,
- sizeof(source_str));
zlog_warn(
- "%s: failure receiving ssmping for source %s on fd=%d: errno=%d: %s",
- __func__, source_str, ss->sock_fd, errno,
+ "%s: failure receiving ssmping for source %pPA on fd=%d: errno=%d: %s",
+ __func__, &ss->source_addr, ss->sock_fd, errno,
safe_strerror(errno));
return -1;
}
@@ -270,37 +271,19 @@ static int ssmpingd_read_msg(struct ssmpingd_sock *ss)
ifp = if_lookup_by_index(ifindex, ss->pim->vrf->vrf_id);
if (buf[0] != PIM_SSMPINGD_REQUEST) {
- char source_str[INET_ADDRSTRLEN];
- char from_str[INET_ADDRSTRLEN];
- char to_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", ss->source_addr, source_str,
- sizeof(source_str));
- pim_inet4_dump("<from?>", from.sin_addr, from_str,
- sizeof(from_str));
- pim_inet4_dump("<to?>", to.sin_addr, to_str, sizeof(to_str));
zlog_warn(
- "%s: bad ssmping type=%d from %s,%d to %s,%d on interface %s ifindex=%d fd=%d src=%s",
- __func__, buf[0], from_str, ntohs(from.sin_port),
- to_str, ntohs(to.sin_port),
+ "%s: bad ssmping type=%d from %pSUp to %pSUp on interface %s ifindex=%d fd=%d src=%pPA",
+ __func__, buf[0], &from, &to,
ifp ? ifp->name : "<iface?>", ifindex, ss->sock_fd,
- source_str);
+ &ss->source_addr);
return 0;
}
if (PIM_DEBUG_SSMPINGD) {
- char source_str[INET_ADDRSTRLEN];
- char from_str[INET_ADDRSTRLEN];
- char to_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", ss->source_addr, source_str,
- sizeof(source_str));
- pim_inet4_dump("<from?>", from.sin_addr, from_str,
- sizeof(from_str));
- pim_inet4_dump("<to?>", to.sin_addr, to_str, sizeof(to_str));
zlog_debug(
- "%s: recv ssmping from %s,%d to %s,%d on interface %s ifindex=%d fd=%d src=%s",
- __func__, from_str, ntohs(from.sin_port), to_str,
- ntohs(to.sin_port), ifp ? ifp->name : "<iface?>",
- ifindex, ss->sock_fd, source_str);
+ "%s: recv ssmping from %pSUp, to %pSUp, on interface %s ifindex=%d fd=%d src=%pPA",
+ __func__, &from, &to, ifp ? ifp->name : "<iface?>",
+ ifindex, ss->sock_fd, &ss->source_addr);
}
buf[0] = PIM_SSMPINGD_REPLY;
@@ -309,7 +292,7 @@ static int ssmpingd_read_msg(struct ssmpingd_sock *ss)
ssmpingd_sendto(ss, buf, len, from);
/* multicast reply */
- from.sin_addr = ss->pim->ssmpingd_group_addr;
+ memcpy(&from, &ss->pim->ssmpingd_group_addr, sizeof(pim_addr));
ssmpingd_sendto(ss, buf, len, from);
return 0;
@@ -334,7 +317,7 @@ static void ssmpingd_read_on(struct ssmpingd_sock *ss)
}
static struct ssmpingd_sock *ssmpingd_new(struct pim_instance *pim,
- struct in_addr source_addr)
+ pim_addr source_addr)
{
struct ssmpingd_sock *ss;
int sock_fd;
@@ -347,11 +330,8 @@ static struct ssmpingd_sock *ssmpingd_new(struct pim_instance *pim,
sock_fd =
ssmpingd_socket(source_addr, /* port: */ 4321, /* mTTL: */ 64);
if (sock_fd < 0) {
- char source_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", source_addr, source_str,
- sizeof(source_str));
- zlog_warn("%s: ssmpingd_socket() failure for source %s",
- __func__, source_str);
+ zlog_warn("%s: ssmpingd_socket() failure for source %pPA",
+ __func__, &source_addr);
return 0;
}
@@ -371,7 +351,7 @@ static struct ssmpingd_sock *ssmpingd_new(struct pim_instance *pim,
return ss;
}
-int pim_ssmpingd_start(struct pim_instance *pim, struct in_addr source_addr)
+int pim_ssmpingd_start(struct pim_instance *pim, pim_addr source_addr)
{
struct ssmpingd_sock *ss;
@@ -382,47 +362,33 @@ int pim_ssmpingd_start(struct pim_instance *pim, struct in_addr source_addr)
}
{
- char source_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", source_addr, source_str,
- sizeof(source_str));
- zlog_info("%s: starting ssmpingd for source %s", __func__,
- source_str);
+ zlog_info("%s: starting ssmpingd for source %pPAs", __func__,
+ &source_addr);
}
ss = ssmpingd_new(pim, source_addr);
if (!ss) {
- char source_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", source_addr, source_str,
- sizeof(source_str));
- zlog_warn("%s: ssmpingd_new() failure for source %s", __func__,
- source_str);
+ zlog_warn("%s: ssmpingd_new() failure for source %pPAs",
+ __func__, &source_addr);
return -1;
}
return 0;
}
-int pim_ssmpingd_stop(struct pim_instance *pim, struct in_addr source_addr)
+int pim_ssmpingd_stop(struct pim_instance *pim, pim_addr source_addr)
{
struct ssmpingd_sock *ss;
ss = ssmpingd_find(pim, source_addr);
if (!ss) {
- char source_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", source_addr, source_str,
- sizeof(source_str));
- zlog_warn("%s: could not find ssmpingd for source %s", __func__,
- source_str);
+ zlog_warn("%s: could not find ssmpingd for source %pPAs",
+ __func__, &source_addr);
return -1;
}
- {
- char source_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", source_addr, source_str,
- sizeof(source_str));
- zlog_info("%s: stopping ssmpingd for source %s", __func__,
- source_str);
- }
+ zlog_info("%s: stopping ssmpingd for source %pPAs", __func__,
+ &source_addr);
ssmpingd_delete(ss);
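Both here and in pim_sock.c the open-coded fcntl(F_GETFL)/fcntl(F_SETFL, O_NONBLOCK) and SO_REUSEADDR blocks are replaced by the library helpers set_nonblocking() and sockopt_reuseaddr(). Roughly equivalent standalone versions, written out only to show what boilerplate the patch drops (names here are illustrative, not FRR's):

#include <fcntl.h>
#include <sys/socket.h>

static int my_set_nonblocking(int fd)
{
	int flags = fcntl(fd, F_GETFL, 0);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

static int my_sockopt_reuseaddr(int fd)
{
	int on = 1;

	return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
}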
diff --git a/pimd/pim_ssmpingd.h b/pimd/pim_ssmpingd.h
index fafdd7ade1..c4376bd0e4 100644
--- a/pimd/pim_ssmpingd.h
+++ b/pimd/pim_ssmpingd.h
@@ -31,14 +31,14 @@ struct ssmpingd_sock {
int sock_fd; /* socket */
struct thread *t_sock_read; /* thread for reading socket */
- struct in_addr source_addr; /* source address */
+ pim_addr source_addr; /* source address */
int64_t creation; /* timestamp of socket creation */
int64_t requests; /* counter */
};
void pim_ssmpingd_init(struct pim_instance *pim);
void pim_ssmpingd_destroy(struct pim_instance *pim);
-int pim_ssmpingd_start(struct pim_instance *pim, struct in_addr source_addr);
-int pim_ssmpingd_stop(struct pim_instance *pim, struct in_addr source_addr);
+int pim_ssmpingd_start(struct pim_instance *pim, pim_addr source_addr);
+int pim_ssmpingd_stop(struct pim_instance *pim, pim_addr source_addr);
#endif /* PIM_SSMPINGD_H */
diff --git a/pimd/pim_tib.c b/pimd/pim_tib.c
new file mode 100644
index 0000000000..838f11211e
--- /dev/null
+++ b/pimd/pim_tib.c
@@ -0,0 +1,178 @@
+/*
+ * TIB (Tree Information Base) - just PIM <> IGMP/MLD glue for now
+ * Copyright (C) 2022 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "pim_tib.h"
+
+#include "pimd.h"
+#include "pim_iface.h"
+#include "pim_upstream.h"
+#include "pim_oil.h"
+#include "pim_nht.h"
+
+static struct channel_oil *
+tib_sg_oil_setup(struct pim_instance *pim, pim_sgaddr sg, struct interface *oif)
+{
+ struct pim_interface *pim_oif = oif->info;
+ int input_iface_vif_index = 0;
+ pim_addr vif_source;
+ struct prefix src, grp;
+ struct pim_nexthop nexthop;
+ struct pim_upstream *up = NULL;
+
+ if (!pim_rp_set_upstream_addr(pim, &vif_source, sg.src, sg.grp)) {
+ /* no PIM RP - create a dummy channel oil */
+ return pim_channel_oil_add(pim, &sg, __func__);
+ }
+
+ pim_addr_to_prefix(&src, vif_source); // RP or Src addr
+ pim_addr_to_prefix(&grp, sg.grp);
+
+ up = pim_upstream_find(pim, &sg);
+ if (up) {
+ memcpy(&nexthop, &up->rpf.source_nexthop,
+ sizeof(struct pim_nexthop));
+ pim_ecmp_nexthop_lookup(pim, &nexthop, &src, &grp, 0);
+ if (nexthop.interface)
+ input_iface_vif_index = pim_if_find_vifindex_by_ifindex(
+ pim, nexthop.interface->ifindex);
+ } else
+ input_iface_vif_index =
+ pim_ecmp_fib_lookup_if_vif_index(pim, &src, &grp);
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: NHT %pSG vif_source %pPAs vif_index:%d",
+ __func__, &sg, &vif_source, input_iface_vif_index);
+
+ if (input_iface_vif_index < 1) {
+ if (PIM_DEBUG_IGMP_TRACE)
+ zlog_debug(
+ "%s %s: could not find input interface for %pSG",
+ __FILE__, __func__, &sg);
+
+ return pim_channel_oil_add(pim, &sg, __func__);
+ }
+
+ /*
+ * Protect IGMP against adding looped MFC entries created by both
+ * source and receiver attached to the same interface. See TODO T22.
+ * Block only when the intf is non-DR; the DR must create the upstream.
+ */
+ if ((input_iface_vif_index == pim_oif->mroute_vif_index) &&
+ !(PIM_I_am_DR(pim_oif))) {
+ /* ignore request for looped MFC entry */
+ if (PIM_DEBUG_IGMP_TRACE)
+ zlog_debug(
+ "%s: ignoring request for looped MFC entry (S,G)=%pSG: oif=%s vif_index=%d",
+ __func__, &sg, oif->name,
+ input_iface_vif_index);
+
+ return NULL;
+ }
+
+ return pim_channel_oil_add(pim, &sg, __func__);
+}
+
+bool tib_sg_gm_join(struct pim_instance *pim, pim_sgaddr sg,
+ struct interface *oif, struct channel_oil **oilp)
+{
+ struct pim_interface *pim_oif = oif->info;
+
+ if (!pim_oif) {
+ if (PIM_DEBUG_IGMP_TRACE)
+ zlog_debug("%s: multicast not enabled on oif=%s?",
+ __func__, oif->name);
+ return false;
+ }
+
+ if (!*oilp)
+ *oilp = tib_sg_oil_setup(pim, sg, oif);
+ if (!*oilp)
+ return false;
+
+ if (PIM_I_am_DR(pim_oif) || PIM_I_am_DualActive(pim_oif)) {
+ int result;
+
+ result = pim_channel_add_oif(*oilp, oif,
+ PIM_OIF_FLAG_PROTO_IGMP, __func__);
+ if (result) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_warn("%s: add_oif() failed with return=%d",
+ __func__, result);
+ return false;
+ }
+ } else {
+ if (PIM_DEBUG_IGMP_TRACE)
+ zlog_debug(
+ "%s: %pSG was received on %s interface but we are not DR for that interface",
+ __func__, &sg, oif->name);
+
+ return false;
+ }
+ /*
+ Feed IGMPv3-gathered local membership information into PIM
+ per-interface (S,G) state.
+ */
+ if (!pim_ifchannel_local_membership_add(oif, &sg, false /*is_vxlan*/)) {
+ if (PIM_DEBUG_MROUTE)
+ zlog_warn(
+ "%s: Failure to add local membership for %pSG",
+ __func__, &sg);
+
+ pim_channel_del_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_IGMP,
+ __func__);
+ return false;
+ }
+
+ return true;
+}
+
+void tib_sg_gm_prune(struct pim_instance *pim, pim_sgaddr sg,
+ struct interface *oif, struct channel_oil **oilp)
+{
+ int result;
+
+ /*
+ It appears that in certain circumstances
+ igmp_source_forward_stop is called when IGMP forwarding
+ was not enabled in oif_flags for this outgoing interface,
+ possibly because of multiple calls. When that happens, we
+ enter the if statement below and this function returns early,
+ which in turn triggers the calling function to assert.
+ Making the call to pim_channel_del_oif and ignoring the return code
+ fixes the issue without ill effect, similar to
+ pim_forward_stop below.
+ */
+ result = pim_channel_del_oif(*oilp, oif, PIM_OIF_FLAG_PROTO_IGMP,
+ __func__);
+ if (result) {
+ if (PIM_DEBUG_IGMP_TRACE)
+ zlog_debug(
+ "%s: pim_channel_del_oif() failed with return=%d",
+ __func__, result);
+ return;
+ }
+
+ /*
+ Feed IGMPv3-gathered local membership information into PIM
+ per-interface (S,G) state.
+ */
+ pim_ifchannel_local_membership_del(oif, &sg);
+}
diff --git a/pimd/pim_tib.h b/pimd/pim_tib.h
new file mode 100644
index 0000000000..b320f4cde0
--- /dev/null
+++ b/pimd/pim_tib.h
@@ -0,0 +1,33 @@
+/*
+ * TIB (Tree Information Base) - just PIM <> IGMP/MLD glue for now
+ * Copyright (C) 2022 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _FRR_PIM_GLUE_H
+#define _FRR_PIM_GLUE_H
+
+#include "pim_addr.h"
+
+struct pim_instance;
+struct channel_oil;
+
+extern bool tib_sg_gm_join(struct pim_instance *pim, pim_sgaddr sg,
+ struct interface *oif, struct channel_oil **oilp);
+extern void tib_sg_gm_prune(struct pim_instance *pim, pim_sgaddr sg,
+ struct interface *oif, struct channel_oil **oilp);
+
+#endif /* _FRR_PIM_GLUE_H */
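The two entry points declared above are the whole surface IGMP/MLD code needs to feed local membership into PIM. A hypothetical caller, shown only to illustrate the intended usage (the wrapper function is not part of this patch; the channel_oil pointer is cached per (S,G) membership so repeated reports reuse it):

/* Hypothetical report handler using the TIB glue. */
static void membership_report(struct pim_instance *pim, struct interface *oif,
			      pim_sgaddr sg, struct channel_oil **oilp,
			      bool join)
{
	if (join) {
		if (!tib_sg_gm_join(pim, sg, oif, oilp))
			zlog_warn("join for %pSG on %s not installed", &sg,
				  oif->name);
	} else {
		tib_sg_gm_prune(pim, sg, oif, oilp);
	}
}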
diff --git a/pimd/pim_tlv.c b/pimd/pim_tlv.c
index 86403dd54a..028514659b 100644
--- a/pimd/pim_tlv.c
+++ b/pimd/pim_tlv.c
@@ -127,11 +127,7 @@ int pim_encode_addr_ucast(uint8_t *buf, pim_addr addr)
{
uint8_t *start = buf;
-#if PIM_IPV == 4
- *buf++ = PIM_MSG_ADDRESS_FAMILY_IPV4;
-#else
- *buf++ = PIM_MSG_ADDRESS_FAMILY_IPV6;
-#endif
+ *buf++ = PIM_MSG_ADDRESS_FAMILY;
*buf++ = 0;
memcpy(buf, &addr, sizeof(addr));
buf += sizeof(addr);
@@ -624,16 +620,15 @@ int pim_parse_addr_source(pim_sgaddr *sg, uint8_t *flags, const uint8_t *buf,
}
switch (family) {
- case PIM_MSG_ADDRESS_FAMILY_IPV4:
- if ((addr + sizeof(struct in_addr)) > pastend) {
+ case PIM_MSG_ADDRESS_FAMILY:
+ if ((addr + sizeof(sg->src)) > pastend) {
zlog_warn(
- "%s: IPv4 source address overflow: left=%td needed=%zu",
- __func__, pastend - addr,
- sizeof(struct in_addr));
+ "%s: IP source address overflow: left=%td needed=%zu",
+ __func__, pastend - addr, sizeof(sg->src));
return -3;
}
- memcpy(&sg->src, addr, sizeof(struct in_addr));
+ memcpy(&sg->src, addr, sizeof(sg->src));
/*
RFC 4601: 4.9.1 Encoded Source and Group Address Formats
@@ -642,27 +637,24 @@ int pim_parse_addr_source(pim_sgaddr *sg, uint8_t *flags, const uint8_t *buf,
The mask length MUST be equal to the mask length in bits for
the given Address Family and Encoding Type (32 for IPv4
- native
- and 128 for IPv6 native). A router SHOULD ignore any
- messages
- received with any other mask length.
+ native and 128 for IPv6 native). A router SHOULD ignore any
+ messages received with any other mask length.
*/
- if (mask_len != IPV4_MAX_BITLEN) {
- zlog_warn("%s: IPv4 bad source address mask: %d",
+ if (mask_len != PIM_MAX_BITLEN) {
+ zlog_warn("%s: IP bad source address mask: %d",
__func__, mask_len);
return -4;
}
- addr += sizeof(struct in_addr);
+ addr += sizeof(sg->src);
break;
- default: {
+ default:
zlog_warn(
"%s: unknown source address encoding family=%d: %02x%02x%02x%02x",
__func__, family, buf[0], buf[1], buf[2], buf[3]);
return -5;
}
- }
return addr - buf;
}
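pim_encode_addr_ucast() now emits the same RFC 4601 encoded-unicast layout for either family: one address-family octet (PIM_MSG_ADDRESS_FAMILY), one encoding-type octet (0, native), then the raw address. A matching decode of that wire layout, written as a sketch against the surrounding pimd build (pim_addr, PIM_MSG_ADDRESS_FAMILY); FRR has its own parsers in pim_tlv.c, this is only an illustration:

/* Decode the encoded-unicast layout written above: family octet,
 * encoding-type octet (0 = native), then the raw address.  Returns the
 * number of bytes consumed, or a negative value on error. */
static int decode_addr_ucast_sketch(pim_addr *out, const uint8_t *buf,
				    const uint8_t *pastend)
{
	if ((size_t)(pastend - buf) < 2 + sizeof(*out))
		return -1;		/* truncated */
	if (buf[0] != PIM_MSG_ADDRESS_FAMILY || buf[1] != 0)
		return -2;		/* wrong family or encoding type */
	memcpy(out, buf + 2, sizeof(*out));
	return 2 + sizeof(*out);
}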
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
index 24833f5a63..571117ac0a 100644
--- a/pimd/pim_upstream.c
+++ b/pimd/pim_upstream.c
@@ -178,6 +178,14 @@ static void upstream_channel_oil_detach(struct pim_upstream *up)
}
+static void pim_upstream_timers_stop(struct pim_upstream *up)
+{
+ THREAD_OFF(up->t_ka_timer);
+ THREAD_OFF(up->t_rs_timer);
+ THREAD_OFF(up->t_msdp_reg_timer);
+ THREAD_OFF(up->t_join_timer);
+}
+
struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
struct pim_upstream *up, const char *name)
{
@@ -207,9 +215,7 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
if (pim_up_mlag_is_local(up))
pim_mlag_up_local_del(pim, up);
- THREAD_OFF(up->t_ka_timer);
- THREAD_OFF(up->t_rs_timer);
- THREAD_OFF(up->t_msdp_reg_timer);
+ pim_upstream_timers_stop(up);
if (up->join_state == PIM_UPSTREAM_JOINED) {
pim_jp_agg_single_upstream_send(&up->rpf, up, 0);
@@ -1945,6 +1951,7 @@ void pim_upstream_terminate(struct pim_instance *pim)
while ((up = rb_pim_upstream_first(&pim->upstream_head))) {
pim_upstream_del(pim, up, __func__);
+ pim_upstream_timers_stop(up);
}
rb_pim_upstream_fini(&pim->upstream_head);
diff --git a/pimd/pim_util.c b/pimd/pim_util.c
index 8232d7205b..4b67dbf1b1 100644
--- a/pimd/pim_util.c
+++ b/pimd/pim_util.c
@@ -152,3 +152,17 @@ bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp)
pl = prefix_list_lookup(PIM_AFI, pim_ifp->boundary_oil_plist);
return pl ? prefix_list_apply(pl, &grp_pfx) == PREFIX_DENY : false;
}
+
+
+/* This function returns the prefix covering all multicast groups */
+int pim_get_all_mcast_group(struct prefix *prefix)
+{
+#if PIM_IPV == 4
+ if (!str2prefix("224.0.0.0/4", prefix))
+ return 0;
+#else
+ if (!str2prefix("FF00::0/8", prefix))
+ return 0;
+#endif
+ return 1;
+}
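pim_get_all_mcast_group() hands callers the full multicast range for whichever family the daemon was built for (224.0.0.0/4 or ff00::/8). A trivial usage sketch from a hypothetical caller inside pimd (not part of this patch):

static void log_mcast_range(void)
{
	struct prefix all_mcast;

	if (!pim_get_all_mcast_group(&all_mcast)) {
		zlog_warn("%s: could not build all-multicast prefix",
			  __func__);
		return;
	}
	zlog_debug("%s: multicast range is %pFX", __func__, &all_mcast);
}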
diff --git a/pimd/pim_util.h b/pimd/pim_util.h
index b9c227996e..a4362bef90 100644
--- a/pimd/pim_util.h
+++ b/pimd/pim_util.h
@@ -36,4 +36,5 @@ void pim_pkt_dump(const char *label, const uint8_t *buf, int size);
int pim_is_group_224_0_0_0_24(struct in_addr group_addr);
int pim_is_group_224_4(struct in_addr group_addr);
bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp);
+int pim_get_all_mcast_group(struct prefix *prefix);
#endif /* PIM_UTIL_H */
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
index 6de3a04b66..a0dea63b79 100644
--- a/pimd/pim_vty.c
+++ b/pimd/pim_vty.c
@@ -166,6 +166,11 @@ int pim_debug_config_write(struct vty *vty)
++writes;
}
+ if (PIM_DEBUG_PIM_NHT_DETAIL) {
+ vty_out(vty, "debug pim nht detail\n");
+ ++writes;
+ }
+
return writes;
}
@@ -259,10 +264,8 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)
struct ssmpingd_sock *ss;
++writes;
for (ALL_LIST_ELEMENTS_RO(pim->ssmpingd_list, node, ss)) {
- char source_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<src?>", ss->source_addr, source_str,
- sizeof(source_str));
- vty_out(vty, "%sip ssmpingd %s\n", spaces, source_str);
+ vty_out(vty, "%sip ssmpingd %pPA\n", spaces,
+ &ss->source_addr);
++writes;
}
}
diff --git a/pimd/pim_zebra.c b/pimd/pim_zebra.c
index 0acd3c0694..7f463715a5 100644
--- a/pimd/pim_zebra.c
+++ b/pimd/pim_zebra.c
@@ -55,7 +55,6 @@ struct zclient *zclient;
/* Router-id update message from zebra. */
-__attribute__((unused))
static int pim_router_id_update_zebra(ZAPI_CALLBACK_ARGS)
{
struct prefix router_id;
@@ -65,7 +64,6 @@ static int pim_router_id_update_zebra(ZAPI_CALLBACK_ARGS)
return 0;
}
-__attribute__((unused))
static int pim_zebra_interface_vrf_update(ZAPI_CALLBACK_ARGS)
{
struct interface *ifp;
@@ -158,6 +156,10 @@ static int pim_zebra_if_address_add(ZAPI_CALLBACK_ARGS)
SET_FLAG(c->flags, ZEBRA_IFA_SECONDARY);
}
}
+#else /* PIM_IPV != 4 */
+ if (p->family != PIM_AF)
+ return 0;
+#endif
pim_if_addr_add(c);
if (pim_ifp) {
@@ -178,10 +180,6 @@ static int pim_zebra_if_address_add(ZAPI_CALLBACK_ARGS)
pim_if_addr_add_all(ifp);
}
}
-#else /* PIM_IPV != 4 */
- /* unused - for now */
- (void)pim_ifp;
-#endif
return 0;
}
@@ -220,8 +218,7 @@ static int pim_zebra_if_address_del(ZAPI_CALLBACK_ARGS)
#endif
}
-#if PIM_IPV == 4
- if (p->family == AF_INET) {
+ if (p->family == PIM_AF) {
struct pim_instance *pim;
pim = vrf->info;
@@ -229,7 +226,6 @@ static int pim_zebra_if_address_del(ZAPI_CALLBACK_ARGS)
pim_rp_setup(pim);
pim_i_am_rp_re_evaluate(pim);
}
-#endif
connected_free(&c);
return 0;
@@ -454,11 +450,12 @@ static void pim_zebra_capabilities(struct zclient_capabilities *cap)
static zclient_handler *const pim_handlers[] = {
[ZEBRA_INTERFACE_ADDRESS_ADD] = pim_zebra_if_address_add,
[ZEBRA_INTERFACE_ADDRESS_DELETE] = pim_zebra_if_address_del,
-#if PIM_IPV == 4
+
+ [ZEBRA_NEXTHOP_UPDATE] = pim_parse_nexthop_update,
[ZEBRA_ROUTER_ID_UPDATE] = pim_router_id_update_zebra,
[ZEBRA_INTERFACE_VRF_UPDATE] = pim_zebra_interface_vrf_update,
- [ZEBRA_NEXTHOP_UPDATE] = pim_parse_nexthop_update,
+#if PIM_IPV == 4
[ZEBRA_VXLAN_SG_ADD] = pim_zebra_vxlan_sg_proc,
[ZEBRA_VXLAN_SG_DEL] = pim_zebra_vxlan_sg_proc,
@@ -485,342 +482,13 @@ void pim_zebra_init(void)
zclient_lookup_new();
}
-#if PIM_IPV == 4
-void igmp_anysource_forward_start(struct pim_instance *pim,
- struct gm_group *group)
-{
- struct gm_source *source;
- struct in_addr src_addr = {.s_addr = 0};
- /* Any source (*,G) is forwarded only if mode is EXCLUDE {empty} */
- assert(group->group_filtermode_isexcl);
- assert(listcount(group->group_source_list) < 1);
-
- source = igmp_get_source_by_addr(group, src_addr, NULL);
- if (!source) {
- zlog_warn("%s: Failure to create * source", __func__);
- return;
- }
-
- igmp_source_forward_start(pim, source);
-}
-
-void igmp_anysource_forward_stop(struct gm_group *group)
-{
- struct gm_source *source;
- struct in_addr star = {.s_addr = 0};
-
- source = igmp_find_source_by_addr(group, star);
- if (source)
- igmp_source_forward_stop(source);
-}
-
-static void igmp_source_forward_reevaluate_one(struct pim_instance *pim,
- struct gm_source *source)
-{
- pim_sgaddr sg;
- struct gm_group *group = source->source_group;
- struct pim_ifchannel *ch;
-
- if ((source->source_addr.s_addr != INADDR_ANY)
- || !IGMP_SOURCE_TEST_FORWARDING(source->source_flags))
- return;
-
- memset(&sg, 0, sizeof(sg));
- sg.src = source->source_addr;
- sg.grp = group->group_addr;
-
- ch = pim_ifchannel_find(group->interface, &sg);
- if (pim_is_grp_ssm(pim, group->group_addr)) {
- /* If SSM group withdraw local membership */
- if (ch
- && (ch->local_ifmembership == PIM_IFMEMBERSHIP_INCLUDE)) {
- if (PIM_DEBUG_PIM_EVENTS)
- zlog_debug("local membership del for %pSG as G is now SSM",
- &sg);
- pim_ifchannel_local_membership_del(group->interface,
- &sg);
- }
- } else {
- /* If ASM group add local membership */
- if (!ch
- || (ch->local_ifmembership == PIM_IFMEMBERSHIP_NOINFO)) {
- if (PIM_DEBUG_PIM_EVENTS)
- zlog_debug("local membership add for %pSG as G is now ASM",
- &sg);
- pim_ifchannel_local_membership_add(
- group->interface, &sg, false /*is_vxlan*/);
- }
- }
-}
-
-void igmp_source_forward_reevaluate_all(struct pim_instance *pim)
-{
- struct interface *ifp;
-
- FOR_ALL_INTERFACES (pim->vrf, ifp) {
- struct pim_interface *pim_ifp = ifp->info;
- struct listnode *grpnode;
- struct gm_group *grp;
- struct pim_ifchannel *ch, *ch_temp;
-
- if (!pim_ifp)
- continue;
-
- /* scan igmp groups */
- for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
- grp)) {
- struct listnode *srcnode;
- struct gm_source *src;
-
- /* scan group sources */
- for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
- srcnode, src)) {
- igmp_source_forward_reevaluate_one(pim, src);
- } /* scan group sources */
- } /* scan igmp groups */
-
- RB_FOREACH_SAFE (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb,
- ch_temp) {
- if (pim_is_grp_ssm(pim, ch->sg.grp)) {
- if (pim_addr_is_any(ch->sg.src))
- pim_ifchannel_delete(ch);
- }
- }
- } /* scan interfaces */
-}
-
-void igmp_source_forward_start(struct pim_instance *pim,
- struct gm_source *source)
-{
- struct pim_interface *pim_oif;
- struct gm_group *group;
- pim_sgaddr sg;
- int result;
- int input_iface_vif_index = 0;
-
- memset(&sg, 0, sizeof(sg));
- sg.src = source->source_addr;
- sg.grp = source->source_group->group_addr;
-
- if (PIM_DEBUG_IGMP_TRACE) {
- zlog_debug("%s: (S,G)=%pSG oif=%s fwd=%d", __func__, &sg,
- source->source_group->interface->name,
- IGMP_SOURCE_TEST_FORWARDING(source->source_flags));
- }
-
- /* Prevent IGMP interface from installing multicast route multiple
- times */
- if (IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) {
- return;
- }
-
- group = source->source_group;
- pim_oif = group->interface->info;
- if (!pim_oif) {
- if (PIM_DEBUG_IGMP_TRACE) {
- zlog_debug("%s: multicast not enabled on oif=%s ?",
- __func__,
- source->source_group->interface->name);
- }
- return;
- }
-
- if (!source->source_channel_oil) {
- pim_addr vif_source;
- struct prefix src, grp;
- struct pim_nexthop nexthop;
- struct pim_upstream *up = NULL;
-
- if (!pim_rp_set_upstream_addr(pim, &vif_source,
- source->source_addr, sg.grp)) {
- /*Create a dummy channel oil */
- source->source_channel_oil =
- pim_channel_oil_add(pim, &sg, __func__);
- }
-
- else {
- pim_addr_to_prefix(&src, vif_source); // RP or Src addr
- pim_addr_to_prefix(&grp, sg.grp);
-
- up = pim_upstream_find(pim, &sg);
- if (up) {
- memcpy(&nexthop, &up->rpf.source_nexthop,
- sizeof(struct pim_nexthop));
- pim_ecmp_nexthop_lookup(pim, &nexthop, &src,
- &grp, 0);
- if (nexthop.interface)
- input_iface_vif_index =
- pim_if_find_vifindex_by_ifindex(
- pim,
- nexthop.interface->ifindex);
- } else
- input_iface_vif_index =
- pim_ecmp_fib_lookup_if_vif_index(
- pim, &src, &grp);
-
- if (PIM_DEBUG_ZEBRA)
- zlog_debug(
- "%s: NHT %pSG vif_source %pPAs vif_index:%d ",
- __func__, &sg, &vif_source,
- input_iface_vif_index);
-
- if (input_iface_vif_index < 1) {
- if (PIM_DEBUG_IGMP_TRACE) {
- char source_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<source?>",
- source->source_addr,
- source_str, sizeof(source_str));
- zlog_debug(
- "%s %s: could not find input interface for source %s",
- __FILE__, __func__, source_str);
- }
- source->source_channel_oil =
- pim_channel_oil_add(pim, &sg, __func__);
- }
-
- else {
- /*
- * Protect IGMP against adding looped MFC
- * entries created by both source and receiver
- * attached to the same interface. See TODO
- * T22. Block only when the intf is non DR
- * DR must create upstream.
- */
- if ((input_iface_vif_index ==
- pim_oif->mroute_vif_index) &&
- !(PIM_I_am_DR(pim_oif))) {
- /* ignore request for looped MFC entry
- */
- if (PIM_DEBUG_IGMP_TRACE) {
- zlog_debug("%s: ignoring request for looped MFC entry (S,G)=%pSG: oif=%s vif_index=%d",
- __func__,
- &sg,
- source->source_group
- ->interface->name,
- input_iface_vif_index);
- }
- return;
- }
-
- source->source_channel_oil =
- pim_channel_oil_add(pim, &sg, __func__);
- if (!source->source_channel_oil) {
- if (PIM_DEBUG_IGMP_TRACE) {
- zlog_debug("%s %s: could not create OIL for channel (S,G)=%pSG",
- __FILE__, __func__,
- &sg);
- }
- return;
- }
- }
- }
- }
-
- if (PIM_I_am_DR(pim_oif) || PIM_I_am_DualActive(pim_oif)) {
- result = pim_channel_add_oif(source->source_channel_oil,
- group->interface,
- PIM_OIF_FLAG_PROTO_IGMP, __func__);
- if (result) {
- if (PIM_DEBUG_MROUTE) {
- zlog_warn("%s: add_oif() failed with return=%d",
- __func__, result);
- }
- return;
- }
- } else {
- if (PIM_DEBUG_IGMP_TRACE)
- zlog_debug("%s: %pSG was received on %s interface but we are not DR for that interface",
- __func__, &sg,
- group->interface->name);
-
- return;
- }
- /*
- Feed IGMPv3-gathered local membership information into PIM
- per-interface (S,G) state.
- */
- if (!pim_ifchannel_local_membership_add(group->interface, &sg,
- false /*is_vxlan*/)) {
- if (PIM_DEBUG_MROUTE)
- zlog_warn("%s: Failure to add local membership for %pSG",
- __func__, &sg);
-
- pim_channel_del_oif(source->source_channel_oil,
- group->interface, PIM_OIF_FLAG_PROTO_IGMP,
- __func__);
- return;
- }
-
- IGMP_SOURCE_DO_FORWARDING(source->source_flags);
-}
-
-/*
- igmp_source_forward_stop: stop fowarding, but keep the source
- igmp_source_delete: stop fowarding, and delete the source
- */
-void igmp_source_forward_stop(struct gm_source *source)
-{
- struct gm_group *group;
- pim_sgaddr sg;
- int result;
-
- memset(&sg, 0, sizeof(sg));
- sg.src = source->source_addr;
- sg.grp = source->source_group->group_addr;
-
- if (PIM_DEBUG_IGMP_TRACE) {
- zlog_debug("%s: (S,G)=%pSG oif=%s fwd=%d", __func__, &sg,
- source->source_group->interface->name,
- IGMP_SOURCE_TEST_FORWARDING(source->source_flags));
- }
-
- /* Prevent IGMP interface from removing multicast route multiple
- times */
- if (!IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) {
- return;
- }
-
- group = source->source_group;
-
- /*
- It appears that in certain circumstances that
- igmp_source_forward_stop is called when IGMP forwarding
- was not enabled in oif_flags for this outgoing interface.
- Possibly because of multiple calls. When that happens, we
- enter the below if statement and this function returns early
- which in turn triggers the calling function to assert.
- Making the call to pim_channel_del_oif and ignoring the return code
- fixes the issue without ill effect, similar to
- pim_forward_stop below.
- */
- result = pim_channel_del_oif(source->source_channel_oil,
- group->interface, PIM_OIF_FLAG_PROTO_IGMP,
- __func__);
- if (result) {
- if (PIM_DEBUG_IGMP_TRACE)
- zlog_debug(
- "%s: pim_channel_del_oif() failed with return=%d",
- __func__, result);
- return;
- }
-
- /*
- Feed IGMPv3-gathered local membership information into PIM
- per-interface (S,G) state.
- */
- pim_ifchannel_local_membership_del(group->interface, &sg);
-
- IGMP_SOURCE_DONT_FORWARDING(source->source_flags);
-}
-#endif /* PIM_IPV == 4 */
-
void pim_forward_start(struct pim_ifchannel *ch)
{
struct pim_upstream *up = ch->upstream;
uint32_t mask = 0;
if (PIM_DEBUG_PIM_TRACE)
- zlog_debug("%s: (S,G)=%pSG oif=%s (%pI4)", __func__, &ch->sg,
+ zlog_debug("%s: (S,G)=%pSG oif=%s (%pPA)", __func__, &ch->sg,
ch->interface->name, &up->upstream_addr);
if (PIM_IF_FLAG_TEST_PROTO_IGMP(ch->flags))
diff --git a/pimd/pim_zebra.h b/pimd/pim_zebra.h
index 8656d7563d..5879cdefb0 100644
--- a/pimd/pim_zebra.h
+++ b/pimd/pim_zebra.h
@@ -23,7 +23,6 @@
#include <zebra.h>
#include "zclient.h"
-#include "pim_igmp.h"
#include "pim_ifchannel.h"
void pim_zebra_init(void);
@@ -32,15 +31,6 @@ void pim_zebra_zclient_update(struct vty *vty);
void pim_scan_individual_oil(struct channel_oil *c_oil, int in_vif_index);
void pim_scan_oil(struct pim_instance *pim_matcher);
-void igmp_anysource_forward_start(struct pim_instance *pim,
- struct gm_group *group);
-void igmp_anysource_forward_stop(struct gm_group *group);
-
-void igmp_source_forward_start(struct pim_instance *pim,
- struct gm_source *source);
-void igmp_source_forward_stop(struct gm_source *source);
-void igmp_source_forward_reevaluate_all(struct pim_instance *pim);
-
void pim_forward_start(struct pim_ifchannel *ch);
void pim_forward_stop(struct pim_ifchannel *ch);
diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c
index c33e6032bf..c487f995e7 100644
--- a/pimd/pim_zlookup.c
+++ b/pimd/pim_zlookup.c
@@ -412,7 +412,7 @@ int zclient_lookup_nexthop(struct pim_instance *pim,
num_ifindex = zclient_lookup_nexthop_once(pim, nexthop_tab,
tab_size, addr);
if (num_ifindex < 1) {
- if (PIM_DEBUG_PIM_NHT)
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug(
"%s: lookup=%d/%d: could not find nexthop ifindex for address %pPA(%s)",
__func__, lookup, max_lookup, &addr,
diff --git a/pimd/pimd.h b/pimd/pimd.h
index d4eac58a29..1f7919ac6c 100644
--- a/pimd/pimd.h
+++ b/pimd/pimd.h
@@ -182,8 +182,7 @@ extern uint8_t qpim_ecmp_rebalance_enable;
#define PIM_DEBUG_MSDP_PACKETS (router->debugs & PIM_MASK_MSDP_PACKETS)
#define PIM_DEBUG_MSDP_INTERNAL (router->debugs & PIM_MASK_MSDP_INTERNAL)
#define PIM_DEBUG_PIM_NHT (router->debugs & PIM_MASK_PIM_NHT)
-#define PIM_DEBUG_PIM_NHT_DETAIL \
- (router->debugs & (PIM_MASK_PIM_NHT_DETAIL | PIM_MASK_PIM_NHT))
+#define PIM_DEBUG_PIM_NHT_DETAIL (router->debugs & PIM_MASK_PIM_NHT_DETAIL)
#define PIM_DEBUG_PIM_NHT_RP (router->debugs & PIM_MASK_PIM_NHT_RP)
#define PIM_DEBUG_MTRACE (router->debugs & PIM_MASK_MTRACE)
#define PIM_DEBUG_VXLAN (router->debugs & PIM_MASK_VXLAN)
@@ -228,6 +227,7 @@ extern uint8_t qpim_ecmp_rebalance_enable;
#define PIM_DO_DEBUG_MSDP_PACKETS (router->debugs |= PIM_MASK_MSDP_PACKETS)
#define PIM_DO_DEBUG_MSDP_INTERNAL (router->debugs |= PIM_MASK_MSDP_INTERNAL)
#define PIM_DO_DEBUG_PIM_NHT (router->debugs |= PIM_MASK_PIM_NHT)
+#define PIM_DO_DEBUG_PIM_NHT_DETAIL (router->debugs |= PIM_MASK_PIM_NHT_DETAIL)
#define PIM_DO_DEBUG_PIM_NHT_RP (router->debugs |= PIM_MASK_PIM_NHT_RP)
#define PIM_DO_DEBUG_MTRACE (router->debugs |= PIM_MASK_MTRACE)
#define PIM_DO_DEBUG_VXLAN (router->debugs |= PIM_MASK_VXLAN)
@@ -259,6 +259,8 @@ extern uint8_t qpim_ecmp_rebalance_enable;
#define PIM_DONT_DEBUG_MSDP_PACKETS (router->debugs &= ~PIM_MASK_MSDP_PACKETS)
#define PIM_DONT_DEBUG_MSDP_INTERNAL (router->debugs &= ~PIM_MASK_MSDP_INTERNAL)
#define PIM_DONT_DEBUG_PIM_NHT (router->debugs &= ~PIM_MASK_PIM_NHT)
+#define PIM_DONT_DEBUG_PIM_NHT_DETAIL \
+ (router->debugs &= ~PIM_MASK_PIM_NHT_DETAIL)
#define PIM_DONT_DEBUG_PIM_NHT_RP (router->debugs &= ~PIM_MASK_PIM_NHT_RP)
#define PIM_DONT_DEBUG_MTRACE (router->debugs &= ~PIM_MASK_MTRACE)
#define PIM_DONT_DEBUG_VXLAN (router->debugs &= ~PIM_MASK_VXLAN)
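The PIM_DEBUG_PIM_NHT_DETAIL redefinition changes behaviour: previously the detail predicate was also satisfied by plain "debug pim nht", now it requires the detail flag itself (which is why pim_zlookup.c above moves its noisier message under the detail check and pim_vty.c learns to write the new knob out). A tiny standalone demonstration of the difference, using illustrative mask values rather than the real PIM_MASK_* ones:

#include <stdio.h>

#define MASK_NHT	0x1	/* illustrative values, not FRR's */
#define MASK_NHT_DETAIL	0x2

int main(void)
{
	unsigned int debugs = MASK_NHT;	/* only "debug pim nht" enabled */

	/* old definition: detail also fires for plain nht */
	printf("old detail active: %u\n",
	       !!(debugs & (MASK_NHT_DETAIL | MASK_NHT)));
	/* new definition: detail needs its own flag */
	printf("new detail active: %u\n", !!(debugs & MASK_NHT_DETAIL));
	return 0;
}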
diff --git a/pimd/subdir.am b/pimd/subdir.am
index 0fe40912b1..ba7bdb3493 100644
--- a/pimd/subdir.am
+++ b/pimd/subdir.am
@@ -21,7 +21,6 @@ pim_common = \
pimd/pim_assert.c \
pimd/pim_bfd.c \
pimd/pim_br.c \
- pimd/pim_bsm.c \
pimd/pim_cmd_common.c \
pimd/pim_errors.c \
pimd/pim_hello.c \
@@ -38,7 +37,9 @@ pim_common = \
pimd/pim_nb.c \
pimd/pim_nb_config.c \
pimd/pim_neighbor.c \
+ pimd/pim_nht.c \
pimd/pim_oil.c \
+ pimd/pim_pim.c \
pimd/pim_routemap.c \
pimd/pim_rp.c \
pimd/pim_rpf.c \
@@ -47,6 +48,7 @@ pim_common = \
pimd/pim_ssmpingd.c \
pimd/pim_static.c \
pimd/pim_str.c \
+ pimd/pim_tib.c \
pimd/pim_time.c \
pimd/pim_tlv.c \
pimd/pim_upstream.c \
@@ -59,6 +61,7 @@ pim_common = \
pimd_pimd_SOURCES = \
$(pim_common) \
+ pimd/pim_bsm.c \
pimd/pim_cmd.c \
pimd/pim_igmp.c \
pimd/pim_igmp_mtrace.c \
@@ -70,12 +73,11 @@ pimd_pimd_SOURCES = \
pimd/pim_msdp.c \
pimd/pim_msdp_packet.c \
pimd/pim_msdp_socket.c \
- pimd/pim_nht.c \
- pimd/pim_pim.c \
pimd/pim_register.c \
pimd/pim_signals.c \
pimd/pim_zlookup.c \
pimd/pim_zpthread.c \
+ pimd/pim_mroute_msg.c \
# end
nodist_pimd_pimd_SOURCES = \
@@ -89,6 +91,7 @@ pimd_pim6d_SOURCES = \
pimd/pim6_main.c \
pimd/pim6_stubs.c \
pimd/pim6_cmd.c \
+ pimd/pim6_mroute_msg.c \
# end
nodist_pimd_pim6d_SOURCES = \
@@ -141,6 +144,7 @@ noinst_HEADERS += \
pimd/pim_ssmpingd.h \
pimd/pim_static.h \
pimd/pim_str.h \
+ pimd/pim_tib.h \
pimd/pim_time.h \
pimd/pim_tlv.h \
pimd/pim_upstream.h \
diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c
index 78cc57cc44..889643f65e 100644
--- a/sharpd/sharp_vty.c
+++ b/sharpd/sharp_vty.c
@@ -1258,6 +1258,67 @@ DEFPY (show_sharp_cspf,
return CMD_SUCCESS;
}
+static struct interface *if_lookup_vrf_all(const char *ifname)
+{
+ struct interface *ifp;
+ struct vrf *vrf;
+
+ RB_FOREACH(vrf, vrf_name_head, &vrfs_by_name) {
+ ifp = if_lookup_by_name(ifname, vrf->vrf_id);
+ if (ifp)
+ return ifp;
+ }
+
+ return NULL;
+}
+
+DEFPY (sharp_interface_protodown,
+ sharp_interface_protodown_cmd,
+ "sharp interface IFNAME$ifname protodown",
+ SHARP_STR
+ INTERFACE_STR
+ IFNAME_STR
+ "Set interface protodown\n")
+{
+ struct interface *ifp;
+
+ ifp = if_lookup_vrf_all(ifname);
+
+ if (!ifp) {
+ vty_out(vty, "%% Can't find interface %s\n", ifname);
+ return CMD_WARNING;
+ }
+
+ if (sharp_zebra_send_interface_protodown(ifp, true) != 0)
+ return CMD_WARNING;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (no_sharp_interface_protodown,
+ no_sharp_interface_protodown_cmd,
+ "no sharp interface IFNAME$ifname protodown",
+ NO_STR
+ SHARP_STR
+ INTERFACE_STR
+ IFNAME_STR
+ "Set interface protodown\n")
+{
+ struct interface *ifp;
+
+ ifp = if_lookup_vrf_all(ifname);
+
+ if (!ifp) {
+ vty_out(vty, "%% Can't find interface %s\n", ifname);
+ return CMD_WARNING;
+ }
+
+ if (sharp_zebra_send_interface_protodown(ifp, false) != 0)
+ return CMD_WARNING;
+
+ return CMD_SUCCESS;
+}
+
void sharp_vty_init(void)
{
install_element(ENABLE_NODE, &install_routes_data_dump_cmd);
@@ -1290,5 +1351,8 @@ void sharp_vty_init(void)
&sharp_srv6_manager_release_locator_chunk_cmd);
install_element(ENABLE_NODE, &show_sharp_segment_routing_srv6_cmd);
+ install_element(ENABLE_NODE, &sharp_interface_protodown_cmd);
+ install_element(ENABLE_NODE, &no_sharp_interface_protodown_cmd);
+
return;
}
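The two DEFPY blocks above give sharpd an operational toggle for interface protodown; once installed at ENABLE_NODE they would be driven from the vty roughly as follows (the interface name is only an example):

    sharp interface eth0 protodown
    no sharp interface eth0 protodown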
diff --git a/sharpd/sharp_zebra.c b/sharpd/sharp_zebra.c
index 313febd9bb..b40c2c6228 100644
--- a/sharpd/sharp_zebra.c
+++ b/sharpd/sharp_zebra.c
@@ -631,7 +631,8 @@ void sharp_zebra_nexthop_watch(struct prefix *p, vrf_id_t vrf_id, bool import,
if (!watch)
command = ZEBRA_NEXTHOP_UNREGISTER;
- if (zclient_send_rnh(zclient, command, p, connected, false, vrf_id)
+ if (zclient_send_rnh(zclient, command, p, SAFI_UNICAST, connected,
+ false, vrf_id)
== ZCLIENT_SEND_FAILURE)
zlog_warn("%s: Failure to send nexthop to zebra", __func__);
}
@@ -680,16 +681,17 @@ static int sharp_nexthop_update(ZAPI_CALLBACK_ARGS)
{
struct sharp_nh_tracker *nht;
struct zapi_route nhr;
+ struct prefix matched;
- if (!zapi_nexthop_update_decode(zclient->ibuf, &nhr)) {
+ if (!zapi_nexthop_update_decode(zclient->ibuf, &matched, &nhr)) {
zlog_err("%s: Decode of update failed", __func__);
return 0;
}
- zlog_debug("Received update for %pFX metric: %u", &nhr.prefix,
- nhr.metric);
+ zlog_debug("Received update for %pFX actual match: %pFX metric: %u",
+ &matched, &nhr.prefix, nhr.metric);
- nht = sharp_nh_tracker_get(&nhr.prefix);
+ nht = sharp_nh_tracker_get(&matched);
nht->nhop_num = nhr.nexthop_num;
nht->updates++;
@@ -968,6 +970,18 @@ static int sharp_zebra_process_srv6_locator_chunk(ZAPI_CALLBACK_ARGS)
return 0;
}
+int sharp_zebra_send_interface_protodown(struct interface *ifp, bool down)
+{
+ zlog_debug("Sending zebra to set %s protodown %s", ifp->name,
+ down ? "on" : "off");
+
+ if (zclient_send_interface_protodown(zclient, ifp->vrf->vrf_id, ifp,
+ down) == ZCLIENT_SEND_FAILURE)
+ return -1;
+
+ return 0;
+}
+
static zclient_handler *const sharp_handlers[] = {
[ZEBRA_INTERFACE_ADDRESS_ADD] = interface_address_add,
[ZEBRA_INTERFACE_ADDRESS_DELETE] = interface_address_delete,
diff --git a/sharpd/sharp_zebra.h b/sharpd/sharp_zebra.h
index 49f11a67e8..d8ea679797 100644
--- a/sharpd/sharp_zebra.h
+++ b/sharpd/sharp_zebra.h
@@ -73,4 +73,6 @@ extern void sharp_install_seg6local_route_helper(struct prefix *p,
enum seg6local_action_t act,
struct seg6local_context *ctx);
+extern int sharp_zebra_send_interface_protodown(struct interface *ifp,
+ bool down);
#endif
diff --git a/staticd/static_nht.c b/staticd/static_nht.c
index e1d6ba15d1..1d87a83c91 100644
--- a/staticd/static_nht.c
+++ b/staticd/static_nht.c
@@ -101,18 +101,15 @@ static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp,
}
}
-void static_nht_update(struct prefix *sp, struct prefix *nhp,
- uint32_t nh_num, afi_t afi, vrf_id_t nh_vrf_id)
+void static_nht_update(struct prefix *sp, struct prefix *nhp, uint32_t nh_num,
+ afi_t afi, safi_t safi, vrf_id_t nh_vrf_id)
{
struct vrf *vrf;
- RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- static_nht_update_safi(sp, nhp, nh_num, afi, SAFI_UNICAST,
- vrf, nh_vrf_id);
- static_nht_update_safi(sp, nhp, nh_num, afi, SAFI_MULTICAST,
- vrf, nh_vrf_id);
- }
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
+ static_nht_update_safi(sp, nhp, nh_num, afi, safi, vrf,
+ nh_vrf_id);
}
static void static_nht_reset_start_safi(struct prefix *nhp, afi_t afi,
@@ -166,16 +163,13 @@ static void static_nht_reset_start_safi(struct prefix *nhp, afi_t afi,
}
}
-void static_nht_reset_start(struct prefix *nhp, afi_t afi, vrf_id_t nh_vrf_id)
+void static_nht_reset_start(struct prefix *nhp, afi_t afi, safi_t safi,
+ vrf_id_t nh_vrf_id)
{
struct vrf *vrf;
- RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- static_nht_reset_start_safi(nhp, afi, SAFI_UNICAST,
- vrf, nh_vrf_id);
- static_nht_reset_start_safi(nhp, afi, SAFI_MULTICAST,
- vrf, nh_vrf_id);
- }
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
+ static_nht_reset_start_safi(nhp, afi, safi, vrf, nh_vrf_id);
}
static void static_nht_mark_state_safi(struct prefix *sp, afi_t afi,
@@ -212,7 +206,7 @@ static void static_nht_mark_state_safi(struct prefix *sp, afi_t afi,
route_unlock_node(rn);
}
-void static_nht_mark_state(struct prefix *sp, vrf_id_t vrf_id,
+void static_nht_mark_state(struct prefix *sp, safi_t safi, vrf_id_t vrf_id,
enum static_install_states state)
{
struct vrf *vrf;
@@ -226,6 +220,5 @@ void static_nht_mark_state(struct prefix *sp, vrf_id_t vrf_id,
if (!vrf || !vrf->info)
return;
- static_nht_mark_state_safi(sp, afi, SAFI_UNICAST, vrf, state);
- static_nht_mark_state_safi(sp, afi, SAFI_MULTICAST, vrf, state);
+ static_nht_mark_state_safi(sp, afi, safi, vrf, state);
}
diff --git a/staticd/static_nht.h b/staticd/static_nht.h
index 08dba2ebb5..c29acc32ef 100644
--- a/staticd/static_nht.h
+++ b/staticd/static_nht.h
@@ -37,19 +37,21 @@ extern "C" {
* vrf_id -> The vrf the nexthop is in.
*/
extern void static_nht_update(struct prefix *sp, struct prefix *nhp,
- uint32_t nh_num, afi_t afi, vrf_id_t vrf_id);
+ uint32_t nh_num, afi_t afi, safi_t safi,
+ vrf_id_t vrf_id);
/*
* For the given tracked nexthop, nhp, mark all routes that use
* this route as in starting state again.
*/
-extern void static_nht_reset_start(struct prefix *nhp, afi_t afi,
+extern void static_nht_reset_start(struct prefix *nhp, afi_t afi, safi_t safi,
vrf_id_t nh_vrf_id);
/*
* For the given prefix, sp, mark it as in a particular state
*/
-extern void static_nht_mark_state(struct prefix *sp, vrf_id_t vrf_id,
+extern void static_nht_mark_state(struct prefix *sp, safi_t safi,
+ vrf_id_t vrf_id,
enum static_install_states state);
/*
diff --git a/staticd/static_zebra.c b/staticd/static_zebra.c
index f937492ec2..af153b4bc3 100644
--- a/staticd/static_zebra.c
+++ b/staticd/static_zebra.c
@@ -44,9 +44,47 @@
#include "static_vty.h"
#include "static_debug.h"
+DEFINE_MTYPE_STATIC(STATIC, STATIC_NHT_DATA, "Static Nexthop tracking data");
+PREDECL_HASH(static_nht_hash);
+
+struct static_nht_data {
+ struct static_nht_hash_item itm;
+
+ struct prefix nh;
+ safi_t safi;
+
+ vrf_id_t nh_vrf_id;
+
+ uint32_t refcount;
+ uint8_t nh_num;
+};
+
+static int static_nht_data_cmp(const struct static_nht_data *nhtd1,
+ const struct static_nht_data *nhtd2)
+{
+ if (nhtd1->nh_vrf_id != nhtd2->nh_vrf_id)
+ return numcmp(nhtd1->nh_vrf_id, nhtd2->nh_vrf_id);
+ if (nhtd1->safi != nhtd2->safi)
+ return numcmp(nhtd1->safi, nhtd2->safi);
+
+ return prefix_cmp(&nhtd1->nh, &nhtd2->nh);
+}
+
+static unsigned int static_nht_data_hash(const struct static_nht_data *nhtd)
+{
+ unsigned int key = 0;
+
+ key = prefix_hash_key(&nhtd->nh);
+ return jhash_2words(nhtd->nh_vrf_id, nhtd->safi, key);
+}
+
+DECLARE_HASH(static_nht_hash, struct static_nht_data, itm, static_nht_data_cmp,
+ static_nht_data_hash);
+
+static struct static_nht_hash_head static_nht_hash[1];
+
/* Zebra structure to hold current status. */
struct zclient *zclient;
-static struct hash *static_nht_hash;
uint32_t zebra_ecmp_count = MULTIPATH_NUM;
/* Interface addition message from zebra. */
@@ -104,31 +142,32 @@ static int route_notify_owner(ZAPI_CALLBACK_ARGS)
struct prefix p;
enum zapi_route_notify_owner note;
uint32_t table_id;
+ safi_t safi;
- if (!zapi_route_notify_decode(zclient->ibuf, &p, &table_id, &note,
- NULL, NULL))
+ if (!zapi_route_notify_decode(zclient->ibuf, &p, &table_id, &note, NULL,
+ &safi))
return -1;
switch (note) {
case ZAPI_ROUTE_FAIL_INSTALL:
- static_nht_mark_state(&p, vrf_id, STATIC_NOT_INSTALLED);
+ static_nht_mark_state(&p, safi, vrf_id, STATIC_NOT_INSTALLED);
zlog_warn("%s: Route %pFX failed to install for table: %u",
__func__, &p, table_id);
break;
case ZAPI_ROUTE_BETTER_ADMIN_WON:
- static_nht_mark_state(&p, vrf_id, STATIC_NOT_INSTALLED);
+ static_nht_mark_state(&p, safi, vrf_id, STATIC_NOT_INSTALLED);
zlog_warn(
"%s: Route %pFX over-ridden by better route for table: %u",
__func__, &p, table_id);
break;
case ZAPI_ROUTE_INSTALLED:
- static_nht_mark_state(&p, vrf_id, STATIC_INSTALLED);
+ static_nht_mark_state(&p, safi, vrf_id, STATIC_INSTALLED);
break;
case ZAPI_ROUTE_REMOVED:
- static_nht_mark_state(&p, vrf_id, STATIC_NOT_INSTALLED);
+ static_nht_mark_state(&p, safi, vrf_id, STATIC_NOT_INSTALLED);
break;
case ZAPI_ROUTE_REMOVE_FAIL:
- static_nht_mark_state(&p, vrf_id, STATIC_INSTALLED);
+ static_nht_mark_state(&p, safi, vrf_id, STATIC_INSTALLED);
zlog_warn("%s: Route %pFX failure to remove for table: %u",
__func__, &p, table_id);
break;
@@ -141,15 +180,6 @@ static void zebra_connected(struct zclient *zclient)
zclient_send_reg_requests(zclient, VRF_DEFAULT);
}
-struct static_nht_data {
- struct prefix *nh;
-
- vrf_id_t nh_vrf_id;
-
- uint32_t refcount;
- uint8_t nh_num;
-};
-
/* API to check whether the configured nexthop address is
* one of its local connected address or not.
*/
@@ -169,34 +199,37 @@ static int static_zebra_nexthop_update(ZAPI_CALLBACK_ARGS)
{
struct static_nht_data *nhtd, lookup;
struct zapi_route nhr;
+ struct prefix matched;
afi_t afi = AFI_IP;
- if (!zapi_nexthop_update_decode(zclient->ibuf, &nhr)) {
+ if (!zapi_nexthop_update_decode(zclient->ibuf, &matched, &nhr)) {
zlog_err("Failure to decode nexthop update message");
return 1;
}
- if (nhr.prefix.family == AF_INET6)
+ if (matched.family == AF_INET6)
afi = AFI_IP6;
if (nhr.type == ZEBRA_ROUTE_CONNECT) {
- if (static_nexthop_is_local(vrf_id, &nhr.prefix,
- nhr.prefix.family))
+ if (static_nexthop_is_local(vrf_id, &matched,
+ nhr.prefix.family))
nhr.nexthop_num = 0;
}
memset(&lookup, 0, sizeof(lookup));
- lookup.nh = &nhr.prefix;
+ lookup.nh = matched;
lookup.nh_vrf_id = vrf_id;
+ lookup.safi = nhr.safi;
- nhtd = hash_lookup(static_nht_hash, &lookup);
+ nhtd = static_nht_hash_find(static_nht_hash, &lookup);
if (nhtd) {
nhtd->nh_num = nhr.nexthop_num;
- static_nht_reset_start(&nhr.prefix, afi, nhtd->nh_vrf_id);
- static_nht_update(NULL, &nhr.prefix, nhr.nexthop_num, afi,
- nhtd->nh_vrf_id);
+ static_nht_reset_start(&matched, afi, nhr.safi,
+ nhtd->nh_vrf_id);
+ static_nht_update(NULL, &matched, nhr.nexthop_num, afi,
+ nhr.safi, nhtd->nh_vrf_id);
} else
zlog_err("No nhtd?");
@@ -209,55 +242,50 @@ static void static_zebra_capabilities(struct zclient_capabilities *cap)
zebra_ecmp_count = cap->ecmp;
}
-static unsigned int static_nht_hash_key(const void *data)
+static struct static_nht_data *
+static_nht_hash_getref(const struct static_nht_data *ref)
{
- const struct static_nht_data *nhtd = data;
- unsigned int key = 0;
+ struct static_nht_data *nhtd;
- key = prefix_hash_key(nhtd->nh);
- return jhash_1word(nhtd->nh_vrf_id, key);
-}
+ nhtd = static_nht_hash_find(static_nht_hash, ref);
+ if (!nhtd) {
+ nhtd = XCALLOC(MTYPE_STATIC_NHT_DATA, sizeof(*nhtd));
-static bool static_nht_hash_cmp(const void *d1, const void *d2)
-{
- const struct static_nht_data *nhtd1 = d1;
- const struct static_nht_data *nhtd2 = d2;
+ prefix_copy(&nhtd->nh, &ref->nh);
+ nhtd->nh_vrf_id = ref->nh_vrf_id;
+ nhtd->safi = ref->safi;
- if (nhtd1->nh_vrf_id != nhtd2->nh_vrf_id)
- return false;
+ static_nht_hash_add(static_nht_hash, nhtd);
+ }
- return prefix_same(nhtd1->nh, nhtd2->nh);
+ nhtd->refcount++;
+ return nhtd;
}
-static void *static_nht_hash_alloc(void *data)
+static bool static_nht_hash_decref(struct static_nht_data *nhtd)
{
- struct static_nht_data *copy = data;
- struct static_nht_data *new;
-
- new = XMALLOC(MTYPE_TMP, sizeof(*new));
+ if (--nhtd->refcount > 0)
+ return true;
- new->nh = prefix_new();
- prefix_copy(new->nh, copy->nh);
- new->refcount = 0;
- new->nh_num = 0;
- new->nh_vrf_id = copy->nh_vrf_id;
-
- return new;
+ static_nht_hash_del(static_nht_hash, nhtd);
+ XFREE(MTYPE_STATIC_NHT_DATA, nhtd);
+ return false;
}
-static void static_nht_hash_free(void *data)
+static void static_nht_hash_clear(void)
{
- struct static_nht_data *nhtd = data;
+ struct static_nht_data *nhtd;
- prefix_free(&nhtd->nh);
- XFREE(MTYPE_TMP, nhtd);
+ while ((nhtd = static_nht_hash_pop(static_nht_hash)))
+ XFREE(MTYPE_STATIC_NHT_DATA, nhtd);
}
void static_zebra_nht_register(struct static_nexthop *nh, bool reg)
{
struct static_path *pn = nh->pn;
struct route_node *rn = pn->rn;
- struct static_nht_data *nhtd, lookup;
+ struct static_route_info *si = static_route_info_from_rnode(rn);
+ struct static_nht_data lookup;
uint32_t cmd;
struct prefix p;
afi_t afi = AFI_IP;
@@ -293,44 +321,42 @@ void static_zebra_nht_register(struct static_nexthop *nh, bool reg)
}
memset(&lookup, 0, sizeof(lookup));
- lookup.nh = &p;
+ lookup.nh = p;
lookup.nh_vrf_id = nh->nh_vrf_id;
+ lookup.safi = si->safi;
nh->nh_registered = reg;
if (reg) {
- nhtd = hash_get(static_nht_hash, &lookup,
- static_nht_hash_alloc);
- nhtd->refcount++;
+ struct static_nht_data *nhtd;
+
+ nhtd = static_nht_hash_getref(&lookup);
if (nhtd->refcount > 1) {
DEBUGD(&static_dbg_route,
"Already registered nexthop(%pFX) for %pRN %d",
&p, rn, nhtd->nh_num);
if (nhtd->nh_num)
- static_nht_update(&rn->p, nhtd->nh,
- nhtd->nh_num, afi,
+ static_nht_update(&rn->p, &nhtd->nh,
+ nhtd->nh_num, afi, si->safi,
nh->nh_vrf_id);
return;
}
} else {
- nhtd = hash_lookup(static_nht_hash, &lookup);
+ struct static_nht_data *nhtd;
+
+ nhtd = static_nht_hash_find(static_nht_hash, &lookup);
if (!nhtd)
return;
-
- nhtd->refcount--;
- if (nhtd->refcount >= 1)
+ if (static_nht_hash_decref(nhtd))
return;
-
- hash_release(static_nht_hash, nhtd);
- static_nht_hash_free(nhtd);
}
DEBUGD(&static_dbg_route, "%s nexthop(%pFX) for %pRN",
reg ? "Registering" : "Unregistering", &p, rn);
- if (zclient_send_rnh(zclient, cmd, &p, false, false, nh->nh_vrf_id)
- == ZCLIENT_SEND_FAILURE)
+ if (zclient_send_rnh(zclient, cmd, &p, si->safi, false, false,
+ nh->nh_vrf_id) == ZCLIENT_SEND_FAILURE)
zlog_warn("%s: Failure to send nexthop to zebra", __func__);
}
/*
@@ -341,6 +367,7 @@ int static_zebra_nh_update(struct static_nexthop *nh)
{
struct static_path *pn = nh->pn;
struct route_node *rn = pn->rn;
+ struct static_route_info *si = static_route_info_from_rnode(rn);
struct static_nht_data *nhtd, lookup = {};
struct prefix p = {};
afi_t afi = AFI_IP;
@@ -368,14 +395,15 @@ int static_zebra_nh_update(struct static_nexthop *nh)
break;
}
- lookup.nh = &p;
+ lookup.nh = p;
lookup.nh_vrf_id = nh->nh_vrf_id;
+ lookup.safi = si->safi;
- nhtd = hash_lookup(static_nht_hash, &lookup);
+ nhtd = static_nht_hash_find(static_nht_hash, &lookup);
if (nhtd && nhtd->nh_num) {
nh->state = STATIC_START;
- static_nht_update(&rn->p, nhtd->nh, nhtd->nh_num, afi,
- nh->nh_vrf_id);
+ static_nht_update(&rn->p, &nhtd->nh, nhtd->nh_num, afi,
+ si->safi, nh->nh_vrf_id);
return 1;
}
return 0;
@@ -530,14 +558,15 @@ void static_zebra_init(void)
zclient->zebra_capabilities = static_zebra_capabilities;
zclient->zebra_connected = zebra_connected;
- static_nht_hash = hash_create(static_nht_hash_key,
- static_nht_hash_cmp,
- "Static Nexthop Tracking hash");
+ static_nht_hash_init(static_nht_hash);
}
/* static_zebra_stop used by tests/lib/test_grpc.cpp */
void static_zebra_stop(void)
{
+ static_nht_hash_clear();
+ static_nht_hash_fini(static_nht_hash);
+
if (!zclient)
return;
zclient_stop(zclient);
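The static_zebra.c rework above drops the generic lib/hash table in favour of a typesafe DECLARE_HASH container keyed on nexthop prefix, VRF and SAFI, and concentrates the reference counting in static_nht_hash_getref()/static_nht_hash_decref(): the first registration allocates and inserts the entry, the last deregistration removes and frees it. A minimal, self-contained sketch of that refcount pattern, with the hash container and the FRR types deliberately left out:

    #include <stdlib.h>

    /* Sketch only: in the FRR code the entries live in a typesafe hash
     * generated by DECLARE_HASH(); the container is omitted here. */
    struct nht_entry {
            unsigned int refcount;
            /* key fields (nexthop prefix, VRF id, SAFI) would go here */
    };

    static struct nht_entry *entry_getref(struct nht_entry *existing)
    {
            struct nht_entry *e = existing;

            if (!e)
                    e = calloc(1, sizeof(*e)); /* first user: allocate (and insert) */
            e->refcount++;
            return e;
    }

    /* Returns nonzero while other users still hold a reference. */
    static int entry_decref(struct nht_entry *e)
    {
            if (--e->refcount > 0)
                    return 1;
            free(e); /* last user: would also remove from the container */
            return 0;
    }

    int main(void)
    {
            struct nht_entry *e = entry_getref(NULL); /* refcount = 1 */

            entry_getref(e);  /* second registration, refcount = 2 */
            entry_decref(e);  /* still referenced */
            entry_decref(e);  /* last reference dropped, entry freed */
            return 0;
    }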
diff --git a/tests/.gitignore b/tests/.gitignore
index 70d0ef6e0a..f00177abd8 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -1,6 +1,8 @@
*.log
*.sum
*.xml
+frr-northbound.proto
+frr_northbound*
.pytest_cache
/bgpd/test_aspath
/bgpd/test_bgp_table
diff --git a/tests/isisd/test_fuzz_isis_tlv.c b/tests/isisd/test_fuzz_isis_tlv.c
index 97aade6578..8f0b92d0fc 100644
--- a/tests/isisd/test_fuzz_isis_tlv.c
+++ b/tests/isisd/test_fuzz_isis_tlv.c
@@ -108,12 +108,12 @@ static int test(FILE *input, FILE *output)
}
fprintf(output, "Unpack log:\n%s", log);
- const char *s_tlvs = isis_format_tlvs(tlvs);
+ const char *s_tlvs = isis_format_tlvs(tlvs, NULL);
fprintf(output, "Unpacked TLVs:\n%s", s_tlvs);
struct isis_item *orig_auth = tlvs->isis_auth.head;
tlvs->isis_auth.head = NULL;
- s_tlvs = isis_format_tlvs(tlvs);
+ s_tlvs = isis_format_tlvs(tlvs, NULL);
struct isis_tlvs *tlv_copy = isis_copy_tlvs(tlvs);
tlvs->isis_auth.head = orig_auth;
isis_free_tlvs(tlvs);
@@ -133,7 +133,7 @@ static int test(FILE *input, FILE *output)
}
char *orig_tlvs = XSTRDUP(MTYPE_TMP, s_tlvs);
- s_tlvs = isis_format_tlvs(tlvs);
+ s_tlvs = isis_format_tlvs(tlvs, NULL);
if (strcmp(orig_tlvs, s_tlvs)) {
fprintf(output,
@@ -166,7 +166,7 @@ static int test(FILE *input, FILE *output)
fprintf(output, "Could not pack fragment, too large.\n");
assert(0);
}
- sbuf_push(&fragment_format, 0, "%s", isis_format_tlvs(tlvs));
+ sbuf_push(&fragment_format, 0, "%s", isis_format_tlvs(tlvs, NULL));
isis_free_tlvs(tlvs);
}
list_delete(&fragments);
diff --git a/tests/isisd/test_isis_spf.c b/tests/isisd/test_isis_spf.c
index a30f33ccad..971aba4c46 100644
--- a/tests/isisd/test_isis_spf.c
+++ b/tests/isisd/test_isis_spf.c
@@ -294,7 +294,7 @@ static int test_run(struct vty *vty, const struct isis_topology *topology,
/* Print the LDPDB. */
if (CHECK_FLAG(flags, F_DISPLAY_LSPDB))
- show_isis_database_lspdb(vty, area, level - 1,
+ show_isis_database_lspdb_vty(vty, area, level - 1,
&area->lspdb[level - 1], NULL,
ISIS_UI_LEVEL_DETAIL);
diff --git a/tests/lib/test_grpc.py b/tests/lib/test_grpc.py
index 06ae6c05dd..2e292fadc9 100644
--- a/tests/lib/test_grpc.py
+++ b/tests/lib/test_grpc.py
@@ -1,8 +1,10 @@
import inspect
import os
import subprocess
-import pytest
+
import frrtest
+import pytest
+
class TestGRPC(object):
program = "./test_grpc"
@@ -15,9 +17,13 @@ class TestGRPC(object):
basedir = os.path.dirname(inspect.getsourcefile(type(self)))
program = os.path.join(basedir, self.program)
proc = subprocess.Popen(
- [frrtest.binpath(program)], stdin=subprocess.PIPE, stdout=subprocess.PIPE
+ [frrtest.binpath(program)],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
)
output, _ = proc.communicate()
self.exitcode = proc.wait()
if self.exitcode != 0:
+ print("OUTPUT:\n" + output.decode("ascii"))
raise frrtest.TestExitNonzero(self)
diff --git a/tests/lib/test_printfrr.c b/tests/lib/test_printfrr.c
index 8f9d637afd..59d08ae82b 100644
--- a/tests/lib/test_printfrr.c
+++ b/tests/lib/test_printfrr.c
@@ -207,6 +207,24 @@ int main(int argc, char **argv)
assert(strcmp(p, "test#5") == 0);
XFREE(MTYPE_TMP, p);
+ struct prefix pfx;
+
+ str2prefix("192.168.1.23/24", &pfx);
+ printchk("192.168.1.23/24", "%pFX", &pfx);
+ printchk("192.168.1.23", "%pFXh", &pfx);
+
+ str2prefix("2001:db8::1234/64", &pfx);
+ printchk("2001:db8::1234/64", "%pFX", &pfx);
+ printchk("2001:db8::1234", "%pFXh", &pfx);
+
+ pfx.family = AF_UNIX;
+ printchk("UNK prefix", "%pFX", &pfx);
+ printchk("{prefix.af=AF_UNIX}", "%pFXh", &pfx);
+
+ str2prefix_eth("02:ca:fe:f0:0d:1e/48", (struct prefix_eth *)&pfx);
+ printchk("02:ca:fe:f0:0d:1e/48", "%pFX", &pfx);
+ printchk("02:ca:fe:f0:0d:1e", "%pFXh", &pfx);
+
struct prefix_sg sg;
sg.src.s_addr = INADDR_ANY;
sg.grp.s_addr = INADDR_ANY;
diff --git a/tests/lib/test_typelist.c b/tests/lib/test_typelist.c
index 607e29e56b..6e69658490 100644
--- a/tests/lib/test_typelist.c
+++ b/tests/lib/test_typelist.c
@@ -58,9 +58,10 @@
#define T_HASH (1 << 2)
#define T_HEAP (1 << 3)
#define T_ATOMIC (1 << 4)
+#define T_REVERSE (1 << 5)
#define _T_LIST (0)
-#define _T_DLIST (0)
+#define _T_DLIST (0 | T_REVERSE)
#define _T_ATOMLIST (0 | T_ATOMIC)
#define _T_HEAP (T_SORTED | T_HEAP)
#define _T_SORTLIST_UNIQ (T_SORTED | T_UNIQ)
@@ -68,8 +69,8 @@
#define _T_HASH (T_SORTED | T_UNIQ | T_HASH)
#define _T_SKIPLIST_UNIQ (T_SORTED | T_UNIQ)
#define _T_SKIPLIST_NONUNIQ (T_SORTED)
-#define _T_RBTREE_UNIQ (T_SORTED | T_UNIQ)
-#define _T_RBTREE_NONUNIQ (T_SORTED)
+#define _T_RBTREE_UNIQ (T_SORTED | T_UNIQ | T_REVERSE)
+#define _T_RBTREE_NONUNIQ (T_SORTED | T_REVERSE)
#define _T_ATOMSORT_UNIQ (T_SORTED | T_UNIQ | T_ATOMIC)
#define _T_ATOMSORT_NONUNIQ (T_SORTED | T_ATOMIC)
@@ -79,6 +80,7 @@
#define IS_HASH(type) (_T_TYPE(type) & T_HASH)
#define IS_HEAP(type) (_T_TYPE(type) & T_HEAP)
#define IS_ATOMIC(type) (_T_TYPE(type) & T_ATOMIC)
+#define IS_REVERSE(type) (_T_TYPE(type) & T_REVERSE)
static struct timeval ref, ref0;
diff --git a/tests/lib/test_typelist.h b/tests/lib/test_typelist.h
index 8261616ed2..e3579c67a2 100644
--- a/tests/lib/test_typelist.h
+++ b/tests/lib/test_typelist.h
@@ -31,6 +31,11 @@
#define list_const_next concat(TYPE, _const_next)
#define list_next concat(TYPE, _next)
#define list_next_safe concat(TYPE, _next_safe)
+#define list_const_last concat(TYPE, _const_last)
+#define list_last concat(TYPE, _last)
+#define list_const_prev concat(TYPE, _const_prev)
+#define list_prev concat(TYPE, _prev)
+#define list_prev_safe concat(TYPE, _prev_safe)
#define list_count concat(TYPE, _count)
#define list_add concat(TYPE, _add)
#define list_add_head concat(TYPE, _add_head)
@@ -171,6 +176,9 @@ static void concat(test_, TYPE)(void)
list_init(&head);
assert(list_first(&head) == NULL);
+#if IS_REVERSE(REALTYPE)
+ assert(list_last(&head) == NULL);
+#endif
ts_hash("init", "df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119");
@@ -203,6 +211,10 @@ static void concat(test_, TYPE)(void)
assert(!list_first(&head));
assert(list_count(&other) == k);
assert(list_first(&other) != NULL);
+#if IS_REVERSE(REALTYPE)
+ assert(!list_last(&head));
+ assert(list_last(&other) != NULL);
+#endif
ts_hash_headx(
&other, "swap1",
"a538546a6e6ab0484e925940aa8dd02fd934408bbaed8cb66a0721841584d838");
@@ -269,13 +281,36 @@ static void concat(test_, TYPE)(void)
(void)cprev;
#else
assert(!cprev || cprev->val < citem->val);
+#if IS_REVERSE(REALTYPE)
+ assert(list_const_prev(chead, citem) == cprev);
+#endif
#endif
cprev = citem;
k++;
}
assert(list_count(chead) == k);
+#if IS_REVERSE(REALTYPE)
+ assert(cprev == list_const_last(chead));
+#endif
ts_ref("walk");
+#if IS_REVERSE(REALTYPE) && !IS_HASH(REALTYPE) && !IS_HEAP(REALTYPE)
+ cprev = NULL;
+ k = 0;
+
+ frr_rev_each(list_const, chead, citem) {
+ assert(!cprev || cprev->val > citem->val);
+ assert(list_const_next(chead, citem) == cprev);
+
+ cprev = citem;
+ k++;
+ }
+ assert(list_count(chead) == k);
+ assert(cprev == list_const_first(chead));
+
+ ts_ref("reverse-walk");
+#endif
+
#if IS_UNIQ(REALTYPE)
prng_free(prng);
prng = prng_new(0);
@@ -439,6 +474,9 @@ static void concat(test_, TYPE)(void)
}
assert(list_count(&head) == k);
assert(list_first(&head) != NULL);
+#if IS_REVERSE(REALTYPE)
+ assert(list_last(&head) != NULL);
+#endif
ts_hash("fill / add_tail", "eabfcf1413936daaf20965abced95762f45110a6619b84aac7d38481bce4ea19");
#if !IS_ATOMIC(REALTYPE)
@@ -451,6 +489,10 @@ static void concat(test_, TYPE)(void)
assert(!list_first(&head));
assert(list_count(&other) == k);
assert(list_first(&other) != NULL);
+#if IS_REVERSE(REALTYPE)
+ assert(!list_last(&head));
+ assert(list_last(&other) != NULL);
+#endif
ts_hash_head(
&other, "swap1",
"eabfcf1413936daaf20965abced95762f45110a6619b84aac7d38481bce4ea19");
@@ -534,6 +576,21 @@ static void concat(test_, TYPE)(void)
}
ts_hash("member", "42b8950c880535b2d2e0c980f9845f7841ecf675c0fb9801aec4170d2036349d");
#endif
+#if IS_REVERSE(REALTYPE)
+ i = 0;
+ prev = NULL;
+
+ frr_rev_each (list, &head, item) {
+ assert(item->scratchpad != 0);
+ assert(list_next(&head, item) == prev);
+
+ i++;
+ prev = item;
+ }
+ assert(list_first(&head) == prev);
+ assert(list_count(&head) == i);
+ ts_hash("reverse-walk", "42b8950c880535b2d2e0c980f9845f7841ecf675c0fb9801aec4170d2036349d");
+#endif
while ((item = list_pop(&head))) {
assert(item->scratchpad != 0);
@@ -746,6 +803,13 @@ static void concat(test_, TYPE)(void)
#undef list_first
#undef list_next
#undef list_next_safe
+#undef list_const_first
+#undef list_const_next
+#undef list_last
+#undef list_prev
+#undef list_prev_safe
+#undef list_const_last
+#undef list_const_prev
#undef list_count
#undef list_add
#undef list_add_head
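The test_typelist.h additions above exercise the new reverse-iteration API (list_last, list_prev and the frr_rev_each macro) on the container types flagged T_REVERSE, checking that a backward walk visits each element's forward neighbour as the item seen immediately before and ends on the first element. A minimal, self-contained sketch of that invariant on a plain doubly linked list (not the FRR typesafe containers):

    #include <assert.h>
    #include <stddef.h>

    struct node {
            struct node *prev, *next;
    };

    int main(void)
    {
            struct node n[3];
            struct node *head = &n[0], *tail = &n[2];
            struct node *seen = NULL;

            /* build head <-> n[1] <-> tail */
            n[0].prev = NULL;  n[0].next = &n[1];
            n[1].prev = &n[0]; n[1].next = &n[2];
            n[2].prev = &n[1]; n[2].next = NULL;

            /* reverse walk: each node's ->next must be the item seen just before */
            for (struct node *it = tail; it; it = it->prev) {
                    assert(it->next == seen);
                    seen = it;
            }
            assert(seen == head); /* the walk ends on the first element */
            return 0;
    }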
diff --git a/tests/topotests/analyze.py b/tests/topotests/analyze.py
index 888e706339..bdb2e56ee1 100755
--- a/tests/topotests/analyze.py
+++ b/tests/topotests/analyze.py
@@ -198,9 +198,12 @@ def main():
logging.critical("%s doesn't exist", args.results)
sys.exit(1)
ttfiles = [args.results]
+ elif os.path.exists("/tmp/topotests/topotests.xml"):
+ ttfiles.append("/tmp/topotests/topotests.xml")
- if not ttfiles and os.path.exists("/tmp/topotests.xml"):
- ttfiles.append("/tmp/topotests.xml")
+ if not ttfiles:
+ if os.path.exists("/tmp/topotests.xml"):
+ ttfiles.append("/tmp/topotests.xml")
for f in ttfiles:
m = re.match(r"tt-group-(\d+)/topotests.xml", f)
diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak_topo3/bgp_vrf_dynamic_route_leak_topo3.json b/tests/topotests/bgp_vrf_dynamic_route_leak_topo3/bgp_vrf_dynamic_route_leak_topo3.json
new file mode 100644
index 0000000000..9c73baff14
--- /dev/null
+++ b/tests/topotests/bgp_vrf_dynamic_route_leak_topo3/bgp_vrf_dynamic_route_leak_topo3.json
@@ -0,0 +1,1088 @@
+{
+ "address_types": ["ipv4","ipv6"],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "r3-link1": {"ipv4": "13.1.1.1/24", "ipv6": "13::1:1/120", "vrf": "RED"},
+ "r3-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r3-link3": {"ipv4": "13.1.1.1/24", "ipv6": "13::1:1/120", "vrf": "GREEN"},
+ "r3-link4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "vrfs":[
+ {"name": "RED", "id": "1"},
+ {"name": "BLUE", "id": "2"},
+ {"name": "GREEN", "id": "3"}
+ ],
+ "bgp":
+ [
+ {
+ "local_as": "1",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "1",
+ "vrf": "BLUE",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "1",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "1",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ],
+ "route_maps": {
+ "rmap_global": [{
+ "action": "permit",
+ "set": {
+ "ipv6": {
+ "nexthop": "prefer-global"
+ }
+ }
+ }]
+ }
+ },
+ "r2": {
+ "links": {
+ "r3-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r3-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r3-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r3-link4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "vrfs":[
+ {"name": "RED", "id": "1"},
+ {"name": "BLUE", "id": "2"},
+ {"name": "GREEN", "id": "3"}
+ ],
+ "bgp":
+ [
+ {
+ "local_as": "2",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "2",
+ "vrf": "BLUE",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "2",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "2",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ],
+ "route_maps": {
+ "rmap_global": [{
+ "action": "permit",
+ "set": {
+ "ipv6": {
+ "nexthop": "prefer-global"
+ }
+ }
+ }]
+ }
+ },
+ "r3": {
+ "links": {
+ "r1-link1": {"ipv4": "13.1.1.2/24", "ipv6": "13::1:2/120", "vrf": "RED"},
+ "r1-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r1-link3": {"ipv4": "13.1.1.2/24", "ipv6": "13::1:2/120", "vrf": "GREEN"},
+ "r1-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r2-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r2-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r2-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r4-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r4-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r4-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r4-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r5-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r5-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r5-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r5-link4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "vrfs":[
+ {"name": "RED", "id": "1"},
+ {"name": "BLUE", "id": "2"},
+ {"name": "GREEN", "id": "3"}
+ ],
+ "bgp":
+ [
+ {
+ "local_as": "3",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link1": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link1": {
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "vrf": "BLUE",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link2": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link2": {
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link3": {
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ],
+ "route_maps": {
+ "rmap_global": [{
+ "action": "permit",
+ "set": {
+ "ipv6": {
+ "nexthop": "prefer-global"
+ }
+ }
+ }]
+ }
+ },
+ "r4": {
+ "links": {
+ "r3-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r3-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r3-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r3-link4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "vrfs":[
+ {"name": "RED", "id": "1"},
+ {"name": "BLUE", "id": "2"},
+ {"name": "GREEN", "id": "3"}
+ ],
+ "bgp":
+ [
+ {
+ "local_as": "4",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "4",
+ "vrf": "BLUE",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link2": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "4",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "4",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ]
+ },
+ "r5": {
+ "links": {
+ "r3-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r3-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r3-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r3-link4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "vrfs":[
+ {"name": "RED", "id": "1"},
+ {"name": "BLUE", "id": "2"},
+ {"name": "GREEN", "id": "3"}
+ ],
+ "bgp":
+ [
+ {
+ "local_as": "3",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "vrf": "BLUE",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+}
diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak_topo3/test_bgp_vrf_dynamic_route_leak_topo3.py b/tests/topotests/bgp_vrf_dynamic_route_leak_topo3/test_bgp_vrf_dynamic_route_leak_topo3.py
new file mode 100644
index 0000000000..1cf1b29097
--- /dev/null
+++ b/tests/topotests/bgp_vrf_dynamic_route_leak_topo3/test_bgp_vrf_dynamic_route_leak_topo3.py
@@ -0,0 +1,1803 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test BGP Multi-VRF Dynamic Route Leaking:
+1. Verify that with multiple tenant VRFs, dynamically imported routes are
+ further advertised to eBGP peers.
+2. Verify the route-map operations along with dynamic import command
+3. Verify that deleting static routes from originating VRF also deletes
+ routes from other VRFs and peers.
+4. Verify that deleting and adding "import" command multiple times shows
+ consistent results.
+"""
+
+import os
+import sys
+import time
+import pytest
+import platform
+from time import sleep
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topotest import version_cmp
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ check_address_types,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ step,
+ create_route_maps,
+ create_static_routes,
+ create_prefix_lists,
+ create_bgp_community_lists,
+ get_frr_ipv6_linklocal,
+)
+
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ verify_bgp_community,
+ verify_bgp_rib,
+)
+from lib.topojson import build_config_from_json
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+# Global variables
+NETWORK1_1 = {"ipv4": "11.11.11.1/32", "ipv6": "11:11::1/128"}
+NETWORK1_2 = {"ipv4": "11.11.11.11/32", "ipv6": "11:11::11/128"}
+NETWORK1_3 = {"ipv4": "10.10.10.1/32", "ipv6": "10:10::1/128"}
+NETWORK1_4 = {"ipv4": "10.10.10.100/32", "ipv6": "10:10::100/128"}
+NETWORK1_5 = {"ipv4": "110.110.110.1/32", "ipv6": "110:110::1/128"}
+NETWORK1_6 = {"ipv4": "110.110.110.100/32", "ipv6": "110:110::100/128"}
+
+NETWORK2_1 = {"ipv4": "22.22.22.2/32", "ipv6": "22:22::2/128"}
+NETWORK2_2 = {"ipv4": "22.22.22.22/32", "ipv6": "22:22::22/128"}
+NETWORK2_3 = {"ipv4": "20.20.20.20/32", "ipv6": "20:20::20/128"}
+NETWORK2_4 = {"ipv4": "20.20.20.200/32", "ipv6": "20:20::200/128"}
+NETWORK2_5 = {"ipv4": "220.220.220.20/32", "ipv6": "220:220::20/128"}
+NETWORK2_6 = {"ipv4": "220.220.220.200/32", "ipv6": "220:220::200/128"}
+
+NETWORK3_1 = {"ipv4": "30.30.30.3/32", "ipv6": "30:30::3/128"}
+NETWORK3_2 = {"ipv4": "30.30.30.30/32", "ipv6": "30:30::30/128"}
+
+PREFIX_LIST = {
+ "ipv4": ["11.11.11.1", "22.22.22.2", "22.22.22.22"],
+ "ipv6": ["11:11::1", "22:22::2", "22:22::22"],
+}
+PREFERRED_NEXT_HOP = "global"
+VRF_LIST = ["RED", "BLUE", "GREEN"]
+COMM_VAL_1 = "100:100"
+COMM_VAL_2 = "500:500"
+COMM_VAL_3 = "600:600"
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_vrf_dynamic_route_leak_topo3.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Run these tests for kernel version 4.19 or above
+ if version_cmp(platform.release(), "4.19") < 0:
+ error_msg = (
+ "BGP vrf dynamic route leak tests will not run "
+ '(have kernel "{}", but it requires >= 4.19)'.format(platform.release())
+ )
+ pytest.skip(error_msg)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global BGP_CONVERGENCE
+ global ADDR_TYPES
+ ADDR_TYPES = check_address_types()
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_dynamic_import_routes_advertised_to_ebgp_peers_p0(request):
+ """
+ Verify that with multiple tenant VRFs, dynamically imported routes are
+ further advertised to eBGP peers.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ reset_config_on_routers(tgen)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step(
+ "Configure static routes on R2 and R3 and redistribute in BGP for "
+ "BLUE and RED vrf instances"
+ )
+ for dut, network in zip(
+ ["r2", "r3"], [[NETWORK1_1, NETWORK1_2], [NETWORK2_1, NETWORK2_2]]
+ ):
+ for vrf_name, network_vrf in zip(["RED", "BLUE"], network):
+ step("Configure static route for VRF : {} on {}".format(vrf_name, dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [network_vrf[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for dut, as_num in zip(["r2", "r3"], ["2", "3"]):
+ for vrf_name in ["RED", "BLUE"]:
+ step("Redistribute static route on BGP VRF : {}".format(vrf_name))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
+ }
+ }
+ )
+
+ redist_dict = {
+ dut: {
+ "bgp": [
+ {"vrf": vrf_name, "local_as": as_num, "address_family": temp}
+ ]
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that R2 and R3 has installed redistributed routes in BLUE "
+ "and RED vrfs"
+ )
+ for dut, network in zip(
+ ["r2", "r3"], [[NETWORK2_1, NETWORK2_2], [NETWORK1_1, NETWORK1_2]]
+ ):
+ for vrf_name, network_vrf in zip(["RED", "BLUE"], network):
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [network_vrf[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Import BLUE vrf's route in tenant vrf RED on R2 and then import "
+ "vrf RED's routes into BLUE vrf on R3"
+ )
+
+ for dut, as_num, vrf_name, vrf_import in zip(
+ ["r2", "r3"], ["2", "3"], ["RED", "BLUE"], ["BLUE", "RED"]
+ ):
+ step("Import vrf {} int vrf {}, on router {}".format(vrf_import, vrf_name, dut))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": vrf_import}}}})
+
+ import_dict = {
+ dut: {
+ "bgp": [{"vrf": vrf_name, "local_as": as_num, "address_family": temp}]
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that R2's vrf RED and R3's vrf BLUE has installed 4 set of "
+ "prefixes. Routes imported from BLUE vrf (originated R2's & received "
+ "from R3). Vrf RED's local routes (originated by R2's & received "
+ "from R3)"
+ )
+ step(
+ "Verify that R2 and R3 has installed redistributed routes in BLUE "
+ "and RED vrfs"
+ )
+
+ for dut, vrf_name in zip(["r2", "r3"], ["RED", "BLUE"]):
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK1_2[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK2_2[addr_type],
+ ],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Additionally, R2 receives R3's BLUE vrf's prefixes and then import "
+ "into vrf RED. These imported routes are advertised back to "
+ "(originator)R3 but now in vrf RED, however R3 doesn't install these "
+ "in vrf RED. Denied due to own AS"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK1_2[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK2_2[addr_type],
+ ],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name, result, static_routes["r3"]["static_routes"][0]["network"]
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Remove import vrf BLUE from vrf RED's instance on R2.")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"import": {"vrf": "BLUE", "delete": True}}}}
+ )
+
+ import_dict = {
+ "r2": {"bgp": [{"vrf": "RED", "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ step(
+ "Verify on R3 that, there is no change in FIB of vrf BLUE and R2's "
+ "BLUE vrf originated routes are removed from vrf RED on R3."
+ )
+ for vrf_name in ["RED", "BLUE"]:
+ for addr_type in ADDR_TYPES:
+ if vrf_name == "RED":
+ network_vrf = [NETWORK1_1[addr_type], NETWORK2_1[addr_type]]
+ elif vrf_name == "BLUE":
+ network_vrf = [
+ NETWORK1_1[addr_type],
+ NETWORK1_2[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK2_2[addr_type],
+ ]
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": network_vrf,
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Remove import vrf BLUE from vrf RED's instance on R2.")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": "BLUE"}}}})
+
+ import_dict = {
+ "r2": {"bgp": [{"vrf": "RED", "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "All the routes described in earlier step should be added, once "
+ "import command on R2 is re-added."
+ )
+ for dut, vrf_name in zip(["r2", "r3"], ["RED", "BLUE"]):
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK1_2[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK2_2[addr_type],
+ ],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Remove import vrf RED from BLUE vrf on R3")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"import": {"vrf": "RED", "delete": True}}}}
+ )
+
+ import_dict = {
+ "r3": {"bgp": [{"vrf": "BLUE", "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify on R2 that, there is no change in FIB of vrf RED and R3's "
+ "vrf RED's originated routes are removed from vrf BLUE on R2."
+ )
+ for vrf_name in ["RED", "BLUE"]:
+ for addr_type in ADDR_TYPES:
+ if vrf_name == "BLUE":
+ network_vrf = [NETWORK1_2[addr_type], NETWORK2_2[addr_type]]
+ elif vrf_name == "RED":
+ network_vrf = [
+ NETWORK1_1[addr_type],
+ NETWORK1_2[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK2_2[addr_type],
+ ]
+ static_routes = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": network_vrf,
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Add import vrf RED from BLUE vrf on R3")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": "RED"}}}})
+
+ import_dict = {
+ "r3": {"bgp": [{"vrf": "BLUE", "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "All the routes described in earlier step should be added, once "
+ "import command on R2 is re-added."
+ )
+ for dut, vrf_name in zip(["r2", "r3"], ["RED", "BLUE"]):
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK1_2[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK2_2[addr_type],
+ ],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_dynamic_imported_matching_prefix_based_on_community_list_p0(request):
+ """
+ Verify the route-map operations along with dynamic import command
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ reset_config_on_routers(tgen)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step(
+ "Configure static routes on R3 for vrf RED and redistribute in BGP " "instance"
+ )
+ for vrf_name, networks in zip(
+ ["RED", "BLUE"], [[NETWORK1_1, NETWORK1_2], [NETWORK2_1, NETWORK2_2]]
+ ):
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [networks[0][addr_type], networks[1][addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure route-map to set community attribute for a specific " "prefix on R3"
+ )
+ for addr_type in ADDR_TYPES:
+ input_dict_pf = {
+ "r3": {
+ "prefix_lists": {
+ addr_type: {
+ "pflist_ABC_{}".format(addr_type): [
+ {
+ "seqid": 10,
+ "network": NETWORK1_1[addr_type],
+ "action": "permit",
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_pf)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ input_dict_cl = {
+ "r3": {
+ "bgp_community_lists": [
+ {
+ "community_type": "expanded",
+ "action": "permit",
+ "name": "COMM",
+ "value": COMM_VAL_1,
+ }
+ ]
+ }
+ }
+ result = create_bgp_community_lists(tgen, input_dict_cl)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_rm = {
+ "r3": {
+ "route_maps": {
+ "rmap_XYZ_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pflist_ABC_{}".format(addr_type)
+ }
+ },
+ "set": {"community": {"num": COMM_VAL_1}},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_rm)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Apply this route-map on R3 to set community under vrf RED/BLUE "
+ "while redistributing the prefixes into BGP"
+ )
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "redistribute": [
+ {
+ "redist_type": "static",
+ "attribute": {
+ "route-map": "rmap_XYZ_{}".format(addr_type)
+ },
+ }
+ ]
+ }
+ }
+ }
+ )
+
+ for vrf_name in ["RED", "BLUE"]:
+ redist_dict = {
+ "r3": {"bgp": [{"vrf": vrf_name, "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that specific prefixes matched in route-map have community "
+ "attribute value 100:100 tagged"
+ )
+ input_dict_comm = {"community": COMM_VAL_1}
+ for addr_type in ADDR_TYPES:
+ result = verify_bgp_community(
+ tgen, addr_type, "r3", [NETWORK1_1[addr_type]], input_dict_comm, vrf="RED"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure a route-map for filtering the prefixes based on community "
+ "attribute while importing into default vrf"
+ )
+ for addr_type in ADDR_TYPES:
+ input_dict_rm = {
+ "r3": {
+ "route_maps": {
+ "rmap_IMP_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": 10,
+ "match": {"community_list": {"id": "COMM"}},
+ "set": {"community": {"num": COMM_VAL_2}},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_rm)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Apply the route-map while Importing vrf RED/BLUE's prefixes into "
+ "GREEN vrf on router R3"
+ )
+ temp = {}
+ for vrf_name in ["RED", "BLUE"]:
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": vrf_name}}}})
+
+ import_dict = {
+ "r3": {"bgp": [{"vrf": "GREEN", "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "import": {"vrf": "route-map rmap_IMP_{}".format(addr_type)}
+ }
+ }
+ }
+ )
+
+ import_dict = {
+ "r3": {"bgp": [{"vrf": "GREEN", "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_comm = {"community": COMM_VAL_2}
+ step(
+ "Verify on R3 that only prefixes with community value {} in vrf RED "
+ "are imported to vrf GREEN. While importing, the community value "
+ "has been changed to {}".format(COMM_VAL_1, COMM_VAL_2)
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [{"network": [NETWORK1_1[addr_type]], "vrf": "GREEN"}]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK2_1[addr_type],
+ NETWORK2_2[addr_type],
+ NETWORK1_2[addr_type],
+ ],
+ "vrf": "GREEN",
+ }
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name, result, static_routes["r3"]["static_routes"][0]["network"]
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name, result, static_routes["r3"]["static_routes"][0]["network"]
+ )
+
+ result = verify_bgp_community(
+ tgen, addr_type, "r3", [NETWORK1_1[addr_type]], input_dict_comm, vrf="GREEN"
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value in zip(["Delete", "Add"], [True, False]):
+ step("{} import vrf RED/BLUE command one by one from vrf GREEN".format(action))
+ temp = {}
+ for vrf_name in ["RED", "BLUE"]:
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {"import": {"vrf": vrf_name, "delete": value}}
+ }
+ }
+ )
+
+ import_dict = {
+ "r3": {"bgp": [{"vrf": "GREEN", "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that when import vrf RED/BLUE is {} one by one, all "
+ "routes of respective vrf disappear from vrf GREEN without "
+ "affecting (BLUE/RED) routes".format(action.lower())
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {"network": [NETWORK1_1[addr_type]], "vrf": "GREEN"}
+ ]
+ }
+ }
+
+ if value:
+ result = verify_bgp_rib(
+ tgen, addr_type, "r3", static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes["r3"]["static_routes"][0]["network"],
+ )
+
+ result = verify_rib(
+ tgen, addr_type, "r3", static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name,
+ result,
+ static_routes["r3"]["static_routes"][0]["network"],
+ )
+ else:
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value in zip(["Delete", "Re-add"], [True, False]):
+ step(
+ "{} route-map IMP from global config when import and route-maps "
+ "are applied in vrf GREEN".format(action)
+ )
+ for addr_type in ADDR_TYPES:
+ input_dict_rm = {
+ "r3": {
+ "route_maps": {
+ "rmap_IMP_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": 10,
+ "match": {"community_list": {"id": "COMM"}},
+ "set": {"community": {"num": COMM_VAL_2}},
+ "delete": value,
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_rm)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that when import vrf RED/BLUE is {} one by one, all "
+ "routes of respective vrf disappear from vrf GREEN without "
+ "affecting (BLUE/RED) routes".format(action.lower())
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {"network": [NETWORK1_1[addr_type]], "vrf": "GREEN"}
+ ]
+ }
+ }
+
+ if value:
+ result = verify_bgp_rib(
+ tgen, addr_type, "r3", static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes["r3"]["static_routes"][0]["network"],
+ )
+
+ result = verify_rib(
+ tgen, addr_type, "r3", static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name,
+ result,
+ static_routes["r3"]["static_routes"][0]["network"],
+ )
+ else:
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_dynamic_import_routes_delete_static_route_p1(request):
+ """
+ Verify that deleting static routes from originating VRF also deletes
+ routes from other VRFs and peers.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ reset_config_on_routers(tgen)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step(
+ "Configure static routes on R3 for each tenant vrf and redistribute "
+ "in respective BGP instance"
+ )
+ vrf_list = VRF_LIST + ["default"]
+ for vrf_name, network in zip(
+ vrf_list, [NETWORK1_1, NETWORK2_1, NETWORK3_1, NETWORK1_2]
+ ):
+ step("Configure static route for VRF : {}".format(vrf_name))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Redistribute static route on BGP VRF : {}".format(vrf_name))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"redistribute": [{"redist_type": "static"}]}}}
+ )
+
+ redist_dict = {
+ "r3": {"bgp": [{"vrf": vrf_name, "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for vrf_name, network in zip(vrf_list, [NETWORK1_1, NETWORK2_1, NETWORK3_1]):
+ step(
+ "Verify that R3 has installed redistributed routes in respective "
+ "vrfs: {}".format(vrf_name)
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Import routes among vrfs as mentioned below on router R3")
+
+ for vrf_name, vrf_import in zip(
+ ["GREEN", "BLUE", "default"], ["RED", "GREEN", "BLUE"]
+ ):
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": vrf_import}}}})
+
+ import_dict = {
+ "r3": {"bgp": [{"vrf": vrf_name, "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
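+ # Import chain configured above on R3: GREEN imports RED, BLUE imports
+ # GREEN, and the default vrf imports BLUE. The checks below confirm that
+ # only routes originated locally in the imported vrf are leaked further,
+ # i.e. routes a vrf itself imported are not re-exported by a later import.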
+
+ for vrf_name, vrf_import, installed, not_installed in zip(
+ ["BLUE", "default"],
+ ["GREEN", "BLUE"],
+ [NETWORK3_1, NETWORK2_1],
+ [NETWORK1_1, NETWORK3_1],
+ ):
+ step(
+ "Verify that only locally originated routes of vrf {} are "
+ "advertised to vrf {}".format(vrf_import, vrf_name)
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {"network": [installed[addr_type]], "vrf": vrf_name}
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that non local originated routes {} of vrf {} are "
+ "not advertised to vrf {}".format(
+ not_installed[addr_type], vrf_import, vrf_name
+ )
+ )
+
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {"network": [not_installed[addr_type]], "vrf": vrf_name}
+ ]
+ }
+ }
+ result = verify_bgp_rib(
+ tgen, addr_type, "r2", static_routes, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \nError {}\n"
+ "Routes {} still in BGP table".format(
+ tc_name, result, static_routes["r2"]["static_routes"][0]["network"]
+ )
+ )
+
+ result = verify_rib(tgen, addr_type, "r2", static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name, result, static_routes["r2"]["static_routes"][0]["network"]
+ )
+
+ step("Delete static routes from vrf RED")
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ "delete": True,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "Verify on R2 and R3, that only vrf RED and GREEN's RIB/FIB withdraw "
+ "deleted routes"
+ )
+ for dut in ["r2", "r3"]:
+ step(
+ "Verify on {}, that only vrf RED and GREEN's RIB/FIB withdraw "
+ "deleted routes".format(dut)
+ )
+ for vrf_name in ["RED", "GREEN"]:
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {"network": [NETWORK1_1[addr_type]], "vrf": vrf_name}
+ ]
+ }
+ }
+ result = verify_bgp_rib(
+ tgen, addr_type, "r2", static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes["r2"]["static_routes"][0]["network"],
+ )
+
+ result = verify_rib(
+ tgen, addr_type, "r2", static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+
+ step("Delete static routes from vrf BLUE")
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": "BLUE",
+ "delete": True,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for dut in ["r2", "r3"]:
+ step(
+ "Verify on {}, that only default and BLUE vrf's RIB/FIB "
+ "withdraw deleted routes".format(dut)
+ )
+ for vrf_name in ["BLUE", "default"]:
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {"network": [NETWORK2_1[addr_type]], "vrf": vrf_name}
+ ]
+ }
+ }
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+
+ step("Delete static routes from vrf default")
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [NETWORK1_2[addr_type]],
+ "next_hop": "blackhole",
+ "delete": True,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for dut in ["r2", "r3"]:
+ step(
+ "Verify on {}, that only default vrf RIB/FIB withdraw deleted "
+ "routes".format(dut)
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {"network": [NETWORK1_2[addr_type]], "vrf": vrf_name}
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \nError {}\n"
+ "Routes {} still in BGP table".format(
+ tc_name, result, static_routes["r3"]["static_routes"][0]["network"]
+ )
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Add back all the routes that were deleted")
+ for vrf_name, network in zip(
+ vrf_list, [NETWORK1_1, NETWORK2_1, NETWORK3_1, NETWORK1_2]
+ ):
+ step("Configure static route for VRF : {}".format(vrf_name))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Redistribute static route on BGP VRF : {}".format(vrf_name))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"redistribute": [{"redist_type": "static"}]}}}
+ )
+
+ redist_dict = {
+ "r3": {"bgp": [{"vrf": vrf_name, "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for vrf_name, network in zip(vrf_list, [NETWORK1_1, NETWORK2_1, NETWORK3_1]):
+ step(
+ "Verify that R3 has installed redistributed routes in respective "
+ "vrfs: {}".format(vrf_name)
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_dynamic_import_routes_add_delete_import_command_p1(request):
+ """
+ Verify that deleting and adding "import" command multiple times shows
+ consistent results.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ reset_config_on_routers(tgen)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step(
+ "Configure static routes on R2 for vrf RED and redistribute in "
+ "respective BGP instance"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Redistribute static route on BGP VRF RED")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"redistribute": [{"redist_type": "static"}]}}}
+ )
+
+ redist_dict = {
+ "r2": {"bgp": [{"vrf": "RED", "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that R2 has installed redistributed routes in respective " "vrfs only")
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [{"network": [NETWORK2_1[addr_type]], "vrf": "RED"}]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Import vrf RED's routes into vrf GREEN on R2")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": "RED"}}}})
+
+ import_dict = {
+ "r2": {"bgp": [{"vrf": "GREEN", "local_as": 2, "address_family": temp}]}
+ }
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify on R2, that it installs imported routes from vrf RED to vrf "
+ "GREEN's RIB/FIB"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [{"network": [NETWORK2_1[addr_type]], "vrf": "GREEN"}]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("On R3 import routes from vrfs GREEN to default")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": "GREEN"}}}})
+
+ import_dict = {"r3": {"bgp": [{"local_as": 3, "address_family": temp}]}}
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that R2's vrf RED routes are now imported into vrf default "
+ "of R3, next-hop pointing to vrf GREEN"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {"static_routes": [{"network": [NETWORK2_1[addr_type]]}]}
+ }
+
+ next_hop_1 = topo["routers"]["r2"]["links"]["r3-link3"][addr_type].split("/")[0]
+ result = verify_bgp_rib(
+ tgen, addr_type, "r3", static_routes, next_hop=next_hop_1
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes, next_hop=next_hop_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Delete import command from R3's default vrf instance for both "
+ "address-families 1 by 1 (ipv4/ipv6)"
+ )
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"import": {"vrf": "GREEN", "delete": True}}}}
+ )
+
+ import_dict = {"r3": {"bgp": [{"local_as": 3, "address_family": temp}]}}
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that R2's vrf RED routes are now removed from vrf "
+ "default on R3, however vrf GREEN still retains those"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {"static_routes": [{"network": [NETWORK2_1[addr_type]]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name, result, static_routes["r3"]["static_routes"][0]["network"]
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Delete import command from R2's vrf GREEN instance for both "
+ "address-families 1 by 1 (ipv4/ipv6)"
+ )
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"import": {"vrf": "RED", "delete": True}}}}
+ )
+
+ import_dict = {
+ "r2": {"bgp": [{"vrf": "GREEN", "local_as": 2, "address_family": temp}]}
+ }
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ step(
+ "Verify that R2's vrf RED routes are now removed from vrf GREEN "
+ "on R2 & R3 as well"
+ )
+ for dut in ["r2", "r3"]:
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [
+ {"network": [NETWORK2_1[addr_type]], "vrf": "GREEN"}
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \nError {}\n"
+ "Routes {} still in BGP table".format(
+ tc_name, result, static_routes["r2"]["static_routes"][0]["network"]
+ )
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name, result, static_routes["r2"]["static_routes"][0]["network"]
+ )
+
+ step(
+ "Add import command from R3's default vrf instance for both "
+ "address-families 1 by 1 (ipv4/ipv6)"
+ )
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": "GREEN"}}}})
+
+ import_dict = {"r3": {"bgp": [{"local_as": 3, "address_family": temp}]}}
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that there are no routes installed on R3's vrf default " "RIB/FIB.")
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {"static_routes": [{"network": [NETWORK2_1[addr_type]]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name, result, static_routes["r3"]["static_routes"][0]["network"]
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Add import command from R2's vrf GREEN instance for both "
+ "address-families 1 by 1 (ipv4/ipv6)."
+ )
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": "RED"}}}})
+
+ import_dict = {
+ "r2": {"bgp": [{"vrf": "GREEN", "local_as": 2, "address_family": temp}]}
+ }
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that R2's vrf RED routes are now imported into vrf "
+ "default of R3, next-hop pointing to vrf GREEN"
+ )
+ for dut in ["r2", "r3"]:
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [
+ {"network": [NETWORK2_1[addr_type]], "vrf": "GREEN"}
+ ]
+ }
+ }
+ if dut == "r3":
+ next_hop_1 = topo["routers"]["r2"]["links"]["r3-link3"][
+ addr_type
+ ].split("/")[0]
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, static_routes, next_hop=next_hop_1
+ )
+ else:
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ if dut == "r3":
+ result = verify_rib(
+ tgen, addr_type, dut, static_routes, next_hop=next_hop_1
+ )
+ else:
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Delete import command from R3's default vrf instance for both "
+ "address-families 1 by 1 (ipv4/ipv6)."
+ )
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"import": {"vrf": "GREEN", "delete": True}}}}
+ )
+
+ import_dict = {"r3": {"bgp": [{"local_as": 3, "address_family": temp}]}}
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that R2's vrf RED routes are now removed from vrf "
+ "default on R3, however vrf GREEN still retains those."
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [{"network": [NETWORK2_1[addr_type]], "vrf": "GREEN"}]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ static_routes = {
+ "r2": {"static_routes": [{"network": [NETWORK2_1[addr_type]]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name, result, static_routes["r3"]["static_routes"][0]["network"]
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Delete redistribute static from R2 for vrf RED")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "redistribute": [{"redist_type": "static", "delete": True}]
+ }
+ }
+ }
+ )
+
+ redist_dict = {
+ "r2": {"bgp": [{"vrf": "RED", "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that R2's vrf RED routes are now removed from vrf GREEN "
+ "on R2 & R3 as well."
+ )
+ for dut in ["r2", "r3"]:
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [
+ {"network": [NETWORK2_1[addr_type]], "vrf": "GREEN"}
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \nError {}\n"
+ "Routes {} still in BGP table".format(
+ tc_name, result, static_routes["r2"]["static_routes"][0]["network"]
+ )
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name, result, static_routes["r2"]["static_routes"][0]["network"]
+ )
+
+ step(
+ "Add import command from R3's default vrf instance for both "
+ "address-families 1 by 1 (ipv4/ipv6)."
+ )
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": "GREEN"}}}})
+
+ import_dict = {"r3": {"bgp": [{"local_as": 3, "address_family": temp}]}}
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that there are no routes installed on R3's vrf default " "RIB/FIB")
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {"static_routes": [{"network": [NETWORK2_1[addr_type]]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name, result, static_routes["r3"]["static_routes"][0]["network"]
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Add redistribute static from R2 for vrf RED")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"redistribute": [{"redist_type": "static"}]}}}
+ )
+
+ redist_dict = {
+ "r2": {"bgp": [{"vrf": "RED", "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that R2's vrf RED routes are now imported into vrf "
+ "default of R3, next-hop pointing to vrf GREEN"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {"static_routes": [{"network": [NETWORK2_1[addr_type]]}]}
+ }
+ next_hop_1 = topo["routers"]["r2"]["links"]["r3-link3"][addr_type].split("/")[0]
+ result = verify_bgp_rib(
+ tgen, addr_type, "r3", static_routes, next_hop=next_hop_1
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes, next_hop=next_hop_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak_topo4/bgp_vrf_dynamic_route_leak_topo4.json b/tests/topotests/bgp_vrf_dynamic_route_leak_topo4/bgp_vrf_dynamic_route_leak_topo4.json
new file mode 100644
index 0000000000..9c73baff14
--- /dev/null
+++ b/tests/topotests/bgp_vrf_dynamic_route_leak_topo4/bgp_vrf_dynamic_route_leak_topo4.json
@@ -0,0 +1,1088 @@
+{
+ "address_types": ["ipv4","ipv6"],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "r3-link1": {"ipv4": "13.1.1.1/24", "ipv6": "13::1:1/120", "vrf": "RED"},
+ "r3-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r3-link3": {"ipv4": "13.1.1.1/24", "ipv6": "13::1:1/120", "vrf": "GREEN"},
+ "r3-link4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "vrfs":[
+ {"name": "RED", "id": "1"},
+ {"name": "BLUE", "id": "2"},
+ {"name": "GREEN", "id": "3"}
+ ],
+ "bgp":
+ [
+ {
+ "local_as": "1",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "1",
+ "vrf": "BLUE",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "1",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "1",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ],
+ "route_maps": {
+ "rmap_global": [{
+ "action": "permit",
+ "set": {
+ "ipv6": {
+ "nexthop": "prefer-global"
+ }
+ }
+ }]
+ }
+ },
+ "r2": {
+ "links": {
+ "r3-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r3-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r3-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r3-link4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "vrfs":[
+ {"name": "RED", "id": "1"},
+ {"name": "BLUE", "id": "2"},
+ {"name": "GREEN", "id": "3"}
+ ],
+ "bgp":
+ [
+ {
+ "local_as": "2",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "2",
+ "vrf": "BLUE",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "2",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "2",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ],
+ "route_maps": {
+ "rmap_global": [{
+ "action": "permit",
+ "set": {
+ "ipv6": {
+ "nexthop": "prefer-global"
+ }
+ }
+ }]
+ }
+ },
+ "r3": {
+ "links": {
+ "r1-link1": {"ipv4": "13.1.1.2/24", "ipv6": "13::1:2/120", "vrf": "RED"},
+ "r1-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r1-link3": {"ipv4": "13.1.1.2/24", "ipv6": "13::1:2/120", "vrf": "GREEN"},
+ "r1-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r2-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r2-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r2-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r4-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r4-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r4-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r4-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r5-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r5-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r5-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r5-link4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "vrfs":[
+ {"name": "RED", "id": "1"},
+ {"name": "BLUE", "id": "2"},
+ {"name": "GREEN", "id": "3"}
+ ],
+ "bgp":
+ [
+ {
+ "local_as": "3",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link1": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link1": {
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "vrf": "BLUE",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link2": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link2": {
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link3": {
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ },
+ "r5": {
+ "dest_link": {
+ "r3-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ],
+ "route_maps": {
+ "rmap_global": [{
+ "action": "permit",
+ "set": {
+ "ipv6": {
+ "nexthop": "prefer-global"
+ }
+ }
+ }]
+ }
+ },
+ "r4": {
+ "links": {
+ "r3-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r3-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r3-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r3-link4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "vrfs":[
+ {"name": "RED", "id": "1"},
+ {"name": "BLUE", "id": "2"},
+ {"name": "GREEN", "id": "3"}
+ ],
+ "bgp":
+ [
+ {
+ "local_as": "4",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "4",
+ "vrf": "BLUE",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link2": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "4",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "4",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ]
+ },
+ "r5": {
+ "links": {
+ "r3-link1": {"ipv4": "auto", "ipv6": "auto", "vrf": "RED"},
+ "r3-link2": {"ipv4": "auto", "ipv6": "auto", "vrf": "BLUE"},
+ "r3-link3": {"ipv4": "auto", "ipv6": "auto", "vrf": "GREEN"},
+ "r3-link4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "vrfs":[
+ {"name": "RED", "id": "1"},
+ {"name": "BLUE", "id": "2"},
+ {"name": "GREEN", "id": "3"}
+ ],
+ "bgp":
+ [
+ {
+ "local_as": "3",
+ "vrf": "RED",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "vrf": "BLUE",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "vrf": "GREEN",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "local_as": "3",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r5-link4": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+}
diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak_topo4/test_bgp_vrf_dynamic_route_leak_topo4.py b/tests/topotests/bgp_vrf_dynamic_route_leak_topo4/test_bgp_vrf_dynamic_route_leak_topo4.py
new file mode 100644
index 0000000000..82daf08e18
--- /dev/null
+++ b/tests/topotests/bgp_vrf_dynamic_route_leak_topo4/test_bgp_vrf_dynamic_route_leak_topo4.py
@@ -0,0 +1,1909 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test BGP Multi-VRF Dynamic Route Leaking:
+1. Verify recursive import among Tenant VRFs.
+2. Verify that dynamic import works fine between two different Tenant VRFs.
+ When next-hop IPs are same across all VRFs.
+ When next-hop IPs are different across all VRFs.
+3. Verify that with multiple tenant VRFs, dynamic import works fine between
+ Tenant VRFs to default VRF.
+ When next-hop IPs and prefixes are same across all VRFs.
+ When next-hop IPs and prefixes are different across all VRFs.
+"""
+
+import os
+import sys
+import time
+import pytest
+import platform
+from time import sleep
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topotest import version_cmp
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ check_address_types,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ step,
+ create_route_maps,
+ create_static_routes,
+ create_prefix_lists,
+ create_bgp_community_lists,
+ get_frr_ipv6_linklocal,
+)
+
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ verify_bgp_community,
+ verify_bgp_rib,
+)
+from lib.topojson import build_config_from_json
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+# Global variables
+NETWORK1_1 = {"ipv4": "11.11.11.1/32", "ipv6": "11:11::1/128"}
+NETWORK1_2 = {"ipv4": "11.11.11.11/32", "ipv6": "11:11::11/128"}
+NETWORK1_3 = {"ipv4": "10.10.10.1/32", "ipv6": "10:10::1/128"}
+NETWORK1_4 = {"ipv4": "10.10.10.100/32", "ipv6": "10:10::100/128"}
+NETWORK1_5 = {"ipv4": "110.110.110.1/32", "ipv6": "110:110::1/128"}
+NETWORK1_6 = {"ipv4": "110.110.110.100/32", "ipv6": "110:110::100/128"}
+
+NETWORK2_1 = {"ipv4": "22.22.22.2/32", "ipv6": "22:22::2/128"}
+NETWORK2_2 = {"ipv4": "22.22.22.22/32", "ipv6": "22:22::22/128"}
+NETWORK2_3 = {"ipv4": "20.20.20.20/32", "ipv6": "20:20::20/128"}
+NETWORK2_4 = {"ipv4": "20.20.20.200/32", "ipv6": "20:20::200/128"}
+NETWORK2_5 = {"ipv4": "220.220.220.20/32", "ipv6": "220:220::20/128"}
+NETWORK2_6 = {"ipv4": "220.220.220.200/32", "ipv6": "220:220::200/128"}
+
+NETWORK3_1 = {"ipv4": "30.30.30.3/32", "ipv6": "30:30::3/128"}
+NETWORK3_2 = {"ipv4": "30.30.30.30/32", "ipv6": "30:30::30/128"}
+
+PREFIX_LIST = {
+ "ipv4": ["11.11.11.1", "22.22.22.2", "22.22.22.22"],
+ "ipv6": ["11:11::1", "22:22::2", "22:22::22"],
+}
+PREFERRED_NEXT_HOP = "global"
+VRF_LIST = ["RED", "BLUE", "GREEN"]
+COMM_VAL_1 = "100:100"
+COMM_VAL_2 = "500:500"
+COMM_VAL_3 = "600:600"
+
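+ # Topology summary: r1, r2, r4 and r5 each connect to hub router r3 over
+ # four parallel links, mapped to vrfs RED, BLUE, GREEN and the default
+ # vrf respectively (see the accompanying JSON topology file).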
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_vrf_dynamic_route_leak_topo4.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Run these tests for kernel version 4.19 or above
+ if version_cmp(platform.release(), "4.19") < 0:
+ error_msg = (
+ "BGP vrf dynamic route leak tests will not run "
+ '(have kernel "{}", but it requires >= 4.19)'.format(platform.release())
+ )
+ pytest.skip(error_msg)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global BGP_CONVERGENCE
+ global ADDR_TYPES
+ ADDR_TYPES = check_address_types()
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_dynamic_import_recursive_import_tenant_vrf_p1(request):
+ """
+ Verify recursive import among Tenant VRFs.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ reset_config_on_routers(tgen)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step(
+ "Configure static routes on R2 for vrf RED and redistribute in "
+ "respective BGP instance"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Redistribute static route on BGP VRF RED")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"redistribute": [{"redist_type": "static"}]}}}
+ )
+
+ redist_dict = {
+ "r2": {"bgp": [{"vrf": "RED", "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that R2 has installed redistributed routes in vrf RED only")
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [{"network": [NETWORK2_1[addr_type]], "vrf": "RED"}]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Import vrf RED's routes into vrf GREEN on R2")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": "RED"}}}})
+
+ import_dict = {
+ "r2": {"bgp": [{"vrf": "GREEN", "local_as": 2, "address_family": temp}]}
+ }
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
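+
+ # Note (sketch): for each address-family above, create_router_bgp() is
+ # expected to emit roughly "router bgp 2 vrf GREEN" / "address-family
+ # <afi> unicast" / "import vrf RED" in vtysh; the exact commands are
+ # generated by the topotest bgp library helpers, so treat this as a guide only.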
+
+ step(
+ "Verify on R2, that it installs imported routes from vrf RED to vrf "
+ "GREEN's RIB/FIB pointing next-hop to vrf RED"
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [{"network": [NETWORK2_1[addr_type]], "vrf": "GREEN"}]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r2", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("On R3 import routes from vrf GREEN to vrf default")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": "GREEN"}}}})
+
+ import_dict = {"r3": {"bgp": [{"local_as": 3, "address_family": temp}]}}
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify on R3, that it installs imported routes from vrf GREEN to "
+ "vrf default RIB/FIB pointing next-hop to vrf GREEN. "
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {"static_routes": [{"network": [NETWORK2_1[addr_type]]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("On R4 import routes from vrf default to vrf BLUE")
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": "default"}}}})
+
+ import_dict = {
+ "r4": {"bgp": [{"vrf": "BLUE", "local_as": 4, "address_family": temp}]}
+ }
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify on R4, that it installs imported routes from vrf default to "
+ "vrf BLUE RIB/FIB pointing next-hop to vrf default."
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r4": {
+ "static_routes": [{"network": [NETWORK2_1[addr_type]], "vrf": "BLUE"}]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r4", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r4", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
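+ # At this point the recursive import chain is: static routes in vrf RED on
+ # R2 -> imported into vrf GREEN on R2 -> learned by R3 and imported into its
+ # default vrf -> learned by R4 and imported into vrf BLUE. The loop below
+ # deletes and re-adds the import at two points in that chain and re-checks
+ # R4's BGP/route tables.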
+ for dut, vrf_name, vrf_import, as_num in zip(
+ ["r2", "r4"], ["GREEN", "BLUE"], ["RED", "default"], [2, 4]
+ ):
+
+ for action, value in zip(["Delete", "Re-add"], [True, False]):
+ step("{} the import command on {} router".format(action, dut))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {"import": {"vrf": vrf_import, "delete": value}}
+ }
+ }
+ )
+
+ import_dict = {
+ dut: {
+ "bgp": [
+ {"vrf": vrf_name, "local_as": as_num, "address_family": temp}
+ ]
+ }
+ }
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r4": {
+ "static_routes": [
+ {"network": [NETWORK2_1[addr_type]], "vrf": "BLUE"}
+ ]
+ }
+ }
+ if value:
+ result = verify_bgp_rib(
+ tgen, addr_type, "r4", static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes["r4"]["static_routes"][0]["network"],
+ )
+
+ result = verify_rib(
+ tgen, addr_type, "r4", static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name,
+ result,
+ static_routes["r4"]["static_routes"][0]["network"],
+ )
+ else:
+ result = verify_bgp_rib(tgen, addr_type, "r4", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r4", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_dynamic_import_routes_between_two_tenant_vrf_p0(request):
+ """
+ Verify that dynamic import works fine between two different Tenant VRFs.
+
+ When next-hop IPs are the same across all VRFs.
+ When next-hop IPs are different across all VRFs.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ reset_config_on_routers(tgen)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step(
+ "Configure static routes on R3 for each vrf and redistribute in "
+ "respective BGP instance"
+ )
+
+ for vrf_name, network in zip(VRF_LIST, [NETWORK1_1, NETWORK2_1, NETWORK3_1]):
+ step("Configure static route for VRF : {}".format(vrf_name))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Redistribute static route on BGP VRF : {}".format(vrf_name))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"redistribute": [{"redist_type": "static"}]}}}
+ )
+
+ redist_dict = {
+ "r3": {"bgp": [{"vrf": vrf_name, "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for vrf_name, network in zip(VRF_LIST, [NETWORK1_1, NETWORK2_1, NETWORK3_1]):
+ step(
+ "Verify that R3 has installed redistributed routes in respective "
+ "vrfs: {}".format(vrf_name)
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Import from vrf GREEN+BLUE into vrf RED on R3")
+
+ for vrf_name in ["BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": vrf_name}}}})
+
+ import_dict = {
+ "r3": {"bgp": [{"vrf": "RED", "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify on R1, that it installs all the routes(local+imported) in "
+ "vrf RED's RIB/FIB and doesn't get confuse with next-hop attribute, "
+ "as all vrfs on R1 are using same IP address for next-hop"
+ )
+
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+
+ next_hop_1 = topo["routers"]["r3"]["links"]["r1-link1"][addr_type].split("/")[0]
+ result = verify_bgp_rib(
+ tgen, addr_type, "r1", static_routes, next_hop=next_hop_1
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", static_routes, next_hop=next_hop_1)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Remove import vrf GREEN/BLUE/Both command from vrf RED's instance on" " R3")
+ for vrf_name in ["BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"import": {"vrf": vrf_name, "delete": True}}}}
+ )
+
+ import_dict = {
+ "r3": {"bgp": [{"vrf": "RED", "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that R1,R2 & R3 withdraw imported routes from vrf RED's RIB")
+ for dut in ["r1", "r2", "r3"]:
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type], NETWORK3_1[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \nError {}\n"
+ "Routes {} still in BGP table".format(
+ tc_name, result, static_routes[dut]["static_routes"][0]["network"]
+ )
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \nError {}\n"
+ "Routes {} still in Route table".format(
+ tc_name, result, static_routes[dut]["static_routes"][0]["network"]
+ )
+ )
+
+ step("Add import vrf GREEN/BLUE/Both command from vrf RED's instance on " "R3")
+ for vrf_name in ["BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": vrf_name}}}})
+
+ import_dict = {
+ "r3": {"bgp": [{"vrf": "RED", "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for dut in ["r1", "r2", "r3"]:
+ step("Verify that {} reinstall imported routes from vrf RED's RIB".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type], NETWORK3_1[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value in zip(["Shut", "No shut"], [True, False]):
+ step(
+ "{} the neighborship between R1-R3 and R1-R2 for vrf GREEN, BLUE "
+ "and default".format(action)
+ )
+ bgp_disable = {"r3": {"bgp": []}}
+ for vrf_name in ["GREEN", "BLUE", "default"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {"r3-link1": {"shutdown": value}}
+ },
+ "r2": {
+ "dest_link": {"r3-link1": {"shutdown": value}}
+ },
+ }
+ }
+ }
+ }
+ )
+
+ bgp_disable["r3"]["bgp"].append(
+ {"vrf": vrf_name, "local_as": 3, "address_family": temp}
+ )
+ result = create_router_bgp(tgen, topo, bgp_disable)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify RIB/FIB of vrf RED will be unchanged on all 3 routers")
+ for dut in ["r1", "r2", "r3"]:
+ step("Verify RIB/FIB for vrf RED on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value, status in zip(
+ ["Shut", "No shut"], [True, False], ["Withdraw", "Reinstall"]
+ ):
+ step("{} the neighborship between R1-R3 and R1-R2 for vrf RED".format(action))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r3-link1": {"shutdown": value}}},
+ "r2": {"dest_link": {"r3-link1": {"shutdown": value}}},
+ }
+ }
+ }
+ }
+ )
+
+ bgp_disable = {
+ "r3": {"bgp": [{"vrf": "RED", "local_as": 3, "address_family": temp}]}
+ }
+ result = create_router_bgp(tgen, topo, bgp_disable)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that R1 and R2 {} all the routes from RED vrf's RIB and"
+ " FIB".format(status)
+ )
+ for dut in ["r1", "r2"]:
+ step("Verify RIB/FIB for vrf RED on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+
+ if value:
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+
+ result = verify_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in Route table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+ else:
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Remove import command from router R3 and configure the same on R2")
+ for vrf_name in ["BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"import": {"vrf": vrf_name, "delete": True}}}}
+ )
+
+ import_dict = {
+ "r3": {"bgp": [{"vrf": "RED", "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that once import commands are removed from R3, all imported "
+ "routes are withdrawn from RIB/FIB of vrf RED on R1/R2/R3"
+ )
+
+ for dut in ["r1", "r2", "r3"]:
+ step("Verify RIB/FIB for vrf RED on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type], NETWORK3_1[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \nError {}\n"
+ "Routes {} still in BGP table".format(
+ tc_name, result, static_routes[dut]["static_routes"][0]["network"]
+ )
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name, result, static_routes[dut]["static_routes"][0]["network"]
+ )
+
+ step(
+ "Configure static routes on R2 for each vrf and redistribute in "
+ "respective BGP instance"
+ )
+ for vrf_name, network in zip(VRF_LIST, [NETWORK1_1, NETWORK2_1, NETWORK3_1]):
+ step("Configure static route for VRF : {}".format(vrf_name))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Redistribute static route on BGP VRF : {}".format(vrf_name))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"redistribute": [{"redist_type": "static"}]}}}
+ )
+
+ redist_dict = {
+ "r2": {"bgp": [{"vrf": vrf_name, "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove redistribute static route on BGP VRF : {} on r3".format(vrf_name))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "redistribute": [{"redist_type": "static", "delete": True}]
+ }
+ }
+ }
+ )
+
+ redist_dict = {
+ "r3": {"bgp": [{"vrf": vrf_name, "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for vrf_name in ["BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": vrf_name}}}})
+
+ import_dict = {
+ "r2": {"bgp": [{"vrf": "RED", "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify after import commands are re-configured on R2's vrf RED, all "
+ "those routes are installed again in vrf RED of R1,R2,R3"
+ )
+ for dut in ["r1", "r2", "r3"]:
+ step("Verify RIB/FIB for vrf RED on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Remove/add import vrf GREEN/BLUE/both command from vrf RED's " "instance on R2"
+ )
+ for vrf_name in ["BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"import": {"vrf": vrf_name, "delete": True}}}}
+ )
+
+ redist_dict = {
+ "r2": {"bgp": [{"vrf": "RED", "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that R1,R2 & R3 withdraw imported routes from vrf RED's RIB")
+ for dut in ["r1", "r2", "r3"]:
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type], NETWORK3_1[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \nError {}\n"
+ "Routes {} still in BGP table".format(
+ tc_name, result, static_routes[dut]["static_routes"][0]["network"]
+ )
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name, result, static_routes[dut]["static_routes"][0]["network"]
+ )
+
+ step("Add import vrf GREEN/BLUE/Both command from vrf RED's instance on " "R2")
+ for vrf_name in ["BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": vrf_name}}}})
+
+ redist_dict = {
+ "r2": {"bgp": [{"vrf": "RED", "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for dut in ["r1", "r2", "r3"]:
+ step("Verify that {} reinstall imported routes from vrf RED's RIB".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [NETWORK2_1[addr_type], NETWORK3_1[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value in zip(["Shut", "No shut"], [True, False]):
+ step(
+ "{} the neighborship between R2-R3 for vrf GREEN, BLUE and default".format(
+ action
+ )
+ )
+ bgp_disable = {"r2": {"bgp": []}}
+ for vrf_name in ["GREEN", "BLUE", "default"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {"r2-link1": {"shutdown": value}}
+ }
+ }
+ }
+ }
+ }
+ )
+
+ bgp_disable["r2"]["bgp"].append(
+ {"vrf": vrf_name, "local_as": 2, "address_family": temp}
+ )
+ result = create_router_bgp(tgen, topo, bgp_disable)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify RIB/FIB of vrf RED will be unchanged on all 3 routers")
+ for dut in ["r1", "r2", "r3"]:
+ step("Verify RIB/FIB for vrf RED on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value, status in zip(
+ ["Shut", "No shut"], [True, False], ["Withdraw", "Reinstall"]
+ ):
+ step("{} the neighborship between R2-R3 for vrf RED".format(action))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r3-link1": {"shutdown": value}}}
+ }
+ }
+ }
+ }
+ )
+
+ bgp_disable = {
+ "r3": {"bgp": [{"vrf": "RED", "local_as": 3, "address_family": temp}]}
+ }
+ result = create_router_bgp(tgen, topo, bgp_disable)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that R1 and R2 {} all the routes from RED vrf's RIB and"
+ " FIB".format(status)
+ )
+ for dut in ["r1", "r3"]:
+ step("Verify RIB/FIB for vrf RED on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ "vrf": "RED",
+ }
+ ]
+ }
+ }
+
+ if value:
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+
+ result = verify_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+ else:
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_dynamic_import_routes_between_tenant_to_default_vrf_p0(request):
+ """
+ Verify that with multiple tenant VRFs, dynamic import works fine from
+ Tenant VRFs to the default VRF.
+
+ When next-hop IPs and prefixes are the same across all VRFs.
+ When next-hop IPs and prefixes are different across all VRFs.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ reset_config_on_routers(tgen)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step(
+ "Configure static routes on R3 for each vrf and redistribute in "
+ "respective BGP instance"
+ )
+
+ for vrf_name, network in zip(VRF_LIST, [NETWORK1_1, NETWORK2_1, NETWORK3_1]):
+ step("Configure static route for VRF : {}".format(vrf_name))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Redistribute static route on BGP VRF : {}".format(vrf_name))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"redistribute": [{"redist_type": "static"}]}}}
+ )
+
+ redist_dict = {
+ "r3": {"bgp": [{"vrf": vrf_name, "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for vrf_name, network in zip(VRF_LIST, [NETWORK1_1, NETWORK2_1, NETWORK3_1]):
+ step(
+ "Verify that R3 has installed redistributed routes in respective "
+ "vrfs: {}".format(vrf_name)
+ )
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Import all tenant vrfs(GREEN+BLUE+RED) in default vrf on R3")
+
+ for vrf_name in ["RED", "BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": vrf_name}}}})
+
+ redist_dict = {"r3": {"bgp": [{"local_as": 3, "address_family": temp}]}}
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
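+
+ # Sketch of the intent: each pass of the loop above adds "import vrf <name>"
+ # under R3's default-vrf BGP instance (local AS 3), so RED, BLUE and GREEN
+ # routes should all leak into the default table once the loop completes.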
+
+ step(
+ "Verify on R3 that it installs all the routes(imported from tenant "
+ "VRFs) in default vrf. Additionally, verify that R1 & R2 also "
+ "receive these routes from R3 and install in default vrf, next-hop "
+ "pointing to R3"
+ )
+
+ for vrf_name, network in zip(VRF_LIST, [NETWORK1_1, NETWORK2_1, NETWORK3_1]):
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ }
+ ]
+ }
+ }
+
+ for dut in ["r2", "r1"]:
+ next_hop_val = topo["routers"]["r3"]["links"]["{}-link4".format(dut)][
+ addr_type
+ ].split("/")[0]
+
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, static_routes, next_hop=next_hop_val
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, addr_type, dut, static_routes, next_hop=next_hop_val
+ )
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r3", static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value, status in zip(
+ ["Remove", "Add"], [True, False], ["withdraw", "re-install"]
+ ):
+ step(
+ "{} import vrf GREEN/BLUE/RED/all command from default vrf "
+ "instance on R3".format(action)
+ )
+ for vrf_name in ["RED", "BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {"import": {"vrf": vrf_name, "delete": value}}
+ }
+ }
+ )
+
+ import_dict = {"r3": {"bgp": [{"local_as": 3, "address_family": temp}]}}
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that R1,R2 & R3 {} imported routes from GREEN/BLUE/RED/all"
+ " in default vrf's RIB".format(status)
+ )
+ for dut in ["r1", "r2", "r3"]:
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ }
+ ]
+ }
+ }
+
+ if value:
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+
+ result = verify_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+ else:
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value in zip(["Shut", "No shut"], [True, False]):
+ step(
+ "{} the neighborship between R1-R3 and R1-R2 for vrf RED, GREEN "
+ "and BLUE".format(action)
+ )
+ bgp_disable = {"r3": {"bgp": []}}
+ for vrf_name in ["RED", "GREEN", "BLUE"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {"r3-link4": {"shutdown": value}}
+ },
+ "r2": {
+ "dest_link": {"r3-link4": {"shutdown": value}}
+ },
+ }
+ }
+ }
+ }
+ )
+
+ bgp_disable["r3"]["bgp"].append(
+ {"vrf": vrf_name, "local_as": 3, "address_family": temp}
+ )
+ result = create_router_bgp(tgen, topo, bgp_disable)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that when peering is shutdown for tenant vrfs, it "
+ "doesn't impact the RIB/FIB of default vrf on router R1 and R2"
+ )
+ for dut in ["r1", "r2"]:
+ step("Verify RIB/FIB for default vrf on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value, status in zip(
+ ["Shut", "No shut"], [True, False], ["Withdraw", "Reinstall"]
+ ):
+ step(
+ "{} the neighborship between R1-R3 and R2-R3 for default vrf".format(action)
+ )
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r3-link4": {"shutdown": value}}},
+ "r2": {"dest_link": {"r3-link4": {"shutdown": value}}},
+ }
+ }
+ }
+ }
+ )
+
+ bgp_disable = {"r3": {"bgp": [{"local_as": 3, "address_family": temp}]}}
+ result = create_router_bgp(tgen, topo, bgp_disable)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that R1 and R2 {} all the routes from default vrf's RIB"
+ " and FIB".format(status)
+ )
+ for dut in ["r1", "r2"]:
+ step("Verify RIB/FIB for default vrf on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ }
+ ]
+ }
+ }
+
+ if value:
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+
+ result = verify_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+ else:
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Remove import command from router R3 and configure the same on R2")
+ temp = {}
+ for vrf_name in VRF_LIST:
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"import": {"vrf": vrf_name, "delete": True}}}}
+ )
+
+ import_dict = {"r3": {"bgp": [{"local_as": 3, "address_family": temp}]}}
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that once import commands are removed from R3, all imported "
+ "routes are withdrawn from RIB/FIB of default vrf on R1/R2/R3"
+ )
+
+ for dut in ["r1", "r2", "r3"]:
+ step("Verify RIB/FIB for default vrf on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \nError {}\n"
+ "Routes {} still in BGP table".format(
+ tc_name, result, static_routes[dut]["static_routes"][0]["network"]
+ )
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for vrf_name, network in zip(VRF_LIST, [NETWORK1_1, NETWORK2_1, NETWORK3_1]):
+ step("Configure static route for VRF : {} on r2".format(vrf_name))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [network[addr_type]],
+ "next_hop": "blackhole",
+ "vrf": vrf_name,
+ }
+ ]
+ }
+ }
+
+ result = create_static_routes(tgen, static_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Redistribute static route on BGP VRF : {}".format(vrf_name))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"redistribute": [{"redist_type": "static"}]}}}
+ )
+
+ redist_dict = {
+ "r2": {"bgp": [{"vrf": vrf_name, "local_as": 2, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove redistribute static route on BGP VRF : {} on r3".format(vrf_name))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "redistribute": [{"redist_type": "static", "delete": True}]
+ }
+ }
+ }
+ )
+
+ redist_dict = {
+ "r3": {"bgp": [{"vrf": vrf_name, "local_as": 3, "address_family": temp}]}
+ }
+
+ result = create_router_bgp(tgen, topo, redist_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for vrf_name in ["RED", "BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": vrf_name}}}})
+
+ import_dict = {"r2": {"bgp": [{"local_as": 2, "address_family": temp}]}}
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify after import commands are re-configured on R2's vrf RED, all "
+ "those routes are installed again in default vrf of R1,R2,R3"
+ )
+ for dut in ["r1", "r2", "r3"]:
+ step("Verify RIB/FIB for vrf RED on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Remove import vrf RED/GREEN/BLUE/all one by one from default vrf" " on R2")
+ for vrf_name in ["RED", "BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {addr_type: {"unicast": {"import": {"vrf": vrf_name, "delete": True}}}}
+ )
+
+ import_dict = {"r2": {"bgp": [{"local_as": 2, "address_family": temp}]}}
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that R1, R2 and R3 withdraws imported routes from default "
+ "vrf's RIB and FIB "
+ )
+ for dut in ["r1", "r2", "r3"]:
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \nError {}\n"
+ "Routes {} still in BGP table".format(
+ tc_name, result, static_routes[dut]["static_routes"][0]["network"]
+ )
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ step("Add import vrf RED/GREEN/BLUE/all one by one from default vrf on R2")
+ for vrf_name in ["RED", "BLUE", "GREEN"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update({addr_type: {"unicast": {"import": {"vrf": vrf_name}}}})
+
+ import_dict = {"r2": {"bgp": [{"local_as": 2, "address_family": temp}]}}
+
+ result = create_router_bgp(tgen, topo, import_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for dut in ["r1", "r2", "r3"]:
+ step("Verify that {} reinstall imported routes from vrf RED's RIB".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ }
+ ]
+ }
+ }
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value in zip(["Shut", "No shut"], [True, False]):
+ step(
+ "{} the neighborship between R2-R3 for vrf GREEN, BLUE and RED".format(
+ action
+ )
+ )
+ bgp_disable = {"r2": {"bgp": []}}
+ for vrf_name in ["GREEN", "BLUE", "RED"]:
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {"r2-link4": {"shutdown": value}}
+ }
+ }
+ }
+ }
+ }
+ )
+
+ bgp_disable["r2"]["bgp"].append(
+ {"vrf": vrf_name, "local_as": 2, "address_family": temp}
+ )
+ result = create_router_bgp(tgen, topo, bgp_disable)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify RIB/FIB of vrf RED will be unchanged on all 3 routers")
+ for dut in ["r1", "r2", "r3"]:
+ step("Verify RIB/FIB for vrf RED on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ }
+ ]
+ }
+ }
+
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ for action, value, status in zip(
+ ["Shut", "No shut"], [True, False], ["Withdraw", "Reinstall"]
+ ):
+ step("{} the neighborship between R2-R3 for default vrf".format(action))
+ temp = {}
+ for addr_type in ADDR_TYPES:
+ temp.update(
+ {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r3": {"dest_link": {"r2-link4": {"shutdown": value}}}
+ }
+ }
+ }
+ }
+ )
+
+ bgp_disable = {"r2": {"bgp": [{"local_as": 2, "address_family": temp}]}}
+ result = create_router_bgp(tgen, topo, bgp_disable)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify that R1 and R2 {} all the routes from default vrfs RIB and"
+ " FIB".format(status)
+ )
+ for dut in ["r1", "r3"]:
+ step("Verify RIB/FIB for default vrf on {}".format(dut))
+ for addr_type in ADDR_TYPES:
+ static_routes = {
+ dut: {
+ "static_routes": [
+ {
+ "network": [
+ NETWORK1_1[addr_type],
+ NETWORK2_1[addr_type],
+ NETWORK3_1[addr_type],
+ ],
+ "next_hop": "blackhole",
+ }
+ ]
+ }
+ }
+
+ if value:
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nError {}\n" "Routes {} still in BGP table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+
+ result = verify_rib(
+ tgen, addr_type, dut, static_routes, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error {}" "Routes {} still in Route table".format(
+ tc_name,
+ result,
+ static_routes[dut]["static_routes"][0]["network"],
+ )
+ else:
+ result = verify_bgp_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, static_routes)
+ assert result is True, "Testcase {} : Failed \n Error {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
index e7a72ef33d..b3ff9d79ca 100644
--- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
+++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
@@ -70,6 +70,8 @@ from lib.common_config import (
configure_vxlan,
configure_brctl,
create_interface_in_kernel,
+ kill_router_daemons,
+ start_router_daemons
)
from lib.topolog import logger
@@ -1755,6 +1757,221 @@ def test_route_map_operations_for_evpn_address_family_p1(request, attribute):
write_test_footer(tc_name)
+def test_evpn_address_family_with_graceful_restart_p0(request):
+ """
+ Verify Graceful-restart function for EVPN address-family.
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+ add_default_routes(tgen)
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [{
+ "network": NETWORK1_2[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "RED"
+ }]
+ },
+ "r4":{
+ "static_routes": [{
+ "network": NETWORK1_3[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "BLUE"
+ },
+ {
+ "network": NETWORK1_4[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "GREEN"
+ }]
+ }
+ }
+
+ result = create_static_routes(tgen, input_dict_1)
+ assert result is True, 'Testcase {} : Failed \n Error: {}'.format(
+ tc_name, result)
+
+ step("Redistribute static in (IPv4 and IPv6) address-family "
+ "on Edge-1 for all VRFs.")
+
+ input_dict_2={}
+ for dut in ["r3", "r4"]:
+ temp = {dut: {"bgp": []}}
+ input_dict_2.update(temp)
+
+ if dut == "r3":
+ VRFS = ["RED"]
+ AS_NUM = [3]
+ if dut == "r4":
+ VRFS = ["BLUE", "GREEN"]
+ AS_NUM = [4, 4]
+
+ for vrf, as_num in zip(VRFS, AS_NUM):
+ temp[dut]["bgp"].append(
+ {
+ "local_as": as_num,
+ "vrf": vrf,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ })
+
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ step("Verify on router Edge-1 that EVPN routes corresponding to "
+ "all VRFs are received from both routers DCG-1 and DCG-2")
+
+ for addr_type in ADDR_TYPES:
+ input_routes = {
+ "r3": {
+ "static_routes": [{
+ "network": NETWORK1_2[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "RED"
+ }]
+ },
+ "r4":{
+ "static_routes": [{
+ "network": NETWORK1_3[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "BLUE"
+ },
+ {
+ "network": NETWORK1_4[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "GREEN"
+ }]
+ }
+ }
+
+ result = verify_rib(tgen, addr_type, "e1", input_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ step("Configure DCG-2 as GR restarting node for EVPN session between"
+ " DCG-2 and EDGE-1, following by a session reset using 'clear bgp *'"
+ " command.")
+
+ input_dict_gr = {
+ "d2": {
+ "bgp":
+ [
+ {
+ "local_as": "200",
+ "graceful-restart": {
+ "graceful-restart": True,
+ }
+ }
+ ]
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_gr)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
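+
+ # Sketch of the generated config (exact rendering is up to create_router_bgp):
+ # roughly "router bgp 200" followed by "bgp graceful-restart" on DCG-2, which
+ # makes DCG-2 advertise the GR restarting capability towards EDGE-1.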
+
+ step("Verify that DCG-2 changes it's role to GR-restarting router "
+ "and EDGE-1 becomes the GR-helper.")
+
+ step("Kill BGPd daemon on DCG-2.")
+ kill_router_daemons(tgen, "d2", ["bgpd"])
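+
+ # With GR negotiated, EDGE-1 (acting as GR helper) should mark the routes
+ # learned from DCG-2 as stale instead of withdrawing them while bgpd is down;
+ # the verify_evpn_routes() checks below rely on that behaviour.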
+
+ step("Verify that EDGE-1 keep stale entries for EVPN RT-5 routes "
+ "received from DCG-2 before the restart.")
+
+ for addr_type in ADDR_TYPES:
+ input_routes = {
+ "r4":{
+ "static_routes": [{
+ "network": NETWORK1_3[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "BLUE"
+ },
+ {
+ "network": NETWORK1_4[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "GREEN"
+ }]
+ }
+ }
+ result = verify_evpn_routes(tgen, topo, "e1", input_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ step("Verify that DCG-2 keeps BGP routes in Zebra until BGPd "
+ "comes up or end of 'rib-stale-time'")
+
+ step("Start BGPd daemon on DCG-2.")
+ start_router_daemons(tgen, "d2", ["bgpd"])
+
+ step("Verify that EDGE-1 removed all the stale entries.")
+ for addr_type in ADDR_TYPES:
+ input_routes = {
+ "r4":{
+ "static_routes": [{
+ "network": NETWORK1_3[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "BLUE"
+ },
+ {
+ "network": NETWORK1_4[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "GREEN"
+ }]
+ }
+ }
+ result = verify_evpn_routes(tgen, topo, "e1", input_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ step("Verify that DCG-2 refresh zebra with EVPN routes. "
+ "(no significance of 'rib-stale-time'")
+
+ for addr_type in ADDR_TYPES:
+ input_routes = {
+ "r4":{
+ "static_routes": [{
+ "network": NETWORK1_3[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "BLUE"
+ },
+ {
+ "network": NETWORK1_4[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "vrf": "GREEN"
+ }]
+ }
+ }
+ result = verify_rib(tgen, addr_type, "d2", input_routes)
+ assert result is True, "Testcase {} :Failed \n Error: {}". \
+ format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
@pytest.mark.parametrize("attribute", ["locPrf", "weight", "path"])
def test_bgp_attributes_for_evpn_address_family_p1(request, attribute):
"""
diff --git a/tests/topotests/grpc_basic/lib b/tests/topotests/grpc_basic/lib
new file mode 120000
index 0000000000..dc598c56dc
--- /dev/null
+++ b/tests/topotests/grpc_basic/lib
@@ -0,0 +1 @@
+../lib \ No newline at end of file
diff --git a/tests/topotests/grpc_basic/r1/zebra.conf b/tests/topotests/grpc_basic/r1/zebra.conf
new file mode 100644
index 0000000000..49a0911c53
--- /dev/null
+++ b/tests/topotests/grpc_basic/r1/zebra.conf
@@ -0,0 +1,8 @@
+log record-priority
+log timestamp precision 6
+log extended extlog
+ destination file ext-log.txt create
+ timestamp precision 6
+ structured-data code-location
+interface r1-eth0
+ ip address 192.168.1.1/24 \ No newline at end of file
diff --git a/tests/topotests/grpc_basic/r2/zebra.conf b/tests/topotests/grpc_basic/r2/zebra.conf
new file mode 100644
index 0000000000..20da1885d4
--- /dev/null
+++ b/tests/topotests/grpc_basic/r2/zebra.conf
@@ -0,0 +1,8 @@
+log record-priority
+log timestamp precision 6
+log extended extlog
+ destination file ext-log.txt create
+ timestamp precision 6
+ structured-data code-location
+interface r2-eth0
+ ip address 192.168.1.2/24
diff --git a/tests/topotests/grpc_basic/test_basic_grpc.py b/tests/topotests/grpc_basic/test_basic_grpc.py
new file mode 100644
index 0000000000..b6812a5afc
--- /dev/null
+++ b/tests/topotests/grpc_basic/test_basic_grpc.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+#
+# February 21 2022, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2022, LabN Consulting, L.L.C.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; see the file COPYING; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+"""
+test_basic_grpc.py: Test Basic gRPC.
+"""
+
+import logging
+import os
+import sys
+
+import pytest
+
+from lib.common_config import step
+from lib.micronet import commander
+from lib.topogen import Topogen, TopoRouter
+from lib.topolog import logger
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+
+GRPCP_ZEBRA = 50051
+GRPCP_STATICD = 50052
+GRPCP_BFDD = 50053
+GRPCP_ISISD = 50054
+GRPCP_OSPFD = 50055
+GRPCP_PIMD = 50056
+
+pytestmark = [
+ # pytest.mark.mgmtd -- Need a new non-protocol marker
+ # pytest.mark.bfdd,
+ # pytest.mark.isisd,
+ # pytest.mark.ospfd,
+ # pytest.mark.pimd,
+ pytest.mark.staticd,
+]
+
+script_path = os.path.realpath(os.path.join(CWD, "../lib/grpc-query.py"))
+
+try:
+ commander.cmd_raises([script_path, "--check"])
+except Exception:
+ pytest.skip(
+ "skipping; cannot create or import gRPC proto modules", allow_module_level=True
+ )
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+ topodef = {"s1": ("r1", "r2")}
+ tgen = Topogen(topodef, request.module.__name__)
+
+ tgen.start_topology()
+ router_list = tgen.routers()
+
+ for rname, router in router_list.items():
+ router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf", f"-M grpc:{GRPCP_ZEBRA}")
+ router.load_config(TopoRouter.RD_STATIC, None, f"-M grpc:{GRPCP_STATICD}")
+ # router.load_config(TopoRouter.RD_BFD, None, f"-M grpc:{GRPCP_BFDD}")
+ # router.load_config(TopoRouter.RD_ISIS, None, f"-M grpc:{GRPCP_ISISD}")
+ # router.load_config(TopoRouter.RD_OSPF, None, f"-M grpc:{GRPCP_OSPFD}")
+ # router.load_config(TopoRouter.RD_PIM, None, f"-M grpc:{GRPCP_PIMD}")
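+ # Only zebra and staticd are loaded with the gRPC module for now; the
+ # commented-out daemons above (and their pytest markers) can presumably be
+ # enabled once this test exercises them.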
+
+ tgen.start_router()
+ yield tgen
+
+ logging.info("Stopping all routers (no assert on error)")
+ tgen.stop_topology()
+
+
+# Let's not do this so we catch errors
+# Fixture that executes before each test
+@pytest.fixture(autouse=True)
+def skip_on_failure(tgen):
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of previous test failure")
+
+
+# ===================
+# The test functions
+# ===================
+
+
+def run_grpc_client(r, port, commands):
+ if not isinstance(commands, str):
+ commands = "\n".join(commands) + "\n"
+ if not commands.endswith("\n"):
+ commands += "\n"
+ return r.cmd_raises([script_path, f"--port={port}"], stdin=commands)
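+# Example usage (mirrors the tests below):
+#   run_grpc_client(r1, GRPCP_ZEBRA, "GETCAP")
+#   run_grpc_client(r1, GRPCP_ZEBRA, ["GET,/frr-interface:lib"] * 5)
+# Commands are written to grpc-query.py's stdin, one operation per line.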
+
+
+def test_connectivity(tgen):
+ r1 = tgen.gears["r1"]
+ output = r1.cmd_raises("ping -c1 192.168.1.2")
+ logging.info("ping output: %s", output)
+
+
+def test_capabilities(tgen):
+ r1 = tgen.gears["r1"]
+ output = run_grpc_client(r1, GRPCP_ZEBRA, "GETCAP")
+ logging.info("grpc output: %s", output)
+
+
+def test_get_config(tgen):
+ nrepeat = 5
+ r1 = tgen.gears["r1"]
+
+ step("'GET' inteface config 10 times, once per invocation")
+
+ for i in range(0, nrepeat):
+ output = run_grpc_client(r1, GRPCP_ZEBRA, "GET,/frr-interface:lib")
+ logging.info("[iteration %s]: grpc GET output: %s", i, output)
+
+ step(f"'GET' YANG {nrepeat} times in one invocation")
+ commands = ["GET,/frr-interface:lib" for _ in range(0, 10)]
+ output = run_grpc_client(r1, GRPCP_ZEBRA, commands)
+ logging.info("grpc GET*{%d} output: %s", nrepeat, output)
+
+
+def test_get_vrf_config(tgen):
+ r1 = tgen.gears["r1"]
+
+ step("'GET' get VRF config")
+
+ output = run_grpc_client(r1, GRPCP_ZEBRA, "GET,/frr-vrf:lib")
+ logging.info("grpc GET /frr-vrf:lib output: %s", output)
+
+
+def test_shutdown_checks(tgen):
+ # Start a process running that will fetch bunches of data, then shut the routers down
+ # and check for cores.
+ nrepeat = 100
+ r1 = tgen.gears["r1"]
+ commands = ["GET,/frr-interface:lib" for _ in range(0, nrepeat)]
+ p = r1.popen([script_path, f"--port={GRPCP_ZEBRA}"] + commands)
+ import time
+
+ time.sleep(1)
+ try:
+ for r in tgen.routers().values():
+ r.net.stopRouter()
+ r.net.checkRouterCores()
+ finally:
+ if p:
+ p.terminate()
+ p.wait()
+
+
+# Memory leak test template
+# Not compatible with the shutdown check above
+def _test_memory_leak(tgen):
+ "Run the memory leak test and report results."
+
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/isis_topo1/test_isis_topo1.py b/tests/topotests/isis_topo1/test_isis_topo1.py
index 94c5faf2e0..014722387f 100644
--- a/tests/topotests/isis_topo1/test_isis_topo1.py
+++ b/tests/topotests/isis_topo1/test_isis_topo1.py
@@ -236,6 +236,94 @@ def test_isis_linux_route6_installation():
assert topotest.json_cmp(actual, expected) is None, assertmsg
+def test_isis_summary_json():
+ "Check json struct in show isis summary json"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Checking 'show isis summary json'")
+ for rname, router in tgen.routers().items():
+ logger.info("Checking router %s", rname)
+ json_output = tgen.gears[rname].vtysh_cmd("show isis summary json", isjson=True)
+ assertmsg = "Test isis summary json failed in '{}' data '{}'".format(rname, json_output)
+ assert json_output['vrf'] == "default", assertmsg
+ assert json_output['areas'][0]['area'] == "1", assertmsg
+ assert json_output['areas'][0]['levels'][0]['id'] != '3', assertmsg
+
+
+def test_isis_interface_json():
+ "Check json struct in show isis interface json"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Checking 'show isis interface json'")
+ for rname, router in tgen.routers().items():
+ logger.info("Checking router %s", rname)
+ json_output = tgen.gears[rname].vtysh_cmd("show isis interface json", isjson=True)
+ assertmsg = "Test isis interface json failed in '{}' data '{}'".format(rname, json_output)
+ assert json_output['areas'][0]['circuits'][0]['interface']['name'] == rname+"-eth0", assertmsg
+
+ for rname, router in tgen.routers().items():
+ logger.info("Checking router %s", rname)
+ json_output = tgen.gears[rname].vtysh_cmd("show isis interface detail json", isjson=True)
+ assertmsg = "Test isis interface json failed in '{}' data '{}'".format(rname, json_output)
+ assert json_output['areas'][0]['circuits'][0]['interface']['name'] == rname+"-eth0", assertmsg
+
+
+def test_isis_neighbor_json():
+ "Check json struct in show isis neighbor json"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ #tgen.mininet_cli()
+ logger.info("Checking 'show isis neighbor json'")
+ for rname, router in tgen.routers().items():
+ logger.info("Checking router %s", rname)
+ json_output = tgen.gears[rname].vtysh_cmd("show isis neighbor json", isjson=True)
+ assertmsg = "Test isis neighbor json failed in '{}' data '{}'".format(rname, json_output)
+ assert json_output['areas'][0]['circuits'][0]['interface'] == rname+"-eth0", assertmsg
+
+ for rname, router in tgen.routers().items():
+ logger.info("Checking router %s", rname)
+ json_output = tgen.gears[rname].vtysh_cmd("show isis neighbor detail json", isjson=True)
+ assertmsg = "Test isis neighbor json failed in '{}' data '{}'".format(rname, json_output)
+ assert json_output['areas'][0]['circuits'][0]['interface']['name'] == rname+"-eth0", assertmsg
+
+
+def test_isis_database_json():
+ "Check json struct in show isis database json"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ #tgen.mininet_cli()
+ logger.info("Checking 'show isis database json'")
+ for rname, router in tgen.routers().items():
+ logger.info("Checking router %s", rname)
+ json_output = tgen.gears[rname].vtysh_cmd("show isis database json", isjson=True)
+ assertmsg = "Test isis database json failed in '{}' data '{}'".format(rname, json_output)
+ assert json_output['areas'][0]['area']['name'] == "1", assertmsg
+ assert json_output['areas'][0]['levels'][0]['id'] != '3', assertmsg
+
+ for rname, router in tgen.routers().items():
+ logger.info("Checking router %s", rname)
+ json_output = tgen.gears[rname].vtysh_cmd("show isis database detail json", isjson=True)
+ assertmsg = "Test isis database json failed in '{}' data '{}'".format(rname, json_output)
+ assert json_output['areas'][0]['area']['name'] == "1", assertmsg
+ assert json_output['areas'][0]['levels'][0]['id'] != '3', assertmsg
+
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 34afa0d2c1..4dd44e3e9e 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -991,11 +991,11 @@ def __create_bgp_unicast_address_family(
config_data.append("no {} allowas-in {}".format(neigh_cxt, allow_as_in))
if "shutdown" in peer:
- shut_val = peer["shutdown"]
- if shut_val is True:
- config_data.append("{} shutdown".format(neigh_cxt))
- elif shut_val is False:
- config_data.append("no {} shutdown".format(neigh_cxt))
+ config_data.append(
+ "{} {} shutdown".format(
+ "no" if not peer["shutdown"] else "", neigh_cxt
+ )
+ )
if prefix_lists:
for prefix_list in prefix_lists:
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index c0572fca4c..0b97637c1e 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -1505,11 +1505,9 @@ def create_vrf_cfg(tgen, topo, input_dict=None, build=False):
config_data = []
if "vrfs" in c_data:
for vrf in c_data["vrfs"]:
- del_action = vrf.setdefault("delete", False)
name = vrf.setdefault("name", None)
table_id = vrf.setdefault("id", None)
- vni = vrf.setdefault("vni", None)
- del_vni = vrf.setdefault("no_vni", None)
+ del_action = vrf.setdefault("delete", False)
if del_action:
# Kernel cmd- Add VRF and table
@@ -1543,43 +1541,45 @@ def create_vrf_cfg(tgen, topo, input_dict=None, build=False):
)
rnode.run(cmd)
- if "links" in c_data:
- for destRouterLink, data in sorted(
- c_data["links"].items()
- ):
- # Loopback interfaces
- if "type" in data and data["type"] == "loopback":
- interface_name = destRouterLink
- else:
- interface_name = data["interface"]
+ for vrf in c_data["vrfs"]:
+ vni = vrf.setdefault("vni", None)
+ del_vni = vrf.setdefault("no_vni", None)
+
+ if "links" in c_data:
+ for destRouterLink, data in sorted(c_data["links"].items()):
+ # Loopback interfaces
+ if "type" in data and data["type"] == "loopback":
+ interface_name = destRouterLink
+ else:
+ interface_name = data["interface"]
- if "vrf" in data:
- vrf_list = data["vrf"]
+ if "vrf" in data:
+ vrf_list = data["vrf"]
- if type(vrf_list) is not list:
- vrf_list = [vrf_list]
+ if type(vrf_list) is not list:
+ vrf_list = [vrf_list]
- for _vrf in vrf_list:
- cmd = "ip link set {} master {}".format(
- interface_name, _vrf
- )
+ for _vrf in vrf_list:
+ cmd = "ip link set {} master {}".format(
+ interface_name, _vrf
+ )
- logger.info(
- "[DUT: %s]: Running" " kernel cmd [%s]",
- c_router,
- cmd,
- )
- rnode.run(cmd)
+ logger.info(
+ "[DUT: %s]: Running" " kernel cmd [%s]",
+ c_router,
+ cmd,
+ )
+ rnode.run(cmd)
- if vni:
- config_data.append("vrf {}".format(vrf["name"]))
- cmd = "vni {}".format(vni)
- config_data.append(cmd)
+ if vni:
+ config_data.append("vrf {}".format(vrf["name"]))
+ cmd = "vni {}".format(vni)
+ config_data.append(cmd)
- if del_vni:
- config_data.append("vrf {}".format(vrf["name"]))
- cmd = "no vni {}".format(del_vni)
- config_data.append(cmd)
+ if del_vni:
+ config_data.append("vrf {}".format(vrf["name"]))
+ cmd = "no vni {}".format(del_vni)
+ config_data.append(cmd)
if config_data:
config_data_dict[c_router] = config_data
@@ -2536,6 +2536,7 @@ def create_route_maps(tgen, input_dict, build=False):
ipv6_data = set_data.setdefault("ipv6", {})
local_preference = set_data.setdefault("locPrf", None)
metric = set_data.setdefault("metric", None)
+ metric_type = set_data.setdefault("metric-type", None)
as_path = set_data.setdefault("path", {})
weight = set_data.setdefault("weight", None)
community = set_data.setdefault("community", {})
@@ -2559,7 +2560,11 @@ def create_route_maps(tgen, input_dict, build=False):
# Metric
if metric:
- rmap_data.append("set metric {} \n".format(metric))
+ del_comm = set_data.setdefault("delete", None)
+ if del_comm:
+ rmap_data.append("no set metric {}".format(metric))
+ else:
+ rmap_data.append("set metric {}".format(metric))
# Origin
if origin:
diff --git a/tests/topotests/lib/grpc-query.py b/tests/topotests/lib/grpc-query.py
new file mode 100755
index 0000000000..61f01c36bb
--- /dev/null
+++ b/tests/topotests/lib/grpc-query.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+#
+# February 22 2022, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2022, LabN Consulting, L.L.C.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import argparse
+import logging
+import os
+import sys
+
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+
+# This is painful but works if you have grpc and grpc_tools installed. It would be
+# *way* better if we actually built and installed these, but ... python packaging.
+try:
+ import grpc
+ import grpc_tools
+
+ from micronet import commander
+
+ commander.cmd_raises(f"cp {CWD}/../../../grpc/frr-northbound.proto .")
+ commander.cmd_raises(
+ f"python -m grpc_tools.protoc --python_out=. --grpc_python_out=. -I . frr-northbound.proto"
+ )
+except Exception as error:
+ logging.error("can't create proto definition modules %s", error)
+ raise
+
+try:
+ sys.path[0:0] = "."
+ import frr_northbound_pb2
+ import frr_northbound_pb2_grpc
+
+ # Would be nice if compiling the modules internally from the source worked
+ # # import grpc_tools.protoc
+ # # proto_include = pkg_resources.resource_filename("grpc_tools", "_proto")
+ # from grpc_tools.protoc import _proto_file_to_module_name, _protos_and_services
+ # try:
+ # frr_northbound_pb2, frr_northbound_pb2_grpc = _protos_and_services(
+ # "frr_northbound.proto"
+ # )
+ # finally:
+ # os.chdir(CWD)
+except Exception as error:
+ logging.error("can't import proto definition modules %s", error)
+ raise
+
+
+class GRPCClient:
+ def __init__(self, server, port):
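+ # Open an insecure (plaintext) channel to the northbound gRPC server and create its stub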
+ self.channel = grpc.insecure_channel("{}:{}".format(server, port))
+ self.stub = frr_northbound_pb2_grpc.NorthboundStub(self.channel)
+
+ def get_capabilities(self):
+ request = frr_northbound_pb2.GetCapabilitiesRequest()
+ response = "NONE"
+ try:
+ response = self.stub.GetCapabilities(request)
+ except Exception as error:
+ logging.error("Got exception from stub: %s", error)
+
+ logging.debug("GRPC Capabilities: %s", response)
+ return response
+
+ def get(self, xpath):
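+ # Request ALL data (config and state) under the xpath, XML-encoded, and concatenate the streamed replies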
+ request = frr_northbound_pb2.GetRequest()
+ request.path.append(xpath)
+ request.type = frr_northbound_pb2.GetRequest.ALL
+ request.encoding = frr_northbound_pb2.XML
+ xml = ""
+ for r in self.stub.Get(request):
+ logging.info('GRPC Get path: "%s" value: %s', request.path, r)
+ xml += str(r.data.data)
+ return xml
+
+
+def next_action(action_list=None):
+ "Get next action from list or STDIN"
+ if action_list:
+ for action in action_list:
+ yield action
+ else:
+ while True:
+ try:
+ action = input("")
+ if not action:
+ break
+ yield action.strip()
+ except EOFError:
+ break
+
+
+def main(*args):
+ parser = argparse.ArgumentParser(description="gRPC Client")
+ parser.add_argument(
+ "-s", "--server", default="localhost", help="gRPC Server Address"
+ )
+ parser.add_argument(
+ "-p", "--port", type=int, default=50051, help="gRPC Server TCP Port"
+ )
+ parser.add_argument("-v", "--verbose", action="store_true", help="be verbose")
+ parser.add_argument("--check", action="store_true", help="check runable")
+ parser.add_argument("actions", nargs="*", help="GETCAP|GET,xpath")
+ args = parser.parse_args(*args)
+
+ level = logging.DEBUG if args.verbose else logging.INFO
+ logging.basicConfig(
+ level=level,
+ format="%(asctime)s %(levelname)s: GRPC-CLI-CLIENT: %(name)s %(message)s",
+ )
+
+ if args.check:
+ sys.exit(0)
+
+ c = GRPCClient(args.server, args.port)
+
+ for action in next_action(args.actions):
+ action = action.casefold()
+ logging.info("GOT ACTION: %s", action)
+ if action == "getcap":
+ caps = c.get_capabilities()
+ print("Capabilities:", caps)
+ elif action.startswith("get,"):
+ # Print Interface State and Config
+ _, xpath = action.split(",", 1)
+ print("Get XPath: ", xpath)
+ xml = c.get(xpath)
+ print("{}: {}".format(xpath, xml))
+ # for _ in range(0, 1):
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tests/topotests/lib/micronet_cli.py b/tests/topotests/lib/micronet_cli.py
index 4292f47ce0..ef804f6dc2 100644
--- a/tests/topotests/lib/micronet_cli.py
+++ b/tests/topotests/lib/micronet_cli.py
@@ -110,7 +110,7 @@ def doline(unet, line, writef):
args = oargs.split()
if not args or (len(args) == 1 and args[0] == "*"):
args = sorted(unet.hosts.keys())
- hosts = [unet.hosts[x] for x in args]
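+ # Ignore arguments that do not name a known host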
+ hosts = [unet.hosts[x] for x in args if x in unet.hosts]
for host in hosts:
if cmd == "t" or cmd == "term":
host.run_in_window("bash", title="sh-%s" % host)
@@ -250,6 +250,8 @@ def cli(
prompt=None,
background=True,
):
+ logger = logging.getLogger("cli-client")
+
if prompt is None:
prompt = "unet> "
diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py
index 8d2bf12af2..e7ea7d32ba 100644
--- a/tests/topotests/lib/ospf.py
+++ b/tests/topotests/lib/ospf.py
@@ -1668,7 +1668,7 @@ def verify_ospf6_rib(
logger.info("Checking router %s RIB:", router)
# Verifying RIB routes
- command = "show ipv6 ospf route"
+ command = "show ipv6 ospf route detail"
found_routes = []
missing_routes = []
@@ -1710,6 +1710,8 @@ def verify_ospf6_rib(
# Generating IPs for verification
ip_list = generate_ips(network, no_of_ip)
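+ # With a single generated address, verify the network prefix itself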
+ if len(ip_list) == 1:
+ ip_list = [network]
st_found = False
nh_found = False
for st_rt in ip_list:
@@ -1846,7 +1848,7 @@ def verify_ospf6_rib(
return errormsg
if metric is not None:
- if "type2cost" not in ospf_rib_json[st_rt]:
+ if "metricCostE2" not in ospf_rib_json[st_rt]:
errormsg = (
"[DUT: {}]: metric is"
" not present for"
@@ -1854,7 +1856,7 @@ def verify_ospf6_rib(
)
return errormsg
- if metric != ospf_rib_json[st_rt]["type2cost"]:
+ if metric != ospf_rib_json[st_rt]["metricCostE2"]:
errormsg = (
"[DUT: {}]: metric value "
"{} is not matched for "
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index a83ae7071f..4ed5b2f825 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -804,9 +804,11 @@ class TopoRouter(TopoGear):
for daemon in self.RD:
# This will not work for all daemons
daemonstr = self.RD.get(daemon).rstrip("d")
- result = self.run(
- "grep 'router {}' {}".format(daemonstr, source)
- ).strip()
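+ # pimd has no "router pim" stanza; its config lines start with "ip pim"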
+ if daemonstr == "pim":
+ grep_cmd = "grep 'ip {}' {}".format(daemonstr, source)
+ else:
+ grep_cmd = "grep 'router {}' {}".format(daemonstr, source)
+ result = self.run(grep_cmd).strip()
if result:
self.load_config(daemon)
else:
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index 4e5fe4c90b..e786ae02cd 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -737,6 +737,7 @@ def proto_name_to_number(protocol):
"sharp": "194",
"pbr": "195",
"static": "196",
+ "ospf6": "197",
}.get(
protocol, protocol
) # default return same as input
@@ -1749,7 +1750,7 @@ class Router(Node):
daemon, self.logdir, self.name
)
- cmdopt = "{} --log file:{}.log --log-level debug".format(
+ cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
daemon_opts, daemon
)
if extra_opts:
diff --git a/tests/topotests/ospf6_topo2/test_ospf6_topo2.py b/tests/topotests/ospf6_topo2/test_ospf6_topo2.py
index eb8561c404..d17aeda3ea 100644
--- a/tests/topotests/ospf6_topo2/test_ospf6_topo2.py
+++ b/tests/topotests/ospf6_topo2/test_ospf6_topo2.py
@@ -134,6 +134,7 @@ def build_topo(tgen):
switch = tgen.add_switch("s4")
switch.add_link(tgen.gears["r4"], nodeif="r4-stubnet")
+
def setup_module(mod):
"Sets up the pytest environment"
tgen = Topogen(build_topo, mod.__name__)
@@ -585,10 +586,11 @@ def test_nssa_range():
logger.info("Expecting NSSA range to be added on r3")
routes = {
"2001:db8:1000::/64": {
- "metricType":2,
- "metricCost":20,
- "metricCostE2":10,
- }}
+ "metricType": 2,
+ "metricCost": 20,
+ "metricCostE2": 10,
+ }
+ }
expect_ospfv3_routes("r3", routes, wait=30, type="external-2", detail=True)
# Change the NSSA range cost.
@@ -601,10 +603,11 @@ def test_nssa_range():
logger.info("Expecting NSSA range to be updated with new cost")
routes = {
"2001:db8:1000::/64": {
- "metricType":2,
- "metricCost":20,
- "metricCostE2":1000,
- }}
+ "metricType": 2,
+ "metricCost": 20,
+ "metricCostE2": 1000,
+ }
+ }
expect_ospfv3_routes("r3", routes, wait=30, type="external-2", detail=True)
# Configure the NSSA range to not be advertised.
@@ -631,12 +634,12 @@ def test_nssa_range():
logger.info("Expecting previously summarized routes to be re-added")
routes = {
"2001:db8:1000::1/128": {
- "metricType":2,
- "metricCost":20,
+ "metricType": 2,
+ "metricCostE2": 20,
},
"2001:db8:1000::2/128": {
- "metricType":2,
- "metricCost":20,
+ "metricType": 2,
+ "metricCostE2": 20,
},
}
expect_ospfv3_routes("r3", routes, wait=30, type="external-2", detail=True)
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/__init__.py b/tests/topotests/ospf_multi_vrf_bgp_route_leak/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/__init__.py
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/frr.conf b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/frr.conf
new file mode 100644
index 0000000000..e365e25772
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/frr.conf
@@ -0,0 +1,57 @@
+!
+hostname r1
+password zebra
+log file /tmp/r1-frr.log
+!
+interface r1-eth0
+ ip address 10.0.1.1/24
+!
+interface r1-eth1
+ ip address 10.0.20.1/24
+!
+interface r1-eth2 vrf neno
+ ip address 10.0.30.1/24
+!
+ip forwarding
+!
+router ospf
+ ospf router-id 10.0.255.1
+ redistribute bgp
+ network 10.0.1.0/24 area 0
+ network 10.0.20.0/24 area 0
+!
+router ospf vrf neno
+ ospf router-id 10.0.255.1
+ redistribute bgp
+ network 10.0.30.0/24 area 0
+!
+!
+router bgp 99
+ no bgp ebgp-requires-policy
+ address-family ipv4 unicast
+ redistribute connected
+ redistribute ospf
+ import vrf neno
+ !
+!
+router bgp 99 vrf neno
+ no bgp ebgp-requires-policy
+ address-family ipv4 unicast
+ redistribute connected
+ redistribute ospf
+ import vrf route-map rmap
+ import vrf default
+ !
+!
+!!!!!!!!!!!!!!!!!!!!!
+! VRFs neno and ray subnets
+ip prefix-list nets seq 5 permit 10.0.3.0/24
+ip prefix-list nets seq 10 permit 10.0.30.0/24
+ip prefix-list nets seq 15 permit 10.0.4.0/24
+ip prefix-list nets seq 20 permit 10.0.40.0/24
+ip prefix-list nets seq 25 deny any
+!
+route-map rmap permit 10
+ match ip address prefix-list nets
+ exit
+!
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/ospf-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/ospf-vrf-default.txt
new file mode 100644
index 0000000000..45b7fad334
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/ospf-vrf-default.txt
@@ -0,0 +1,19 @@
+VRF Name: default
+============ OSPF network routing table ============
+N 10.0.1.0/24 [10] area: 0.0.0.0
+ directly attached to r1-eth0
+N 10.0.2.0/24 [20] area: 0.0.0.0
+ via 10.0.20.2, r1-eth1
+N 10.0.20.0/24 [10] area: 0.0.0.0
+ directly attached to r1-eth1
+
+============ OSPF router routing table =============
+R 10.0.255.2 [10] area: 0.0.0.0, ASBR
+ via 10.0.20.2, r1-eth1
+
+============ OSPF external routing table ===========
+N E2 10.0.4.0/24 [10/20] tag: 0
+ via 10.0.20.2, r1-eth1
+N E2 10.0.40.0/24 [10/20] tag: 0
+ via 10.0.20.2, r1-eth1
+
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/ospf-vrf-neno.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/ospf-vrf-neno.txt
new file mode 100644
index 0000000000..cc2c1baa17
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/ospf-vrf-neno.txt
@@ -0,0 +1,12 @@
+VRF Name: neno
+============ OSPF network routing table ============
+N 10.0.3.0/24 [20] area: 0.0.0.0
+ via 10.0.30.3, r1-eth2
+N 10.0.30.0/24 [10] area: 0.0.0.0
+ directly attached to r1-eth2
+
+============ OSPF router routing table =============
+R 10.0.255.3 [10] area: 0.0.0.0, ASBR
+ via 10.0.30.3, r1-eth2
+
+============ OSPF external routing table ===========
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
new file mode 100644
index 0000000000..86c089ab3b
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
@@ -0,0 +1,9 @@
+O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX
+C>* 10.0.1.0/24 is directly connected, r1-eth0, XX:XX:XX
+O>* 10.0.2.0/24 [110/20] via 10.0.20.2, r1-eth1, weight 1, XX:XX:XX
+B>* 10.0.3.0/24 [20/20] via 10.0.30.3, r1-eth2 (vrf neno), weight 1, XX:XX:XX
+O>* 10.0.4.0/24 [110/20] via 10.0.20.2, r1-eth1, weight 1, XX:XX:XX
+O 10.0.20.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX
+C>* 10.0.20.0/24 is directly connected, r1-eth1, XX:XX:XX
+B>* 10.0.30.0/24 [20/0] is directly connected, r1-eth2 (vrf neno), weight 1, XX:XX:XX
+O>* 10.0.40.0/24 [110/20] via 10.0.20.2, r1-eth1, weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt
new file mode 100644
index 0000000000..4e818eb6f1
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt
@@ -0,0 +1,6 @@
+VRF neno:
+O>* 10.0.3.0/24 [110/20] via 10.0.30.3, r1-eth2, weight 1, XX:XX:XX
+B>* 10.0.4.0/24 [20/20] via 10.0.20.2, r1-eth1 (vrf default), weight 1, XX:XX:XX
+O 10.0.30.0/24 [110/10] is directly connected, r1-eth2, weight 1, XX:XX:XX
+C>* 10.0.30.0/24 is directly connected, r1-eth2, XX:XX:XX
+B>* 10.0.40.0/24 [20/20] via 10.0.20.2, r1-eth1 (vrf default), weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/frr.conf b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/frr.conf
new file mode 100644
index 0000000000..e87899ca77
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/frr.conf
@@ -0,0 +1,62 @@
+!
+hostname r2
+password zebra
+log file /tmp/r2-frr.log
+!
+interface r2-eth0
+ ip address 10.0.2.2/24
+!
+interface r2-eth1
+ ip address 10.0.20.2/24
+!
+ip route 0.0.0.0/0 10.0.20.1
+!
+interface r2-eth2 vrf ray
+ ip address 10.0.40.2/24
+!
+ip forwarding
+!
+vrf ray
+ ip protocol bgp route-map rmap
+ exit-vrf
+!
+router ospf
+ ospf router-id 10.0.255.2
+ redistribute bgp
+ network 10.0.2.0/24 area 0
+ network 10.0.20.0/24 area 0
+!
+router ospf vrf ray
+ ospf router-id 10.0.255.1
+ redistribute bgp
+ network 10.0.40.0/24 area 0
+!
+
+router bgp 99
+ no bgp ebgp-requires-policy
+ address-family ipv4 unicast
+ redistribute connected
+ redistribute ospf
+ import vrf ray
+ !
+!
+router bgp 99 vrf ray
+ no bgp ebgp-requires-policy
+ address-family ipv4 unicast
+ redistribute connected
+ redistribute ospf
+ import vrf default
+ !
+!
+!!!!!!!!!!!!!!!!!!!!!
+! VRFs neno and ray subnets
+ip prefix-list nets seq 5 permit 10.0.3.0/24
+ip prefix-list nets seq 10 permit 10.0.30.0/24
+ip prefix-list nets seq 15 permit 10.0.4.0/24
+ip prefix-list nets seq 20 permit 10.0.40.0/24
+ip prefix-list nets seq 25 deny any
+!
+route-map rmap permit 10
+ match ip address prefix-list nets
+ exit
+!
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/ospf-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/ospf-vrf-default.txt
new file mode 100644
index 0000000000..77b8038b70
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/ospf-vrf-default.txt
@@ -0,0 +1,20 @@
+VRF Name: default
+============ OSPF network routing table ============
+N 10.0.1.0/24 [20] area: 0.0.0.0
+ via 10.0.20.1, r2-eth1
+N 10.0.2.0/24 [10] area: 0.0.0.0
+ directly attached to r2-eth0
+N 10.0.20.0/24 [10] area: 0.0.0.0
+ directly attached to r2-eth1
+
+============ OSPF router routing table =============
+R 10.0.255.1 [10] area: 0.0.0.0, ASBR
+ via 10.0.20.1, r2-eth1
+
+============ OSPF external routing table ===========
+N E2 10.0.3.0/24 [10/20] tag: 0
+ via 10.0.20.1, r2-eth1
+N E2 10.0.30.0/24 [10/20] tag: 0
+ via 10.0.20.1, r2-eth1
+
+
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/ospf-vrf-ray.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/ospf-vrf-ray.txt
new file mode 100644
index 0000000000..b70ee9d5a6
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/ospf-vrf-ray.txt
@@ -0,0 +1,15 @@
+VRF Name: ray
+============ OSPF network routing table ============
+N 10.0.4.0/24 [20] area: 0.0.0.0
+ via 10.0.40.4, r2-eth2
+N 10.0.40.0/24 [10] area: 0.0.0.0
+ directly attached to r2-eth2
+
+============ OSPF router routing table =============
+R 10.0.255.4 [10] area: 0.0.0.0, ASBR
+ via 10.0.40.4, r2-eth2
+
+============ OSPF external routing table ===========
+
+
+
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
new file mode 100644
index 0000000000..9681d8a04e
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
@@ -0,0 +1,10 @@
+S>* 0.0.0.0/0 [1/0] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX
+O>* 10.0.1.0/24 [110/20] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX
+O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX
+C>* 10.0.2.0/24 is directly connected, r2-eth0, XX:XX:XX
+O>* 10.0.3.0/24 [110/20] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX
+B>* 10.0.4.0/24 [20/20] via 10.0.40.4, r2-eth2 (vrf ray), weight 1, XX:XX:XX
+O 10.0.20.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX
+C>* 10.0.20.0/24 is directly connected, r2-eth1, XX:XX:XX
+O>* 10.0.30.0/24 [110/20] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX
+B>* 10.0.40.0/24 [20/0] is directly connected, r2-eth2 (vrf ray), weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
new file mode 100644
index 0000000000..ce9903ae71
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
@@ -0,0 +1,9 @@
+VRF ray:
+B 10.0.1.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX
+B 10.0.2.0/24 [20/0] is directly connected, r2-eth0 (vrf default) inactive, weight 1, XX:XX:XX
+B>* 10.0.3.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX
+O>* 10.0.4.0/24 [110/20] via 10.0.40.4, r2-eth2, weight 1, XX:XX:XX
+B 10.0.20.0/24 [20/0] is directly connected, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX
+B>* 10.0.30.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX
+O 10.0.40.0/24 [110/10] is directly connected, r2-eth2, weight 1, XX:XX:XX
+C>* 10.0.40.0/24 is directly connected, r2-eth2, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/frr.conf b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/frr.conf
new file mode 100644
index 0000000000..2657f589d8
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/frr.conf
@@ -0,0 +1,22 @@
+!
+hostname r3
+password zebra
+log file /tmp/r3-frr.log
+!
+interface r3-eth0
+ ip address 10.0.3.3/24
+!
+interface r3-eth1
+ ip address 10.0.30.3/24
+!
+ip forwarding
+!
+!
+router ospf
+ ospf router-id 10.0.255.3
+ redistribute kernel
+ redistribute connected
+ redistribute static
+ network 10.0.3.0/24 area 0
+ network 10.0.30.0/24 area 0
+!
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/ospf-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/ospf-vrf-default.txt
new file mode 100644
index 0000000000..3eb5690834
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/ospf-vrf-default.txt
@@ -0,0 +1,17 @@
+VRF Name: default
+============ OSPF network routing table ============
+N 10.0.3.0/24 [10] area: 0.0.0.0
+ directly attached to r3-eth0
+N 10.0.30.0/24 [10] area: 0.0.0.0
+ directly attached to r3-eth1
+
+============ OSPF router routing table =============
+R 10.0.255.1 [10] area: 0.0.0.0, ASBR
+ via 10.0.30.1, r3-eth1
+
+============ OSPF external routing table ===========
+N E2 10.0.4.0/24 [10/20] tag: 0
+ via 10.0.30.1, r3-eth1
+N E2 10.0.40.0/24 [10/20] tag: 0
+ via 10.0.30.1, r3-eth1
+
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt
new file mode 100644
index 0000000000..f6f861b73b
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt
@@ -0,0 +1,8 @@
+O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, weight 1, XX:XX:XX
+C>* 10.0.3.0/24 is directly connected, r3-eth0, XX:XX:XX
+O>* 10.0.4.0/24 [110/20] via 10.0.30.1, r3-eth1, weight 1, XX:XX:XX
+O 10.0.30.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX
+C>* 10.0.30.0/24 is directly connected, r3-eth1, XX:XX:XX
+O>* 10.0.40.0/24 [110/20] via 10.0.30.1, r3-eth1, weight 1, XX:XX:XX
+
+
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/frr.conf b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/frr.conf
new file mode 100644
index 0000000000..79d8077062
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/frr.conf
@@ -0,0 +1,22 @@
+!
+hostname r4
+password zebra
+log file /tmp/r4-frr.log
+!
+interface r4-eth0
+ ip address 10.0.4.4/24
+!
+interface r4-eth1
+ ip address 10.0.40.4/24
+!
+ip forwarding
+!
+!
+router ospf
+ ospf router-id 10.0.255.4
+ redistribute kernel
+ redistribute connected
+ redistribute static
+ network 10.0.4.0/24 area 0
+ network 10.0.40.0/24 area 0
+!
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/ospf-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/ospf-vrf-default.txt
new file mode 100644
index 0000000000..ad799af996
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/ospf-vrf-default.txt
@@ -0,0 +1,17 @@
+VRF Name: default
+============ OSPF network routing table ============
+N 10.0.4.0/24 [10] area: 0.0.0.0
+ directly attached to r4-eth0
+N 10.0.40.0/24 [10] area: 0.0.0.0
+ directly attached to r4-eth1
+
+============ OSPF router routing table =============
+R 10.0.255.1 [10] area: 0.0.0.0, ASBR
+ via 10.0.40.2, r4-eth1
+
+============ OSPF external routing table ===========
+N E2 10.0.3.0/24 [10/20] tag: 0
+ via 10.0.40.2, r4-eth1
+N E2 10.0.30.0/24 [10/20] tag: 0
+ via 10.0.40.2, r4-eth1
+
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt
new file mode 100644
index 0000000000..b6be5e7fdb
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt
@@ -0,0 +1,7 @@
+O>* 10.0.3.0/24 [110/20] via 10.0.40.2, r4-eth1, weight 1, XX:XX:XX
+O 10.0.4.0/24 [110/10] is directly connected, r4-eth0, weight 1, XX:XX:XX
+C>* 10.0.4.0/24 is directly connected, r4-eth0, XX:XX:XX
+O>* 10.0.30.0/24 [110/20] via 10.0.40.2, r4-eth1, weight 1, XX:XX:XX
+O 10.0.40.0/24 [110/10] is directly connected, r4-eth1, weight 1, XX:XX:XX
+C>* 10.0.40.0/24 is directly connected, r4-eth1, XX:XX:XX
+
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/test_ospf_multi_vrf_bgp_route_leak.py b/tests/topotests/ospf_multi_vrf_bgp_route_leak/test_ospf_multi_vrf_bgp_route_leak.py
new file mode 100644
index 0000000000..0b6b50ec06
--- /dev/null
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/test_ospf_multi_vrf_bgp_route_leak.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+
+#
+# test_ospf_multi_vrf_bgp_route_leak.py
+#
+# Copyright (c) 2022 ATCorp
+# Jafar Al-Gharaibeh
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+import os
+import sys
+from functools import partial
+import pytest
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+
+"""
+test_ospf_multi_vrf_bgp_route_leak.py: Test OSPF with multi vrf setup and route leaking.
+"""
+
+TOPOLOGY = """
+ BGP route leaking (connected/ospf), VRFs: neno <==> default <==> ray
+ Routes leaked into the VRFs are limited to the neno and ray routes.
+
+ 10.0.1.1/24
+ ^
+ |vrf:default
+ +---+---+
+ 10.0.30.0/24 .1| |.1
+ +-----------------+ R1 +
+ | vrf:neno | |
+ | +---+---+ ^
+ |.3 .1|vrf:default | 10.0.4.4/24
+ +---+---+ | +---+---+
+ | | 10.0.20.0/24 | |
+ | R3 | | | R4 |
+ | | |.2 | |
+ +---+---+ +---+---+ +---+---+
+ | | | vrf:ray |.4
+ v | R2 +----------------+
+10.0.3.3/24 | |.2 10.0.40.0/24
+ +---+---+
+ |
+ v
+ 10.0.2.2/24
+
+"""
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# Required to instantiate the topology builder class.
+
+pytestmark = [pytest.mark.ospfd, pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 4 routers
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
+
+ # Create an empty network for router 1
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+
+ # Create an empty network for router 2
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+
+ # Create an empty network for router 3
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r3"])
+
+ # Create an empty network for router 4
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r4"])
+
+ # Interconnect routers 1 and 2
+ switch = tgen.add_switch("s1-2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ # Interconnect routers 1 and 3
+ switch = tgen.add_switch("s1-3")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
+ # Interconnect routers 2 and 4
+ switch = tgen.add_switch("s2-4")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
+
+
+def setup_module(mod):
+ logger.info("OSPF Multi VRF Topology with BGP route leaking:\n {}".format(TOPOLOGY))
+
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ r1_vrf_setup_cmds = [
+ "ip link add name neno type vrf table 11",
+ "ip link set dev neno up",
+ "ip link set dev r1-eth2 vrf neno up",
+ ]
+ r2_vrf_setup_cmds = [
+ "ip link add name ray type vrf table 11",
+ "ip link set dev ray up",
+ "ip link set dev r2-eth2 vrf ray up",
+ ]
+
+ # Starting Routers
+ router_list = tgen.routers()
+
+ # Create VRFs on r1/r2 and bind to interfaces
+ for cmd in r1_vrf_setup_cmds:
+ tgen.net["r1"].cmd(cmd)
+ for cmd in r2_vrf_setup_cmds:
+ tgen.net["r2"].cmd(cmd)
+
+ logger.info("Testing OSPF VRF support")
+
+ for rname, router in router_list.items():
+ logger.info("Loading router %s" % rname)
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ # Initialize all routers.
+ tgen.start_router()
+ for router in router_list.values():
+ if router.has_version("<", "4.0"):
+ tgen.set_error("unsupported version")
+
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+# Shared test function to validate expected output.
+def compare_show_ip_route_vrf(rname, expected, vrf_name):
+ """
+ Calls 'show ip route vrf [vrf_name]' and compares the obtained
+ result with the expected output.
+ """
+ tgen = get_topogen()
+ current = topotest.ip4_route_zebra(tgen.gears[rname], vrf_name)
+ ret = topotest.difflines(
+ current, expected, title1="Current output", title2="Expected output"
+ )
+ return ret
+
+
+def test_ospf_convergence():
+ "Test OSPF daemon convergence"
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ for rname, router in tgen.routers().items():
+ logger.info('Waiting for router "%s" convergence', rname)
+
+ for vrf in ["default", "neno", "ray"]:
+ # Load expected results from the reference file
+ reffile = os.path.join(CWD, "{}/ospf-vrf-{}.txt".format(rname, vrf))
+ if vrf == "default" or os.path.exists(reffile):
+ expected = open(reffile).read()
+
+ # Run the test function until we get a result; wait at most 80 seconds.
+ test_func = partial(
+ topotest.router_output_cmp,
+ router,
+ "show ip ospf vrf {} route".format(vrf),
+ expected,
+ )
+ result, diff = topotest.run_and_expect(
+ test_func, "", count=80, wait=1
+ )
+ assertmsg = "OSPF did not converge on {}:\n{}".format(rname, diff)
+ assert result, assertmsg
+
+
+def test_ospf_kernel_route():
+ "Test OSPF kernel route installation"
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ rlist = tgen.routers().values()
+ for router in rlist:
+ logger.info('Checking OSPF IPv4 kernel routes in "%s"', router.name)
+ for vrf in ["default", "neno", "ray"]:
+ reffile = os.path.join(CWD, "{}/zebra-vrf-{}.txt".format(router.name, vrf))
+ if vrf == "default" or os.path.exists(reffile):
+ expected = open(reffile).read()
+ # Run the test function until we get a result; wait at most 80 seconds.
+ test_func = partial(
+ compare_show_ip_route_vrf, router.name, expected, vrf
+ )
+ result, diff = topotest.run_and_expect(
+ test_func, "", count=80, wait=1
+ )
+ assertmsg = 'OSPF IPv4 route mismatch in router "{}": {}'.format(
+ router.name, diff
+ )
+ assert result, assertmsg
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_type7_lsa.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_type7_lsa.json
new file mode 100644
index 0000000000..27b36aea17
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_asbr_summary_type7_lsa.json
@@ -0,0 +1,202 @@
+{
+ "address_types": [
+ "ipv6"
+ ],
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r1": {},
+ "r3": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR3"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto"
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r1": {},
+ "r3": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR1",
+ "ospf6": {
+ "area": "0.0.0.3"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.3",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r0": {},
+ "r1": {},
+ "r2": {}
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_dual_stack.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_dual_stack.json
new file mode 100644
index 0000000000..5555d9291e
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_dual_stack.json
@@ -0,0 +1,312 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r3": {}
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3-link0": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "description": "DummyIntftoR3"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.1",
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r0": {},
+ "r3": {}
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r0": {},
+ "r3": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ },
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link0": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "description": "DummyIntftoR1",
+ "ospf6": {
+ "area": "0.0.0.0"
+ },
+ "ospf": {
+ "area": "0.0.0.0"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "1.0.4.17",
+ "neighbors": {
+ "r0": {},
+ "r1": {},
+ "r2": {}
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.3",
+ "neighbors": {
+ "r0": {},
+ "r1": {},
+ "r2": {}
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json
index c928093925..22f46e2bf6 100644
--- a/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp.json
@@ -2,6 +2,7 @@
"address_types": [
"ipv6"
],
+
"ipv6base": "fd00::",
"ipv6mask": 64,
"link_ip_start": {
@@ -9,6 +10,7 @@
"v6mask": 64
},
"lo_prefix": {
+
"ipv6": "2001:db8:f::",
"v6mask": 128
},
@@ -21,6 +23,7 @@
},
"r1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -29,6 +32,7 @@
},
"r1-link1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -37,6 +41,7 @@
},
"r1-link2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -45,6 +50,7 @@
},
"r1-link3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -53,6 +59,7 @@
},
"r1-link4": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -61,6 +68,7 @@
},
"r1-link5": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -69,6 +77,7 @@
},
"r1-link6": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -77,6 +86,7 @@
},
"r1-link7": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -85,6 +95,7 @@
},
"r2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -93,6 +104,7 @@
},
"r3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -101,6 +113,7 @@
}
}
},
+
"ospf6": {
"router_id": "100.1.1.0",
"neighbors": {
@@ -139,6 +152,7 @@
},
"r0": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -147,6 +161,7 @@
},
"r0-link1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -155,6 +170,7 @@
},
"r0-link2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -163,6 +179,7 @@
},
"r0-link3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -171,6 +188,7 @@
},
"r0-link4": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -179,6 +197,7 @@
},
"r0-link5": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -187,6 +206,7 @@
},
"r0-link6": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -195,6 +215,7 @@
},
"r0-link7": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -203,6 +224,7 @@
},
"r2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -211,6 +233,7 @@
},
"r3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -222,6 +245,7 @@
"description": "DummyIntftoR3"
}
},
+
"ospf6": {
"router_id": "100.1.1.1",
"neighbors": {
@@ -260,6 +284,7 @@
},
"r0": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -268,6 +293,7 @@
},
"r1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -276,6 +302,7 @@
},
"r3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -283,6 +310,7 @@
}
}
},
+
"ospf6": {
"router_id": "100.1.1.2",
"neighbors": {
@@ -300,6 +328,7 @@
},
"r0": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -309,6 +338,7 @@
},
"r1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -317,6 +347,7 @@
},
"r2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -326,14 +357,13 @@
"r1-link0": {
"ipv6": "auto",
"description": "DummyIntftoR1",
- "ospf": {
- "area": "0.0.0.0"
- },
+
"ospf6": {
"area": "0.0.0.0"
}
}
},
+
"ospf6": {
"router_id": "100.1.1.3",
"neighbors": {
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp_lan.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp_lan.json
new file mode 100644
index 0000000000..53b3f49e62
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_ecmp_lan.json
@@ -0,0 +1,264 @@
+{
+ "address_types": [
+ "ipv6"
+ ],
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "switches": {
+ "s1": {
+ "links": {
+ "r0": {
+
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 98
+ }
+ },
+ "r1": {
+
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 99
+ }
+ },
+ "r2": {
+
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 0
+ }
+ },
+ "r3": {
+
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 0
+ }
+ },
+ "r4": {
+
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 0
+ }
+ },
+ "r5": {
+
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 0
+ }
+ },
+ "r6": {
+
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 0
+ }
+ },
+ "r7": {
+
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 0
+ }
+ }
+ }
+ }
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+
+ "ipv6": "auto",
+ "type": "loopback"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r3": {},
+ "r4": {},
+ "r5": {},
+ "r6": {},
+ "r7": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3-link0": {
+
+ "ipv6": "auto",
+ "description": "DummyIntftoR3"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {},
+ "r4": {},
+ "r5": {},
+ "r6": {},
+ "r7": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+
+ "ipv6": "auto",
+ "type": "loopback"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r0": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link0": {
+
+ "ipv6": "auto",
+ "description": "DummyIntftoR1"
+
+ }
+ },
+
+ "ospf6": {
+ "router_id": "100.1.1.3",
+ "neighbors": {
+ "r0": {},
+ "r1": {}
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {
+
+ "ipv6": "auto",
+ "type": "loopback"
+ }
+ },
+
+ "ospf6": {
+ "router_id": "100.1.1.4",
+ "neighbors": {
+ "r0": {},
+ "r1": {}
+ }
+ }
+ },
+ "r5": {
+ "links": {
+ "lo": {
+
+ "ipv6": "auto",
+ "type": "loopback"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.5",
+ "neighbors": {
+ "r0": {},
+ "r1": {}
+ }
+ }
+ },
+ "r6": {
+ "links": {
+ "lo": {
+
+ "ipv6": "auto",
+ "type": "loopback"
+ }
+ },
+
+ "ospf6": {
+ "router_id": "100.1.1.6",
+ "neighbors": {
+ "r0": {},
+ "r1": {}
+ }
+ }
+ },
+ "r7": {
+ "links": {
+ "lo": {
+
+ "ipv6": "auto",
+ "type": "loopback"
+ }
+ },
+
+ "ospf6": {
+ "router_id": "100.1.1.7",
+ "neighbors": {
+ "r0": {},
+ "r1": {}
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_lan.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_lan.json
new file mode 100644
index 0000000000..3a2fc022e5
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_lan.json
@@ -0,0 +1,140 @@
+{
+ "address_types": [
+ "ipv6"
+ ],
+
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "switches": {
+ "s1": {
+ "links": {
+ "r0": {
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 98
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 99
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 0
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+
+ "ospf6": {
+ "area": "0.0.0.3",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "priority": 0
+ }
+ }
+ }
+ }
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ }
+ },
+
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r3-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR3"
+ }
+ },
+
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ }
+ },
+
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r0": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR1"
+ }
+ },
+
+ "ospf6": {
+ "router_id": "100.1.1.3",
+ "neighbors": {
+ "r0": {},
+ "r1": {}
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_nssa2.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_nssa2.json
new file mode 100644
index 0000000000..b1432b9bee
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_nssa2.json
@@ -0,0 +1,197 @@
+
+{
+ "address_types": [
+ "ipv6"
+ ],
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r3": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR3"
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto"
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r1": {},
+ "r3": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r1": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv6": "auto",
+ "ospf6": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link0": {
+ "ipv6": "auto",
+ "description": "DummyIntftoR1",
+ "ospf6": {
+ "area": "0.0.0.3"
+ }
+ }
+ },
+ "ospf6": {
+ "router_id": "100.1.1.3",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r0": {},
+ "r1": {},
+ "r2": {}
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json
index 226f84f320..a1c7bd72d4 100644
--- a/tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_routemaps.json
@@ -1,14 +1,26 @@
{
- "address_types": ["ipv6"],
+ "address_types": [
+ "ipv6"
+ ],
+
"ipv6base": "fd00::",
"ipv6mask": 64,
- "link_ip_start": {"ipv6": "fd00::", "v6mask": 64},
- "lo_prefix": {"ipv6": "2001:db8:f::", "v6mask": 128},
+ "link_ip_start": {
+
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
"routers": {
"r0": {
"links": {
"r1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -17,6 +29,7 @@
},
"r2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -25,6 +38,7 @@
},
"r3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -32,15 +46,21 @@
}
}
},
+
"ospf6": {
"router_id": "100.1.1.0",
- "neighbors": {"r1": {}, "r2": {}, "r3": {}}
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r3": {}
+ }
}
},
"r1": {
"links": {
"r0": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -49,6 +69,7 @@
},
"r2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -57,6 +78,7 @@
},
"r3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -64,15 +86,21 @@
}
}
},
+
"ospf6": {
"router_id": "100.1.1.1",
- "neighbors": {"r0": {}, "r2": {}, "r3": {}}
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
}
},
"r2": {
"links": {
"r0": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -81,6 +109,7 @@
},
"r1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -89,6 +118,7 @@
},
"r3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -96,15 +126,21 @@
}
}
},
+
"ospf6": {
"router_id": "100.1.1.2",
- "neighbors": {"r1": {}, "r0": {}, "r3": {}}
+ "neighbors": {
+ "r1": {},
+ "r0": {},
+ "r3": {}
+ }
}
},
"r3": {
"links": {
"r0": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -113,6 +149,7 @@
},
"r1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -121,6 +158,7 @@
},
"r2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -128,10 +166,15 @@
}
}
},
+
"ospf6": {
"router_id": "100.1.1.3",
- "neighbors": {"r0": {}, "r1": {}, "r2": {}}
+ "neighbors": {
+ "r0": {},
+ "r1": {},
+ "r2": {}
+ }
}
}
}
-}
+} \ No newline at end of file
diff --git a/tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json b/tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json
index 3669b3a554..e70481ace9 100644
--- a/tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json
+++ b/tests/topotests/ospfv3_basic_functionality/ospfv3_rte_calc.json
@@ -5,13 +5,16 @@
"address_types": [
"ipv6"
],
+
"ipv6base": "fd00::",
"ipv6mask": 64,
"link_ip_start": {
+
"ipv6": "fd00::",
"v6mask": 64
},
"lo_prefix": {
+
"ipv6": "2001:db8:f::",
"v6mask": 128
},
@@ -24,6 +27,7 @@
},
"r1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -32,6 +36,7 @@
},
"r2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -40,6 +45,7 @@
},
"r3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -47,6 +53,7 @@
}
}
},
+
"ospf6": {
"router_id": "100.1.1.0",
"neighbors": {
@@ -63,6 +70,7 @@
},
"r0": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -71,6 +79,7 @@
},
"r2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -79,6 +88,7 @@
},
"r3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -86,6 +96,7 @@
}
}
},
+
"ospf6": {
"router_id": "100.1.1.1",
"neighbors": {
@@ -103,6 +114,7 @@
},
"r0": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -111,6 +123,7 @@
},
"r1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -119,6 +132,7 @@
},
"r3": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -126,6 +140,7 @@
}
}
},
+
"ospf6": {
"router_id": "100.1.1.2",
"neighbors": {
@@ -146,6 +161,7 @@
},
"r1": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
@@ -154,6 +170,7 @@
},
"r2": {
"ipv6": "auto",
+
"ospf6": {
"area": "0.0.0.0",
"hello_interval": 1,
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
index 47333fcb39..36cde06c3e 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
@@ -56,6 +56,7 @@ from lib.common_config import (
create_prefix_lists,
create_route_maps,
topo_daemons,
+ create_interfaces_cfg,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -747,6 +748,1084 @@ def test_ospfv3_type5_summary_tc42_p0(request):
write_test_footer(tc_name)
+def test_ospfv3_type5_summary_tc43_p0(request):
+ """OSPF summarisation with metric type 2."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ protocol = "ospf"
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ "5 static routes from different networks and redistribute in R0"
+ )
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5" " routes to one route."
+ )
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Change the summary address mask to lower match (ex - 16 to 8)")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "16"},
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "delete": True,
+ },
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ sleep(5)
+
+ input_dict = {
+ "2011::/16": {
+ "Summary address": "2011::/16",
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Verify that external routes(static / connected) are summarised"
+ " to configured summary address with newly configured mask."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": "2011::0/16"}]}}
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Change the summary address mask to higher match (ex - 8 to 24)")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "2011::/32": {
+ "Summary address": "2011::/32",
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 0,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Verify that external routes(static / connected) are summarised"
+ " to configured summary address with newly configured mask."
+ )
+ step("Configure 2 summary address with different mask of same network.")
+ step(
+ "Verify that external routes(static / connected) are summarised "
+ "to configured summary address with highest match."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": "2011::0/32"}]}}
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(" Un configure one of the summary address.")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ sleep(5)
+
+ step(
+ "Verify that external routes(static / connected) are summarised"
+ " to configured summary address with newly configured mask."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": "2011::0/16"}]}}
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "16"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes(static / connected) are summarised "
+ "to configured summary address with highest match."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": "2011::0/16"}]}}
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
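+# Editor's illustrative sketch (not part of the topotest library): the /32 and
+# /16 summary masks exercised in the test above can be reasoned about with the
+# stdlib ipaddress module. The prefixes below are assumed example values in
+# the same 2011:: range used by the test and are hard-coded for illustration.
+def _summary_mask_example():
+ from ipaddress import ip_address, ip_network
+
+ summary_specific = ip_network("2011::/32") # longer, more specific summary
+ summary_broad = ip_network("2011::/16") # shorter, less specific summary
+ route = ip_address("2011:0:20::1") # assumed redistributed external
+
+ # Both summaries cover the route; the /32 is a subnet of the /16, so the
+ # /32 is the highest match when both are configured at the same time.
+ assert summary_specific.subnet_of(summary_broad)
+ assert route in summary_specific and route in summary_broad
+
+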
+def ospfv3_type5_summary_tc45_p0(request):
+ """OSPF summarisation with Tag option"""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ step("Configure OSPF on all the routers of the topology.")
+ reset_config_on_routers(tgen)
+
+ protocol = "ospf"
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ "5 static routes from different networks and redistribute in R0"
+ )
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5" " routes to one route."
+ )
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "tag": "1234",
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary"
+ " address on R0 and only one route is sent to R1 with configured tag."
+ )
+ input_dict_summary = {
+ "r0": {"static_routes": [{"network": SUMMARY["ipv6"][0], "tag": "1234"}]}
+ }
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries with tag.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 1234,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Delete the configured summary")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "tag": "1234",
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
+ tc_name
+ )
+
+ step("show ip ospf summary should not have any summary address.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 1234,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(
+ tgen, topo, dut, input_dict, ospf="ospf6", expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+
+ step("Configure Min tag value")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32", "tag": 1}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ input_dict_summary = {
+ "r0": {"static_routes": [{"network": SUMMARY["ipv6"][0], "tag": "1"}]}
+ }
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries with tag.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 1,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Configure Max Tag Value")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "tag": 4294967295,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_summary = {
+ "r0": {"static_routes": [{"network": SUMMARY["ipv6"][0], "tag": "4294967295"}]}
+ }
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Verify that boundary values tags are used for summary route"
+ " using show ip ospf route command."
+ )
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 4294967295,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("configure new static route with different tag.")
+ input_dict_static_rtes_11 = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK_11["ipv6"], "next_hop": "blackhole", "tag": "88888"}
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes_11)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("New tag has not been used by summary address.")
+
+ input_dict_summary = {
+ "r0": {"static_routes": [{"network": SUMMARY["ipv6"][0], "tag": "88888"}]}
+ }
+ dut = "r1"
+
+ result = verify_ospf6_rib(
+ tgen, dut, input_dict_summary, tag="88888", expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen,
+ "ipv6",
+ dut,
+ input_dict_summary,
+ protocol=protocol,
+ tag="88888",
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step(
+ "Verify that boundary values tags are used for summary route"
+ " using show ip ospf route command."
+ )
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 88888,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(
+ tgen, topo, dut, input_dict, ospf="ospf6", expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Delete the configured summary address")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "tag": 4294967295,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that 6 routes are advertised to neighbour with 5 routes"
+ " without any tag, 1 route with tag."
+ )
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that summary address is flushed from neighbor.")
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step("Configure summary first & then configure matching static route.")
+
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole", "delete": True},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole", "delete": True},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Repeat steps 1 to 10 of summarisation in non Back bone area.")
+ reset_config_on_routers(tgen)
+
+ step("Change the area id on the interface on R0")
+ input_dict = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"area": "0.0.0.0"},
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"area": "0.0.0.1"},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Change the area id on the interface ")
+ input_dict = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"area": "0.0.0.0"},
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"area": "0.0.0.1"},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, ospf_covergence
+ )
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ "5 static routes from different networks and redistribute in R0"
+ )
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5" " routes to one route."
+ )
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "tag": "1234",
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary"
+ " address on R0 and only one route is sent to R1 with configured tag."
+ )
+ input_dict_summary = {
+ "r0": {"static_routes": [{"network": SUMMARY["ipv6"][0], "tag": "1234"}]}
+ }
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries with tag.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 1234,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Delete the configured summary")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
+ tc_name
+ )
+
+ step("show ip ospf summary should not have any summary address.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 1234,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(
+ tgen, topo, dut, input_dict, ospf="ospf6", expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+
+ step("Configure Min tag value")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32", "tag": 1}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ input_dict_summary = {
+ "r0": {"static_routes": [{"network": SUMMARY["ipv6"][0], "tag": "1"}]}
+ }
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries with tag.")
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 1,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Configure Max Tag Value")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "tag": 4294967295,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_summary = {
+ "r0": {"static_routes": [{"network": SUMMARY["ipv6"][0], "tag": "4294967295"}]}
+ }
+ dut = "r1"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Verify that boundary values tags are used for summary route"
+ " using show ip ospf route command."
+ )
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 4294967295,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("configure new static route with different tag.")
+ input_dict_static_rtes_11 = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK_11["ipv6"], "next_hop": "blackhole", "tag": "88888"}
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes_11)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("New tag has not been used by summary address.")
+
+ input_dict_summary = {
+ "r0": {"static_routes": [{"network": SUMMARY["ipv6"][0], "tag": "88888"}]}
+ }
+ dut = "r1"
+
+ result = verify_ospf6_rib(
+ tgen, dut, input_dict_summary, tag="88888", expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen,
+ "ipv6",
+ dut,
+ input_dict_summary,
+ protocol=protocol,
+ tag="88888",
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step(
+ "Verify that boundary values tags are used for summary route"
+ " using show ip ospf route command."
+ )
+ input_dict = {
+ SUMMARY["ipv6"][0]: {
+ "Summary address": SUMMARY["ipv6"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 88888,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(
+ tgen, topo, dut, input_dict, ospf="ospf6", expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Delete the configured summary address")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "tag": 4294967295,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that 6 routes are advertised to neighbour with 5 routes"
+ " without any tag, 1 route with tag."
+ )
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that summary address is flushed from neighbor.")
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step("Configure summary first & then configure matching static route.")
+
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole", "delete": True},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole", "delete": True},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf6": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
def test_ospfv3_type5_summary_tc46_p0(request):
"""OSPF summarisation with advertise and no advertise option"""
tc_name = request.node.name
@@ -1023,6 +2102,14 @@ def test_ospfv3_type5_summary_tc46_p0(request):
), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Verify that originally advertised routes are withdraw from there" " peer.")
+ output = tgen.gears["r0"].vtysh_cmd(
+ "show ipv6 ospf6 database as-external json", isjson=True
+ )
+
+ output = tgen.gears["r1"].vtysh_cmd(
+ "show ipv6 ospf6 database as-external json", isjson=True
+ )
+
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
@@ -1657,127 +2744,6 @@ def test_ospfv3_type5_summary_tc49_p2(request):
result is not True
), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
- step("Kill OSPF6d daemon on R0.")
- kill_router_daemons(tgen, "r0", ["ospf6d"])
-
- step("Bring up OSPF6d daemon on R0.")
- start_router_daemons(tgen, "r0", ["ospf6d"])
-
- step("Verify OSPF neighbors are up after bringing back ospf6d in R0")
- # Api call verify whether OSPF is converged
- ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
- ospf_covergence
- )
-
- step(
- "Verify that external routes are summarised to configured summary "
- "address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1."
- )
- input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
- dut = "r1"
-
- result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
- result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
- assert (
- result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
-
- step("Verify that show ip ospf summary should show the summaries.")
- input_dict = {
- SUMMARY["ipv6"][0]: {
- "Summary address": SUMMARY["ipv6"][0],
- "Metric-type": "E2",
- "Metric": 20,
- "Tag": 0,
- "External route count": 5,
- }
- }
- dut = "r0"
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
- assert (
- result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
-
- step("Verify that originally advertised routes are withdraw from there" " peer.")
- input_dict = {
- "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
- }
- dut = "r1"
- result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
- )
-
- result = verify_rib(
- tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
- )
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
-
- step("restart zebrad")
- kill_router_daemons(tgen, "r0", ["zebra"])
-
- step("Bring up zebra daemon on R0.")
- start_router_daemons(tgen, "r0", ["zebra"])
-
- step(
- "Verify that external routes are summarised to configured summary "
- "address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1."
- )
- input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
- dut = "r1"
-
- result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
-
- result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
- assert (
- result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
-
- step("Verify that show ip ospf summary should show the summaries.")
- input_dict = {
- SUMMARY["ipv6"][0]: {
- "Summary address": SUMMARY["ipv6"][0],
- "Metric-type": "E2",
- "Metric": 20,
- "Tag": 0,
- "External route count": 5,
- }
- }
- dut = "r0"
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
- assert (
- result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
-
- step("Verify that originally advertised routes are withdraw from there" " peer.")
- input_dict = {
- "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
- }
- dut = "r1"
- result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
- )
-
- result = verify_rib(
- tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
- )
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
-
write_test_footer(tc_name)
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py
new file mode 100644
index 0000000000..fe8be0a4b3
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_ecmp_lan.py
@@ -0,0 +1,400 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""OSPF Basic Functionality Automation."""
+import os
+import sys
+import time
+import pytest
+import json
+from copy import deepcopy
+from ipaddress import IPv4Address
+from lib.topotest import frr_unicode
+import ipaddress
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ create_static_routes,
+ step,
+ create_route_maps,
+ shutdown_bringup_interface,
+ create_interfaces_cfg,
+ topo_daemons,
+ get_frr_ipv6_linklocal,
+)
+from lib.topolog import logger
+from lib.topojson import build_config_from_json
+
+from lib.ospf import (
+ verify_ospf6_neighbor,
+ config_ospf_interface,
+ clear_ospf,
+ verify_ospf6_rib,
+ create_router_ospf,
+ verify_ospf6_interface,
+ verify_ospf6_database,
+ config_ospf6_interface,
+)
+
+from ipaddress import IPv6Address
+
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+
+# Global variables
+topo = None
+
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": ["2::1/128", "2::2/128", "2::3/128", "2::4/128", "2::5/128"],
+}
+MASK = {"ipv6": "32", "ipv6": "128"}
+
+"""
+TOPOLOGY =
+ Please view in a fixed-width font such as Courier.
+ Topo : Broadcast Networks
+ +---+ +---+ +---+ +---+
+ |R0 + +R1 + +R2 + +R3 |
+ +-+-+ +-+-+ +-+-+ +-+-+
+ | | | |
+ | | | |
+ --+-----------+--------------+---------------+-----
+ Ethernet Segment
+
+TESTCASES =
+1. Verify OSPF ECMP with max path configured as 8
+ (Edge having 1 uplink port as broadcast network,
+ connect to 8 TORs - LAN case)
+
+ """
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ global topo, switch_name
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/ospfv3_ecmp_lan.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # get the list of daemons that need to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, lan=True)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ switch_name = [k for k in topo["switches"].keys()][0]
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+
+ try:
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ except OSError:
+ # OSError is raised when mininet tries to stop a switch that has
+ # already been stopped; ignore the second stop attempt.
+ pass
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+def red_static(dut, config=True):
+ """Local def for Redstribute static routes inside ospf."""
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "static"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf6": {"redistribute": [{"redist_type": "static", "delete": True}]}
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+
+def red_connected(dut, config=True):
+ """Local def for Redstribute connected routes inside ospf."""
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "connected"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf6": {
+ "redistribute": [{"redist_type": "connected", "del_action": True}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase: Failed \n Error: {}".format(result)
+
+
+def get_llip(onrouter, intf):
+ """
+ API to get the link local ipv6 address of a particular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which the link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_llip('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) link local ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ tgen = get_topogen()
+ intf = topo["routers"][onrouter]["links"][intf]["interface"]
+ llip = get_frr_ipv6_linklocal(tgen, onrouter, intf)
+ if llip:
+ logger.info("llip ipv6 address to be set as NH is %s", llip)
+ return llip
+ return None
+
+
+def get_glipv6(onrouter, intf):
+ """
+ API to get the global ipv6 address of a particular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which the global ipv6 address needs to be returned.
+
+ Usage
+ -----
+ result = get_glipv6('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) global ipv6 address from the interface.
+ 2) errormsg - when the global ipv6 address is not found.
+ """
+ glipv6 = (topo["routers"][onrouter]["links"][intf]["ipv6"]).split("/")[0]
+ if glipv6:
+ logger.info("Global ipv6 address to be set as NH is %s", glipv6)
+ return glipv6
+ return None
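+
+
+# Editor's illustrative sketch with assumed sample addresses: the LAN next
+# hops collected below via get_llip() are link-local, which the stdlib
+# ipaddress module can distinguish from the global addresses returned by
+# get_glipv6().
+def _is_link_local_example():
+ from ipaddress import ip_address
+
+ assert ip_address("fe80::1").is_link_local # style of value from get_llip()
+ assert not ip_address("2001:db8:f::1").is_link_local # style of value from get_glipv6()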
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_ospfv3_lan_ecmp_tc18_p0(request):
+ """
+ OSPF ECMP.
+
+ Verify OSPF ECMP with max path configured as 8
+ (Edge having 1 uplink port as broadcast network,
+ connect to 8 TORs - LAN case)
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ step(". Configure ospf in all the routers on LAN interface.")
+
+ reset_config_on_routers(tgen)
+
+ step("Verify that OSPF is up with 8 neighborship sessions.")
+
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, lan=True)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step(
+ "Configure a static route in all the routes and "
+ "redistribute static/connected in OSPF."
+ )
+
+ for rtr in topo["routers"]:
+ input_dict = {
+ rtr: {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 5, "next_hop": "Null0"}
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ dut = rtr
+ red_static(dut)
+
+ step(
+ "Verify that route in R0 in stalled with 8 hops. "
+ "Verify ospf route table and ip route table."
+ )
+
+ nh = []
+ for rtr in topo["routers"]:
+ llip = get_llip(rtr, switch_name)
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ nh.append(llip)
+
+ llip = get_llip("r1", switch_name)
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ nh.remove(llip)
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "ospf6"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(" clear ip ospf interface on DUT(r0)")
+ clear_ospf(tgen, "r0", ospf="ospf6")
+
+ step(
+ "Verify that after clearing the ospf interface all the "
+ "neighbours are up and routes are installed with 8 next hop "
+ "in ospf and ip route tables on R0"
+ )
+
+ dut = "r0"
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut, lan=True)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step(" clear ip ospf interface on R2")
+ clear_ospf(tgen, "r2")
+
+ dut = "r2"
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut, lan=True)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("Delete static/connected cmd in ospf in all the routes one by one.")
+ for rtr in topo["routers"]:
+ input_dict = {
+ rtr: {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ "delete": True,
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py
new file mode 100644
index 0000000000..dc3b915d49
--- /dev/null
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_nssa2.py
@@ -0,0 +1,482 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""OSPF Basic Functionality Automation."""
+import os
+import sys
+import time
+import pytest
+from copy import deepcopy
+import ipaddress
+from lib.ospf import (
+ verify_ospf6_neighbor,
+ config_ospf6_interface,
+ clear_ospf,
+ verify_ospf6_rib,
+ verify_ospf6_interface,
+ verify_ospf6_database,
+ create_router_ospf,
+)
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ clear_bgp_and_verify,
+ verify_bgp_rib,
+)
+from lib.topolog import logger
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ create_static_routes,
+ step,
+ topo_daemons,
+ create_route_maps,
+ shutdown_bringup_interface,
+ create_interfaces_cfg,
+ check_router_status,
+)
+from ipaddress import IPv4Address
+from lib.topolog import logger
+from lib.topojson import build_config_from_json
+
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+
+# Global variables
+topo = None
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": [
+ "2011:0:20::1/128",
+ "2011:0:20::2/128",
+ "2011:0:20::3/128",
+ "2011:0:20::4/128",
+ "2011:0:20::5/128",
+ ],
+}
+"""
+TOPOLOGY =
+ Please view in a fixed-width font such as Courier.
+ +---+ A1 +---+
+ +R1 +------------+R2 |
+ +-+-+- +--++
+ | -- -- |
+ | -- A0 -- |
+ A0| ---- |
+ | ---- | A2
+ | -- -- |
+ | -- -- |
+ +-+-+- +-+-+
+ +R0 +-------------+R3 |
+ +---+ A3 +---+
+
+
+
+TESTCASES =
+1. OSPF Learning - Verify OSPF can learn different types of LSA and
+ process them. [Edge learning different types of LSAs]
+2. Verify that an ospf non-backbone area can be configured as an NSSA area
+3. Verify that an ospf NSSA area DUT is capable of receiving & processing
+ Type7 N2 route.
+"""
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/ospfv3_nssa2.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # get the list of daemons that need to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment."""
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def red_static(dut, config=True):
+ """Local def for Redstribute static routes inside ospf."""
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "static"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf6": {"redistribute": [{"redist_type": "static", "delete": True}]}
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+
+def red_connected(dut, config=True):
+ """Local def for Redstribute connected routes inside ospf."""
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf6": {"redistribute": [{"redist_type": "connected"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf6": {
+ "redistribute": [{"redist_type": "connected", "del_action": True}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase: Failed \n Error: {}".format(result)
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_ospfv3_nssa_tc26_p0(request):
+ """Verify that ospf non back bone area can be configured as NSSA area"""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ step("Configure ospf area 2 on r0 , r1 & r4, make the area 2 as NSSA area")
+
+ reset_config_on_routers(tgen)
+
+ input_dict = {
+ "r2": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 5, "next_hop": "Null0"}
+ ]
+ }
+ }
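+ # no_of_ip tells create_static_routes to generate 5 consecutive host routes
+ # from the base prefix, all pointing to Null0 so they can later be
+ # redistributed into ospf6.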
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Redistribute static route in R2 ospf.")
+ dut = "r2"
+ red_static(dut)
+
+ step("Verify that Type 5 LSA is originated by R2.")
+ dut = "r0"
+ protocol = "ospf6"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Un configure redistribute command in R4")
+ dut = "r2"
+ red_static(dut, config=False)
+
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 1, "routeType": "Network"}
+ ]
+ }
+ }
+
+ step("Configure area 0 on interface of r2 connecting to r1")
+
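+ # The area change is done in two passes: the existing area 0.0.0.2 binding
+ # is removed first ("delete": True), then area 0.0.0.0 is configured, since
+ # each create_interfaces_cfg() call is pushed as a separate config update.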
+ input_dict = {
+ "r2": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r2"]["links"]["r1"]["interface"],
+ "ospf6": {"area": "0.0.0.2"},
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r2": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r2"]["links"]["r1"]["interface"],
+ "ospf6": {"area": "0.0.0.0"},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that ospf neighbor goes down between r2 and r1.")
+ result = verify_ospf6_neighbor(tgen, topo, dut="r2", expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Nbrs are not down. " "Error: {}".format(tc_name, result)
+
+ step("Now configure area 0 on interface of r1 connecting to r2.")
+
+ input_dict = {
+ "r1": {
+ "links": {
+ "r2": {
+ "interface": topo["routers"]["r1"]["links"]["r2"]["interface"],
+ "ospf6": {"area": "0.0.0.2"},
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r1": {
+ "links": {
+ "r2": {
+ "interface": topo["routers"]["r1"]["links"]["r2"]["interface"],
+ "ospf6": {"area": "0.0.0.0"},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that ospf neighbour comes up between r2 and r1.")
+ result = verify_ospf6_neighbor(tgen, topo, dut="r2")
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure area 2 on interface of r2 connecting to r1.")
+
+ input_dict = {
+ "r2": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r2"]["links"]["r1"]["interface"],
+ "ospf6": {"area": "0.0.0.0"},
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r2": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r2"]["links"]["r1"]["interface"],
+ "ospf6": {"area": "0.0.0.2"},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that ospf neighbor goes down between r2 and r1.")
+ result = verify_ospf6_neighbor(tgen, topo, dut="r2", expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Nbrs are not down. " "Error: {}".format(tc_name, result)
+
+ step("Now configure area 2 on interface of r1 connecting to r2.")
+
+ input_dict = {
+ "r1": {
+ "links": {
+ "r2": {
+ "interface": topo["routers"]["r1"]["links"]["r2"]["interface"],
+ "ospf6": {"area": "0.0.0.0"},
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r1": {
+ "links": {
+ "r2": {
+ "interface": topo["routers"]["r1"]["links"]["r2"]["interface"],
+ "ospf6": {"area": "0.0.0.2"},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that ospf neighbour comes up between r2 and r1.")
+ result = verify_ospf6_neighbor(tgen, topo, dut="r2")
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+# As per internal discussion, this testcase is disabled because the
+# translator function is not supported; for more details see PR 2565570.
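+# (The leading "test_" prefix is intentionally dropped below so pytest does
+# not collect and run this function.)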
+def ospfv3_nssa_tc27_p0(request):
+ """
+ OSPF NSSA.
+
+ Verify that an OSPF NSSA area DUT is capable of receiving & processing
+ Type7 N2 routes.
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ step("Configure ospf area 2 on r0 , r1 & r4, make the area 2 as NSSA area")
+
+ reset_config_on_routers(tgen)
+
+ input_dict = {
+ "r2": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 5, "next_hop": "Null0"}
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Redistribute static route in R2 ospf.")
+ dut = "r2"
+ red_static(dut)
+
+ step("Verify that Type 5 LSA is originated by R2.")
+ dut = "r0"
+ protocol = "ospf6"
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Un configure redistribute command in R4")
+ dut = "r2"
+ red_static(dut, config=False)
+
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 1, "routeType": "Network"}
+ ]
+ }
+ }
+
+ dut = "r0"
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py
index 461efbe979..d7cf951c5f 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_routemaps.py
@@ -178,6 +178,312 @@ def teardown_module(mod):
# ##################################
+def test_ospfv3_routemaps_functionality_tc19_p0(request):
+ """
+ OSPF Route map - Verify OSPF route map support functionality.
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step("Create static routes(10.0.20.1/32 and 10.0.20.2/32) in R0")
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_red_r1 = {"r0": {"ospf6": {"redistribute": [{"redist_type": "static"}]}}}
+ result = create_router_ospf(tgen, topo, ospf_red_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r1"
+ lsid = NETWORK["ipv6"][0].split("/")[0]
+ rid = routerids[0]
+
+ protocol = "ospf"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_red_r1 = {
+ "r0": {
+ "ospf6": {"redistribute": [{"redist_type": "static", "del_action": True}]}
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Create prefix-list in R0 to permit the 2011:0:20::1/128 prefix &"
+ " deny the rest"
+ )
+
+ # Create ip prefix list
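+ # seq 10 permits only NETWORK["ipv6"][0]; the "deny any" entry at seq 11
+ # keeps the remaining static prefixes out of the redistribution.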
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {
+ "seqid": 10,
+ "network": NETWORK["ipv6"][0],
+ "action": "permit",
+ },
+ {"seqid": 11, "network": "any", "action": "deny"},
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that prefix-list is created in R0.")
+ result = verify_prefix_lists(tgen, pfx_list)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n, prefix list creation failed. Error: {}".format(
+ tc_name, result
+ )
+
+ # Create route map
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
+ "action": "permit",
+ "match": {"ipv6": {"prefix_lists": "pf_list_1_ipv6"}},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure route map rmap1 and redistribute static routes to"
+ " ospf using route map rmap1"
+ )
+
+ ospf_red_r1 = {
+ "r0": {
+ "ospf6": {
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step("Verify that route map is activated in OSPF.")
+
+ step("Verify that route 10.0.20.1 is allowed and 10.0.20.2 is denied.")
+ dut = "r1"
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 1, "next_hop": "Null0"}
+ ]
+ }
+ }
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r1"
+ lsid = NETWORK["ipv6"][1].split("/")[0]
+ rid = routerids[0]
+
+ step("Change prefix rules to permit 10.0.20.2 and deny 10.0.20.1")
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {
+ "seqid": 10,
+ "network": NETWORK["ipv6"][1],
+ "action": "permit",
+ },
+ {"seqid": 11, "network": "any", "action": "deny"},
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that route 10.0.20.2 is allowed and 10.0.20.1 is denied.")
+ dut = "r1"
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][1], "no_of_ip": 1, "next_hop": "Null0"}
+ ]
+ }
+ }
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 1, "next_hop": "Null0"}
+ ]
+ }
+ }
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ step("Delete and reconfigure prefix list.")
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {
+ "seqid": 10,
+ "network": NETWORK["ipv6"][1],
+ "action": "permit",
+ "delete": True,
+ },
+ {
+ "seqid": 11,
+ "network": "any",
+ "action": "deny",
+ "delete": True,
+ },
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 5, "next_hop": "Null0"}
+ ]
+ }
+ }
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {
+ "seqid": 10,
+ "network": NETWORK["ipv6"][1],
+ "action": "permit",
+ },
+ {"seqid": 11, "network": "any", "action": "deny"},
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that route 10.0.20.2 is allowed and 10.0.20.1 is denied.")
+ dut = "r1"
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][1], "no_of_ip": 1, "next_hop": "Null0"}
+ ]
+ }
+ }
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv6"][0], "no_of_ip": 1, "next_hop": "Null0"}
+ ]
+ }
+ }
+ result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route found in the RIB, Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
def test_ospfv3_routemaps_functionality_tc20_p0(request):
"""
OSPF route map support functionality.
@@ -461,7 +767,7 @@ def test_ospfv3_routemaps_functionality_tc22_p0(request):
{
"action": "permit",
"seq_id": "20",
- "match": {"ipv6": {"prefix_lists": "pf_list_2_ipv4"}},
+ "match": {"ipv6": {"prefix_lists": "pf_list_2_ipv6"}},
},
]
}
@@ -474,7 +780,7 @@ def test_ospfv3_routemaps_functionality_tc22_p0(request):
input_dict_2 = {
"r0": {
"prefix_lists": {
- "ipv4": {
+ "ipv6": {
"pf_list_1_ipv6": [
{"seqid": 10, "network": NETWORK["ipv6"][0], "action": "permit"}
]
@@ -489,8 +795,8 @@ def test_ospfv3_routemaps_functionality_tc22_p0(request):
input_dict_2 = {
"r0": {
"prefix_lists": {
- "ipv4": {
- "pf_list_2_ipv4": [
+ "ipv6": {
+ "pf_list_2_ipv6": [
{"seqid": 10, "network": NETWORK["ipv6"][1], "action": "permit"}
]
}
@@ -567,7 +873,7 @@ def test_ospfv3_routemaps_functionality_tc22_p0(request):
{
"action": "deny",
"seq_id": "20",
- "match": {"ipv6": {"prefix_lists": "pf_list_2_ipv4"}},
+ "match": {"ipv6": {"prefix_lists": "pf_list_2_ipv6"}},
}
]
}
@@ -624,6 +930,106 @@ def test_ospfv3_routemaps_functionality_tc22_p0(request):
write_test_footer(tc_name)
+def test_ospfv3_routemaps_functionality_tc23_p0(request):
+ """
+ OSPF Route map - Multiple set clauses.
+
+ Verify OSPF route map support functionality when we add/remove route-maps
+ with multiple set clauses and without any match statement.(Set only)
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+
+ reset_config_on_routers(tgen)
+
+ step(
+ "Create static routes (2011:0:20::1/128) in R0 and "
+ "redistribute them to OSPF using a route map."
+ )
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_red_r0 = {
+ "r0": {
+ "ospf6": {
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure route map with set clause (set metric)")
+ # Create route map
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {"action": "permit", "seq_id": 10, "set": {"metric": 123}}
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that configured metric is applied to ospf routes.")
+ dut = "r1"
+ protocol = "ospf"
+
+ result = verify_ospf6_rib(tgen, dut, input_dict, metric=123)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, metric=123)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("un configure the set clause")
+ # Create route map
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
+ "action": "permit",
+ "seq_id": 10,
+ "set": {"metric": 123, "delete": True},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that metric falls back to original metric for ospf routes.")
+ dut = "r1"
+ protocol = "ospf"
+
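+ # With the "set metric" clause removed, redistributed routes fall back to
+ # ospf6's default external metric of 20, which is what is verified below.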
+ result = verify_ospf6_rib(tgen, dut, input_dict, metric=20)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol, metric=20)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
def test_ospfv3_routemaps_functionality_tc24_p0(request):
"""
OSPF Route map - Multiple set clauses.
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
index d8f659e5a9..21d03fadfb 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_rte_calc.py
@@ -48,17 +48,21 @@ from lib.common_config import (
create_interfaces_cfg,
topo_daemons,
get_frr_ipv6_linklocal,
+ check_router_status,
+ create_static_routes,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
-
+from lib.bgp import create_router_bgp, verify_bgp_convergence
from lib.ospf import (
verify_ospf6_neighbor,
+ clear_ospf,
verify_ospf6_rib,
+ verify_ospf_database,
create_router_ospf,
- verify_ospf6_interface,
config_ospf6_interface,
+ verify_ospf6_interface,
)
@@ -251,6 +255,8 @@ def red_connected(dut, config=True):
# ##################################
# Test cases start here.
# ##################################
+
+
def test_ospfv3_redistribution_tc5_p0(request):
"""Test OSPF intra area route calculations."""
tc_name = request.node.name
@@ -259,7 +265,7 @@ def test_ospfv3_redistribution_tc5_p0(request):
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
+ check_router_status(tgen)
global topo
step("Bring up the base config.")
@@ -280,7 +286,11 @@ def test_ospfv3_redistribution_tc5_p0(request):
nh = llip
input_dict = {
- "r1": {"static_routes": [{"network": ip_net, "no_of_ip": 1, "routeType": "N"}]}
+ "r1": {
+ "static_routes": [
+ {"network": ip_net, "no_of_ip": 1, "routeType": "Network"}
+ ]
+ }
}
dut = "r1"
@@ -372,7 +382,7 @@ def test_ospfv3_redistribution_tc6_p0(request):
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
+ check_router_status(tgen)
global topo
step("Bring up the base config.")
@@ -380,8 +390,8 @@ def test_ospfv3_redistribution_tc6_p0(request):
step("Verify that OSPF neighbors are FULL.")
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
- ospf_covergence
+ assert ospf_covergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, ospf_covergence
)
step("verify intra area route is calculated for r0-r3 interface ip in R1")
@@ -391,7 +401,11 @@ def test_ospfv3_redistribution_tc6_p0(request):
assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, llip)
nh = llip
input_dict = {
- "r1": {"static_routes": [{"network": ip_net, "no_of_ip": 1, "routeType": "N"}]}
+ "r1": {
+ "static_routes": [
+ {"network": ip_net, "no_of_ip": 1, "routeType": "Network"}
+ ]
+ }
}
dut = "r1"
@@ -460,9 +474,6 @@ def test_ospfv3_redistribution_tc6_p0(request):
intf = topo["routers"]["r0"]["links"]["r3"]["interface"]
shutdown_bringup_interface(tgen, dut, intf, False)
- step("Verify that intraroute calculated for R1 intf on R0 is deleted.")
- dut = "r1"
-
step("un shut the OSPF interface on R0")
dut = "r0"
shutdown_bringup_interface(tgen, dut, intf, True)
@@ -478,6 +489,168 @@ def test_ospfv3_redistribution_tc6_p0(request):
write_test_footer(tc_name)
+def test_ospfv3_redistribution_tc8_p1(request):
+ """
+ Test OSPF redistribution of connected routes.
+
+ Verify OSPF redistribution of connected routes when bgp multi hop
+ neighbor is configured using ospf routes
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config.")
+ step(
+ "Configure loopback interface on all routers, and "
+ "redistribute connected routes into ospf"
+ )
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ step(
+ "verify that connected routes - loopback - are found in all routers, "
+ "advertised/exchanged via ospf"
+ )
+ for rtr in topo["routers"]:
+ red_static(rtr)
+ red_connected(rtr)
+
+ for node in topo["routers"]:
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": topo["routers"][node]["links"]["lo"]["ipv6"],
+ "no_of_ip": 1,
+ }
+ ]
+ }
+ }
+ for rtr in topo["routers"]:
+ result = verify_rib(tgen, "ipv6", rtr, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure E BGP multi hop using the loopback addresses.")
+ as_num = 100
+ for node in topo["routers"]:
+ as_num += 1
+ topo["routers"][node].update(
+ {
+ "bgp": {
+ "local_as": as_num,
+ "address_family": {"ipv6": {"unicast": {"neighbor": {}}}},
+ }
+ }
+ )
+ for node in topo["routers"]:
+ for rtr in topo["routers"]:
+ if node is not rtr:
+ topo["routers"][node]["bgp"]["address_family"]["ipv6"]["unicast"][
+ "neighbor"
+ ].update(
+ {
+ rtr: {
+ "dest_link": {
+ "lo": {"source_link": "lo", "ebgp_multihop": 2}
+ }
+ }
+ }
+ )
+
+ result = create_router_bgp(tgen, topo, topo["routers"])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Modify router id
+ input_dict = {
+ "r0": {"bgp": {"router_id": "11.11.11.11"}},
+ "r1": {"bgp": {"router_id": "22.22.22.22"}},
+ "r2": {"bgp": {"router_id": "33.33.33.33"}},
+ "r3": {"bgp": {"router_id": "44.44.44.44"}},
+ }
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that BGP neighbor is ESTABLISHED")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ step(
+ "Configure couple of static routes in R0 and "
+ "Redistribute static routes in R1 bgp."
+ )
+
+ for rtr in topo["routers"]:
+ ospf_red = {
+ rtr: {
+ "ospf6": {"redistribute": [{"redist_type": "static", "delete": True}]}
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r0 = {
+ "r0": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}}
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ protocol = "bgp"
+ for rtr in ["r1", "r2", "r3"]:
+ result = verify_rib(tgen, "ipv6", rtr, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Clear ospf neighbours in R0")
+ for rtr in topo["routers"]:
+ clear_ospf(tgen, rtr)
+
+ step("Verify that OSPF neighbours are reset and forms new adjacencies.")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("Verify that BGP neighbours are reset and forms new adjacencies.")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ protocol = "bgp"
+ for rtr in ["r1", "r2", "r3"]:
+ result = verify_rib(tgen, "ipv6", rtr, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
def test_ospfv3_cost_tc52_p0(request):
"""OSPF Cost - verifying ospf interface cost functionality"""
tc_name = request.node.name
@@ -485,6 +658,8 @@ def test_ospfv3_cost_tc52_p0(request):
tgen = get_topogen()
global topo
step("Bring up the base config.")
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
reset_config_on_routers(tgen)
step(
@@ -565,6 +740,184 @@ def test_ospfv3_cost_tc52_p0(request):
write_test_footer(tc_name)
+def test_ospfv3_def_rte_tc9_p0(request):
+ """OSPF default route - Verify OSPF default route origination."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config.")
+ step("Configure OSPF on all the routers of the topology.")
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ step(" Configure default-information originate always on R0.")
+ input_dict = {"r0": {"ospf6": {"default-information": {"originate": True}}}}
+ result = create_router_ospf(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
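+ # "originate" alone advertises a default route only if one already exists
+ # in the RIB; the "always" knob added below originates it unconditionally.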
+
+ dut = "r0"
+ step(" Configure default-information originate always on R0.")
+ input_dict = {
+ "r0": {
+ "ospf6": {
+ "default-information": {
+ "originate": True,
+ "always": True,
+ }
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that default route is originated in area always.")
+ dut = "r1"
+
+ step(" Configure default-information originate metric type 1 on R0.")
+ input_dict = {
+ "r0": {
+ "ospf6": {
+ "default-information": {
+ "originate": True,
+ "always": True,
+ "metric-type": 1,
+ }
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that default route is originated in area when external "
+ "routes are present in R0 with metric type as 1."
+ )
+ dut = "r0"
+ step(
+ "Verify that on R1 default route with type 1 is installed"
+ " (R1 is DUT in this case)"
+ )
+ dut = "r1"
+ step("Configure default-information originate metric type 2 on R0.")
+ input_dict = {
+ "r0": {
+ "ospf6": {
+ "default-information": {
+ "originate": True,
+ "always": True,
+ "metric-type": 2,
+ }
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that default route is originated in area when external"
+ " routes are present in R0 with metric type as 2."
+ )
+
+ dut = "r1"
+ step(" Configure default-information originate metric 100 on R0")
+ input_dict = {
+ "r0": {
+ "ospf6": {
+ "default-information": {
+ "originate": True,
+ "always": True,
+ "metric-type": 2,
+ "metric": 100,
+ }
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that default route is originated with cost as 100 on R0.")
+
+ dut = "r1"
+
+ step("Delete the default-information command")
+ input_dict = {
+ "r0": {
+ "ospf6": {
+ "default-information": {
+ "originate": True,
+ "always": True,
+ "delete": True,
+ }
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ step("Configure default-information originate always on R0.")
+ input_dict = {
+ "r0": {
+ "ospf6": {
+ "default-information": {
+ "originate": True,
+ "always": True,
+ }
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure default route originate with active def route in zebra")
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": "0::0/0",
+ "no_of_ip": 1,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {
+ "ospf6": {
+ "default-information": {
+ "originate": True,
+ }
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that default route is originated by R0.")
+ dut = "r1"
+
+ step("Delete static route")
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": "0::0/0",
+ "no_of_ip": 1,
+ "next_hop": "Null0",
+ "delete": True,
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
index ed70c09fae..9ec06ec36b 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_single_area.py
@@ -26,6 +26,8 @@ import os
import sys
import time
import pytest
+import ipaddress
+
from copy import deepcopy
from lib.topotest import frr_unicode
@@ -47,6 +49,8 @@ from lib.common_config import (
step,
create_interfaces_cfg,
topo_daemons,
+ create_debug_log_config,
+ apply_raw_config,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
@@ -55,6 +59,9 @@ from lib.ospf import (
verify_ospf6_neighbor,
clear_ospf,
verify_ospf6_interface,
+ create_router_ospf,
+ config_ospf6_interface,
+ verify_ospf6_rib,
)
from ipaddress import IPv6Address
@@ -381,6 +388,956 @@ def test_ospfv3_p2p_tc3_p0(request):
write_test_footer(tc_name)
+def test_ospfv3_hello_tc10_p0(request):
+ """
+ OSPF timers.
+
+ Verify OSPF interface timer hello interval functionality
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step("modify hello timer from default value to some other value on r1")
+
+ topo1 = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"hello_interval": 11, "dead_interval": 12},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "verify that new timer value is configured and applied using "
+ "the show ip ospf interface command."
+ )
+ dut = "r1"
+ input_dict = {
+ "r1": {
+ "links": {
+ "r0": {
+ "ospf6": {
+ "timerIntervalsConfigHello": 11,
+ "timerIntervalsConfigDead": 12,
+ }
+ }
+ }
+ }
+ }
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("modify hello timer from default value to r1 hello timer on r2")
+
+ topo1 = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"hello_interval": 11, "dead_interval": 12},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that new timer value is configured.")
+ input_dict = {
+ "r0": {
+ "links": {
+ "r1": {
+ "ospf6": {
+ "timerIntervalsConfigHello": 11,
+ "timerIntervalsConfigDead": 12,
+ }
+ }
+ }
+ }
+ }
+ dut = "r0"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that ospf neighbours are full")
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("reconfigure the default hello timer value to default on r1 and r2")
+
+ topo1 = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"hello_interval": 10, "dead_interval": 40},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ topo1 = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"hello_interval": 10, "dead_interval": 40},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that new timer value is configured.")
+ input_dict = {
+ "r0": {
+ "links": {
+ "r1": {
+ "ospf6": {
+ "timerIntervalsConfigHello": 10,
+ "timerIntervalsConfigDead": 40,
+ }
+ }
+ }
+ }
+ }
+ dut = "r0"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that ospf neighbours are full")
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("reconfigure the default hello timer value to default on r1 and r2")
+
+ topo1 = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"hello_interval": 10, "dead_interval": 40},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ topo1 = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"hello_interval": 10, "dead_interval": 40},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that new timer value is configured.")
+ input_dict = {
+ "r0": {
+ "links": {
+ "r1": {
+ "ospf6": {
+ "timerIntervalsConfigHello": 10,
+ "timerIntervalsConfigDead": 40,
+ }
+ }
+ }
+ }
+ }
+ dut = "r0"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that ospf neighbours are full")
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("configure hello timer = 1 on r1 and r2")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"hello_interval": 1, "dead_interval": 4},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ topo1 = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"hello_interval": 1, "dead_interval": 4},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that new timer value is configured.")
+ input_dict = {
+ "r0": {
+ "links": {
+ "r1": {
+ "ospf6": {
+ "timerIntervalsConfigHello": 1,
+ "timerIntervalsConfigDead": 4,
+ }
+ }
+ }
+ }
+ }
+ dut = "r0"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that ospf neighbours are full")
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step(" Configure hello timer = 65535")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"hello_interval": 65535, "dead_interval": 4},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ topo1 = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"hello_interval": 65535, "dead_interval": 4},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that new timer value is configured.")
+ input_dict = {
+ "r0": {
+ "links": {
+ "r1": {
+ "ospf6": {
+ "timerIntervalsConfigHello": 65535,
+ "timerIntervalsConfigDead": 4,
+ }
+ }
+ }
+ }
+ }
+ dut = "r0"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that ospf neighbours are full")
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, ospf_covergence
+ )
+ step(" Try configuring timer values outside range for example 65536")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"hello_interval": 65536, "dead_interval": 4},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Out-of-range hello-interval was accepted. Error: {}".format(
+ tc_name, result
+ )
+
+ step("Unconfigure the hello timer from the interface from r1 and r2.")
+
+ topo1 = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"hello_interval": 65535},
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that timer value is deleted from intf & " "set to default value 10 sec."
+ )
+ input_dict = {"r1": {"links": {"r0": {"ospf6": {"timerIntervalsConfigHello": 10}}}}}
+ dut = "r1"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_dead_tc11_p0(request):
+ """
+ OSPF timers.
+
+ Verify OSPF interface timer dead interval functionality
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step("modify dead interval from default value to some other value on r1")
+
+ topo1 = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"hello_interval": 12, "dead_interval": 48},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "verify that new timer value is configured and applied using "
+ "the show ip ospf interface command."
+ )
+ dut = "r1"
+ input_dict = {"r1": {"links": {"r0": {"ospf6": {"timerIntervalsConfigDead": 48}}}}}
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("modify dead interval from default value to r1" "dead interval timer on r2")
+
+ topo1 = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"dead_interval": 48, "hello_interval": 12},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that new timer value is configured.")
+ input_dict = {"r0": {"links": {"r1": {"ospf6": {"timerIntervalsConfigDead": 48}}}}}
+ dut = "r0"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that ospf neighbours are full")
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, ospf_covergence
+ )
+
+ step("remove ospf on R0")
+ ospf_del = {"r0": {"ospf6": {"delete": True}}}
+ result = create_router_ospf(tgen, topo, ospf_del)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+ # reconfiguring deleted ospf process by resetting the configs.
+ reset_config_on_routers(tgen)
+
+ step("reconfigure the default dead interval timer value to " "default on r1 and r2")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"dead_interval": 40},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ topo1 = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"dead_interval": 40},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that new timer value is configured.")
+ input_dict = {"r0": {"links": {"r1": {"ospf6": {"timerIntervalsConfigDead": 40}}}}}
+ dut = "r0"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that ospf neighbours are full")
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, ospf_covergence
+ )
+
+ step(" Configure dead timer = 65535 on r1 and r2")
+
+ topo1 = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"dead_interval": 65535},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ topo1 = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"dead_interval": 65535},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that new timer value is configured.")
+ input_dict = {
+ "r0": {"links": {"r1": {"ospf6": {"timerIntervalsConfigDead": 65535}}}}
+ }
+ dut = "r0"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("verify that ospf neighbours are full")
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
+ assert ospf_covergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, ospf_covergence
+ )
+
+ step(" Try configuring timer values outside range for example 65536")
+ topo1 = {
+ "r0": {
+ "links": {
+ "r1": {
+ "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+ "ospf6": {"dead_interval": 65536},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Out-of-range dead-interval was accepted. Error: {}".format(
+ tc_name, result
+ )
+
+ step("Unconfigure the dead timer from the interface from r1 and r2.")
+
+ topo1 = {
+ "r1": {
+ "links": {
+ "r0": {
+ "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+ "ospf6": {"dead_interval": 65535},
+ "delete": True,
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, topo1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that timer value is deleted from intf & " "set to default value 40 sec."
+ )
+ input_dict = {"r1": {"links": {"r0": {"ospf6": {"timerIntervalsConfigDead": 40}}}}}
+ dut = "r1"
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_tc4_mtu_ignore_p0(request):
+ """
+ OSPF NFSM - MTU change
+
+ Verify NFSM events when ospf nbr changes with different MTU values
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step(" Bring up the base config as per the topology")
+ step("Configure OSPF on all the routers of the topology.")
+ step("Verify that OSPF neighbors are FULL.")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Modify the MTU to non default Value on R0 to R1 interface. "
+ "Reset ospf neighbors on R0."
+ )
+
+ rtr0 = tgen.routers()["r0"]
+ rtr1 = tgen.routers()["r1"]
+
+ r0_r1_intf = topo["routers"]["r0"]["links"]["r1"]["interface"]
+ r1_r0_intf = topo["routers"]["r1"]["links"]["r0"]["interface"]
+
+ rtr0.run("ifconfig {} mtu 1400".format(r0_r1_intf))
+
+ clear_ospf(tgen, "r0", ospf="ospf6")
+ clear_ospf(tgen, "r1", ospf="ospf6")
+
+ step(
+ "Verify that OSPF neighborship between R0 and R1 is stuck in Exstart" " State."
+ )
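+ # With mismatched interface MTUs the DD packet exchange is rejected, so the
+ # adjacency is expected to hang in ExStart instead of reaching Full.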
+ result = verify_ospf6_neighbor(tgen, topo, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \n OSPF nbrs are Full "
+ "instead of Exstart. Error: {}".format(tc_name, result)
+ )
+
+ step(
+ "Verify that configured MTU value is updated in the show ip " "ospf interface."
+ )
+
+ dut = "r0"
+ input_dict = {"r0": {"links": {"r1": {"ospf6": {"interfaceMtu": 1400}}}}}
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Modify the MTU to non default Value on R0 to R1 interface. "
+ "Reset ospf neighbors on R0."
+ )
+ rtr0.run("ifconfig {} mtu 1500".format(r0_r1_intf))
+
+ clear_ospf(tgen, "r0", ospf="ospf6")
+
+ step("Verify that OSPF neighborship between R0 and R1 becomes full.")
+ result = verify_ospf6_neighbor(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure mtu ignore and change the value of the mtu to non default"
+ " on R0 to R1 interface. Reset ospf neighbors on R0."
+ )
+ r0_ospf_mtu = {"r0": {"links": {"r1": {"ospf6": {"mtu_ignore": True}}}}}
+ result = config_ospf6_interface(tgen, topo, r0_ospf_mtu)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ input_dict = {"r0": {"links": {"r1": {"ospf6": {"mtuMismatchDetection": True}}}}}
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ r1_ospf_mtu = {"r1": {"links": {"r0": {"ospf6": {"mtu_ignore": True}}}}}
+ result = config_ospf6_interface(tgen, topo, r1_ospf_mtu)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ rtr0.run("ifconfig {} mtu 1400".format(r0_r1_intf))
+
+ clear_ospf(tgen, "r0", ospf="ospf6")
+
+ step("Verify that OSPF neighborship between R0 and R1 becomes full.")
+ result = verify_ospf6_neighbor(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Unconfigure mtu-ignore command from the interface. "
+ "Reset ospf neighbors on R0."
+ )
+
+ r1_ospf_mtu = {
+ "r1": {"links": {"r0": {"ospf6": {"mtu_ignore": True, "delete": True}}}}
+ }
+ result = config_ospf6_interface(tgen, topo, r1_ospf_mtu)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ clear_ospf(tgen, "r0", ospf="ospf6")
+
+ step(
+ "Verify that OSPF neighborship between R0 and R1 is stuck in Exstart" " State."
+ )
+ result = verify_ospf6_neighbor(tgen, topo, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \n OSPF nbrs are Full "
+ "instead of Exstart. Error: {}".format(tc_name, result)
+ )
+
+ step("Modify the MTU to again default valaue on R0 to R1 interface.")
+
+ rtr0.run("ifconfig {} mtu 1500".format(r0_r1_intf))
+
+ clear_ospf(tgen, "r0", ospf="ospf6")
+
+ step("Verify that OSPF neighborship between R0 and R1 becomes full.")
+ result = verify_ospf6_neighbor(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure ospf interface with jumbo MTU (9216)." "Reset ospf neighbors on R0."
+ )
+
+ rtr0.run("ifconfig {} mtu 9216".format(r0_r1_intf))
+ rtr1.run("ifconfig {} mtu 9216".format(r1_r0_intf))
+
+ clear_ospf(tgen, "r0", ospf="ospf6")
+ clear_ospf(tgen, "r1", ospf="ospf6")
+
+ step("Verify that OSPF neighborship between R0 and R1 becomes full.")
+ result = verify_ospf6_neighbor(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that jumbo MTU is updated in the show ip ospf interface.")
+ dut = "r0"
+ input_dict = {"r0": {"links": {"r1": {"ospf6": {"interfaceMtu": 9216}}}}}
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ospfv3_show_p1(request):
+ """Verify ospf show commands with json output."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ input_dict = {"r2": {"debug": {"log_file": "debug.log", "enable": ["ospf6"]}}}
+
+ result = create_debug_log_config(tgen, input_dict)
+
+ # Code coverage steps #Do Not upstream
+ input_dict_config = {
+ "r1": {
+ "raw_config": [
+ "end",
+ "debug ospf6 event",
+ "debug ospf6 gr helper",
+ "debug ospf6 ism events",
+ "debug ospf6 ism status",
+ "debug ospf6 ism timers",
+ "debug ospf6 nsm events",
+ "debug ospf6 nsm status",
+ "debug ospf6 nsm timers ",
+ "debug ospf6 nssa",
+ "debug ospf6 lsa aggregate",
+ "debug ospf6 lsa flooding ",
+ "debug ospf6 lsa generate",
+ "debug ospf6 lsa install ",
+ "debug ospf6 lsa refresh",
+ "debug ospf6 packet all detail",
+ "debug ospf6 packet all recv",
+ "debug ospf6 packet all send",
+ "debug ospf6 packet dd detail",
+ "debug ospf6 packet dd recv",
+ "debug ospf6 packet dd send ",
+ "debug ospf6 packet hello detail",
+ "debug ospf6 packet hello recv",
+ "debug ospf6 packet hello send",
+ "debug ospf6 packet ls-ack detail",
+ "debug ospf6 packet ls-ack recv",
+ "debug ospf6 packet ls-ack send",
+ "debug ospf6 packet ls-request detail",
+ "debug ospf6 packet ls-request recv",
+ "debug ospf6 packet ls-request send",
+ "debug ospf6 packet ls-update detail",
+ "debug ospf6 packet ls-update recv",
+ "debug ospf6 packet ls-update send",
+ "debug ospf6 sr",
+ "debug ospf6 te ",
+ "debug ospf6 zebra interface",
+ "debug ospf6 zebra redistribute",
+ ]
+ }
+ }
+
+ apply_raw_config(tgen, input_dict_config)
+
+ for rtr in topo["routers"]:
+ clear_ospf(tgen, rtr, ospf="ospf6")
+
+ step(" Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ for rtr in topo["routers"]:
+ clear_ospf(tgen, rtr, ospf="ospf6")
+
+ dut = "r1"
+ input_dict = {
+ "r1": {
+ "links": {
+ "r0": {
+ "ospf6": {
+ "status": "up",
+ "type": "BROADCAST",
+ "ospf6Enabled": True,
+ "attachedToArea": True,
+ "instanceId": 0,
+ "interfaceMtu": 1500,
+ "autoDetect": 1500,
+ "mtuMismatchDetection": "enabled",
+ "areaId": "0.0.0.0",
+ "cost": 10,
+ "transmitDelaySec": 1,
+ "priority": 1,
+ "timerIntervalsConfigHello": 1,
+ "timerIntervalsConfigDead": 4,
+ "timerIntervalsConfigRetransmit": 5,
+ "dr": "0.0.0.0",
+ "bdr": "0.0.0.0",
+ "numberOfInterfaceScopedLsa": 2,
+ "pendingLsaLsUpdateCount": 0,
+ "lsUpdateSendThread": "off",
+ "pendingLsaLsAckCount": 0,
+ "lsAckSendThread": "off",
+ }
+ }
+ }
+ }
+ }
+ result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ip = topo["routers"]["r0"]["links"]["r3"]["ipv6"]
+ ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+ nh = topo["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {"network": ip_net, "no_of_ip": 1, "routeType": "Network"}
+ ]
+ }
+ }
+
+ dut = "r1"
+ result = verify_ospf6_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def ospfv3_router_id_tc14_p2(request):
+ """OSPF Router ID - Verify OSPF router id changes."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step(" Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step("Configure system router id as 1.1.1.1 on R1 , clear ospf router")
+ ospf_rid = {"r0": {"ospf6": {"router_id": "1.1.1.1"}}}
+ result = create_router_ospf(tgen, topo, ospf_rid)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+ step("configure ospf router id as 1.1.1.2 on R1, clear ospf router")
+ ospf_rid = {"r1": {"ospf6": {"router_id": "1.1.1.2"}}}
+ result = create_router_ospf(tgen, topo, ospf_rid)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+ topo1 = deepcopy(topo)
+ step("Verify that OSPF takes system router ID as ospf router id.")
+
+ topo1["routers"]["r0"]["ospf6"]["router_id"] = "1.1.1.1"
+ topo1["routers"]["r1"]["ospf6"]["router_id"] = "1.1.1.2"
+
+ for rtr in topo["routers"]:
+ clear_ospf(tgen, rtr, ospf="ospf6")
+
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo1)
+ assert ospf_covergence is True, "OSPF NBRs not up.Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step(" delete ospf router id and clear ospf process.")
+ ospf_rid = {"r0": {"ospf6": {"del_router_id": "1.1.1.1"}}}
+ result = create_router_ospf(tgen, topo, ospf_rid)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_rid = {"r1": {"ospf6": {"del_router_id": "1.1.1.2"}}}
+ result = create_router_ospf(tgen, topo, ospf_rid)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ reset_config_on_routers(tgen)
+
+ step(" Configure R0 and R1 with the same router id")
+ ospf_rid = {"r0": {"ospf6": {"router_id": "1.1.1.1"}}}
+ result = create_router_ospf(tgen, topo, ospf_rid)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure ospf router id as 1.1.1.1 on R1, duplicating R0's router id")
+ ospf_rid = {"r1": {"ospf6": {"router_id": "1.1.1.1"}}}
+ result = create_router_ospf(tgen, topo, ospf_rid)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo, expected=False)
+ assert (
+ ospf_covergence is not True
+ ), "OSPF NBRs came up with duplicate router IDs. Failed \n Error: {}".format(ospf_covergence)
+ topo1 = {}
+ topo1 = deepcopy(topo)
+
+ for rtr in ["r1", "r2", "r3", "r0"]:
+ topo1["routers"][rtr]["ospf6"].pop("router_id")
+
+ build_config_from_json(tgen, topo1, save_bkup=False)
+
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is not True, (
+ "Testcase {} : Failed \n Neighborship should not come up "
+ "as no router id is configured. Error: {}".format(tc_name, ospf_covergence)
+ )
+
+ step("Clear ospf process and check nbrs should not be up.")
+ for rtr in topo["routers"]:
+ clear_ospf(tgen, rtr, ospf="ospf6")
+
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is not True, (
+ "Testcase {} : Failed \n Neighborship should not come up "
+ "as no router id is configured. Error: {}".format(tc_name, ospf_covergence)
+ )
+
+ topo1 = deepcopy(topo)
+
+ step("Configure system router id on routers , clear ospf router")
+ ospf_rid = {
+ "r0": {"ospf6": {"router_id": "1.1.1.1"}},
+ "r1": {"ospf6": {"router_id": "1.1.1.2"}},
+ "r2": {"ospf6": {"router_id": "1.1.1.3"}},
+ "r3": {"ospf6": {"router_id": "1.1.1.4"}},
+ }
+ result = create_router_ospf(tgen, topo1, ospf_rid)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ topo1["routers"]["r0"]["ospf6"]["router_id"] = "1.1.1.1"
+ topo1["routers"]["r1"]["ospf6"]["router_id"] = "1.1.1.2"
+ topo1["routers"]["r2"]["ospf6"]["router_id"] = "1.1.1.3"
+ topo1["routers"]["r3"]["ospf6"]["router_id"] = "1.1.1.4"
+
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo1)
+ assert ospf_covergence is True, "OSPF NBRs not up. Failed \n Error: {}".format(
+ ospf_covergence
+ )
+
+ step(" Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ ospf_covergence = verify_ospf6_neighbor(tgen, topo)
+ assert ospf_covergence is True, "OSPF NBRs not up. Failed \n Error: {}".format(
+ ospf_covergence
+ )
+
+ write_test_footer(tc_name)
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/zebra_rib/test_zebra_rib.py b/tests/topotests/zebra_rib/test_zebra_rib.py
index ae891d9067..b72691b71e 100644
--- a/tests/topotests/zebra_rib/test_zebra_rib.py
+++ b/tests/topotests/zebra_rib/test_zebra_rib.py
@@ -31,6 +31,7 @@ import sys
from functools import partial
import pytest
import json
+import platform
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
@@ -45,6 +46,20 @@ from time import sleep
pytestmark = [pytest.mark.sharpd]
+krel = platform.release()
+
+
+def config_macvlan(tgen, r_str, device, macvlan):
+ "Creates specified macvlan interface on physical device"
+
+ if topotest.version_cmp(krel, "5.1") < 0:
+ return
+
+ router = tgen.gears[r_str]
+ router.run(
+ "ip link add {} link {} type macvlan mode bridge".format(macvlan, device)
+ )
+ router.run("ip link set {} up".format(macvlan))
def setup_module(mod):
@@ -62,6 +77,8 @@ def setup_module(mod):
TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
)
+ # Macvlan interface for protodown functionality test
+ config_macvlan(tgen, "r1", "r1-eth0", "r1-eth0-macvlan")
# Initialize all routers.
tgen.start_router()
@@ -269,6 +286,46 @@ def test_route_map_usage():
assert ok, result
+def test_protodown():
+ "Run protodown basic functionality test and report results."
+ pdown = False
+ count = 0
+ tgen = get_topogen()
+ if topotest.version_cmp(krel, "5.1") < 0:
+ tgen.errors = "kernel 5.1 needed for protodown tests"
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ # Set interface protodown on
+ r1.vtysh_cmd("sharp interface r1-eth0-macvlan protodown")
+
+ # Timeout to wait for dplane to handle it
+ while count < 10:
+ count += 1
+ output = r1.vtysh_cmd("show interface r1-eth0-macvlan")
+ if re.search(r"protodown reasons:.*sharp", output):
+ pdown = True
+ break
+ sleep(1)
+
+ assert pdown is True, "Interface r1-eth0-macvlan not set protodown"
+
+ # Set interface protodown off
+ r1.vtysh_cmd("no sharp interface r1-eth0-macvlan protodown")
+
+ # Timeout to wait for dplane to handle it
+ count = 0
+ while count < 10:
+ count += 1
+ output = r1.vtysh_cmd("show interface r1-eth0-macvlan")
+ if not re.search(r"protodown reasons:.*sharp", output):
+ pdown = False
+ break
+ sleep(1)
+
+ assert pdown is False, "Interface r1-eth0-macvlan not set protodown off"
+
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
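
For illustration (not part of the change): the regexes in test_protodown() above match the new "protodown reasons:" line that zebra's "show interface" prints once a reason bit is set, as added in zebra/interface.c further down in this diff; the relevant output looks roughly like:

      protodown: on
      protodown reasons: (sharp)
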
diff --git a/tools/coccinelle/json_object_add_camel_case.cocci b/tools/coccinelle/json_object_add_camel_case.cocci
new file mode 100644
index 0000000000..279ba213ac
--- /dev/null
+++ b/tools/coccinelle/json_object_add_camel_case.cocci
@@ -0,0 +1,19 @@
+// Catch whitespace and capitalized first letters in JSON keys
+
+@r@
+identifier json;
+constant key;
+identifier func =~ "json_object_";
+position p;
+@@
+
+func(json, key, ...)@p
+
+@script:python@
+fmt << r.key;
+p << r.p;
+@@
+if " " in str(fmt):
+ print("Whitespace detected in JSON keys %s:%s:%s:%s" % (p[0].file, p[0].line, p[0].column, fmt))
+if str(fmt)[1].isupper():
+ print("Capital first detected in JSON keys %s:%s:%s:%s" % (p[0].file, p[0].line, p[0].column, fmt))
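
For illustration (not part of the change): the two Python checks above flag JSON keys that contain whitespace or start with a capital letter; the matched constant still carries its C quotes, which is why index 1 rather than 0 is tested. Calls the rule would and would not report, using FRR's json_object_int_add() helper:

    json_object_int_add(json, "route count", 42); /* reported: whitespace in key */
    json_object_int_add(json, "RouteCount", 42);  /* reported: capitalized first letter */
    json_object_int_add(json, "routeCount", 42);  /* passes: camelCase key */
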
diff --git a/tools/frrcommon.sh.in b/tools/frrcommon.sh.in
index 7cde7a119e..d95f2d4be7 100644
--- a/tools/frrcommon.sh.in
+++ b/tools/frrcommon.sh.in
@@ -191,7 +191,7 @@ daemon_stop() {
[ -z "$fail" -a -z "$pid" ] && fail="pid file is empty"
[ -n "$fail" ] || kill -0 "$pid" 2>/dev/null || fail="pid $pid not running"
- if [ -n "$fail" ]; then
+ if [ -n "$fail" ] && [ "$2" != "--quiet" ]; then
log_failure_msg "Cannot stop $dmninst: $fail"
return 1
fi
@@ -262,7 +262,7 @@ all_stop() {
done
for dmninst in $reversed; do
- daemon_stop "$dmninst" &
+ daemon_stop "$dmninst" "$1" &
pids="$pids $!"
done
for pid in $pids; do
@@ -350,7 +350,7 @@ frrcommon_main() {
start) all_start;;
stop) all_stop;;
restart)
- all_stop
+ all_stop --quiet
all_start
;;
*) $cmd "$@";;
diff --git a/tools/frrinit.sh.in b/tools/frrinit.sh.in
index e41f2706e0..df5f0853da 100644
--- a/tools/frrinit.sh.in
+++ b/tools/frrinit.sh.in
@@ -77,7 +77,7 @@ reload)
# systemd doesn't set WATCHDOG_USEC for reload commands.
watchfrr_pidfile="$V_PATH/watchfrr.pid"
watchfrr_pid="`cat \"$watchfrr_pidfile\"`"
- watchfrr_cmdline="`strings /proc/$watchfrr_pid/cmdline`"
+ watchfrr_cmdline="`tr '\0' '\n' < /proc/$watchfrr_pid/cmdline`"
if [ -d "/proc/$watchfrr_pid" ]; then
wdt="`tr '\0' '\n' < /proc/$watchfrr_pid/environ | grep '^WATCHDOG_USEC='`"
wdt="${wdt#WATCHDOG_USEC=}"
diff --git a/vrrpd/Makefile b/vrrpd/Makefile
index 027c6ee1f8..0abb1a6381 100644
--- a/vrrpd/Makefile
+++ b/vrrpd/Makefile
@@ -1,7 +1,7 @@
all: ALWAYS
- @$(MAKE) -s -C .. vrrp/vrrp
+ @$(MAKE) -s -C .. vrrpd/vrrpd
%: ALWAYS
- @$(MAKE) -s -C .. vrrp/$@
+ @$(MAKE) -s -C .. vrrpd/$@
Makefile:
#nothing
diff --git a/vtysh/extract.pl.in b/vtysh/extract.pl.in
index d940e03e1c..228a136b71 100755
--- a/vtysh/extract.pl.in
+++ b/vtysh/extract.pl.in
@@ -116,9 +116,9 @@ sub scan_file {
}
elsif ($file =~ /lib\/plist\.c$/) {
if ($defun_array[1] =~ m/ipv6/) {
- $protocol = "VTYSH_RIPNGD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ZEBRA|VTYSH_BABELD|VTYSH_ISISD|VTYSH_FABRICD";
+ $protocol = "VTYSH_RIPNGD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ZEBRA|VTYSH_PIM6D|VTYSH_BABELD|VTYSH_ISISD|VTYSH_FABRICD";
} else {
- $protocol = "VTYSH_RIPD|VTYSH_OSPFD|VTYSH_BGPD|VTYSH_ZEBRA|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_ISISD|VTYSH_FABRICD";
+ $protocol = "VTYSH_RIPD|VTYSH_OSPFD|VTYSH_BGPD|VTYSH_ZEBRA|VTYSH_PIMD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_ISISD|VTYSH_FABRICD";
}
}
elsif ($file =~ /lib\/if_rmap\.c$/) {
@@ -143,10 +143,9 @@ sub scan_file {
elsif ($fabricd) {
$protocol = "VTYSH_FABRICD";
}
-# Enable VTYSH_PIM6D once pim6_cmd.c is merged
-# elsif ($file =~ /pimd\/pim6_cmd\.c$/) {
-# $protocol = "VTYSH_PIM6D";
-# }
+ elsif ($file =~ /pimd\/pim6_.*\.c$/) {
+ $protocol = "VTYSH_PIM6D";
+ }
else {
($protocol) = ($file =~ /^(?:.*\/)?([a-z0-9]+)\/[a-zA-Z0-9_\-]+\.c$/);
$protocol = "VTYSH_" . uc $protocol;
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index ffef260636..9e8f73b101 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -73,6 +73,7 @@ struct vtysh_client {
struct thread *log_reader;
int log_fd;
+ uint32_t lost_msgs;
};
static bool stderr_tty;
@@ -3656,6 +3657,15 @@ static void vtysh_log_read(struct thread *thread)
if (ret < 0 && ERRNO_IO_RETRY(errno))
return;
+ if (stderr_stdout_same) {
+#ifdef HAVE_RL_CLEAR_VISIBLE_LINE
+ rl_clear_visible_line();
+#else
+ puts("\r");
+#endif
+ fflush(stdout);
+ }
+
if (ret <= 0) {
struct timespec ts;
@@ -3679,17 +3689,17 @@ static void vtysh_log_read(struct thread *thread)
buf.hdr.ts_nsec = ts.tv_nsec;
buf.hdr.prio = LOG_ERR;
buf.hdr.flags = 0;
- buf.hdr.arghdrlen = 0;
+ buf.hdr.texthdrlen = 0;
buf.hdr.n_argpos = 0;
- }
+ } else {
+ int32_t lost_msgs = buf.hdr.lost_msgs - vclient->lost_msgs;
- if (stderr_stdout_same) {
-#ifdef HAVE_RL_CLEAR_VISIBLE_LINE
- rl_clear_visible_line();
-#else
- puts("\r");
-#endif
- fflush(stdout);
+ if (lost_msgs > 0) {
+ vclient->lost_msgs = buf.hdr.lost_msgs;
+ fprintf(stderr,
+ "%d log messages from %s lost (vtysh reading too slowly)\n",
+ lost_msgs, vclient->name);
+ }
}
text = buf.text + sizeof(buf.hdr.argpos[0]) * buf.hdr.n_argpos;
diff --git a/yang/frr-route-types.yang b/yang/frr-route-types.yang
index aeb52a6520..ffc671c99a 100644
--- a/yang/frr-route-types.yang
+++ b/yang/frr-route-types.yang
@@ -162,9 +162,7 @@ module frr-route-types {
typedef ipv6-multicast-group-prefix {
type inet:ipv6-prefix {
pattern
- '(((FF|ff)[0-9a-fA-F]{2}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/((1[6-9])|([2-9][0-9])|(1[0-1][0-9])|(12[0-8])))';
- pattern
- '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)(/.+)';
+ '(([fF]{2}[0-9a-fA-F]{2}):).*';
}
description
"This type represents an IPv6 multicast group prefix,
diff --git a/zebra/debug_nl.c b/zebra/debug_nl.c
index 260ba30b3c..b7d12bf537 100644
--- a/zebra/debug_nl.c
+++ b/zebra/debug_nl.c
@@ -255,6 +255,40 @@ const char *ifi_type2str(int type)
}
}
+const char *ifla_pdr_type2str(int type)
+{
+ switch (type) {
+ case IFLA_PROTO_DOWN_REASON_UNSPEC:
+ return "UNSPEC";
+ case IFLA_PROTO_DOWN_REASON_MASK:
+ return "MASK";
+ case IFLA_PROTO_DOWN_REASON_VALUE:
+ return "VALUE";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char *ifla_info_type2str(int type)
+{
+ switch (type) {
+ case IFLA_INFO_UNSPEC:
+ return "UNSPEC";
+ case IFLA_INFO_KIND:
+ return "KIND";
+ case IFLA_INFO_DATA:
+ return "DATA";
+ case IFLA_INFO_XSTATS:
+ return "XSTATS";
+ case IFLA_INFO_SLAVE_KIND:
+ return "SLAVE_KIND";
+ case IFLA_INFO_SLAVE_DATA:
+ return "SLAVE_DATA";
+ default:
+ return "UNKNOWN";
+ }
+}
+
const char *rta_type2str(int type)
{
switch (type) {
@@ -358,6 +392,8 @@ const char *rta_type2str(int type)
case IFLA_EVENT:
return "EVENT";
#endif /* IFLA_EVENT */
+ case IFLA_PROTO_DOWN_REASON:
+ return "PROTO_DOWN_REASON";
default:
return "UNKNOWN";
}
@@ -838,6 +874,42 @@ const char *nh_flags2str(uint32_t flags, char *buf, size_t buflen)
/*
* Netlink abstractions.
*/
+static void nllink_pdr_dump(struct rtattr *rta, size_t msglen)
+{
+ size_t plen;
+ uint32_t u32v;
+
+next_rta:
+ /* Check the header for valid length and for outbound access. */
+ if (RTA_OK(rta, msglen) == 0)
+ return;
+
+ plen = RTA_PAYLOAD(rta);
+ zlog_debug("    protodown_reason [len=%d (payload=%zu) type=(%d) %s]",
+ rta->rta_len, plen, rta->rta_type,
+ ifla_pdr_type2str(rta->rta_type));
+ switch (rta->rta_type) {
+ case IFLA_PROTO_DOWN_REASON_MASK:
+ case IFLA_PROTO_DOWN_REASON_VALUE:
+ if (plen < sizeof(uint32_t)) {
+ zlog_debug(" invalid length");
+ break;
+ }
+
+ u32v = *(uint32_t *)RTA_DATA(rta);
+ zlog_debug(" %u", u32v);
+ break;
+
+ default:
+ /* NOTHING: unhandled. */
+ break;
+ }
+
+ /* Get next pointer and start iteration again. */
+ rta = RTA_NEXT(rta, msglen);
+ goto next_rta;
+}
+
static void nllink_linkinfo_dump(struct rtattr *rta, size_t msglen)
{
size_t plen;
@@ -851,7 +923,7 @@ next_rta:
plen = RTA_PAYLOAD(rta);
zlog_debug(" linkinfo [len=%d (payload=%zu) type=(%d) %s]",
rta->rta_len, plen, rta->rta_type,
- rta_type2str(rta->rta_type));
+ ifla_info_type2str(rta->rta_type));
switch (rta->rta_type) {
case IFLA_INFO_KIND:
if (plen == 0) {
@@ -888,8 +960,10 @@ static void nllink_dump(struct ifinfomsg *ifi, size_t msglen)
struct rtattr *rta;
size_t plen, it;
uint32_t u32v;
+ uint8_t u8v;
char bytestr[16];
char dbuf[128];
+ unsigned short rta_type;
/* Get the first attribute and go from there. */
rta = IFLA_RTA(ifi);
@@ -899,10 +973,10 @@ next_rta:
return;
plen = RTA_PAYLOAD(rta);
+ rta_type = rta->rta_type & ~NLA_F_NESTED;
zlog_debug(" rta [len=%d (payload=%zu) type=(%d) %s]", rta->rta_len,
- plen, rta->rta_type, rta_type2str(rta->rta_type));
- switch (rta->rta_type) {
- case IFLA_IFNAME:
+ plen, rta_type, rta_type2str(rta_type));
+ switch (rta_type) {
+ case IFLA_IFNAME:
case IFLA_IFALIAS:
if (plen == 0) {
zlog_debug(" invalid length");
@@ -927,6 +1001,7 @@ next_rta:
#endif /* IFLA_GSO_MAX_SIZE */
case IFLA_CARRIER_CHANGES:
case IFLA_MASTER:
+ case IFLA_LINK:
if (plen < sizeof(uint32_t)) {
zlog_debug(" invalid length");
break;
@@ -936,6 +1011,15 @@ next_rta:
zlog_debug(" %u", u32v);
break;
+ case IFLA_PROTO_DOWN:
+ if (plen < sizeof(uint8_t)) {
+ zlog_debug(" invalid length");
+ break;
+ }
+
+ u8v = *(uint8_t *)RTA_DATA(rta);
+ zlog_debug(" %u", u8v);
+ break;
case IFLA_ADDRESS:
datap = RTA_DATA(rta);
dbuf[0] = 0;
@@ -952,7 +1036,11 @@ next_rta:
break;
case IFLA_LINKINFO:
- nllink_linkinfo_dump(RTA_DATA(rta), msglen);
+ nllink_linkinfo_dump(RTA_DATA(rta), plen);
+ break;
+
+ case IFLA_PROTO_DOWN_REASON:
+ nllink_pdr_dump(RTA_DATA(rta), plen);
break;
default:
@@ -1027,6 +1115,7 @@ static void nlneigh_dump(struct ndmsg *ndm, size_t msglen)
uint16_t vid;
char bytestr[16];
char dbuf[128];
+ unsigned short rta_type;
#ifndef NDA_RTA
#define NDA_RTA(ndm) \
@@ -1043,9 +1132,10 @@ next_rta:
return;
plen = RTA_PAYLOAD(rta);
+ rta_type = rta->rta_type & ~NLA_F_NESTED;
zlog_debug(" rta [len=%d (payload=%zu) type=(%d) %s]", rta->rta_len,
- plen, rta->rta_type, neigh_rta2str(rta->rta_type));
- switch (rta->rta_type & ~ NLA_F_NESTED) {
+ plen, rta->rta_type, neigh_rta2str(rta_type));
+ switch (rta_type) {
case NDA_LLADDR:
datap = RTA_DATA(rta);
dbuf[0] = 0;
@@ -1153,6 +1243,7 @@ static void nlnh_dump(struct nhmsg *nhm, size_t msglen)
uint32_t u32v;
unsigned long count, i;
struct nexthop_grp *nhgrp;
+ unsigned short rta_type;
rta = RTM_NHA(nhm);
@@ -1162,9 +1253,10 @@ next_rta:
return;
plen = RTA_PAYLOAD(rta);
+ rta_type = rta->rta_type & ~NLA_F_NESTED;
zlog_debug(" rta [len=%d (payload=%zu) type=(%d) %s]", rta->rta_len,
- plen, rta->rta_type, nhm_rta2str(rta->rta_type));
- switch (rta->rta_type & ~NLA_F_NESTED) {
+ plen, rta->rta_type, nhm_rta2str(rta_type));
+ switch (rta_type) {
case NHA_ID:
u32v = *(uint32_t *)RTA_DATA(rta);
zlog_debug(" %u", u32v);
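
For illustration (not part of the change): the repeated "& ~NLA_F_NESTED" masking above is needed because newer kernels set the nested-attribute flag (bit 15) in rta_type, so a nested attribute's raw type no longer compares equal to the plain IFLA_/NDA_/NHA_ enum values until that flag is stripped. A minimal sketch of the idea:

    #include <linux/netlink.h>   /* NLA_F_NESTED == (1 << 15) */
    #include <linux/rtnetlink.h> /* struct rtattr */

    /* sketch: return the attribute type with the nested flag cleared */
    static inline unsigned short rta_plain_type(const struct rtattr *rta)
    {
            return rta->rta_type & ~NLA_F_NESTED;
    }
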
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index 8c8004190b..ec4ea372f1 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -812,6 +812,9 @@ static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx)
case DPLANE_OP_INTF_ADDR_ADD:
case DPLANE_OP_INTF_ADDR_DEL:
case DPLANE_OP_INTF_NETCONFIG:
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
case DPLANE_OP_NONE:
break;
@@ -1232,7 +1235,8 @@ static void fpm_process_queue(struct thread *t)
* the output data in the STREAM_WRITEABLE
* check above, so we can ignore the return
*/
- (void)fpm_nl_enqueue(fnc, ctx);
+ if (fnc->socket != -1)
+ (void)fpm_nl_enqueue(fnc, ctx);
/* Account the processed entries. */
processed_contexts++;
diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c
index db8ee3236c..873aea236d 100644
--- a/zebra/if_netlink.c
+++ b/zebra/if_netlink.c
@@ -77,6 +77,7 @@
#include "zebra/netconf_netlink.h"
extern struct zebra_privs_t zserv_privs;
+uint8_t frr_protodown_r_bit = FRR_PROTODOWN_REASON_DEFAULT_BIT;
/* Note: on netlink systems, there should be a 1-to-1 mapping between interface
names and ifindex values. */
@@ -102,7 +103,7 @@ static void set_ifindex(struct interface *ifp, ifindex_t ifi_index,
EC_LIB_INTERFACE,
"interface rename detected on up interface: index %d was renamed from %s to %s, results are uncertain!",
ifi_index, oifp->name, ifp->name);
- if_delete_update(oifp);
+ if_delete_update(&oifp);
}
}
if_set_index(ifp, ifi_index);
@@ -814,33 +815,90 @@ static int netlink_bridge_interface(struct nlmsghdr *h, int len, ns_id_t ns_id,
return 0;
}
-/* If the interface is an es bond member then it must follow EVPN's
- * protodown setting
+static bool is_if_protodown_reason_only_frr(uint32_t rc_bitfield)
+{
+ /* This shouldn't be possible */
+ assert(frr_protodown_r_bit < 32);
+ return (rc_bitfield == (((uint32_t)1) << frr_protodown_r_bit));
+}
+
+/*
+ * Process interface protodown dplane update.
+ *
+ * If the interface is an es bond member then it must follow EVPN's
+ * protodown setting.
*/
static void netlink_proc_dplane_if_protodown(struct zebra_if *zif,
- bool protodown)
+ struct rtattr **tb)
{
- bool zif_protodown;
+ bool protodown;
+ bool old_protodown;
+ uint32_t rc_bitfield = 0;
+ struct rtattr *pd_reason_info[IFLA_MAX + 1];
+
+ protodown = !!*(uint8_t *)RTA_DATA(tb[IFLA_PROTO_DOWN]);
+
+ if (tb[IFLA_PROTO_DOWN_REASON]) {
+ netlink_parse_rtattr_nested(pd_reason_info, IFLA_INFO_MAX,
+ tb[IFLA_PROTO_DOWN_REASON]);
- zif_protodown = !!(zif->flags & ZIF_FLAG_PROTODOWN);
- if (protodown == zif_protodown)
+ if (pd_reason_info[IFLA_PROTO_DOWN_REASON_VALUE])
+ rc_bitfield = *(uint32_t *)RTA_DATA(
+ pd_reason_info[IFLA_PROTO_DOWN_REASON_VALUE]);
+ }
+
+ /*
+ * Set our reason code to note it wasn't us.
+ * If the reason we got from the kernel is ONLY frr though, don't
+ * set it.
+ */
+ COND_FLAG(zif->protodown_rc, ZEBRA_PROTODOWN_EXTERNAL,
+ protodown && rc_bitfield &&
+ !is_if_protodown_reason_only_frr(rc_bitfield));
+
+ old_protodown = !!ZEBRA_IF_IS_PROTODOWN(zif);
+ if (protodown == old_protodown)
return;
if (IS_ZEBRA_DEBUG_EVPN_MH_ES || IS_ZEBRA_DEBUG_KERNEL)
zlog_debug("interface %s dplane change, protdown %s",
zif->ifp->name, protodown ? "on" : "off");
+ /* Set protodown, respectively */
+ COND_FLAG(zif->flags, ZIF_FLAG_PROTODOWN, protodown);
+
if (zebra_evpn_is_es_bond_member(zif->ifp)) {
+ /* Check it's not already being sent to the dplane first */
+ if (protodown &&
+ CHECK_FLAG(zif->flags, ZIF_FLAG_SET_PROTODOWN)) {
+ if (IS_ZEBRA_DEBUG_EVPN_MH_ES || IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug(
+ "bond mbr %s protodown on recv'd but already sent protodown on to the dplane",
+ zif->ifp->name);
+ return;
+ }
+
+ if (!protodown &&
+ CHECK_FLAG(zif->flags, ZIF_FLAG_UNSET_PROTODOWN)) {
+ if (IS_ZEBRA_DEBUG_EVPN_MH_ES || IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug(
+ "bond mbr %s protodown off recv'd but already sent protodown off to the dplane",
+ zif->ifp->name);
+ return;
+ }
+
if (IS_ZEBRA_DEBUG_EVPN_MH_ES || IS_ZEBRA_DEBUG_KERNEL)
zlog_debug(
- "bond mbr %s re-instate protdown %s in the dplane",
- zif->ifp->name, zif_protodown ? "on" : "off");
- netlink_protodown(zif->ifp, zif_protodown);
- } else {
- if (protodown)
- zif->flags |= ZIF_FLAG_PROTODOWN;
+ "bond mbr %s reinstate protodown %s in the dplane",
+ zif->ifp->name, old_protodown ? "on" : "off");
+
+ if (old_protodown)
+ SET_FLAG(zif->flags, ZIF_FLAG_SET_PROTODOWN);
else
- zif->flags &= ~ZIF_FLAG_PROTODOWN;
+ SET_FLAG(zif->flags, ZIF_FLAG_UNSET_PROTODOWN);
+
+ dplane_intf_update(zif->ifp);
}
}
@@ -859,6 +917,29 @@ static uint8_t netlink_parse_lacp_bypass(struct rtattr **linkinfo)
}
/*
+ * Only called at startup to clean up leftover protodown reasons we may
+ * not have cleaned up. We leave protodown set though.
+ */
+static void if_sweep_protodown(struct zebra_if *zif)
+{
+ bool protodown;
+
+ protodown = !!ZEBRA_IF_IS_PROTODOWN(zif);
+
+ if (!protodown)
+ return;
+
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("interface %s sweeping protodown %s reason 0x%x",
+ zif->ifp->name, protodown ? "on" : "off",
+ zif->protodown_rc);
+
+ /* Only clear our reason codes, leave external if it was set */
+ UNSET_FLAG(zif->protodown_rc, ZEBRA_PROTODOWN_ALL);
+ dplane_intf_update(zif->ifp);
+}
+
+/*
* Called from interface_lookup_netlink(). This function is only used
* during bootstrap.
*/
@@ -905,7 +986,8 @@ static int netlink_interface(struct nlmsghdr *h, ns_id_t ns_id, int startup)
/* Looking up interface name. */
memset(linkinfo, 0, sizeof(linkinfo));
- netlink_parse_rtattr(tb, IFLA_MAX, IFLA_RTA(ifi), len);
+ netlink_parse_rtattr_flags(tb, IFLA_MAX, IFLA_RTA(ifi), len,
+ NLA_F_NESTED);
/* check for wireless messages to ignore */
if ((tb[IFLA_WIRELESS] != NULL) && (ifi->ifi_change == 0)) {
@@ -1020,10 +1102,8 @@ static int netlink_interface(struct nlmsghdr *h, ns_id_t ns_id, int startup)
zebra_l2if_update_bond_slave(ifp, bond_ifindex, !!bypass);
if (tb[IFLA_PROTO_DOWN]) {
- uint8_t protodown;
-
- protodown = *(uint8_t *)RTA_DATA(tb[IFLA_PROTO_DOWN]);
- netlink_proc_dplane_if_protodown(zif, !!protodown);
+ netlink_proc_dplane_if_protodown(zif, tb);
+ if_sweep_protodown(zif);
}
return 0;
@@ -1244,6 +1324,41 @@ netlink_put_address_update_msg(struct nl_batch *bth,
false);
}
+static ssize_t netlink_intf_msg_encoder(struct zebra_dplane_ctx *ctx, void *buf,
+ size_t buflen)
+{
+ enum dplane_op_e op;
+ int cmd = 0;
+
+ op = dplane_ctx_get_op(ctx);
+
+ switch (op) {
+ case DPLANE_OP_INTF_UPDATE:
+ cmd = RTM_SETLINK;
+ break;
+ case DPLANE_OP_INTF_INSTALL:
+ cmd = RTM_NEWLINK;
+ break;
+ case DPLANE_OP_INTF_DELETE:
+ cmd = RTM_DELLINK;
+ break;
+ default:
+ flog_err(
+ EC_ZEBRA_INTF_UPDATE_FAILURE,
+ "Context received for kernel interface update with incorrect OP code (%u)",
+ op);
+ return -1;
+ }
+
+ return netlink_intf_msg_encode(cmd, ctx, buf, buflen);
+}
+
+enum netlink_msg_status
+netlink_put_intf_update_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx)
+{
+ return netlink_batch_add_msg(bth, ctx, netlink_intf_msg_encoder, false);
+}
+
int netlink_interface_addr(struct nlmsghdr *h, ns_id_t ns_id, int startup)
{
int len;
@@ -1716,7 +1831,8 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
/* Looking up interface name. */
memset(linkinfo, 0, sizeof(linkinfo));
- netlink_parse_rtattr(tb, IFLA_MAX, IFLA_RTA(ifi), len);
+ netlink_parse_rtattr_flags(tb, IFLA_MAX, IFLA_RTA(ifi), len,
+ NLA_F_NESTED);
/* check for wireless messages to ignore */
if ((tb[IFLA_WIRELESS] != NULL) && (ifi->ifi_change == 0)) {
@@ -1834,7 +1950,7 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
ZEBRA_INTERFACE_VRF_LOOPBACK);
/* Update link. */
- zebra_if_update_link(ifp, link_ifindex, ns_id);
+ zebra_if_update_link(ifp, link_ifindex, link_nsid);
ifp->ll_type =
netlink_to_zebra_link_type(ifi->ifi_type);
@@ -1856,14 +1972,9 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
zebra_l2if_update_bond_slave(ifp, bond_ifindex,
!!bypass);
- if (tb[IFLA_PROTO_DOWN]) {
- uint8_t protodown;
+ if (tb[IFLA_PROTO_DOWN])
+ netlink_proc_dplane_if_protodown(ifp->info, tb);
- protodown = *(uint8_t *)RTA_DATA(
- tb[IFLA_PROTO_DOWN]);
- netlink_proc_dplane_if_protodown(ifp->info,
- !!protodown);
- }
} else if (ifp->vrf->vrf_id != vrf_id) {
/* VRF change for an interface. */
if (IS_ZEBRA_DEBUG_KERNEL)
@@ -1876,6 +1987,7 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
} else {
bool was_bridge_slave, was_bond_slave;
uint8_t chgflags = ZEBRA_BRIDGE_NO_ACTION;
+ zif = ifp->info;
/* Interface update. */
if (IS_ZEBRA_DEBUG_KERNEL)
@@ -1903,15 +2015,21 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
memcpy(old_hw_addr, ifp->hw_addr, INTERFACE_HWADDR_MAX);
/* Update link. */
- zebra_if_update_link(ifp, link_ifindex, ns_id);
+ zebra_if_update_link(ifp, link_ifindex, link_nsid);
ifp->ll_type =
netlink_to_zebra_link_type(ifi->ifi_type);
netlink_interface_update_hw_addr(tb, ifp);
+ if (tb[IFLA_PROTO_DOWN])
+ netlink_proc_dplane_if_protodown(ifp->info, tb);
+
if (if_is_no_ptm_operative(ifp)) {
+ bool is_up = if_is_operative(ifp);
ifp->flags = ifi->ifi_flags & 0x0000fffff;
- if (!if_is_no_ptm_operative(ifp)) {
+ if (!if_is_no_ptm_operative(ifp) ||
+ CHECK_FLAG(zif->flags,
+ ZIF_FLAG_PROTODOWN)) {
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug(
"Intf %s(%u) has gone DOWN",
@@ -1927,7 +2045,7 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
zlog_debug(
"Intf %s(%u) PTM up, notifying clients",
name, ifp->ifindex);
- zebra_interface_up_update(ifp);
+ if_up(ifp, !is_up);
/* Update EVPN VNI when SVI MAC change
*/
@@ -1956,12 +2074,14 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
}
} else {
ifp->flags = ifi->ifi_flags & 0x0000fffff;
- if (if_is_operative(ifp)) {
+ if (if_is_operative(ifp) &&
+ !CHECK_FLAG(zif->flags,
+ ZIF_FLAG_PROTODOWN)) {
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug(
"Intf %s(%u) has come UP",
name, ifp->ifindex);
- if_up(ifp);
+ if_up(ifp, true);
if (IS_ZEBRA_IF_BRIDGE(ifp))
chgflags =
ZEBRA_BRIDGE_MASTER_UP;
@@ -1990,15 +2110,6 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
else if (IS_ZEBRA_IF_BOND_SLAVE(ifp) || was_bond_slave)
zebra_l2if_update_bond_slave(ifp, bond_ifindex,
!!bypass);
-
- if (tb[IFLA_PROTO_DOWN]) {
- uint8_t protodown;
-
- protodown = *(uint8_t *)RTA_DATA(
- tb[IFLA_PROTO_DOWN]);
- netlink_proc_dplane_if_protodown(ifp->info,
- !!protodown);
- }
}
zif = ifp->info;
@@ -2033,7 +2144,7 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
else if (IS_ZEBRA_IF_VXLAN(ifp))
zebra_l2_vxlanif_del(ifp);
- if_delete_update(ifp);
+ if_delete_update(&ifp);
/* If VRF, delete the VRF structure itself. */
if (zif_type == ZEBRA_IF_VRF && !vrf_is_backend_netns())
@@ -2043,30 +2154,72 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
return 0;
}
-int netlink_protodown(struct interface *ifp, bool down)
-{
- struct zebra_ns *zns = zebra_ns_lookup(NS_DEFAULT);
+/**
+ * Interface encoding helper function.
+ *
+ * \param[in] cmd netlink command.
+ * \param[in] ctx dataplane context (information snapshot).
+ * \param[out] buf buffer to hold the packet.
+ * \param[in] buflen amount of buffer bytes.
+ */
+ssize_t netlink_intf_msg_encode(uint16_t cmd,
+ const struct zebra_dplane_ctx *ctx, void *buf,
+ size_t buflen)
+{
struct {
struct nlmsghdr n;
struct ifinfomsg ifa;
- char buf[NL_PKT_BUF_SIZE];
- } req;
+ char buf[];
+ } *req = buf;
- memset(&req, 0, sizeof(req));
+ struct rtattr *nest_protodown_reason;
+ ifindex_t ifindex = dplane_ctx_get_ifindex(ctx);
+ bool down = dplane_ctx_intf_is_protodown(ctx);
+ bool pd_reason_val = dplane_ctx_get_intf_pd_reason_val(ctx);
+ struct nlsock *nl =
+ kernel_netlink_nlsock_lookup(dplane_ctx_get_ns_sock(ctx));
- req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
- req.n.nlmsg_flags = NLM_F_REQUEST;
- req.n.nlmsg_type = RTM_SETLINK;
- req.n.nlmsg_pid = zns->netlink_cmd.snl.nl_pid;
+ if (buflen < sizeof(*req))
+ return 0;
- req.ifa.ifi_index = ifp->ifindex;
+ memset(req, 0, sizeof(*req));
- nl_attr_put(&req.n, sizeof(req), IFLA_PROTO_DOWN, &down, sizeof(down));
- nl_attr_put32(&req.n, sizeof(req), IFLA_LINK, ifp->ifindex);
+ if (cmd != RTM_SETLINK)
+ flog_err(
+ EC_ZEBRA_INTF_UPDATE_FAILURE,
+ "Only RTM_SETLINK message type currently supported in dplane pthread");
- return netlink_talk(netlink_talk_filter, &req.n, &zns->netlink_cmd, zns,
- false);
+ req->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ req->n.nlmsg_flags = NLM_F_REQUEST;
+ req->n.nlmsg_type = cmd;
+ req->n.nlmsg_pid = nl->snl.nl_pid;
+
+ req->ifa.ifi_index = ifindex;
+
+ nl_attr_put8(&req->n, buflen, IFLA_PROTO_DOWN, down);
+ nl_attr_put32(&req->n, buflen, IFLA_LINK, ifindex);
+
+ /* Reason info nest */
+ nest_protodown_reason =
+ nl_attr_nest(&req->n, buflen, IFLA_PROTO_DOWN_REASON);
+
+ if (!nest_protodown_reason)
+ return -1;
+
+ nl_attr_put32(&req->n, buflen, IFLA_PROTO_DOWN_REASON_MASK,
+ (1 << frr_protodown_r_bit));
+ nl_attr_put32(&req->n, buflen, IFLA_PROTO_DOWN_REASON_VALUE,
+ ((int)pd_reason_val) << frr_protodown_r_bit);
+
+ nl_attr_nest_end(&req->n, nest_protodown_reason);
+
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: %s, protodown=%d reason_val=%d ifindex=%u",
+ __func__, nl_msg_type_to_str(cmd), down,
+ pd_reason_val, ifindex);
+
+ return NLMSG_ALIGN(req->n.nlmsg_len);
}
/* Interface information read by netlink. */
@@ -2082,4 +2235,35 @@ void interface_list(struct zebra_ns *zns)
interface_addr_lookup_netlink(zns);
}
+void if_netlink_set_frr_protodown_r_bit(uint8_t bit)
+{
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug(
+ "Protodown reason bit index changed: bit-index %u -> bit-index %u",
+ frr_protodown_r_bit, bit);
+
+ frr_protodown_r_bit = bit;
+}
+
+void if_netlink_unset_frr_protodown_r_bit(void)
+{
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug(
+ "Protodown reason bit index changed: bit-index %u -> bit-index %u",
+ frr_protodown_r_bit, FRR_PROTODOWN_REASON_DEFAULT_BIT);
+
+ frr_protodown_r_bit = FRR_PROTODOWN_REASON_DEFAULT_BIT;
+}
+
+bool if_netlink_frr_protodown_r_bit_is_set(void)
+{
+ return (frr_protodown_r_bit != FRR_PROTODOWN_REASON_DEFAULT_BIT);
+}
+
+uint8_t if_netlink_get_frr_protodown_r_bit(void)
+{
+ return frr_protodown_r_bit;
+}
+
#endif /* GNU_LINUX */
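
For illustration (not part of the change): with the default reason bit of 7, the IFLA_PROTO_DOWN_REASON nest built by netlink_intf_msg_encode() carries mask 0x80 and value 0x80 when FRR sets protodown (0x00 when it clears it), and is_if_protodown_reason_only_frr() treats a received reason bitfield of exactly that single bit as "FRR was the only reason", so ZEBRA_PROTODOWN_EXTERNAL is not flagged. A self-contained sketch of the same arithmetic:

    #include <stdbool.h>
    #include <stdint.h>

    /* sketch of the encode side: which bit FRR owns and its current state */
    static void pdr_encode(bool down, uint8_t frr_bit, uint32_t *mask, uint32_t *value)
    {
            *mask = ((uint32_t)1) << frr_bit;               /* e.g. 0x80 for bit 7 */
            *value = down ? (((uint32_t)1) << frr_bit) : 0;
    }

    /* sketch of the receive-side check, mirroring is_if_protodown_reason_only_frr() */
    static bool pdr_only_frr(uint32_t rc_bitfield, uint8_t frr_bit)
    {
            return rc_bitfield == (((uint32_t)1) << frr_bit);
    }
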
diff --git a/zebra/if_netlink.h b/zebra/if_netlink.h
index a1ce7af8c7..46eac25377 100644
--- a/zebra/if_netlink.h
+++ b/zebra/if_netlink.h
@@ -40,6 +40,9 @@ int netlink_interface_addr_dplane(struct nlmsghdr *h, ns_id_t ns_id,
extern int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup);
extern int interface_lookup_netlink(struct zebra_ns *zns);
+extern ssize_t netlink_intf_msg_encode(uint16_t cmd,
+ const struct zebra_dplane_ctx *ctx,
+ void *buf, size_t buflen);
extern enum netlink_msg_status
netlink_put_gre_set_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx);
@@ -47,19 +50,19 @@ extern enum netlink_msg_status
netlink_put_address_update_msg(struct nl_batch *bth,
struct zebra_dplane_ctx *ctx);
-/*
- * Set protodown status of interface.
- *
- * ifp
- * Interface to set protodown on.
- *
- * down
- * If true, set protodown on. If false, set protodown off.
+extern enum netlink_msg_status
+netlink_put_intf_update_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx);
+
+#define FRR_PROTODOWN_REASON_DEFAULT_BIT 7
+/* Protodown bit setter/getter
*
- * Returns:
- * 0
+ * Allow users to change the bit if it conflicts with another
+ * on their system.
*/
-int netlink_protodown(struct interface *ifp, bool down);
+extern void if_netlink_set_frr_protodown_r_bit(uint8_t bit);
+extern void if_netlink_unset_frr_protodown_r_bit(void);
+extern bool if_netlink_frr_protodown_r_bit_is_set(void);
+extern uint8_t if_netlink_get_frr_protodown_r_bit(void);
#ifdef __cplusplus
}
diff --git a/zebra/if_socket.c b/zebra/if_socket.c
new file mode 100644
index 0000000000..309d5a3f3e
--- /dev/null
+++ b/zebra/if_socket.c
@@ -0,0 +1,41 @@
+/*
+ * Zebra Interface interaction with the kernel using socket.
+ * Copyright (C) 2022 NVIDIA CORPORATION & AFFILIATES
+ * Stephen Worley
+ *
+ * This file is part of FRR.
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FRR; see the file COPYING. If not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <zebra.h>
+
+#ifndef HAVE_NETLINK
+
+#include "lib_errors.h"
+
+#include "zebra/rt.h"
+#include "zebra/zebra_dplane.h"
+#include "zebra/zebra_errors.h"
+
+enum zebra_dplane_result kernel_intf_update(struct zebra_dplane_ctx *ctx)
+{
+ flog_err(EC_LIB_UNAVAILABLE, "%s not implemented for this platform",
+ __func__);
+ return ZEBRA_DPLANE_REQUEST_FAILURE;
+}
+
+#endif
diff --git a/zebra/interface.c b/zebra/interface.c
index fbd2aac005..a70326ebb3 100644
--- a/zebra/interface.c
+++ b/zebra/interface.c
@@ -224,9 +224,13 @@ static int if_zebra_new_hook(struct interface *ifp)
static void if_nhg_dependents_check_valid(struct nhg_hash_entry *nhe)
{
zebra_nhg_check_valid(nhe);
- if (!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID))
- /* Assuming uninstalled as well here */
- UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
+ if (!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)) {
+ /* If we're in shutdown, this interface event needs to clean
+ * up installed NHGs, so don't clear that flag directly.
+ */
+ if (!zrouter.in_shutdown)
+ UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED);
+ }
}
static void if_down_nhg_dependents(const struct interface *ifp)
@@ -257,6 +261,13 @@ static int if_zebra_delete_hook(struct interface *ifp)
if (ifp->info) {
zebra_if = ifp->info;
+ /* If we set protodown, clear our reason now from the kernel */
+ if (ZEBRA_IF_IS_PROTODOWN(zebra_if) && zebra_if->protodown_rc &&
+ !ZEBRA_IF_IS_PROTODOWN_ONLY_EXTERNAL(zebra_if))
+ zebra_if_update_protodown_rc(ifp, true,
+ (zebra_if->protodown_rc &
+ ~ZEBRA_PROTODOWN_ALL));
+
/* Free installed address chains tree. */
if (zebra_if->ipv4_subnets)
route_table_finish(zebra_if->ipv4_subnets);
@@ -517,7 +528,7 @@ void if_flags_update(struct interface *ifp, uint64_t newflags)
/* inoperative -> operative? */
ifp->flags = newflags;
if (if_is_operative(ifp))
- if_up(ifp);
+ if_up(ifp, true);
}
}
@@ -795,9 +806,10 @@ static void if_delete_connected(struct interface *ifp)
}
/* Handle an interface delete event */
-void if_delete_update(struct interface *ifp)
+void if_delete_update(struct interface **pifp)
{
struct zebra_if *zif;
+ struct interface *ifp = *pifp;
if (if_is_up(ifp)) {
flog_err(
@@ -860,7 +872,7 @@ void if_delete_update(struct interface *ifp)
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug("interface %s is being deleted from the system",
ifp->name);
- if_delete(&ifp);
+ if_delete(pifp);
}
}
@@ -1045,7 +1057,7 @@ bool if_nhg_dependents_is_empty(const struct interface *ifp)
}
/* Interface is up. */
-void if_up(struct interface *ifp)
+void if_up(struct interface *ifp, bool install_connected)
{
struct zebra_if *zif;
struct interface *link_if;
@@ -1077,7 +1089,8 @@ void if_up(struct interface *ifp)
#endif
/* Install connected routes to the kernel. */
- if_install_connected(ifp);
+ if (install_connected)
+ if_install_connected(ifp);
/* Handle interface up for specific types for EVPN. Non-VxLAN interfaces
* are checked to see if (remote) neighbor entries need to be installed
@@ -1224,58 +1237,130 @@ void zebra_if_update_all_links(struct zebra_ns *zns)
}
}
-void zebra_if_set_protodown(struct interface *ifp, bool down)
+static bool if_ignore_set_protodown(const struct interface *ifp, bool new_down,
+ uint32_t new_protodown_rc)
{
+ struct zebra_if *zif;
+ bool old_down, old_set_down, old_unset_down;
+
+ zif = ifp->info;
+
+ /* Current state as we know it */
+ old_down = !!(ZEBRA_IF_IS_PROTODOWN(zif));
+ old_set_down = !!CHECK_FLAG(zif->flags, ZIF_FLAG_SET_PROTODOWN);
+ old_unset_down = !!CHECK_FLAG(zif->flags, ZIF_FLAG_UNSET_PROTODOWN);
+
+ if (new_protodown_rc == zif->protodown_rc) {
+ /* Early return if protodown state & reason bitfield already match */
+ if (new_down == old_down) {
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug(
+ "Ignoring request to set protodown %s for interface %s (%u): protodown %s is already set (reason bitfield: old 0x%x new 0x%x)",
+ new_down ? "on" : "off", ifp->name,
+ ifp->ifindex, new_down ? "on" : "off",
+ zif->protodown_rc, new_protodown_rc);
+
+ return true;
+ }
+
+ /* Early return if a protodown-on is already queued & the
+ * reason bitfield matches
+ */
+ if (new_down && old_set_down) {
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug(
+ "Ignoring request to set protodown %s for interface %s (%u): protodown %s is already queued to dplane (reason bitfield: old 0x%x new 0x%x)",
+ new_down ? "on" : "off", ifp->name,
+ ifp->ifindex, new_down ? "on" : "off",
+ zif->protodown_rc, new_protodown_rc);
+
+ return true;
+ }
+
+ /* Early return if a protodown-off is already queued & the
+ * reason bitfield matches
+ */
+ if (!new_down && old_unset_down) {
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug(
+ "Ignoring request to set protodown %s for interface %s (%u): protodown %s is already queued to dplane (reason bitfield: old 0x%x new 0x%x)",
+ new_down ? "on" : "off", ifp->name,
+ ifp->ifindex, new_down ? "on" : "off",
+ zif->protodown_rc, new_protodown_rc);
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int zebra_if_update_protodown_rc(struct interface *ifp, bool new_down,
+ uint32_t new_protodown_rc)
+{
+ struct zebra_if *zif;
+
+ zif = ifp->info;
+
+ /* Check if we already have this state or it's queued */
+ if (if_ignore_set_protodown(ifp, new_down, new_protodown_rc))
+ return 1;
+
+ zlog_info(
+ "Setting protodown %s - interface %s (%u): reason bitfield change from 0x%x --> 0x%x",
+ new_down ? "on" : "off", ifp->name, ifp->ifindex,
+ zif->protodown_rc, new_protodown_rc);
+
+ zif->protodown_rc = new_protodown_rc;
+
+ if (new_down)
+ SET_FLAG(zif->flags, ZIF_FLAG_SET_PROTODOWN);
+ else
+ SET_FLAG(zif->flags, ZIF_FLAG_UNSET_PROTODOWN);
+
#ifdef HAVE_NETLINK
- netlink_protodown(ifp, down);
+ dplane_intf_update(ifp);
#else
zlog_warn("Protodown is not supported on this platform");
#endif
+ return 0;
+}
+
+int zebra_if_set_protodown(struct interface *ifp, bool new_down,
+ enum protodown_reasons new_reason)
+{
+ struct zebra_if *zif;
+ uint32_t new_protodown_rc;
+
+ zif = ifp->info;
+
+ if (new_down)
+ new_protodown_rc = zif->protodown_rc | new_reason;
+ else
+ new_protodown_rc = zif->protodown_rc & ~new_reason;
+
+ return zebra_if_update_protodown_rc(ifp, new_down, new_protodown_rc);
}
/*
- * Handle an interface addr event based on info in a dplane context object.
+ * Handle interface events based on info in a dplane context object.
* This runs in the main pthread, using the info in the context object to
* modify an interface.
*/
-void zebra_if_addr_update_ctx(struct zebra_dplane_ctx *ctx)
+static void zebra_if_addr_update_ctx(struct zebra_dplane_ctx *ctx,
+ struct interface *ifp)
{
- struct interface *ifp;
uint8_t flags = 0;
const char *label = NULL;
- ns_id_t ns_id;
- struct zebra_ns *zns;
uint32_t metric = METRIC_MAX;
- ifindex_t ifindex;
const struct prefix *addr, *dest = NULL;
enum dplane_op_e op;
op = dplane_ctx_get_op(ctx);
- ns_id = dplane_ctx_get_ns_id(ctx);
-
- zns = zebra_ns_lookup(ns_id);
- if (zns == NULL) {
- /* No ns - deleted maybe? */
- if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("%s: can't find zns id %u", __func__, ns_id);
- goto done;
- }
-
- ifindex = dplane_ctx_get_ifindex(ctx);
-
- ifp = if_lookup_by_index_per_ns(zns, ifindex);
- if (ifp == NULL) {
- if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("%s: can't find ifp at nsid %u index %d",
- __func__, ns_id, ifindex);
- goto done;
- }
-
addr = dplane_ctx_get_intf_addr(ctx);
if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("%s: %s: ifindex %u, addr %pFX", __func__,
- dplane_op2str(op), ifindex, addr);
+ zlog_debug("%s: %s: if %s(%u), addr %pFX", __func__,
+ dplane_op2str(op), ifp->name,
+ ifp->ifindex, addr);
/* Is there a peer or broadcast address? */
dest = dplane_ctx_get_intf_dest(ctx);
@@ -1330,41 +1415,66 @@ void zebra_if_addr_update_ctx(struct zebra_dplane_ctx *ctx)
*/
if (op != DPLANE_OP_INTF_ADDR_ADD)
rib_update(RIB_UPDATE_KERNEL);
+}
+
+static void zebra_if_update_ctx(struct zebra_dplane_ctx *ctx,
+ struct interface *ifp)
+{
+ enum zebra_dplane_result dp_res;
+ struct zebra_if *zif;
+ bool pd_reason_val;
+ bool down;
+
+ dp_res = dplane_ctx_get_status(ctx);
+ pd_reason_val = dplane_ctx_get_intf_pd_reason_val(ctx);
+ down = dplane_ctx_intf_is_protodown(ctx);
+
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: %s: if %s(%u) ctx-protodown %s ctx-reason %d",
+ __func__, dplane_op2str(dplane_ctx_get_op(ctx)),
+ ifp->name, ifp->ifindex, down ? "on" : "off",
+ pd_reason_val);
+
+ zif = ifp->info;
+ if (!zif) {
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: if %s(%u) zebra info pointer is NULL",
+ __func__, ifp->name, ifp->ifindex);
+ return;
+ }
+
+ if (dp_res != ZEBRA_DPLANE_REQUEST_SUCCESS) {
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: if %s(%u) dplane update failed",
+ __func__, ifp->name, ifp->ifindex);
+ goto done;
+ }
+
+ /* Update our info */
+ COND_FLAG(zif->flags, ZIF_FLAG_PROTODOWN, down);
done:
- /* We're responsible for the ctx object */
- dplane_ctx_fini(&ctx);
+ /* Clear our dplane flags */
+ UNSET_FLAG(zif->flags, ZIF_FLAG_SET_PROTODOWN);
+ UNSET_FLAG(zif->flags, ZIF_FLAG_UNSET_PROTODOWN);
}
/*
* Handle netconf change from a dplane context object; runs in the main
* pthread so it can update zebra data structs.
*/
-int zebra_if_netconf_update_ctx(struct zebra_dplane_ctx *ctx)
+static void zebra_if_netconf_update_ctx(struct zebra_dplane_ctx *ctx,
+ struct interface *ifp)
{
- struct zebra_ns *zns;
- struct interface *ifp;
struct zebra_if *zif;
enum dplane_netconf_status_e mpls;
- int ret = 0;
-
- zns = zebra_ns_lookup(dplane_ctx_get_netconf_ns_id(ctx));
- if (zns == NULL) {
- ret = -1;
- goto done;
- }
-
- ifp = if_lookup_by_index_per_ns(zns,
- dplane_ctx_get_netconf_ifindex(ctx));
- if (ifp == NULL) {
- ret = -1;
- goto done;
- }
zif = ifp->info;
- if (zif == NULL) {
- ret = -1;
- goto done;
+ if (!zif) {
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: if %s(%u) zebra info pointer is NULL",
+ __func__, ifp->name, ifp->ifindex);
+ return;
}
mpls = dplane_ctx_get_netconf_mpls(ctx);
@@ -1378,12 +1488,105 @@ int zebra_if_netconf_update_ctx(struct zebra_dplane_ctx *ctx)
zlog_debug("%s: if %s, ifindex %d, mpls %s",
__func__, ifp->name, ifp->ifindex,
(zif->mpls ? "ON" : "OFF"));
+}
+void zebra_if_dplane_result(struct zebra_dplane_ctx *ctx)
+{
+ struct zebra_ns *zns;
+ struct interface *ifp;
+ ns_id_t ns_id;
+ enum dplane_op_e op;
+ enum zebra_dplane_result dp_res;
+ ifindex_t ifindex;
+
+ ns_id = dplane_ctx_get_ns_id(ctx);
+ dp_res = dplane_ctx_get_status(ctx);
+ op = dplane_ctx_get_op(ctx);
+ ifindex = dplane_ctx_get_ifindex(ctx);
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL || IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("Intf dplane ctx %p, op %s, ifindex (%u), result %s",
+ ctx, dplane_op2str(op), ifindex,
+ dplane_res2str(dp_res));
+
+ zns = zebra_ns_lookup(ns_id);
+ if (zns == NULL) {
+ /* No ns - deleted maybe? */
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: can't find zns id %u", __func__, ns_id);
+
+ goto done;
+ }
+
+ ifp = if_lookup_by_index_per_ns(zns, ifindex);
+ if (ifp == NULL) {
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: can't find ifp at nsid %u index %d",
+ __func__, ns_id, ifindex);
+
+ goto done;
+ }
+
+ switch (op) {
+ case DPLANE_OP_INTF_ADDR_ADD:
+ case DPLANE_OP_INTF_ADDR_DEL:
+ zebra_if_addr_update_ctx(ctx, ifp);
+ break;
+
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
+ zebra_if_update_ctx(ctx, ifp);
+ break;
+
+ case DPLANE_OP_INTF_NETCONFIG:
+ zebra_if_netconf_update_ctx(ctx, ifp);
+ break;
+
+ case DPLANE_OP_ROUTE_INSTALL:
+ case DPLANE_OP_ROUTE_UPDATE:
+ case DPLANE_OP_ROUTE_DELETE:
+ case DPLANE_OP_NH_DELETE:
+ case DPLANE_OP_NH_INSTALL:
+ case DPLANE_OP_NH_UPDATE:
+ case DPLANE_OP_ROUTE_NOTIFY:
+ case DPLANE_OP_LSP_INSTALL:
+ case DPLANE_OP_LSP_UPDATE:
+ case DPLANE_OP_LSP_DELETE:
+ case DPLANE_OP_LSP_NOTIFY:
+ case DPLANE_OP_PW_INSTALL:
+ case DPLANE_OP_PW_UNINSTALL:
+ case DPLANE_OP_SYS_ROUTE_ADD:
+ case DPLANE_OP_SYS_ROUTE_DELETE:
+ case DPLANE_OP_ADDR_INSTALL:
+ case DPLANE_OP_ADDR_UNINSTALL:
+ case DPLANE_OP_MAC_INSTALL:
+ case DPLANE_OP_MAC_DELETE:
+ case DPLANE_OP_NEIGH_INSTALL:
+ case DPLANE_OP_NEIGH_UPDATE:
+ case DPLANE_OP_NEIGH_DELETE:
+ case DPLANE_OP_NEIGH_IP_INSTALL:
+ case DPLANE_OP_NEIGH_IP_DELETE:
+ case DPLANE_OP_VTEP_ADD:
+ case DPLANE_OP_VTEP_DELETE:
+ case DPLANE_OP_RULE_ADD:
+ case DPLANE_OP_RULE_DELETE:
+ case DPLANE_OP_RULE_UPDATE:
+ case DPLANE_OP_NEIGH_DISCOVER:
+ case DPLANE_OP_BR_PORT_UPDATE:
+ case DPLANE_OP_NONE:
+ case DPLANE_OP_IPTABLE_ADD:
+ case DPLANE_OP_IPTABLE_DELETE:
+ case DPLANE_OP_IPSET_ADD:
+ case DPLANE_OP_IPSET_DELETE:
+ case DPLANE_OP_IPSET_ENTRY_ADD:
+ case DPLANE_OP_IPSET_ENTRY_DELETE:
+ case DPLANE_OP_NEIGH_TABLE_UPDATE:
+ case DPLANE_OP_GRE_SET:
+ break; /* should never hit here */
+ }
done:
- /* Free the context */
dplane_ctx_fini(&ctx);
-
- return ret;
}
/* Dump if address information to vty. */
@@ -1646,28 +1849,34 @@ static void ifs_dump_brief_vty_json(json_object *json, struct vrf *vrf)
}
}
-const char *zebra_protodown_rc_str(enum protodown_reasons protodown_rc,
- char *pd_buf, uint32_t pd_buf_len)
+const char *zebra_protodown_rc_str(uint32_t protodown_rc, char *pd_buf,
+ uint32_t pd_buf_len)
{
- bool first = true;
-
+ size_t len;
+
pd_buf[0] = '\0';
strlcat(pd_buf, "(", pd_buf_len);
- if (protodown_rc & ZEBRA_PROTODOWN_EVPN_STARTUP_DELAY) {
- if (first)
- first = false;
- else
- strlcat(pd_buf, ",", pd_buf_len);
- strlcat(pd_buf, "startup-delay", pd_buf_len);
- }
+ if (CHECK_FLAG(protodown_rc, ZEBRA_PROTODOWN_EXTERNAL))
+ strlcat(pd_buf, "external,", pd_buf_len);
- if (protodown_rc & ZEBRA_PROTODOWN_EVPN_UPLINK_DOWN) {
- if (!first)
- strlcat(pd_buf, ",", pd_buf_len);
- strlcat(pd_buf, "uplinks-down", pd_buf_len);
- }
+ if (CHECK_FLAG(protodown_rc, ZEBRA_PROTODOWN_EVPN_STARTUP_DELAY))
+ strlcat(pd_buf, "startup-delay,", pd_buf_len);
+
+ if (CHECK_FLAG(protodown_rc, ZEBRA_PROTODOWN_EVPN_UPLINK_DOWN))
+ strlcat(pd_buf, "uplinks-down,", pd_buf_len);
+
+ if (CHECK_FLAG(protodown_rc, ZEBRA_PROTODOWN_VRRP))
+ strlcat(pd_buf, "vrrp,", pd_buf_len);
+
+ if (CHECK_FLAG(protodown_rc, ZEBRA_PROTODOWN_SHARP))
+ strlcat(pd_buf, "sharp,", pd_buf_len);
+
+ len = strnlen(pd_buf, pd_buf_len);
+
+ /* Remove trailing comma */
+ if (pd_buf[len - 1] == ',')
+ pd_buf[len - 1] = '\0';
strlcat(pd_buf, ")", pd_buf_len);
@@ -1873,7 +2082,7 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp)
zebra_evpn_if_es_print(vty, NULL, zebra_if);
vty_out(vty, " protodown: %s %s\n",
- (zebra_if->flags & ZIF_FLAG_PROTODOWN) ? "on" : "off",
+ (ZEBRA_IF_IS_PROTODOWN(zebra_if)) ? "on" : "off",
if_is_protodown_applicable(ifp) ? "" : "(n/a)");
if (zebra_if->protodown_rc)
vty_out(vty, " protodown reasons: %s\n",
@@ -2224,7 +2433,7 @@ static void if_dump_vty_json(struct vty *vty, struct interface *ifp,
if (if_is_protodown_applicable(ifp)) {
json_object_string_add(
json_if, "protodown",
- (zebra_if->flags & ZIF_FLAG_PROTODOWN) ? "on" : "off");
+ (ZEBRA_IF_IS_PROTODOWN(zebra_if)) ? "on" : "off");
if (zebra_if->protodown_rc)
json_object_string_add(
json_if, "protodownReason",
@@ -2778,7 +2987,7 @@ int if_linkdetect(struct interface *ifp, bool detect)
/* Interface may come up after disabling link detection */
if (if_is_operative(ifp) && !if_was_operative)
- if_up(ifp);
+ if_up(ifp, true);
}
/* FIXME: Will defer status change forwarding if interface
does not come down! */
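
For illustration (not part of the change): the rewritten zebra_protodown_rc_str() appends a comma after each matched reason and trims the trailing one, so with both the uplinks-down and vrrp bits set a caller sees:

    char pd_buf[64]; /* arbitrary size for this sketch */

    zebra_protodown_rc_str(ZEBRA_PROTODOWN_EVPN_UPLINK_DOWN | ZEBRA_PROTODOWN_VRRP,
                           pd_buf, sizeof(pd_buf));
    /* pd_buf now holds "(uplinks-down,vrrp)" */
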
diff --git a/zebra/interface.h b/zebra/interface.h
index c19e494860..c6930ce816 100644
--- a/zebra/interface.h
+++ b/zebra/interface.h
@@ -308,14 +308,22 @@ enum zebra_if_flags {
/* Dataplane protodown-on */
ZIF_FLAG_PROTODOWN = (1 << 2),
+ /* Dataplane protodown-on Queued to the dplane */
+ ZIF_FLAG_SET_PROTODOWN = (1 << 3),
+ /* Dataplane protodown-off Queued to the dplane */
+ ZIF_FLAG_UNSET_PROTODOWN = (1 << 4),
/* LACP bypass state is set by the dataplane on a bond member
* and inherited by the bond (if one or more bond members are in
* a bypass state the bond is placed in a bypass state)
*/
- ZIF_FLAG_LACP_BYPASS = (1 << 3)
+ ZIF_FLAG_LACP_BYPASS = (1 << 5)
};
+#define ZEBRA_IF_IS_PROTODOWN(zif) ((zif)->flags & ZIF_FLAG_PROTODOWN)
+#define ZEBRA_IF_IS_PROTODOWN_ONLY_EXTERNAL(zif) \
+ ((zif)->protodown_rc == ZEBRA_PROTODOWN_EXTERNAL)
+
/* `zebra' daemon local interface structure. */
struct zebra_if {
/* back pointer to the interface */
@@ -403,7 +411,7 @@ struct zebra_if {
* in the dataplane. This results in a carrier/L1 down on the
* physical device.
*/
- enum protodown_reasons protodown_rc;
+ uint32_t protodown_rc;
/* list of zebra_mac entries using this interface as destination */
struct list *mac_list;
@@ -484,9 +492,9 @@ extern void if_nbr_ipv6ll_to_ipv4ll_neigh_update(struct interface *ifp,
struct in6_addr *address,
int add);
extern void if_nbr_ipv6ll_to_ipv4ll_neigh_del_all(struct interface *ifp);
-extern void if_delete_update(struct interface *ifp);
+extern void if_delete_update(struct interface **ifp);
extern void if_add_update(struct interface *ifp);
-extern void if_up(struct interface *);
+extern void if_up(struct interface *ifp, bool install_connected);
extern void if_down(struct interface *);
extern void if_refresh(struct interface *);
extern void if_flags_update(struct interface *, uint64_t);
@@ -497,7 +505,16 @@ extern void if_handle_vrf_change(struct interface *ifp, vrf_id_t vrf_id);
extern void zebra_if_update_link(struct interface *ifp, ifindex_t link_ifindex,
ns_id_t ns_id);
extern void zebra_if_update_all_links(struct zebra_ns *zns);
-extern void zebra_if_set_protodown(struct interface *ifp, bool down);
+/**
+ * Directly update entire protodown & reason code bitfield.
+ */
+extern int zebra_if_update_protodown_rc(struct interface *ifp, bool new_down,
+ uint32_t new_protodown_rc);
+/**
+ * Set protodown with single reason.
+ */
+extern int zebra_if_set_protodown(struct interface *ifp, bool down,
+ enum protodown_reasons new_reason);
extern int if_ip_address_install(struct interface *ifp, struct prefix *prefix,
const char *label, struct prefix *pp);
extern int if_ipv6_address_install(struct interface *ifp, struct prefix *prefix,
@@ -521,10 +538,9 @@ extern bool if_nhg_dependents_is_empty(const struct interface *ifp);
extern void vrf_add_update(struct vrf *vrfp);
extern void zebra_l2_map_slave_to_bond(struct zebra_if *zif, vrf_id_t vrf);
extern void zebra_l2_unmap_slave_from_bond(struct zebra_if *zif);
-extern const char *zebra_protodown_rc_str(enum protodown_reasons protodown_rc,
- char *pd_buf, uint32_t pd_buf_len);
-void zebra_if_addr_update_ctx(struct zebra_dplane_ctx *ctx);
-int zebra_if_netconf_update_ctx(struct zebra_dplane_ctx *ctx);
+extern const char *zebra_protodown_rc_str(uint32_t protodown_rc, char *pd_buf,
+ uint32_t pd_buf_len);
+void zebra_if_dplane_result(struct zebra_dplane_ctx *ctx);
#ifdef HAVE_PROC_NET_DEV
extern void ifstat_update_proc(void);
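
For illustration (not part of the change): zebra_if_set_protodown() ORs a single reason into (or clears it from) the interface's reason bitfield and queues the dplane update, while zebra_if_update_protodown_rc() replaces the whole bitfield at once. A hypothetical caller, doing what the ZAPI handler in zapi_msg.c does for sharpd:

    /* sketch: toggle protodown for one owner; other owners' bits are untouched */
    static void sharp_client_protodown(struct interface *ifp, bool down)
    {
            zebra_if_set_protodown(ifp, down, ZEBRA_PROTODOWN_SHARP);
    }
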
diff --git a/zebra/ioctl.c b/zebra/ioctl.c
index 9b6aaf1d85..a895ed9410 100644
--- a/zebra/ioctl.c
+++ b/zebra/ioctl.c
@@ -136,7 +136,7 @@ static int if_ioctl_ipv6(unsigned long request, caddr_t buffer)
void if_get_metric(struct interface *ifp)
{
#ifdef SIOCGIFMETRIC
- struct ifreq ifreq;
+ struct ifreq ifreq = {};
ifreq_set_name(&ifreq, ifp);
@@ -153,7 +153,7 @@ void if_get_metric(struct interface *ifp)
/* get interface MTU */
void if_get_mtu(struct interface *ifp)
{
- struct ifreq ifreq;
+ struct ifreq ifreq = {};
ifreq_set_name(&ifreq, ifp);
@@ -410,8 +410,8 @@ int if_unset_prefix_ctx(const struct zebra_dplane_ctx *ctx)
void if_get_flags(struct interface *ifp)
{
int ret;
- struct ifreq ifreqflags;
- struct ifreq ifreqdata;
+ struct ifreq ifreqflags = {};
+ struct ifreq ifreqdata = {};
ifreq_set_name(&ifreqflags, ifp);
ifreq_set_name(&ifreqdata, ifp);
diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c
index d84b0c1325..35f3274c65 100644
--- a/zebra/kernel_netlink.c
+++ b/zebra/kernel_netlink.c
@@ -94,6 +94,7 @@ static const struct message nlmsg_str[] = {{RTM_NEWROUTE, "RTM_NEWROUTE"},
{RTM_DELROUTE, "RTM_DELROUTE"},
{RTM_GETROUTE, "RTM_GETROUTE"},
{RTM_NEWLINK, "RTM_NEWLINK"},
+ {RTM_SETLINK, "RTM_SETLINK"},
{RTM_DELLINK, "RTM_DELLINK"},
{RTM_GETLINK, "RTM_GETLINK"},
{RTM_NEWADDR, "RTM_NEWADDR"},
@@ -209,6 +210,10 @@ int netlink_config_write_helper(struct vty *vty)
vty_out(vty, "zebra kernel netlink batch-tx-buf %u %u\n", size,
threshold);
+ if (if_netlink_frr_protodown_r_bit_is_set())
+ vty_out(vty, "zebra protodown reason-bit %u\n",
+ if_netlink_get_frr_protodown_r_bit());
+
return 0;
}
@@ -1011,6 +1016,18 @@ int netlink_parse_info(int (*filter)(struct nlmsghdr *, ns_id_t, int),
return err;
}
+ /*
+ * What is the right thing to do? The kernel
+ * is telling us that the dump request was interrupted
+ * and we more than likely are out of luck and have
+ * missed data from the kernel. At this point in time
+ * let's just note that this is happening.
+ */
+ if (h->nlmsg_flags & NLM_F_DUMP_INTR)
+ flog_err(
+ EC_ZEBRA_NETLINK_BAD_SEQUENCE,
+ "netlink recvmsg: The Dump request was interrupted");
+
/* OK we got netlink message. */
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug(
@@ -1491,6 +1508,11 @@ static enum netlink_msg_status nl_put_msg(struct nl_batch *bth,
case DPLANE_OP_INTF_NETCONFIG:
case DPLANE_OP_NONE:
return FRR_NETLINK_ERROR;
+
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
+ return netlink_put_intf_update_msg(bth, ctx);
}
return FRR_NETLINK_ERROR;
diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c
index ce1f17111b..d6ca92f54e 100644
--- a/zebra/kernel_socket.c
+++ b/zebra/kernel_socket.c
@@ -159,6 +159,9 @@ const struct message rtm_type_str[] = {{RTM_ADD, "RTM_ADD"},
#ifdef RTM_IFANNOUNCE
{RTM_IFANNOUNCE, "RTM_IFANNOUNCE"},
#endif /* RTM_IFANNOUNCE */
+#ifdef RTM_IEEE80211
+ {RTM_IEEE80211, "RTM_IEEE80211"},
+#endif
{0}};
static const struct message rtm_flag_str[] = {{RTF_UP, "UP"},
@@ -450,12 +453,13 @@ static int ifan_read(struct if_announcemsghdr *ifan)
if_get_metric(ifp);
if_add_update(ifp);
} else if (ifp != NULL && ifan->ifan_what == IFAN_DEPARTURE)
- if_delete_update(ifp);
-
- if_get_flags(ifp);
- if_get_mtu(ifp);
- if_get_metric(ifp);
+ if_delete_update(&ifp);
+ if (ifp) {
+ if_get_flags(ifp);
+ if_get_mtu(ifp);
+ if_get_metric(ifp);
+ }
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug("%s: interface %s index %d", __func__,
ifan->ifan_name, ifan->ifan_index);
@@ -722,10 +726,10 @@ int ifm_read(struct if_msghdr *ifm)
* will still behave correctly if run on a platform
* without
*/
- if_delete_update(ifp);
+ if_delete_update(&ifp);
}
#endif /* RTM_IFANNOUNCE */
- if (if_is_up(ifp)) {
+ if (ifp && if_is_up(ifp)) {
#if defined(__bsdi__)
if_kvm_get_mtu(ifp);
#else
@@ -735,14 +739,16 @@ int ifm_read(struct if_msghdr *ifm)
}
}
+ if (ifp) {
#ifdef HAVE_NET_RT_IFLIST
- ifp->stats = ifm->ifm_data;
+ ifp->stats = ifm->ifm_data;
#endif /* HAVE_NET_RT_IFLIST */
- ifp->speed = ifm->ifm_data.ifi_baudrate / 1000000;
+ ifp->speed = ifm->ifm_data.ifi_baudrate / 1000000;
- if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("%s: interface %s index %d", __func__, ifp->name,
- ifp->ifindex);
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s: interface %s index %d", __func__,
+ ifp->name, ifp->ifindex);
+ }
return 0;
}
@@ -1405,7 +1411,10 @@ static void kernel_read(struct thread *thread)
#endif /* RTM_IFANNOUNCE */
default:
if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("Unprocessed RTM_type: %d", rtm->rtm_type);
+ zlog_debug(
+ "Unprocessed RTM_type: %s(%d)",
+ lookup_msg(rtm_type_str, rtm->rtm_type, NULL),
+ rtm->rtm_type);
break;
}
}
@@ -1577,6 +1586,12 @@ void kernel_update_multi(struct dplane_ctx_q *ctx_list)
res = kernel_pbr_rule_update(ctx);
break;
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
+ res = kernel_intf_update(ctx);
+ break;
+
/* Ignore 'notifications' - no-op */
case DPLANE_OP_SYS_ROUTE_ADD:
case DPLANE_OP_SYS_ROUTE_DELETE:
diff --git a/zebra/rib.h b/zebra/rib.h
index c6f3528cec..c8abfaf023 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -54,8 +54,7 @@ struct rnh {
#define ZEBRA_NHT_CONNECTED 0x1
#define ZEBRA_NHT_DELETED 0x2
-#define ZEBRA_NHT_EXACT_MATCH 0x4
-#define ZEBRA_NHT_RESOLVE_VIA_DEFAULT 0x8
+#define ZEBRA_NHT_RESOLVE_VIA_DEFAULT 0x4
/* VRF identifier. */
vrf_id_t vrf_id;
diff --git a/zebra/rt.h b/zebra/rt.h
index 5e626928d9..4952c3eb1a 100644
--- a/zebra/rt.h
+++ b/zebra/rt.h
@@ -66,6 +66,9 @@ enum zebra_dplane_result kernel_neigh_update_ctx(struct zebra_dplane_ctx *ctx);
extern enum zebra_dplane_result
kernel_pbr_rule_update(struct zebra_dplane_ctx *ctx);
+extern enum zebra_dplane_result
+kernel_intf_update(struct zebra_dplane_ctx *ctx);
+
#endif /* !HAVE_NETLINK */
extern int kernel_neigh_update(int cmd, int ifindex, void *addr, char *lla,
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index c6423dce99..f2cf9122fa 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -2733,7 +2733,7 @@ netlink_put_route_update_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx)
*/
if (RSYSTEM_ROUTE(dplane_ctx_get_type(ctx))
&& !RSYSTEM_ROUTE(dplane_ctx_get_old_type(ctx)))
- netlink_batch_add_msg(
+ return netlink_batch_add_msg(
bth, ctx, netlink_delroute_msg_encoder,
true);
} else {
diff --git a/zebra/rt_netlink.h b/zebra/rt_netlink.h
index 0a79771708..b1af4b20e1 100644
--- a/zebra/rt_netlink.h
+++ b/zebra/rt_netlink.h
@@ -128,6 +128,8 @@ const char *af_type2str(int type);
const char *ifi_type2str(int type);
const char *rta_type2str(int type);
const char *rtm_type2str(int type);
+const char *ifla_pdr_type2str(int type);
+const char *ifla_info_type2str(int type);
const char *rtm_protocol2str(int type);
const char *rtm_scope2str(int type);
const char *rtm_rta2str(int type);
diff --git a/zebra/rt_socket.c b/zebra/rt_socket.c
index 006513ac9e..1f3f66a68e 100644
--- a/zebra/rt_socket.c
+++ b/zebra/rt_socket.c
@@ -282,6 +282,12 @@ static int kernel_rtm(int cmd, const struct prefix *p,
continue;
/* Note any unexpected status returns */
+ case ZEBRA_ERR_RTNOEXIST:
+ if (cmd != RTM_DELETE)
+ flog_err(EC_LIB_SYSTEM_CALL,
+ "%s: rtm_write() returned %d for command %d",
+ __func__, error, cmd);
+ break;
default:
flog_err(
EC_LIB_SYSTEM_CALL,
diff --git a/zebra/subdir.am b/zebra/subdir.am
index 77e0898d81..8cb1237c22 100644
--- a/zebra/subdir.am
+++ b/zebra/subdir.am
@@ -60,6 +60,7 @@ zebra_zebra_SOURCES = \
zebra/debug.c \
zebra/if_ioctl.c \
zebra/if_netlink.c \
+ zebra/if_socket.c \
zebra/if_sysctl.c \
zebra/interface.c \
zebra/ioctl.c \
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 1b6f37ec6a..fd475e4cee 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -1215,7 +1215,7 @@ static void zread_rnh_register(ZAPI_HANDLER_ARGS)
p.family);
return;
}
- rnh = zebra_add_rnh(&p, zvrf_id(zvrf), &exist);
+ rnh = zebra_add_rnh(&p, zvrf_id(zvrf), safi, &exist);
if (!rnh)
return;
@@ -1487,6 +1487,7 @@ static void zread_interface_set_protodown(ZAPI_HANDLER_ARGS)
ifindex_t ifindex;
struct interface *ifp;
char down;
+ enum protodown_reasons reason;
STREAM_GETL(msg, ifindex);
STREAM_GETC(msg, down);
@@ -1494,16 +1495,27 @@ static void zread_interface_set_protodown(ZAPI_HANDLER_ARGS)
/* set ifdown */
ifp = if_lookup_by_index_per_ns(zebra_ns_lookup(NS_DEFAULT), ifindex);
- if (ifp) {
- zlog_info("Setting interface %s (%u): protodown %s", ifp->name,
- ifindex, down ? "on" : "off");
- zebra_if_set_protodown(ifp, down);
- } else {
+ if (!ifp) {
zlog_warn(
"Cannot set protodown %s for interface %u; does not exist",
down ? "on" : "off", ifindex);
+
+ return;
+ }
+
+ switch (client->proto) {
+ case ZEBRA_ROUTE_VRRP:
+ reason = ZEBRA_PROTODOWN_VRRP;
+ break;
+ case ZEBRA_ROUTE_SHARP:
+ reason = ZEBRA_PROTODOWN_SHARP;
+ break;
+ default:
+ reason = 0;
+ break;
}
+ zebra_if_set_protodown(ifp, down, reason);
stream_failure:
return;
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 6de2be3ab8..d034c8f306 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -193,6 +193,9 @@ struct dplane_intf_info {
uint32_t metric;
uint32_t flags;
+ bool protodown;
+ bool pd_reason_val;
+
#define DPLANE_INTF_CONNECTED (1 << 0) /* Connected peer, p2p */
#define DPLANE_INTF_SECONDARY (1 << 1)
#define DPLANE_INTF_BROADCAST (1 << 2)
@@ -526,6 +529,9 @@ static struct zebra_dplane_globals {
_Atomic uint32_t dg_gre_set_in;
_Atomic uint32_t dg_gre_set_errors;
+ _Atomic uint32_t dg_intfs_in;
+ _Atomic uint32_t dg_intf_errors;
+
/* Dataplane pthread */
struct frr_pthread *dg_pthread;
@@ -760,6 +766,9 @@ static void dplane_ctx_free_internal(struct zebra_dplane_ctx *ctx)
case DPLANE_OP_NONE:
case DPLANE_OP_IPSET_ADD:
case DPLANE_OP_IPSET_DELETE:
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
break;
case DPLANE_OP_IPSET_ENTRY_ADD:
@@ -1073,6 +1082,16 @@ const char *dplane_op2str(enum dplane_op_e op)
case DPLANE_OP_INTF_NETCONFIG:
return "INTF_NETCONFIG";
+
+ case DPLANE_OP_INTF_INSTALL:
+ ret = "INTF_INSTALL";
+ break;
+ case DPLANE_OP_INTF_UPDATE:
+ ret = "INTF_UPDATE";
+ break;
+ case DPLANE_OP_INTF_DELETE:
+ ret = "INTF_DELETE";
+ break;
}
return ret;
@@ -1771,6 +1790,27 @@ void dplane_ctx_set_intf_metric(struct zebra_dplane_ctx *ctx, uint32_t metric)
ctx->u.intf.metric = metric;
}
+uint32_t dplane_ctx_get_intf_pd_reason_val(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.intf.pd_reason_val;
+}
+
+void dplane_ctx_set_intf_pd_reason_val(struct zebra_dplane_ctx *ctx, bool val)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.intf.pd_reason_val = val;
+}
+
+bool dplane_ctx_intf_is_protodown(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->u.intf.protodown;
+}
+
/* Is interface addr p2p? */
bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
{
@@ -2638,6 +2678,73 @@ done:
return ret;
}
+/**
+ * dplane_ctx_intf_init() - Initialize a context block for an interface update
+ *
+ * @ctx: Dataplane context to init
+ * @op: Operation being performed
+ * @ifp: Interface
+ *
+ * Return: Result status
+ */
+int dplane_ctx_intf_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
+ const struct interface *ifp)
+{
+ struct zebra_ns *zns;
+ struct zebra_if *zif;
+ int ret = EINVAL;
+ bool set_pdown, unset_pdown;
+
+ if (!ctx || !ifp)
+ goto done;
+
+ ctx->zd_op = op;
+ ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
+ ctx->zd_vrf_id = ifp->vrf->vrf_id;
+
+ strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
+ ctx->zd_ifindex = ifp->ifindex;
+
+ zns = zebra_ns_lookup(ifp->vrf->vrf_id);
+ dplane_ctx_ns_init(ctx, zns, false);
+
+
+ /* Copy over ifp info */
+ ctx->u.intf.metric = ifp->metric;
+ ctx->u.intf.flags = ifp->flags;
+
+ /* Copy over extra zebra info, if available */
+ zif = (struct zebra_if *)ifp->info;
+
+ if (zif) {
+ set_pdown = !!(zif->flags & ZIF_FLAG_SET_PROTODOWN);
+ unset_pdown = !!(zif->flags & ZIF_FLAG_UNSET_PROTODOWN);
+
+ if (zif->protodown_rc &&
+ ZEBRA_IF_IS_PROTODOWN_ONLY_EXTERNAL(zif) == false)
+ ctx->u.intf.pd_reason_val = true;
+
+ /*
+ * See if we have new protodown state to set, otherwise keep
+ * current state
+ */
+ if (set_pdown)
+ ctx->u.intf.protodown = true;
+ else if (unset_pdown)
+ ctx->u.intf.protodown = false;
+ else
+ ctx->u.intf.protodown = !!ZEBRA_IF_IS_PROTODOWN(zif);
+ }
+
+ dplane_ctx_ns_init(ctx, zns, (op == DPLANE_OP_INTF_UPDATE));
+ ctx->zd_is_update = (op == DPLANE_OP_INTF_UPDATE);
+
+ ret = AOK;
+
+done:
+ return ret;
+}
+
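A standalone sketch of the protodown intent resolution performed above: a pending set or unset request wins, otherwise the currently known state is carried through unchanged (the names here are illustrative, not FRR's):

#include <stdbool.h>

static bool resolve_protodown(bool pending_set, bool pending_unset,
			      bool currently_protodown)
{
	if (pending_set)
		return true;
	if (pending_unset)
		return false;

	/* No new request queued: keep the current state */
	return currently_protodown;
}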
/*
* Capture information for an LSP update in a dplane context.
*/
@@ -3824,6 +3931,85 @@ static enum zebra_dplane_result intf_addr_update_internal(
return result;
}
+/**
+ * dplane_intf_update_internal() - Helper for enqueuing interface changes
+ *
+ * @ifp: Interface where the change occurred
+ * @op: The operation to be enqueued
+ *
+ * Return: Result of the change
+ */
+static enum zebra_dplane_result
+dplane_intf_update_internal(const struct interface *ifp, enum dplane_op_e op)
+{
+ enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+ int ret = EINVAL;
+ struct zebra_dplane_ctx *ctx = NULL;
+
+ /* Obtain context block */
+ ctx = dplane_ctx_alloc();
+ if (!ctx) {
+ ret = ENOMEM;
+ goto done;
+ }
+
+ ret = dplane_ctx_intf_init(ctx, op, ifp);
+ if (ret == AOK)
+ ret = dplane_update_enqueue(ctx);
+
+done:
+ /* Update counter */
+ atomic_fetch_add_explicit(&zdplane_info.dg_intfs_in, 1,
+ memory_order_relaxed);
+
+ if (ret == AOK)
+ result = ZEBRA_DPLANE_REQUEST_QUEUED;
+ else {
+ atomic_fetch_add_explicit(&zdplane_info.dg_intf_errors, 1,
+ memory_order_relaxed);
+ if (ctx)
+ dplane_ctx_free(&ctx);
+ }
+
+ return result;
+}
+
+/*
+ * Enqueue an interface add for the dataplane.
+ */
+enum zebra_dplane_result dplane_intf_add(const struct interface *ifp)
+{
+ enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
+
+ if (ifp)
+ ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_INSTALL);
+ return ret;
+}
+
+/*
+ * Enqueue an interface update for the dataplane.
+ */
+enum zebra_dplane_result dplane_intf_update(const struct interface *ifp)
+{
+ enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
+
+ if (ifp)
+ ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_UPDATE);
+ return ret;
+}
+
+/*
+ * Enqueue an interface delete for the dataplane.
+ */
+enum zebra_dplane_result dplane_intf_delete(const struct interface *ifp)
+{
+ enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
+
+ if (ifp)
+ ret = dplane_intf_update_internal(ifp, DPLANE_OP_INTF_DELETE);
+ return ret;
+}
+
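The helper above follows the dataplane's usual accounting pattern: count the attempt unconditionally, and count an error plus free the context when the enqueue fails. A minimal standalone sketch of that pattern with C11 atomics (queueing is stubbed and the names are illustrative):

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct ctx {
	int op;
};

static _Atomic uint32_t g_intfs_in;
static _Atomic uint32_t g_intf_errors;

/* Stub standing in for the real enqueue; 0 means success */
static int enqueue(struct ctx *ctx)
{
	(void)ctx;
	return 0;
}

static bool intf_update_internal(int op)
{
	struct ctx *ctx = calloc(1, sizeof(*ctx));
	int ret = ctx ? 0 : ENOMEM;

	if (ret == 0) {
		ctx->op = op;
		/* On success the queue owns ctx from here on */
		ret = enqueue(ctx);
	}

	/* Count the attempt regardless of outcome */
	atomic_fetch_add_explicit(&g_intfs_in, 1, memory_order_relaxed);

	if (ret != 0) {
		atomic_fetch_add_explicit(&g_intf_errors, 1,
					  memory_order_relaxed);
		free(ctx);
		return false;
	}

	return true;
}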
/*
* Enqueue vxlan/evpn mac add (or update).
*/
@@ -5241,6 +5427,15 @@ static void kernel_dplane_log_detail(struct zebra_dplane_ctx *ctx)
dplane_ctx_get_netconf_mpls(ctx),
dplane_ctx_get_netconf_mcast(ctx));
break;
+
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
+ zlog_debug("Dplane intf %s, idx %u, protodown %d",
+ dplane_op2str(dplane_ctx_get_op(ctx)),
+ dplane_ctx_get_ifindex(ctx),
+ dplane_ctx_intf_is_protodown(ctx));
+ break;
}
}
@@ -5375,6 +5570,15 @@ static void kernel_dplane_handle_result(struct zebra_dplane_ctx *ctx)
&zdplane_info.dg_gre_set_errors, 1,
memory_order_relaxed);
break;
+
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
+ if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
+ atomic_fetch_add_explicit(&zdplane_info.dg_intf_errors,
+ 1, memory_order_relaxed);
+ break;
+
/* Ignore 'notifications' - no-op */
case DPLANE_OP_SYS_ROUTE_ADD:
case DPLANE_OP_SYS_ROUTE_DELETE:
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index 29555d5b56..334d440a2f 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -188,6 +188,11 @@ enum dplane_op_e {
/* Incoming interface config events */
DPLANE_OP_INTF_NETCONFIG,
+
+ /* Interface update */
+ DPLANE_OP_INTF_INSTALL,
+ DPLANE_OP_INTF_UPDATE,
+ DPLANE_OP_INTF_DELETE,
};
/*
@@ -480,6 +485,9 @@ dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx *ctx);
/* Accessors for interface information */
uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_intf_metric(struct zebra_dplane_ctx *ctx, uint32_t metric);
+uint32_t dplane_ctx_get_intf_pd_reason_val(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_intf_pd_reason_val(struct zebra_dplane_ctx *ctx, bool val);
+bool dplane_ctx_intf_is_protodown(const struct zebra_dplane_ctx *ctx);
/* Is interface addr p2p? */
bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_intf_set_connected(struct zebra_dplane_ctx *ctx);
@@ -677,6 +685,13 @@ enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
const struct connected *ifc);
/*
+ * Enqueue interface link changes for the dataplane.
+ */
+enum zebra_dplane_result dplane_intf_add(const struct interface *ifp);
+enum zebra_dplane_result dplane_intf_update(const struct interface *ifp);
+enum zebra_dplane_result dplane_intf_delete(const struct interface *ifp);
+
+/*
* Link layer operations for the dataplane.
*/
enum zebra_dplane_result dplane_neigh_ip_update(enum dplane_op_e op,
@@ -814,6 +829,10 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
int dplane_ctx_nexthop_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
struct nhg_hash_entry *nhe);
+/* Encode interface information into data plane context. */
+int dplane_ctx_intf_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
+ const struct interface *ifp);
+
/* Retrieve the limit on the number of pending, unprocessed updates. */
uint32_t dplane_get_in_queue_limit(void);
diff --git a/zebra/zebra_errors.c b/zebra/zebra_errors.c
index c3890f7220..7549a3d5c0 100644
--- a/zebra/zebra_errors.c
+++ b/zebra/zebra_errors.c
@@ -792,6 +792,15 @@ static struct log_ref ferr_zebra_err[] = {
.suggestion = "Ignore this error.",
},
{
+ .code = EC_ZEBRA_INTF_UPDATE_FAILURE,
+ .title =
+ "Zebra failed to update interface in the kernel",
+ .description =
+ "Zebra made an attempt to update an interface in the kernel, but it was not successful.",
+ .suggestion =
+ "Wait for Zebra to reattempt update.",
+ },
+ {
.code = END_FERR,
}
};
diff --git a/zebra/zebra_errors.h b/zebra/zebra_errors.h
index 540c6dd7d0..5164de09d6 100644
--- a/zebra/zebra_errors.h
+++ b/zebra/zebra_errors.h
@@ -136,6 +136,7 @@ enum zebra_log_refs {
EC_ZEBRA_ES_CREATE,
EC_ZEBRA_GRE_SET_UPDATE,
EC_ZEBRA_SRV6M_UNRELEASED_LOCATOR_CHUNK,
+ EC_ZEBRA_INTF_UPDATE_FAILURE,
};
void zebra_error_init(void);
diff --git a/zebra/zebra_evpn.c b/zebra/zebra_evpn.c
index 13b9cc2002..d9d21462fb 100644
--- a/zebra/zebra_evpn.c
+++ b/zebra/zebra_evpn.c
@@ -344,10 +344,10 @@ int zebra_evpn_add_macip_for_intf(struct interface *ifp,
for (ALL_LIST_ELEMENTS(ifp->connected, cnode, cnnode, c)) {
struct ipaddr ip;
- memset(&ip, 0, sizeof(struct ipaddr));
if (!CHECK_FLAG(c->conf, ZEBRA_IFC_REAL))
continue;
+ memset(&ip, 0, sizeof(struct ipaddr));
if (c->address->family == AF_INET) {
ip.ipa_type = IPADDR_V4;
memcpy(&(ip.ipaddr_v4), &(c->address->u.prefix4),
@@ -442,10 +442,8 @@ int zebra_evpn_gw_macip_add(struct interface *ifp, struct zebra_evpn *zevpn,
vxl = &zif->l2info.vxl;
- if (zebra_evpn_mac_gw_macip_add(ifp, zevpn, ip, &mac, macaddr,
- vxl->access_vlan, true)
- != 0)
- return -1;
+ zebra_evpn_mac_gw_macip_add(ifp, zevpn, ip, &mac, macaddr,
+ vxl->access_vlan, true);
return zebra_evpn_neigh_gw_macip_add(ifp, zevpn, ip, mac);
}
diff --git a/zebra/zebra_evpn_mac.c b/zebra/zebra_evpn_mac.c
index e38766bc6b..46d789bc34 100644
--- a/zebra/zebra_evpn_mac.c
+++ b/zebra/zebra_evpn_mac.c
@@ -1141,14 +1141,6 @@ int zebra_evpn_mac_del(struct zebra_evpn *zevpn, struct zebra_mac *mac)
sizeof(mac_buf)));
}
- /* If the MAC is freed before the neigh we will end up
- * with a stale pointer against the neigh
- */
- if (!list_isempty(mac->neigh_list))
- zlog_warn("%s: MAC %pEA flags 0x%x neigh list not empty %d",
- __func__, &mac->macaddr, mac->flags,
- listcount(mac->neigh_list));
-
/* force de-ref any ES entry linked to the MAC */
zebra_evpn_es_mac_deref_entry(mac);
@@ -1161,6 +1153,26 @@ int zebra_evpn_mac_del(struct zebra_evpn *zevpn, struct zebra_mac *mac)
/* Cancel auto recovery */
THREAD_OFF(mac->dad_mac_auto_recovery_timer);
+ /* If the MAC is freed before the neigh we will end up
+ * with a stale pointer against the neigh.
+ * The situation can arise when a MAC is in remote state
+ * and its associated neigh is in local state.
+ * zebra_evpn_cfg_cleanup() cleans up remote neighs and MACs.
+ * Instead of deleting the remote MAC, if its neigh list is
+ * non-empty (associated with local neighs), mark the MAC as AUTO.
+ */
+ if (!list_isempty(mac->neigh_list)) {
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug(
+ "MAC %pEA (flags 0x%x vni %u) has non-empty neigh list "
+ "count %u, mark MAC as AUTO",
+ &mac->macaddr, mac->flags, zevpn->vni,
+ listcount(mac->neigh_list));
+
+ SET_FLAG(mac->flags, ZEBRA_MAC_AUTO);
+ return 0;
+ }
+
list_delete(&mac->neigh_list);
/* Free the VNI hash entry and allocated memory. */
@@ -2452,11 +2464,12 @@ int zebra_evpn_del_local_mac(struct zebra_evpn *zevpn, struct zebra_mac *mac,
return 0;
}
-int zebra_evpn_mac_gw_macip_add(struct interface *ifp, struct zebra_evpn *zevpn,
- const struct ipaddr *ip,
- struct zebra_mac **macp,
- const struct ethaddr *macaddr, vlanid_t vlan_id,
- bool def_gw)
+void zebra_evpn_mac_gw_macip_add(struct interface *ifp,
+ struct zebra_evpn *zevpn,
+ const struct ipaddr *ip,
+ struct zebra_mac **macp,
+ const struct ethaddr *macaddr,
+ vlanid_t vlan_id, bool def_gw)
{
struct zebra_mac *mac;
ns_id_t local_ns_id = NS_DEFAULT;
@@ -2466,9 +2479,13 @@ int zebra_evpn_mac_gw_macip_add(struct interface *ifp, struct zebra_evpn *zevpn,
if (zvrf && zvrf->zns)
local_ns_id = zvrf->zns->ns_id;
- mac = zebra_evpn_mac_lookup(zevpn, macaddr);
- if (!mac)
- mac = zebra_evpn_mac_add(zevpn, macaddr);
+ if (!*macp) {
+ mac = zebra_evpn_mac_lookup(zevpn, macaddr);
+ if (!mac)
+ mac = zebra_evpn_mac_add(zevpn, macaddr);
+ *macp = mac;
+ } else
+ mac = *macp;
/* Set "local" forwarding info. */
zebra_evpn_mac_clear_fwd_info(mac);
@@ -2476,13 +2493,11 @@ int zebra_evpn_mac_gw_macip_add(struct interface *ifp, struct zebra_evpn *zevpn,
SET_FLAG(mac->flags, ZEBRA_MAC_AUTO);
if (def_gw)
SET_FLAG(mac->flags, ZEBRA_MAC_DEF_GW);
+ else
+ SET_FLAG(mac->flags, ZEBRA_MAC_SVI);
mac->fwd_info.local.ifindex = ifp->ifindex;
mac->fwd_info.local.ns_id = local_ns_id;
mac->fwd_info.local.vid = vlan_id;
-
- *macp = mac;
-
- return 0;
}
void zebra_evpn_mac_svi_del(struct interface *ifp, struct zebra_evpn *zevpn)
@@ -2535,10 +2550,7 @@ void zebra_evpn_mac_svi_add(struct interface *ifp, struct zebra_evpn *zevpn)
? true
: false;
- mac = NULL;
zebra_evpn_mac_gw_macip_add(ifp, zevpn, NULL, &mac, &macaddr, 0, false);
- if (mac)
- SET_FLAG(mac->flags, ZEBRA_MAC_SVI);
new_bgp_ready = zebra_evpn_mac_is_ready_for_bgp(mac->flags);
zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready,
diff --git a/zebra/zebra_evpn_mac.h b/zebra/zebra_evpn_mac.h
index d0bb18a5fc..b727ac1f98 100644
--- a/zebra/zebra_evpn_mac.h
+++ b/zebra/zebra_evpn_mac.h
@@ -124,8 +124,8 @@ struct zebra_mac {
/* List of neigh associated with this mac */
struct list *neigh_list;
- /* list of hosts pointing to this remote RMAC */
- struct host_rb_tree_entry host_rb;
+ /* List of nexthop associated with this RMAC */
+ struct list *nh_list;
/* Duplicate mac detection */
uint32_t dad_count;
@@ -278,11 +278,12 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf,
bool dp_static, struct zebra_mac *mac);
int zebra_evpn_del_local_mac(struct zebra_evpn *zevpn, struct zebra_mac *mac,
bool clear_static);
-int zebra_evpn_mac_gw_macip_add(struct interface *ifp, struct zebra_evpn *zevpn,
- const struct ipaddr *ip,
- struct zebra_mac **macp,
- const struct ethaddr *macaddr, vlanid_t vlan_id,
- bool def_gw);
+void zebra_evpn_mac_gw_macip_add(struct interface *ifp,
+ struct zebra_evpn *zevpn,
+ const struct ipaddr *ip,
+ struct zebra_mac **macp,
+ const struct ethaddr *macaddr,
+ vlanid_t vlan_id, bool def_gw);
void zebra_evpn_mac_svi_add(struct interface *ifp, struct zebra_evpn *zevpn);
void zebra_evpn_mac_svi_del(struct interface *ifp, struct zebra_evpn *zevpn);
void zebra_evpn_mac_ifp_del(struct interface *ifp);
diff --git a/zebra/zebra_evpn_mh.c b/zebra/zebra_evpn_mh.c
index 50eecd31d5..02eda4a438 100644
--- a/zebra/zebra_evpn_mh.c
+++ b/zebra/zebra_evpn_mh.c
@@ -113,10 +113,7 @@ static struct zebra_evpn_es_evi *zebra_evpn_es_evi_new(struct zebra_evpn_es *es,
es_evi->zevpn = zevpn;
/* insert into the EVPN-ESI rb tree */
- if (RB_INSERT(zebra_es_evi_rb_head, &zevpn->es_evi_rb_tree, es_evi)) {
- XFREE(MTYPE_ZES_EVI, es_evi);
- return NULL;
- }
+ RB_INSERT(zebra_es_evi_rb_head, &zevpn->es_evi_rb_tree, es_evi);
/* add to the ES's VNI list */
listnode_init(&es_evi->es_listnode, es_evi);
@@ -1776,10 +1773,7 @@ static struct zebra_evpn_es *zebra_evpn_es_new(const esi_t *esi)
esi_to_str(&es->esi, es->esi_str, sizeof(es->esi_str));
/* Add to rb_tree */
- if (RB_INSERT(zebra_es_rb_head, &zmh_info->es_rb_tree, es)) {
- XFREE(MTYPE_ZES, es);
- return NULL;
- }
+ RB_INSERT(zebra_es_rb_head, &zmh_info->es_rb_tree, es);
/* Initialise the ES-EVI list */
es->es_evi_list = list_new();
@@ -3463,11 +3457,13 @@ void zebra_evpn_mh_json(json_object *json)
if (zmh_info->protodown_rc) {
json_array = json_object_new_array();
- if (zmh_info->protodown_rc & ZEBRA_PROTODOWN_EVPN_STARTUP_DELAY)
+ if (CHECK_FLAG(zmh_info->protodown_rc,
+ ZEBRA_PROTODOWN_EVPN_STARTUP_DELAY))
json_object_array_add(
json_array,
json_object_new_string("startupDelay"));
- if (zmh_info->protodown_rc & ZEBRA_PROTODOWN_EVPN_UPLINK_DOWN)
+ if (CHECK_FLAG(zmh_info->protodown_rc,
+ ZEBRA_PROTODOWN_EVPN_UPLINK_DOWN))
json_object_array_add(
json_array,
json_object_new_string("uplinkDown"));
@@ -3623,10 +3619,10 @@ bool zebra_evpn_is_es_bond_member(struct interface *ifp)
void zebra_evpn_mh_update_protodown_bond_mbr(struct zebra_if *zif, bool clear,
const char *caller)
{
- bool old_protodown;
bool new_protodown;
- enum protodown_reasons old_protodown_rc = 0;
- enum protodown_reasons protodown_rc = 0;
+ uint32_t old_protodown_rc = 0;
+ uint32_t new_protodown_rc = 0;
+ uint32_t protodown_rc = 0;
if (!clear) {
struct zebra_if *bond_zif;
@@ -3635,32 +3631,23 @@ void zebra_evpn_mh_update_protodown_bond_mbr(struct zebra_if *zif, bool clear,
protodown_rc = bond_zif->protodown_rc;
}
- old_protodown = !!(zif->flags & ZIF_FLAG_PROTODOWN);
old_protodown_rc = zif->protodown_rc;
- zif->protodown_rc &= ~ZEBRA_PROTODOWN_EVPN_ALL;
- zif->protodown_rc |= (protodown_rc & ZEBRA_PROTODOWN_EVPN_ALL);
- new_protodown = !!zif->protodown_rc;
+ new_protodown_rc = (old_protodown_rc & ~ZEBRA_PROTODOWN_EVPN_ALL);
+ new_protodown_rc |= (protodown_rc & ZEBRA_PROTODOWN_EVPN_ALL);
+ new_protodown = !!new_protodown_rc;
- if (IS_ZEBRA_DEBUG_EVPN_MH_ES
- && (zif->protodown_rc != old_protodown_rc))
+ if (IS_ZEBRA_DEBUG_EVPN_MH_ES && (new_protodown_rc != old_protodown_rc))
zlog_debug(
"%s bond mbr %s protodown_rc changed; old 0x%x new 0x%x",
caller, zif->ifp->name, old_protodown_rc,
- zif->protodown_rc);
-
- if (old_protodown == new_protodown)
- return;
-
- if (new_protodown)
- zif->flags |= ZIF_FLAG_PROTODOWN;
- else
- zif->flags &= ~ZIF_FLAG_PROTODOWN;
+ new_protodown_rc);
- if (IS_ZEBRA_DEBUG_EVPN_MH_ES)
- zlog_debug("%s protodown %s", zif->ifp->name,
- new_protodown ? "on" : "off");
-
- zebra_if_set_protodown(zif->ifp, new_protodown);
+ if (zebra_if_update_protodown_rc(zif->ifp, new_protodown,
+ new_protodown_rc) == 0) {
+ if (IS_ZEBRA_DEBUG_EVPN_MH_ES)
+ zlog_debug("%s protodown %s", zif->ifp->name,
+ new_protodown ? "on" : "off");
+ }
}
/* The bond members inherit the protodown reason code from the bond */
@@ -3683,7 +3670,7 @@ static void zebra_evpn_mh_update_protodown_es(struct zebra_evpn_es *es,
bool resync_dplane)
{
struct zebra_if *zif;
- enum protodown_reasons old_protodown_rc;
+ uint32_t old_protodown_rc;
zif = es->zif;
/* if the reason code is the same bail unless it is a new
@@ -3714,7 +3701,7 @@ static void zebra_evpn_mh_update_protodown_es(struct zebra_evpn_es *es,
static void zebra_evpn_mh_clear_protodown_es(struct zebra_evpn_es *es)
{
struct zebra_if *zif;
- enum protodown_reasons old_protodown_rc;
+ uint32_t old_protodown_rc;
zif = es->zif;
if (!(zif->protodown_rc & ZEBRA_PROTODOWN_EVPN_ALL))
@@ -3742,10 +3729,9 @@ static void zebra_evpn_mh_update_protodown_es_all(void)
zebra_evpn_mh_update_protodown_es(es, false /*resync_dplane*/);
}
-static void zebra_evpn_mh_update_protodown(enum protodown_reasons protodown_rc,
- bool set)
+static void zebra_evpn_mh_update_protodown(uint32_t protodown_rc, bool set)
{
- enum protodown_reasons old_protodown_rc = zmh_info->protodown_rc;
+ uint32_t old_protodown_rc = zmh_info->protodown_rc;
if (set) {
if ((protodown_rc & zmh_info->protodown_rc) == protodown_rc)
diff --git a/zebra/zebra_evpn_mh.h b/zebra/zebra_evpn_mh.h
index af6832092b..ce7b920de1 100644
--- a/zebra/zebra_evpn_mh.h
+++ b/zebra/zebra_evpn_mh.h
@@ -263,7 +263,7 @@ struct zebra_evpn_mh_info {
uint32_t uplink_oper_up_cnt;
/* These protodown bits are inherited by all ES bonds */
- enum protodown_reasons protodown_rc;
+ uint32_t protodown_rc;
};
/* returns TRUE if the EVPN is ready to be sent to BGP */
diff --git a/zebra/zebra_evpn_neigh.c b/zebra/zebra_evpn_neigh.c
index f557e66384..ed224151ba 100644
--- a/zebra/zebra_evpn_neigh.c
+++ b/zebra/zebra_evpn_neigh.c
@@ -1311,14 +1311,7 @@ int zebra_evpn_local_neigh_update(struct zebra_evpn *zevpn,
if (!n) {
/* New neighbor - create */
n = zebra_evpn_neigh_add(zevpn, ip, macaddr, zmac, 0);
- if (!n) {
- flog_err(
- EC_ZEBRA_MAC_ADD_FAILED,
- "Failed to add neighbor %pIA MAC %pEA intf %s(%u) -> VNI %u",
- ip, macaddr, ifp->name, ifp->ifindex,
- zevpn->vni);
- return -1;
- }
+
/* Set "local" forwarding info. */
SET_FLAG(n->flags, ZEBRA_NEIGH_LOCAL);
n->ifindex = ifp->ifindex;
@@ -2070,14 +2063,6 @@ void zebra_evpn_neigh_remote_macip_add(struct zebra_evpn *zevpn,
if (!n) {
n = zebra_evpn_neigh_add(zevpn, ipaddr, &mac->macaddr,
mac, 0);
- if (!n) {
- zlog_warn(
- "Failed to add Neigh %pIA MAC %pEA VNI %u Remote VTEP %pI4",
- ipaddr, &mac->macaddr, zevpn->vni,
- &vtep_ip);
- return;
- }
-
} else {
/* When host moves but changes its (MAC,IP)
* binding, BGP may install a MACIP entry that
@@ -2182,17 +2167,8 @@ int zebra_evpn_neigh_gw_macip_add(struct interface *ifp,
assert(mac);
n = zebra_evpn_neigh_lookup(zevpn, ip);
- if (!n) {
+ if (!n)
n = zebra_evpn_neigh_add(zevpn, ip, &mac->macaddr, mac, 0);
- if (!n) {
- flog_err(
- EC_ZEBRA_MAC_ADD_FAILED,
- "Failed to add neighbor %pIA MAC %pEA intf %s(%u) -> VNI %u",
- ip, &mac->macaddr,
- ifp->name, ifp->ifindex, zevpn->vni);
- return -1;
- }
- }
/* Set "local" forwarding info. */
SET_FLAG(n->flags, ZEBRA_NEIGH_LOCAL);
@@ -2203,7 +2179,6 @@ int zebra_evpn_neigh_gw_macip_add(struct interface *ifp,
/* Only advertise in BGP if the knob is enabled */
if (advertise_gw_macip_enabled(zevpn)) {
- SET_FLAG(mac->flags, ZEBRA_MAC_DEF_GW);
SET_FLAG(n->flags, ZEBRA_NEIGH_DEF_GW);
/* Set Router flag (R-bit) */
if (ip->ipa_type == IPADDR_V6)
diff --git a/zebra/zebra_netns_id.c b/zebra/zebra_netns_id.c
index 81d610940d..739ba33036 100644
--- a/zebra/zebra_netns_id.c
+++ b/zebra/zebra_netns_id.c
@@ -136,7 +136,6 @@ static ns_id_t extract_nsid(struct nlmsghdr *nlh, char *buf)
ns_id_t ns_id = NS_UNKNOWN;
int offset = NETLINK_ALIGN(sizeof(struct nlmsghdr))
+ NETLINK_ALIGN(sizeof(struct rtgenmsg));
- int curr_length = offset;
void *tail = (void *)((char *)nlh + NETLINK_ALIGN(nlh->nlmsg_len));
struct nlattr *attr;
@@ -145,7 +144,6 @@ static ns_id_t extract_nsid(struct nlmsghdr *nlh, char *buf)
&& attr->nla_len >= sizeof(struct nlattr)
&& attr->nla_len <= NETLINK_NLATTR_LEN(tail, attr);
attr += NETLINK_ALIGN(attr->nla_len)) {
- curr_length += attr->nla_len;
if ((attr->nla_type & NLA_TYPE_MASK) == NETNSA_NSID) {
uint32_t *ptr = (uint32_t *)(attr);
diff --git a/zebra/zebra_netns_notify.c b/zebra/zebra_netns_notify.c
index 7cb1906895..af6046c9ad 100644
--- a/zebra/zebra_netns_notify.c
+++ b/zebra/zebra_netns_notify.c
@@ -179,7 +179,7 @@ static int zebra_ns_delete(char *name)
}
UNSET_FLAG(ifp->flags, IFF_UP);
- if_delete_update(ifp);
+ if_delete_update(&ifp);
}
ns = (struct ns *)vrf->ns_ctxt;
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index e1d28e1534..1b926dba5f 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -1966,7 +1966,7 @@ static int resolve_backup_nexthops(const struct nexthop *nexthop,
*/
static int nexthop_active(struct nexthop *nexthop, struct nhg_hash_entry *nhe,
const struct prefix *top, int type, uint32_t flags,
- uint32_t *pmtu)
+ uint32_t *pmtu, vrf_id_t vrf_id)
{
struct prefix p;
struct route_table *table;
@@ -2061,13 +2061,13 @@ static int nexthop_active(struct nexthop *nexthop, struct nhg_hash_entry *nhe,
return 1;
}
- if (top
- && ((top->family == AF_INET && top->prefixlen == IPV4_MAX_BITLEN
- && nexthop->gate.ipv4.s_addr == top->u.prefix4.s_addr)
- || (top->family == AF_INET6 && top->prefixlen == IPV6_MAX_BITLEN
- && memcmp(&nexthop->gate.ipv6, &top->u.prefix6,
- IPV6_MAX_BYTELEN)
- == 0))) {
+ if (top &&
+ ((top->family == AF_INET && top->prefixlen == IPV4_MAX_BITLEN &&
+ nexthop->gate.ipv4.s_addr == top->u.prefix4.s_addr) ||
+ (top->family == AF_INET6 && top->prefixlen == IPV6_MAX_BITLEN &&
+ memcmp(&nexthop->gate.ipv6, &top->u.prefix6, IPV6_MAX_BYTELEN) ==
+ 0)) &&
+ nexthop->vrf_id == vrf_id) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug(
" :%s: Attempting to install a max prefixlength route through itself",
@@ -2361,6 +2361,7 @@ static unsigned nexthop_active_check(struct route_node *rn,
const struct prefix *p, *src_p;
struct zebra_vrf *zvrf;
uint32_t mtu = 0;
+ vrf_id_t vrf_id;
srcdest_rnode_prefixes(rn, &p, &src_p);
@@ -2389,10 +2390,12 @@ static unsigned nexthop_active_check(struct route_node *rn,
goto skip_check;
}
+
+ vrf_id = zvrf_id(rib_dest_vrf(rib_dest_from_rnode(rn)));
switch (nexthop->type) {
case NEXTHOP_TYPE_IFINDEX:
- if (nexthop_active(nexthop, nhe, &rn->p, re->type,
- re->flags, &mtu))
+ if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
+ &mtu, vrf_id))
SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
else
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
@@ -2400,16 +2403,16 @@ static unsigned nexthop_active_check(struct route_node *rn,
case NEXTHOP_TYPE_IPV4:
case NEXTHOP_TYPE_IPV4_IFINDEX:
family = AFI_IP;
- if (nexthop_active(nexthop, nhe, &rn->p, re->type,
- re->flags, &mtu))
+ if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
+ &mtu, vrf_id))
SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
else
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
break;
case NEXTHOP_TYPE_IPV6:
family = AFI_IP6;
- if (nexthop_active(nexthop, nhe, &rn->p, re->type,
- re->flags, &mtu))
+ if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
+ &mtu, vrf_id))
SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
else
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
@@ -2419,8 +2422,8 @@ static unsigned nexthop_active_check(struct route_node *rn,
if (rn->p.family != AF_INET)
family = AFI_IP6;
- if (nexthop_active(nexthop, nhe, &rn->p, re->type,
- re->flags, &mtu))
+ if (nexthop_active(nexthop, nhe, &rn->p, re->type, re->flags,
+ &mtu, vrf_id))
SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
else
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
@@ -2990,6 +2993,9 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx)
case DPLANE_OP_INTF_ADDR_ADD:
case DPLANE_OP_INTF_ADDR_DEL:
case DPLANE_OP_INTF_NETCONFIG:
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
break;
}
diff --git a/zebra/zebra_ptm.c b/zebra/zebra_ptm.c
index 68e5c391cf..c28e251e3a 100644
--- a/zebra/zebra_ptm.c
+++ b/zebra/zebra_ptm.c
@@ -350,7 +350,7 @@ DEFUN (no_zebra_ptm_enable_if,
if (IS_ZEBRA_DEBUG_EVENT)
zlog_debug("%s: Bringing up interface %s",
__func__, ifp->name);
- if_up(ifp);
+ if_up(ifp, true);
}
}
@@ -553,7 +553,7 @@ static int zebra_ptm_handle_cbl_msg(void *arg, void *in_ctxt,
ifp->ptm_status = ZEBRA_PTM_STATUS_UP;
if (ifp->ptm_enable && if_is_no_ptm_operative(ifp)
&& send_linkup)
- if_up(ifp);
+ if_up(ifp, true);
} else if (!strcmp(cbl_str, ZEBRA_PTM_FAIL_STR)
&& (ifp->ptm_status != ZEBRA_PTM_STATUS_DOWN)) {
ifp->ptm_status = ZEBRA_PTM_STATUS_DOWN;
@@ -1163,7 +1163,7 @@ void zebra_ptm_reset_status(int ptm_disable)
zlog_debug(
"%s: Bringing up interface %s",
__func__, ifp->name);
- if_up(ifp);
+ if_up(ifp, true);
}
}
}
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index e376d4b2af..c6840a503c 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -4318,11 +4318,11 @@ static void rib_process_dplane_results(struct thread *thread)
case DPLANE_OP_INTF_ADDR_ADD:
case DPLANE_OP_INTF_ADDR_DEL:
- zebra_if_addr_update_ctx(ctx);
- break;
-
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
case DPLANE_OP_INTF_NETCONFIG:
- zebra_if_netconf_update_ctx(ctx);
+ zebra_if_dplane_result(ctx);
break;
/* Some op codes not handled here */
diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c
index 8ca25359be..7934a9d206 100644
--- a/zebra/zebra_rnh.c
+++ b/zebra/zebra_rnh.c
@@ -132,13 +132,13 @@ static void zebra_rnh_store_in_routing_table(struct rnh *rnh)
route_unlock_node(rn);
}
-struct rnh *zebra_add_rnh(struct prefix *p, vrf_id_t vrfid, bool *exists)
+struct rnh *zebra_add_rnh(struct prefix *p, vrf_id_t vrfid, safi_t safi,
+ bool *exists)
{
struct route_table *table;
struct route_node *rn;
struct rnh *rnh = NULL;
afi_t afi = family2afi(p->family);
- safi_t safi = SAFI_UNICAST;
if (IS_ZEBRA_DEBUG_NHT) {
struct vrf *vrf = vrf_lookup_by_id(vrfid);
@@ -345,7 +345,7 @@ void zebra_register_rnh_pseudowire(vrf_id_t vrf_id, struct zebra_pw *pw,
return;
addr2hostprefix(pw->af, &pw->nexthop, &nh);
- rnh = zebra_add_rnh(&nh, vrf_id, &exists);
+ rnh = zebra_add_rnh(&nh, vrf_id, SAFI_UNICAST, &exists);
if (!rnh)
return;
@@ -768,7 +768,7 @@ static void zebra_rnh_clear_nhc_flag(struct zebra_vrf *zvrf, afi_t afi,
* of a particular VRF and address-family or a specific prefix.
*/
void zebra_evaluate_rnh(struct zebra_vrf *zvrf, afi_t afi, int force,
- struct prefix *p, safi_t safi)
+ const struct prefix *p, safi_t safi)
{
struct route_table *rnh_table;
struct route_node *nrn;
@@ -802,13 +802,13 @@ void zebra_evaluate_rnh(struct zebra_vrf *zvrf, afi_t afi, int force,
}
}
-void zebra_print_rnh_table(vrf_id_t vrfid, afi_t afi, struct vty *vty,
- struct prefix *p)
+void zebra_print_rnh_table(vrf_id_t vrfid, afi_t afi, safi_t safi,
+ struct vty *vty, const struct prefix *p)
{
struct route_table *table;
struct route_node *rn;
- table = get_rnh_table(vrfid, afi, SAFI_UNICAST);
+ table = get_rnh_table(vrfid, afi, safi);
if (!table) {
if (IS_ZEBRA_DEBUG_NHT)
zlog_debug("print_rnhs: rnh table not found");
@@ -1169,15 +1169,17 @@ int zebra_send_rnh_update(struct rnh *rnh, struct zserv *client,
SET_FLAG(message, ZAPI_MESSAGE_SRTE);
stream_putl(s, message);
+ /*
+ * Put what we were told to match against
+ */
stream_putw(s, rnh->safi);
stream_putw(s, rn->p.family);
+ stream_putc(s, rn->p.prefixlen);
switch (rn->p.family) {
case AF_INET:
- stream_putc(s, rn->p.prefixlen);
stream_put_in_addr(s, &rn->p.u.prefix4);
break;
case AF_INET6:
- stream_putc(s, rn->p.prefixlen);
stream_put(s, &rn->p.u.prefix6, IPV6_MAX_BYTELEN);
break;
default:
@@ -1186,6 +1188,26 @@ int zebra_send_rnh_update(struct rnh *rnh, struct zserv *client,
__func__, rn->p.family);
goto failure;
}
+
+ /*
+ * What we matched against
+ */
+ stream_putw(s, rnh->resolved_route.family);
+ stream_putc(s, rnh->resolved_route.prefixlen);
+ switch (rnh->resolved_route.family) {
+ case AF_INET:
+ stream_put_in_addr(s, &rnh->resolved_route.u.prefix4);
+ break;
+ case AF_INET6:
+ stream_put(s, &rnh->resolved_route.u.prefix6, IPV6_MAX_BYTELEN);
+ break;
+ default:
+ flog_err(EC_ZEBRA_RNH_UNKNOWN_FAMILY,
+ "%s: Unknown family (%d) notification attempted",
+ __func__, rn->p.family);
+ goto failure;
+ }
+
if (srte_color)
stream_putl(s, srte_color);
@@ -1315,7 +1337,7 @@ static void print_rnh(struct route_node *rn, struct vty *vty)
vty_out(vty, "\n");
}
-static int zebra_cleanup_rnh_client(vrf_id_t vrf_id, afi_t afi,
+static int zebra_cleanup_rnh_client(vrf_id_t vrf_id, afi_t afi, safi_t safi,
struct zserv *client)
{
struct route_table *ntable;
@@ -1330,7 +1352,7 @@ static int zebra_cleanup_rnh_client(vrf_id_t vrf_id, afi_t afi,
zebra_route_string(client->proto), afi2str(afi));
}
- ntable = get_rnh_table(vrf_id, afi, SAFI_UNICAST);
+ ntable = get_rnh_table(vrf_id, afi, safi);
if (!ntable) {
zlog_debug("cleanup_rnh_client: rnh table not found");
return -1;
@@ -1355,9 +1377,14 @@ static int zebra_client_cleanup_rnh(struct zserv *client)
RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
zvrf = vrf->info;
if (zvrf) {
- zebra_cleanup_rnh_client(zvrf_id(zvrf), AFI_IP, client);
+ zebra_cleanup_rnh_client(zvrf_id(zvrf), AFI_IP,
+ SAFI_UNICAST, client);
+ zebra_cleanup_rnh_client(zvrf_id(zvrf), AFI_IP,
+ SAFI_MULTICAST, client);
+ zebra_cleanup_rnh_client(zvrf_id(zvrf), AFI_IP6,
+ SAFI_UNICAST, client);
zebra_cleanup_rnh_client(zvrf_id(zvrf), AFI_IP6,
- client);
+ SAFI_MULTICAST, client);
}
}
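
A simplified standalone sketch of the extended nexthop-update payload built above: the prefix the client asked zebra to track, followed by the prefix the lookup actually resolved to. The SAFI and SR-TE fields are omitted and the helpers are illustrative, not zebra's stream API:

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

struct pfx {
	uint16_t family;  /* AF_INET or AF_INET6 */
	uint8_t prefixlen;
	uint8_t addr[16]; /* 4 or 16 bytes used */
};

static size_t put_prefix(uint8_t *buf, const struct pfx *p)
{
	size_t alen = (p->family == AF_INET) ? 4 : 16;
	uint16_t fam = htons(p->family);
	size_t off = 0;

	memcpy(buf + off, &fam, sizeof(fam));
	off += sizeof(fam);
	buf[off++] = p->prefixlen;
	memcpy(buf + off, p->addr, alen);
	off += alen;

	return off;
}

static size_t encode_nh_update(uint8_t *buf, const struct pfx *requested,
			       const struct pfx *resolved)
{
	size_t off = 0;

	/* What the client was told to match against */
	off += put_prefix(buf + off, requested);
	/* What the lookup actually matched */
	off += put_prefix(buf + off, resolved);

	return off;
}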
diff --git a/zebra/zebra_rnh.h b/zebra/zebra_rnh.h
index 27c016ebe6..70eda725c4 100644
--- a/zebra/zebra_rnh.h
+++ b/zebra/zebra_rnh.h
@@ -31,7 +31,7 @@ extern "C" {
extern void zebra_rnh_init(void);
-extern struct rnh *zebra_add_rnh(struct prefix *p, vrf_id_t vrfid,
+extern struct rnh *zebra_add_rnh(struct prefix *p, vrf_id_t vrfid, safi_t safi,
bool *exists);
extern struct rnh *zebra_lookup_rnh(struct prefix *p, vrf_id_t vrfid,
safi_t safi);
@@ -44,9 +44,9 @@ extern void zebra_register_rnh_pseudowire(vrf_id_t, struct zebra_pw *, bool *);
extern void zebra_deregister_rnh_pseudowire(vrf_id_t, struct zebra_pw *);
extern void zebra_remove_rnh_client(struct rnh *rnh, struct zserv *client);
extern void zebra_evaluate_rnh(struct zebra_vrf *zvrf, afi_t afi, int force,
- struct prefix *p, safi_t safi);
-extern void zebra_print_rnh_table(vrf_id_t vrfid, afi_t afi, struct vty *vty,
- struct prefix *p);
+ const struct prefix *p, safi_t safi);
+extern void zebra_print_rnh_table(vrf_id_t vrfid, afi_t afi, safi_t safi,
+ struct vty *vty, const struct prefix *p);
extern int rnh_resolve_via_default(struct zebra_vrf *zvrf, int family);
diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h
index 63a61d5293..7aca91959c 100644
--- a/zebra/zebra_router.h
+++ b/zebra/zebra_router.h
@@ -68,19 +68,27 @@ enum multicast_mode {
* physical device.
*/
enum protodown_reasons {
+ /* A process outside of FRR's control protodowned the interface */
+ ZEBRA_PROTODOWN_EXTERNAL = (1 << 0),
/* On startup local ESs are held down for some time to
* allow the underlay to converge and EVPN routes to
* get learnt
*/
- ZEBRA_PROTODOWN_EVPN_STARTUP_DELAY = (1 << 0),
+ ZEBRA_PROTODOWN_EVPN_STARTUP_DELAY = (1 << 1),
/* If all the uplinks are down the switch has lost access
* to the VxLAN overlay and must shut down the access
* ports to allow servers to re-direct their traffic to
* other switches on the Ethernet Segment
*/
- ZEBRA_PROTODOWN_EVPN_UPLINK_DOWN = (1 << 1),
- ZEBRA_PROTODOWN_EVPN_ALL = (ZEBRA_PROTODOWN_EVPN_UPLINK_DOWN
- | ZEBRA_PROTODOWN_EVPN_STARTUP_DELAY)
+ ZEBRA_PROTODOWN_EVPN_UPLINK_DOWN = (1 << 2),
+ ZEBRA_PROTODOWN_EVPN_ALL = (ZEBRA_PROTODOWN_EVPN_UPLINK_DOWN |
+ ZEBRA_PROTODOWN_EVPN_STARTUP_DELAY),
+ ZEBRA_PROTODOWN_VRRP = (1 << 3),
+ /* This reason used exclusively for testing */
+ ZEBRA_PROTODOWN_SHARP = (1 << 4),
+ /* Just used to clear our fields on shutdown; external is not included */
+ ZEBRA_PROTODOWN_ALL = (ZEBRA_PROTODOWN_EVPN_ALL | ZEBRA_PROTODOWN_VRRP |
+ ZEBRA_PROTODOWN_SHARP)
};
#define ZEBRA_PROTODOWN_RC_STR_LEN 80
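
The reason codes are independent bits in a mask, and an interface stays protodowned as long as any reason remains set. A standalone sketch of that bookkeeping (the helper names are illustrative):

#include <stdbool.h>
#include <stdint.h>

enum pd_reason {
	PD_EXTERNAL     = (1 << 0), /* set by something outside FRR */
	PD_EVPN_STARTUP = (1 << 1),
	PD_EVPN_UPLINK  = (1 << 2),
	PD_VRRP         = (1 << 3),
	PD_SHARP        = (1 << 4), /* testing only */
};

struct intf_state {
	uint32_t pd_reasons;
};

static void pd_set(struct intf_state *s, uint32_t reason)
{
	s->pd_reasons |= reason;
}

static void pd_clear(struct intf_state *s, uint32_t reason)
{
	s->pd_reasons &= ~reason;
}

/* The interface is protodown while any reason bit is still set */
static bool pd_wanted(const struct intf_state *s)
{
	return s->pd_reasons != 0;
}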
diff --git a/zebra/zebra_script.c b/zebra/zebra_script.c
index 4087749fd7..9805390a6d 100644
--- a/zebra/zebra_script.c
+++ b/zebra/zebra_script.c
@@ -331,6 +331,9 @@ void lua_pushzebra_dplane_ctx(lua_State *L, const struct zebra_dplane_ctx *ctx)
case DPLANE_OP_ADDR_UNINSTALL:
case DPLANE_OP_INTF_ADDR_ADD:
case DPLANE_OP_INTF_ADDR_DEL:
+ case DPLANE_OP_INTF_INSTALL:
+ case DPLANE_OP_INTF_UPDATE:
+ case DPLANE_OP_INTF_DELETE:
break;
case DPLANE_OP_NEIGH_INSTALL:
case DPLANE_OP_NEIGH_UPDATE:
diff --git a/zebra/zebra_srte.c b/zebra/zebra_srte.c
index 7933ef66b1..c0f18dd091 100644
--- a/zebra/zebra_srte.c
+++ b/zebra/zebra_srte.c
@@ -117,16 +117,28 @@ static int zebra_sr_policy_notify_update_client(struct zebra_sr_policy *policy,
stream_putl(s, message);
stream_putw(s, SAFI_UNICAST);
+ /*
+ * The prefix is copied twice because the ZEBRA_NEXTHOP_UPDATE
+ * code was modified to send back both the prefix that was
+ * matched against and the prefix that was actually matched.
+ * There is no equivalent distinction here, so just send the
+ * same prefix twice.
+ */
switch (policy->endpoint.ipa_type) {
case IPADDR_V4:
stream_putw(s, AF_INET);
stream_putc(s, IPV4_MAX_BITLEN);
stream_put_in_addr(s, &policy->endpoint.ipaddr_v4);
+ stream_putw(s, AF_INET);
+ stream_putc(s, IPV4_MAX_BITLEN);
+ stream_put_in_addr(s, &policy->endpoint.ipaddr_v4);
break;
case IPADDR_V6:
stream_putw(s, AF_INET6);
stream_putc(s, IPV6_MAX_BITLEN);
stream_put(s, &policy->endpoint.ipaddr_v6, IPV6_MAX_BYTELEN);
+ stream_putw(s, AF_INET6);
+ stream_putc(s, IPV6_MAX_BITLEN);
+ stream_put(s, &policy->endpoint.ipaddr_v6, IPV6_MAX_BYTELEN);
break;
default:
flog_warn(EC_LIB_DEVELOPMENT,
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index bb16232118..22c65e3c0c 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -60,6 +60,7 @@
#include "northbound_cli.h"
#include "zebra/zebra_nb.h"
#include "zebra/kernel_netlink.h"
+#include "zebra/if_netlink.h"
#include "zebra/table_manager.h"
#include "zebra/zebra_script.h"
#include "zebra/rtadv.h"
@@ -1360,7 +1361,7 @@ static int do_show_ip_route(struct vty *vty, const char *vrf_name, afi_t afi,
DEFPY (show_ip_nht,
show_ip_nht_cmd,
- "show <ip$ipv4|ipv6$ipv6> <nht|import-check>$type [<A.B.C.D|X:X::X:X>$addr|vrf NAME$vrf_name [<A.B.C.D|X:X::X:X>$addr]|vrf all$vrf_all]",
+ "show <ip$ipv4|ipv6$ipv6> <nht|import-check>$type [<A.B.C.D|X:X::X:X>$addr|vrf NAME$vrf_name [<A.B.C.D|X:X::X:X>$addr]|vrf all$vrf_all] [mrib$mrib]",
SHOW_STR
IP_STR
IP6_STR
@@ -1371,11 +1372,13 @@ DEFPY (show_ip_nht,
VRF_CMD_HELP_STR
"IPv4 Address\n"
"IPv6 Address\n"
- VRF_ALL_CMD_HELP_STR)
+ VRF_ALL_CMD_HELP_STR
+ "Show Multicast (MRIB) NHT state\n")
{
afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
vrf_id_t vrf_id = VRF_DEFAULT;
struct prefix prefix, *p = NULL;
+ safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST;
if (vrf_all) {
struct vrf *vrf;
@@ -1384,8 +1387,8 @@ DEFPY (show_ip_nht,
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
if ((zvrf = vrf->info) != NULL) {
vty_out(vty, "\nVRF %s:\n", zvrf_name(zvrf));
- zebra_print_rnh_table(zvrf_id(zvrf), afi, vty,
- NULL);
+ zebra_print_rnh_table(zvrf_id(zvrf), afi, safi,
+ vty, NULL);
}
return CMD_SUCCESS;
}
@@ -1399,7 +1402,7 @@ DEFPY (show_ip_nht,
return CMD_WARNING;
}
- zebra_print_rnh_table(vrf_id, afi, vty, p);
+ zebra_print_rnh_table(vrf_id, afi, safi, vty, p);
return CMD_SUCCESS;
}
@@ -3976,6 +3979,7 @@ DEFUN (show_zebra,
ttable_rowseps(table, 0, BOTTOM, true, '-');
ttable_add_row(table, "OS|%s(%s)", cmd_system_get(), cmd_release_get());
+ ttable_add_row(table, "ECMP Maximum|%d", zrouter.multipath_num);
ttable_add_row(table, "v4 Forwarding|%s", ipforward() ? "On" : "Off");
ttable_add_row(table, "v6 Forwarding|%s",
ipforward_ipv6() ? "On" : "Off");
@@ -4356,6 +4360,31 @@ DEFUN_HIDDEN(no_zebra_kernel_netlink_batch_tx_buf,
return CMD_SUCCESS;
}
+DEFPY (zebra_protodown_bit,
+ zebra_protodown_bit_cmd,
+ "zebra protodown reason-bit (0-31)$bit",
+ ZEBRA_STR
+ "Protodown Configuration\n"
+ "Reason Bit used in the kernel for application\n"
+ "Reason Bit range\n")
+{
+ if_netlink_set_frr_protodown_r_bit(bit);
+ return CMD_SUCCESS;
+}
+
+DEFPY (no_zebra_protodown_bit,
+ no_zebra_protodown_bit_cmd,
+ "no zebra protodown reason-bit [(0-31)$bit]",
+ NO_STR
+ ZEBRA_STR
+ "Protodown Configuration\n"
+ "Reason Bit used in the kernel for setting protodown\n"
+ "Reason Bit Range\n")
+{
+ if_netlink_unset_frr_protodown_r_bit();
+ return CMD_SUCCESS;
+}
+
#endif /* HAVE_NETLINK */
DEFUN(ip_table_range, ip_table_range_cmd,
@@ -4561,6 +4590,8 @@ void zebra_vty_init(void)
#ifdef HAVE_NETLINK
install_element(CONFIG_NODE, &zebra_kernel_netlink_batch_tx_buf_cmd);
install_element(CONFIG_NODE, &no_zebra_kernel_netlink_batch_tx_buf_cmd);
+ install_element(CONFIG_NODE, &zebra_protodown_bit_cmd);
+ install_element(CONFIG_NODE, &no_zebra_protodown_bit_cmd);
#endif /* HAVE_NETLINK */
#ifdef HAVE_SCRIPTING
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index b6da445e38..13e1f63457 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -62,6 +62,7 @@ DEFINE_MTYPE_STATIC(ZEBRA, ZL3VNI, "L3 VNI hash");
DEFINE_MTYPE_STATIC(ZEBRA, L3VNI_MAC, "EVPN L3VNI MAC");
DEFINE_MTYPE_STATIC(ZEBRA, L3NEIGH, "EVPN Neighbor");
DEFINE_MTYPE_STATIC(ZEBRA, ZVXLAN_SG, "zebra VxLAN multicast group");
+DEFINE_MTYPE_STATIC(ZEBRA, EVPN_VTEP, "zebra VxLAN VTEP IP");
DEFINE_HOOK(zebra_rmac_update,
(struct zebra_mac * rmac, struct zebra_l3vni *zl3vni, bool delete,
@@ -198,6 +199,37 @@ static uint32_t rb_host_count(struct host_rb_tree_entry *hrbe)
return count;
}
+static int l3vni_rmac_nh_list_cmp(void *p1, void *p2)
+{
+ const struct ipaddr *vtep_ip1 = p1;
+ const struct ipaddr *vtep_ip2 = p2;
+
+ return !ipaddr_cmp(vtep_ip1, vtep_ip2);
+}
+
+static void l3vni_rmac_nh_free(struct ipaddr *vtep_ip)
+{
+ XFREE(MTYPE_EVPN_VTEP, vtep_ip);
+}
+
+static void l3vni_rmac_nh_list_nh_delete(struct zebra_l3vni *zl3vni,
+ struct zebra_mac *zrmac,
+ struct ipaddr *vtep_ip)
+{
+ struct listnode *node = NULL, *nnode = NULL;
+ struct ipaddr *vtep = NULL;
+
+ for (ALL_LIST_ELEMENTS(zrmac->nh_list, node, nnode, vtep)) {
+ if (ipaddr_cmp(vtep, vtep_ip) == 0)
+ break;
+ }
+
+ if (node) {
+ l3vni_rmac_nh_free(vtep);
+ list_delete_node(zrmac->nh_list, node);
+ }
+}
+
/*
* Print neighbors for all EVPN.
*/
@@ -366,35 +398,25 @@ static void zl3vni_print_nh(struct zebra_neigh *n, struct vty *vty,
static void zl3vni_print_rmac(struct zebra_mac *zrmac, struct vty *vty,
json_object *json)
{
- char buf1[ETHER_ADDR_STRLEN];
- char buf2[PREFIX_STRLEN];
- json_object *json_hosts = NULL;
- struct host_rb_entry *hle;
+ struct listnode *node = NULL;
+ struct ipaddr *vtep = NULL;
+ json_object *json_nhs = NULL;
if (!json) {
- vty_out(vty, "MAC: %s\n",
- prefix_mac2str(&zrmac->macaddr, buf1, sizeof(buf1)));
+ vty_out(vty, "MAC: %pEA\n", &zrmac->macaddr);
vty_out(vty, " Remote VTEP: %pI4\n",
&zrmac->fwd_info.r_vtep_ip);
- vty_out(vty, " Refcount: %d\n", rb_host_count(&zrmac->host_rb));
- vty_out(vty, " Prefixes:\n");
- RB_FOREACH (hle, host_rb_tree_entry, &zrmac->host_rb)
- vty_out(vty, " %pFX\n", &hle->p);
} else {
- json_hosts = json_object_new_array();
- json_object_string_add(
- json, "routerMac",
- prefix_mac2str(&zrmac->macaddr, buf1, sizeof(buf1)));
+ json_nhs = json_object_new_array();
+ json_object_string_addf(json, "routerMac", "%pEA",
+ &zrmac->macaddr);
json_object_string_addf(json, "vtepIp", "%pI4",
&zrmac->fwd_info.r_vtep_ip);
- json_object_int_add(json, "refCount",
- rb_host_count(&zrmac->host_rb));
- RB_FOREACH (hle, host_rb_tree_entry, &zrmac->host_rb)
- json_object_array_add(
- json_hosts,
- json_object_new_string(prefix2str(
- &hle->p, buf2, sizeof(buf2))));
- json_object_object_add(json, "prefixList", json_hosts);
+ for (ALL_LIST_ELEMENTS_RO(zrmac->nh_list, node, vtep)) {
+ json_object_array_add(json_nhs, json_object_new_stringf(
+ "%pIA", vtep));
+ }
+ json_object_object_add(json, "nexthops", json_nhs);
}
}
@@ -826,8 +848,7 @@ static int zvni_map_to_svi_ns(struct ns *ns,
struct interface **p_ifp = (struct interface **)_p_ifp;
struct zebra_if *zif;
- if (!in_param)
- return NS_WALK_STOP;
+ assert(in_param && p_ifp);
/* TODO: Optimize with a hash. */
for (rn = route_top(zns->if_table); rn; rn = route_next(rn)) {
@@ -842,8 +863,7 @@ static int zvni_map_to_svi_ns(struct ns *ns,
vl = (struct zebra_l2info_vlan *)&zif->l2info.vl;
if (vl->vid == in_param->vid) {
- if (p_ifp)
- *p_ifp = tmp_if;
+ *p_ifp = tmp_if;
return NS_WALK_STOP;
}
}
@@ -1070,13 +1090,11 @@ static void zebra_evpn_vxlan_cleanup_all(struct hash_bucket *bucket, void *arg)
{
struct zebra_evpn *zevpn = NULL;
struct zebra_l3vni *zl3vni = NULL;
- struct zebra_vrf *zvrf = (struct zebra_vrf *)arg;
zevpn = (struct zebra_evpn *)bucket->data;
- /* remove from l3-vni list */
- if (zvrf->l3vni)
- zl3vni = zl3vni_lookup(zvrf->l3vni);
+ /* remove l2vni from l2vni's tenant-vrf l3-vni list */
+ zl3vni = zl3vni_from_vrf(zevpn->vrf_id);
if (zl3vni)
listnode_delete(zl3vni->l2vnis, zevpn);
@@ -1173,7 +1191,9 @@ static struct zebra_mac *zl3vni_rmac_add(struct zebra_l3vni *zl3vni,
zrmac = hash_get(zl3vni->rmac_table, &tmp_rmac, zl3vni_rmac_alloc);
assert(zrmac);
- RB_INIT(host_rb_tree_entry, &zrmac->host_rb);
+ zrmac->nh_list = list_new();
+ zrmac->nh_list->cmp = (int (*)(void *, void *))l3vni_rmac_nh_list_cmp;
+ zrmac->nh_list->del = (void (*)(void *))l3vni_rmac_nh_free;
SET_FLAG(zrmac->flags, ZEBRA_MAC_REMOTE);
SET_FLAG(zrmac->flags, ZEBRA_MAC_REMOTE_RMAC);
@@ -1187,14 +1207,9 @@ static struct zebra_mac *zl3vni_rmac_add(struct zebra_l3vni *zl3vni,
static int zl3vni_rmac_del(struct zebra_l3vni *zl3vni, struct zebra_mac *zrmac)
{
struct zebra_mac *tmp_rmac;
- struct host_rb_entry *hle;
- while (!RB_EMPTY(host_rb_tree_entry, &zrmac->host_rb)) {
- hle = RB_ROOT(host_rb_tree_entry, &zrmac->host_rb);
-
- RB_REMOVE(host_rb_tree_entry, &zrmac->host_rb, hle);
- XFREE(MTYPE_HOST_PREFIX, hle);
- }
+ /* free the nexthop list */
+ list_delete(&zrmac->nh_list);
tmp_rmac = hash_release(zl3vni->rmac_table, zrmac);
XFREE(MTYPE_L3VNI_MAC, tmp_rmac);
@@ -1295,10 +1310,10 @@ static int zl3vni_rmac_uninstall(struct zebra_l3vni *zl3vni,
/* handle rmac add */
static int zl3vni_remote_rmac_add(struct zebra_l3vni *zl3vni,
const struct ethaddr *rmac,
- const struct ipaddr *vtep_ip,
- const struct prefix *host_prefix)
+ const struct ipaddr *vtep_ip)
{
struct zebra_mac *zrmac = NULL;
+ struct ipaddr *vtep = NULL;
zrmac = zl3vni_rmac_lookup(zl3vni, rmac);
if (!zrmac) {
@@ -1307,13 +1322,18 @@ static int zl3vni_remote_rmac_add(struct zebra_l3vni *zl3vni,
zrmac = zl3vni_rmac_add(zl3vni, rmac);
if (!zrmac) {
zlog_debug(
- "Failed to add RMAC %pEA L3VNI %u Remote VTEP %pIA, prefix %pFX",
- rmac, zl3vni->vni, vtep_ip, host_prefix);
+ "Failed to add RMAC %pEA L3VNI %u Remote VTEP %pIA",
+ rmac, zl3vni->vni, vtep_ip);
return -1;
}
memset(&zrmac->fwd_info, 0, sizeof(zrmac->fwd_info));
zrmac->fwd_info.r_vtep_ip = vtep_ip->ipaddr_v4;
+ vtep = XCALLOC(MTYPE_EVPN_VTEP, sizeof(struct ipaddr));
+ memcpy(vtep, vtep_ip, sizeof(struct ipaddr));
+ if (!listnode_add_sort_nodup(zrmac->nh_list, (void *)vtep))
+ XFREE(MTYPE_EVPN_VTEP, vtep);
+
/* Send RMAC for FPM processing */
hook_call(zebra_rmac_update, zrmac, zl3vni, false,
"new RMAC added");
@@ -1324,18 +1344,21 @@ static int zl3vni_remote_rmac_add(struct zebra_l3vni *zl3vni,
&vtep_ip->ipaddr_v4)) {
if (IS_ZEBRA_DEBUG_VXLAN)
zlog_debug(
- "L3VNI %u Remote VTEP change(%pI4 -> %pIA) for RMAC %pEA, prefix %pFX",
+ "L3VNI %u Remote VTEP change(%pI4 -> %pIA) for RMAC %pEA",
zl3vni->vni, &zrmac->fwd_info.r_vtep_ip,
- vtep_ip, rmac, host_prefix);
+ vtep_ip, rmac);
zrmac->fwd_info.r_vtep_ip = vtep_ip->ipaddr_v4;
+ vtep = XCALLOC(MTYPE_EVPN_VTEP, sizeof(struct ipaddr));
+ memcpy(vtep, vtep_ip, sizeof(struct ipaddr));
+ if (!listnode_add_sort_nodup(zrmac->nh_list, (void *)vtep))
+ XFREE(MTYPE_EVPN_VTEP, vtep);
+
/* install rmac in kernel */
zl3vni_rmac_install(zl3vni, zrmac);
}
- rb_find_or_add_host(&zrmac->host_rb, host_prefix);
-
return 0;
}
@@ -1343,20 +1366,59 @@ static int zl3vni_remote_rmac_add(struct zebra_l3vni *zl3vni,
/* handle rmac delete */
static void zl3vni_remote_rmac_del(struct zebra_l3vni *zl3vni,
struct zebra_mac *zrmac,
- struct prefix *host_prefix)
+ struct ipaddr *vtep_ip)
{
- rb_delete_host(&zrmac->host_rb, host_prefix);
+ struct ipaddr ipv4_vtep;
- if (RB_EMPTY(host_rb_tree_entry, &zrmac->host_rb)) {
- /* uninstall from kernel */
- zl3vni_rmac_uninstall(zl3vni, zrmac);
+ if (!zl3vni_nh_lookup(zl3vni, vtep_ip)) {
+ memset(&ipv4_vtep, 0, sizeof(struct ipaddr));
+ ipv4_vtep.ipa_type = IPADDR_V4;
+ if (vtep_ip->ipa_type == IPADDR_V6)
+ ipv4_mapped_ipv6_to_ipv4(&vtep_ip->ipaddr_v6,
+ &ipv4_vtep.ipaddr_v4);
+ else
+ memcpy(&(ipv4_vtep.ipaddr_v4), &vtep_ip->ipaddr_v4,
+ sizeof(struct in_addr));
- /* Send RMAC for FPM processing */
- hook_call(zebra_rmac_update, zrmac, zl3vni, true,
- "RMAC deleted");
+ /* remove nh from rmac's list */
+ l3vni_rmac_nh_list_nh_delete(zl3vni, zrmac, &ipv4_vtep);
+ /* If the deleted nh is the same as the currently selected one,
+ * fall back to another entry present in the list.
+ */
+ if (IPV4_ADDR_SAME(&zrmac->fwd_info.r_vtep_ip,
+ &ipv4_vtep.ipaddr_v4) &&
+ listcount(zrmac->nh_list)) {
+ struct ipaddr *vtep;
+
+ vtep = listgetdata(listhead(zrmac->nh_list));
+ zrmac->fwd_info.r_vtep_ip = vtep->ipaddr_v4;
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug(
+ "L3VNI %u Remote VTEP nh change(%pIA -> %pI4) for RMAC %pEA",
+ zl3vni->vni, &ipv4_vtep,
+ &zrmac->fwd_info.r_vtep_ip,
+ &zrmac->macaddr);
+
+ /* install rmac in kernel */
+ zl3vni_rmac_install(zl3vni, zrmac);
+ }
- /* del the rmac entry */
- zl3vni_rmac_del(zl3vni, zrmac);
+ if (!listcount(zrmac->nh_list)) {
+ /* uninstall from kernel */
+ zl3vni_rmac_uninstall(zl3vni, zrmac);
+
+ /* Send RMAC for FPM processing */
+ hook_call(zebra_rmac_update, zrmac, zl3vni, true,
+ "RMAC deleted");
+
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug(
+ "L3VNI %u RMAC %pEA vtep_ip %pIA delete",
+ zl3vni->vni, &zrmac->macaddr, vtep_ip);
+
+ /* del the rmac entry */
+ zl3vni_rmac_del(zl3vni, zrmac);
+ }
}
}
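
The RMAC now keeps every remote VTEP nexthop in a sorted, duplicate-free list and, when the entry currently programmed in the kernel is removed, falls back to whatever remains. A small standalone sketch of the same idea using an array in place of FRR's linked list (names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_NH 8

struct rmac_state {
	uint32_t nh[MAX_NH]; /* remote VTEPs, kept sorted, no duplicates */
	int nh_count;
	uint32_t active;     /* VTEP currently programmed in the kernel */
};

/* Insert keeping the list sorted; duplicates are ignored */
static bool nh_add(struct rmac_state *r, uint32_t vtep)
{
	int i = 0;

	while (i < r->nh_count && r->nh[i] < vtep)
		i++;
	if (i < r->nh_count && r->nh[i] == vtep)
		return false; /* already known */
	if (r->nh_count == MAX_NH)
		return false;

	memmove(&r->nh[i + 1], &r->nh[i],
		(r->nh_count - i) * sizeof(uint32_t));
	r->nh[i] = vtep;
	r->nh_count++;

	return true;
}

/* Remove a VTEP; if it was the active one, fall back to another entry */
static void nh_del(struct rmac_state *r, uint32_t vtep)
{
	int i;

	for (i = 0; i < r->nh_count; i++)
		if (r->nh[i] == vtep)
			break;
	if (i == r->nh_count)
		return;

	memmove(&r->nh[i], &r->nh[i + 1],
		(r->nh_count - i - 1) * sizeof(uint32_t));
	r->nh_count--;

	if (r->active == vtep && r->nh_count > 0)
		r->active = r->nh[0]; /* reprogram with the fallback */
	/* with no entries left the RMAC itself would be uninstalled */
}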
@@ -2124,13 +2186,6 @@ static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni,
return 0;
zevpn = zebra_evpn_add(vni);
- if (!zevpn) {
- flog_err(EC_ZEBRA_VNI_ADD_FAILED,
- "Adding L2-VNI - Failed to add VNI hash, VNI %u",
- vni);
-
- return -1;
- }
/* Find bridge interface for the VNI */
vlan_if = zvni_map_to_svi(vxl->access_vlan,
@@ -2246,7 +2301,7 @@ void zebra_vxlan_evpn_vrf_route_add(vrf_id_t vrf_id, const struct ethaddr *rmac,
* add the rmac - remote rmac to be installed is against the ipv4
* nexthop address
*/
- zl3vni_remote_rmac_add(zl3vni, rmac, &ipv4_vtep, host_prefix);
+ zl3vni_remote_rmac_add(zl3vni, rmac, &ipv4_vtep);
}
/* handle evpn vrf route delete */
@@ -2273,8 +2328,7 @@ void zebra_vxlan_evpn_vrf_route_del(vrf_id_t vrf_id,
/* delete the rmac entry */
if (zrmac)
- zl3vni_remote_rmac_del(zl3vni, zrmac, host_prefix);
-
+ zl3vni_remote_rmac_del(zl3vni, zrmac, vtep_ip);
}
void zebra_vxlan_print_specific_rmac_l3vni(struct vty *vty, vni_t l3vni,
@@ -2308,7 +2362,7 @@ void zebra_vxlan_print_specific_rmac_l3vni(struct vty *vty, vni_t l3vni,
vty_out(vty, "{}\n");
else
vty_out(vty,
- "%% Requested RMAC doesn't exist in L3-VNI %u",
+ "%% Requested RMAC doesn't exist in L3-VNI %u\n",
l3vni);
return;
}
@@ -5171,16 +5225,8 @@ int zebra_vxlan_if_add(struct interface *ifp)
/* Create or update EVPN hash. */
zevpn = zebra_evpn_lookup(vni);
- if (!zevpn) {
+ if (!zevpn)
zevpn = zebra_evpn_add(vni);
- if (!zevpn) {
- flog_err(
- EC_ZEBRA_VNI_ADD_FAILED,
- "Failed to add EVPN hash, IF %s(%u) VNI %u",
- ifp->name, ifp->ifindex, vni);
- return -1;
- }
- }
if (zevpn->local_vtep_ip.s_addr != vxl->vtep_ip.s_addr ||
zevpn->mcast_grp.s_addr != vxl->mcast_grp.s_addr) {