-rw-r--r-- babeld/babeld.c | 6
-rw-r--r-- bgpd/bgp_attr.c | 9
-rw-r--r-- bgpd/bgp_bmp.c | 169
-rw-r--r-- bgpd/bgp_evpn.c | 529
-rw-r--r-- bgpd/bgp_evpn.h | 2
-rw-r--r-- bgpd/bgp_evpn_private.h | 16
-rw-r--r-- bgpd/bgp_evpn_vty.c | 52
-rw-r--r-- bgpd/bgp_fsm.c | 24
-rw-r--r-- bgpd/bgp_labelpool.c | 4
-rw-r--r-- bgpd/bgp_main.c | 2
-rw-r--r-- bgpd/bgp_mplsvpn.c | 42
-rw-r--r-- bgpd/bgp_mplsvpn.h | 1
-rw-r--r-- bgpd/bgp_nexthop.c | 4
-rw-r--r-- bgpd/bgp_packet.c | 2
-rw-r--r-- bgpd/bgp_route.c | 582
-rw-r--r-- bgpd/bgp_route.h | 50
-rw-r--r-- bgpd/bgp_rpki.c | 170
-rw-r--r-- bgpd/bgp_table.h | 11
-rw-r--r-- bgpd/bgp_updgrp.c | 10
-rw-r--r-- bgpd/bgp_updgrp_adv.c | 10
-rw-r--r-- bgpd/bgp_vty.c | 73
-rw-r--r-- bgpd/bgp_zebra.c | 70
-rw-r--r-- bgpd/bgp_zebra.h | 2
-rw-r--r-- bgpd/bgpd.c | 79
-rw-r--r-- bgpd/bgpd.h | 36
-rw-r--r-- doc/developer/grpc.rst | 1
-rw-r--r-- doc/developer/workflow.rst | 10
-rw-r--r-- doc/figures/releases.dot | 44
-rw-r--r-- doc/user/filter.rst | 27
-rw-r--r-- doc/user/pim.rst | 192
-rw-r--r-- doc/user/zebra.rst | 94
-rw-r--r-- include/linux/if_packet.h | 317
-rw-r--r-- isisd/isis_circuit.c | 37
-rw-r--r-- isisd/isis_cli.c | 16
-rw-r--r-- isisd/isis_nb.c | 8
-rw-r--r-- isisd/isis_nb.h | 4
-rw-r--r-- isisd/isis_pfpacket.c | 56
-rw-r--r-- lib/darr.c | 3
-rw-r--r-- lib/darr.h | 24
-rw-r--r-- lib/log.c | 2
-rw-r--r-- lib/mgmt_msg_native.h | 16
-rw-r--r-- lib/monotime.h | 16
-rw-r--r-- lib/northbound_cli.c | 13
-rw-r--r-- lib/northbound_cli.h | 3
-rw-r--r-- lib/plist.c | 4
-rw-r--r-- lib/vty.h | 1
-rw-r--r-- lib/zclient.h | 2
-rw-r--r-- mgmtd/mgmt_be_adapter.c | 6
-rw-r--r-- mgmtd/mgmt_fe_adapter.c | 2
-rw-r--r-- pathd/path_pcep_debug.c | 3
-rw-r--r-- pbrd/pbr_map.c | 8
-rw-r--r-- pimd/pim6_mld.c | 2
-rw-r--r-- pimd/pim_bsm.c | 10
-rw-r--r-- pimd/pim_bsr_rpdb.c | 4
-rw-r--r-- pimd/pim_cmd.c | 92
-rw-r--r-- pimd/pim_cmd_common.c | 152
-rw-r--r-- pimd/pim_iface.c | 42
-rw-r--r-- pimd/pim_iface.h | 6
-rw-r--r-- pimd/pim_igmp.c | 2
-rw-r--r-- pimd/pim_igmp_mtrace.c | 9
-rw-r--r-- pimd/pim_igmpv2.c | 3
-rw-r--r-- pimd/pim_igmpv3.c | 26
-rw-r--r-- pimd/pim_instance.c | 21
-rw-r--r-- pimd/pim_instance.h | 8
-rw-r--r-- pimd/pim_join.c | 14
-rw-r--r-- pimd/pim_mroute.c | 16
-rw-r--r-- pimd/pim_msdp.c | 85
-rw-r--r-- pimd/pim_msdp.h | 67
-rw-r--r-- pimd/pim_msdp_packet.c | 65
-rw-r--r-- pimd/pim_nb.c | 27
-rw-r--r-- pimd/pim_nb.h | 8
-rw-r--r-- pimd/pim_nb_config.c | 283
-rw-r--r-- pimd/pim_nht.c | 1314
-rw-r--r-- pimd/pim_nht.h | 104
-rw-r--r-- pimd/pim_register.c | 3
-rw-r--r-- pimd/pim_rp.c | 134
-rw-r--r-- pimd/pim_rp.h | 2
-rw-r--r-- pimd/pim_rpf.c | 142
-rw-r--r-- pimd/pim_rpf.h | 17
-rw-r--r-- pimd/pim_tib.c | 20
-rw-r--r-- pimd/pim_upstream.c | 37
-rw-r--r-- pimd/pim_upstream.h | 1
-rw-r--r-- pimd/pim_util.c | 102
-rw-r--r-- pimd/pim_util.h | 7
-rw-r--r-- pimd/pim_vty.c | 29
-rw-r--r-- pimd/pim_vxlan.c | 7
-rw-r--r-- pimd/pim_zebra.c | 1
-rw-r--r-- pimd/pim_zlookup.c | 99
-rw-r--r-- pimd/pim_zlookup.h | 1
-rw-r--r-- staticd/static_nht.c | 5
-rw-r--r-- tests/helpers/python/frrtest.py | 2
-rw-r--r-- tests/topotests/all_protocol_startup/r1/ipv4_routes.ref | 1
-rw-r--r-- tests/topotests/all_protocol_startup/r1/ipv6_routes.ref | 1
-rw-r--r-- tests/topotests/bfd_profiles_topo1/r2/bgpd.conf | 2
-rw-r--r-- tests/topotests/bfd_profiles_topo1/r3/bgpd.conf | 1
-rw-r--r-- tests/topotests/bfd_profiles_topo1/r4/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_addpath_best_selected/test_bgp_addpath_best_selected.py | 89
-rw-r--r-- tests/topotests/bgp_aggregate_address_topo1/r1/bgpd.conf | 2
-rw-r--r-- tests/topotests/bgp_aggregate_address_topo1/r2/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_aggregator_zero/r1/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_aspath_zero/r1/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_bmp/bgpbmp.py | 33
-rw-r--r-- tests/topotests/bgp_bmp/bmp1vrf/bmp-update-loc-rib-step1.json | 2
-rw-r--r-- tests/topotests/bgp_bmp/bmp1vrf/bmp-update-post-policy-step1.json | 8
-rw-r--r-- tests/topotests/bgp_bmp/bmp1vrf/bmp-update-pre-policy-step1.json | 8
-rw-r--r-- tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-loc-rib-step1.json | 2
-rw-r--r-- tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-post-policy-step1.json | 8
-rw-r--r-- tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-pre-policy-step1.json | 8
-rw-r--r-- tests/topotests/bgp_bmp/r1vrf/frr.conf | 2
-rw-r--r-- tests/topotests/bgp_bmp/test_bgp_bmp_1.py | 2
-rw-r--r-- tests/topotests/bgp_bmp/test_bgp_bmp_2.py | 4
-rw-r--r-- tests/topotests/bgp_ecmp_topo1/r1/bgpd.conf | 20
-rw-r--r-- tests/topotests/bgp_flowspec/r1/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_invalid_nexthop/r1/frr.conf | 1
-rwxr-xr-x tests/topotests/bgp_minimum_holdtime/test_bgp_minimum_holdtime.py | 2
-rw-r--r-- tests/topotests/bgp_multiview_topo1/r1/bgpd.conf | 8
-rw-r--r-- tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf | 6
-rw-r--r-- tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py | 2
-rw-r--r-- tests/topotests/bgp_path_attribute_discard/r1/frr.conf | 1
-rw-r--r-- tests/topotests/bgp_path_attribute_discard/r2/frr.conf | 1
-rw-r--r-- tests/topotests/bgp_path_attribute_discard/test_bgp_path_attribute_discard.py | 21
-rw-r--r-- tests/topotests/bgp_path_attribute_treat_as_withdraw/test_bgp_path_attribute_treat_as_withdraw.py | 21
-rw-r--r-- tests/topotests/bgp_peer_type_multipath_relax/r1/bgpd.conf | 8
-rw-r--r-- tests/topotests/bgp_peer_type_multipath_relax/r2/bgpd.conf | 4
-rw-r--r-- tests/topotests/bgp_prefix_sid/r1/bgpd.conf | 2
-rw-r--r-- tests/topotests/bgp_prefix_sid2/r1/bgpd.conf | 1
-rw-r--r-- tests/topotests/bgp_route_server_client/r1/bgpd.conf | 10
-rw-r--r-- tests/topotests/bgp_route_server_client/r2/bgpd.conf | 8
-rw-r--r-- tests/topotests/bgp_route_server_client/r3/bgpd.conf | 2
-rw-r--r-- tests/topotests/bgp_route_server_client/r4/bgpd.conf | 2
-rw-r--r-- tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json | 70
-rw-r--r-- tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py | 29
-rw-r--r-- tests/topotests/bgp_show_advertised_routes_detail/__init__.py | 0
-rw-r--r-- tests/topotests/bgp_show_advertised_routes_detail/r1/frr.conf | 13
-rw-r--r-- tests/topotests/bgp_show_advertised_routes_detail/r2/frr.conf | 29
-rw-r--r-- tests/topotests/bgp_show_advertised_routes_detail/r3/frr.conf | 11
-rw-r--r-- tests/topotests/bgp_show_advertised_routes_detail/test_bgp_show_advertised_routes_detail.py | 88
-rw-r--r-- tests/topotests/bgp_vpnv4_ebgp_vpn_auto/__init__.py | 0
-rw-r--r-- tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r1/frr.conf | 117
-rw-r--r-- tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r2/frr.conf | 88
-rw-r--r-- tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r3/frr.conf | 32
-rw-r--r-- tests/topotests/bgp_vpnv4_ebgp_vpn_auto/test_bgp_vpnv4_vpn_auto.py | 191
-rw-r--r-- tests/topotests/bgp_vpnv4_import_allowas_in/__init__.py | 0
-rw-r--r-- tests/topotests/bgp_vpnv4_import_allowas_in/r1/frr.conf | 30
-rw-r--r-- tests/topotests/bgp_vpnv4_import_allowas_in/r2/frr.conf | 40
-rw-r--r-- tests/topotests/bgp_vpnv4_import_allowas_in/test_bgp_vpnv4_import_allowas_in.py | 135
-rw-r--r-- tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py | 16
-rw-r--r-- tests/topotests/bgp_vrf_netns/r1/bgpd.conf | 1
-rw-r--r-- tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py | 12
-rw-r--r-- tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py | 15
-rw-r--r-- tests/topotests/mgmt_config/r1/early-end-zebra.conf | 2
-rw-r--r-- tests/topotests/mgmt_config/r1/early-end2-zebra.conf | 2
-rw-r--r-- tests/topotests/mgmt_config/r1/early-exit-zebra.conf | 2
-rw-r--r-- tests/topotests/mgmt_config/r1/early-exit2-zebra.conf | 2
-rw-r--r-- tests/topotests/mgmt_config/r1/one-exit-zebra.conf | 2
-rw-r--r-- tests/topotests/mgmt_config/r1/one-exit2-zebra.conf | 2
-rw-r--r-- tests/topotests/mgmt_config/test_config.py | 26
-rw-r--r-- tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json | 49
-rw-r--r-- tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json | 160
-rw-r--r-- tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json | 64
-rw-r--r-- tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json | 48
-rw-r--r-- tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json | 16
-rw-r--r-- tests/topotests/mgmt_oper/oper-results/result-lib.json | 96
-rw-r--r-- tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json | 8
-rw-r--r-- tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json | 46
-rw-r--r-- tests/topotests/mgmt_oper/oper-results/result-ribs-rib-route-nokey.json | 229
-rw-r--r-- tests/topotests/mgmt_oper/oper.py | 17
-rw-r--r-- tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim-empty-label.json | 3
-rw-r--r-- tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim.json | 1
-rw-r--r-- tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json | 4
-rw-r--r-- tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json | 8
-rw-r--r-- tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json | 4
-rw-r--r-- tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json | 4
-rw-r--r-- tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json | 4
-rw-r--r-- tests/topotests/mgmt_oper/simple-results/result-lib.json | 8
-rw-r--r-- tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json | 4
-rw-r--r-- tests/topotests/mgmt_oper/test_oper.py | 1
-rw-r--r-- tests/topotests/mgmt_oper/test_simple.py | 8
-rwxr-xr-x tests/topotests/msdp_topo1/test_msdp_topo1.py | 36
-rw-r--r-- tests/topotests/msdp_topo3/__init__.py | 0
-rw-r--r-- tests/topotests/msdp_topo3/r1/frr.conf | 31
-rw-r--r-- tests/topotests/msdp_topo3/r2/frr.conf | 28
-rw-r--r-- tests/topotests/msdp_topo3/test_msdp_topo3.py | 165
-rw-r--r-- tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json | 50
-rw-r--r-- tests/topotests/ospf_metric_propagation/r4/frr.conf | 5
-rw-r--r-- tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py | 19
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt | 1
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt | 2
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt | 1
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt | 2
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt | 3
-rw-r--r-- tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt | 2
-rw-r--r-- tests/topotests/ospf_netns_vrf/r1/zebraroute.txt | 3
-rw-r--r-- tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt | 3
-rw-r--r-- tests/topotests/ospf_netns_vrf/r2/zebraroute.txt | 3
-rw-r--r-- tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt | 3
-rw-r--r-- tests/topotests/ospf_netns_vrf/r3/zebraroute.txt | 3
-rw-r--r-- tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt | 3
-rw-r--r-- tests/topotests/pim_boundary_acl/r1/frr.conf | 39
-rw-r--r-- tests/topotests/pim_boundary_acl/r2/frr.conf | 19
-rw-r--r-- tests/topotests/pim_boundary_acl/r3/frr.conf | 13
-rw-r--r-- tests/topotests/pim_boundary_acl/rp/frr.conf | 22
-rw-r--r-- tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py | 523
-rwxr-xr-x tests/topotests/pim_mrib/__init__.py | 0
-rw-r--r-- tests/topotests/pim_mrib/r1/frr.conf | 28
-rw-r--r-- tests/topotests/pim_mrib/r2/frr.conf | 28
-rw-r--r-- tests/topotests/pim_mrib/r3/frr.conf | 28
-rw-r--r-- tests/topotests/pim_mrib/r4/frr.conf | 29
-rw-r--r-- tests/topotests/pim_mrib/test_pim_mrib.py | 328
-rwxr-xr-x tests/topotests/srv6_static_route/test_srv6_route.py | 2
-rw-r--r-- tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_down.json | 20
-rw-r--r-- tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_up.json | 21
-rw-r--r-- tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py | 47
-rw-r--r-- tools/etc/frr/support_bundle_commands.conf | 8
-rwxr-xr-x tools/frr-reload.py | 6
-rw-r--r-- tools/gen_northbound_callbacks.c | 15
-rw-r--r-- watchfrr/watchfrr.c | 2
-rw-r--r-- yang/frr-pim.yang | 80
-rw-r--r-- yang/frr-zebra.yang | 47
-rw-r--r-- zebra/interface.c | 2
-rw-r--r-- zebra/rib.h | 6
-rw-r--r-- zebra/zapi_msg.c | 70
-rw-r--r-- zebra/zebra_cli.c | 3
-rw-r--r-- zebra/zebra_evpn_mac.c | 15
-rw-r--r-- zebra/zebra_mpls.c | 2
-rw-r--r-- zebra/zebra_mroute.c | 2
-rw-r--r-- zebra/zebra_nb.c | 6
-rw-r--r-- zebra/zebra_nb_config.c | 17
-rw-r--r-- zebra/zebra_nhg.c | 24
-rw-r--r-- zebra/zebra_rib.c | 102
-rw-r--r-- zebra/zebra_router.c | 14
-rw-r--r-- zebra/zebra_router.h | 18
-rw-r--r-- zebra/zebra_srv6_vty.c | 4
-rw-r--r-- zebra/zebra_vty.c | 396
-rw-r--r-- zebra/zebra_vxlan.c | 7
-rw-r--r-- zebra/zserv.c | 11
241 files changed, 7404 insertions, 2836 deletions
diff --git a/babeld/babeld.c b/babeld/babeld.c
index b562f0b70c..1d2f60e3ad 100644
--- a/babeld/babeld.c
+++ b/babeld/babeld.c
@@ -304,6 +304,12 @@ void babel_clean_routing_process(void)
flush_all_routes();
babel_interface_close_all();
+ /* Clean babel config */
+ diversity_kind = DIVERSITY_NONE;
+ diversity_factor = BABEL_DEFAULT_DIVERSITY_FACTOR;
+ resend_delay = BABEL_DEFAULT_RESEND_DELAY;
+ change_smoothing_half_life(BABEL_DEFAULT_SMOOTHING_HALF_LIFE);
+
/* cancel events */
event_cancel(&babel_routing_process->t_read);
event_cancel(&babel_routing_process->t_update);
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index 2280aa9097..d349922c52 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -5406,7 +5406,14 @@ enum bgp_attr_parse_ret bgp_attr_ignore(struct peer *peer, uint8_t type)
lookup_msg(attr_str, type, NULL),
withdraw ? "treat-as-withdraw" : "discard");
- return withdraw ? BGP_ATTR_PARSE_WITHDRAW : BGP_ATTR_PARSE_PROCEED;
+ /* We don't increment stat_pfx_withdraw here, because it's done in
+ * bgp_update_receive().
+ */
+ if (withdraw)
+ return BGP_ATTR_PARSE_WITHDRAW;
+
+ peer->stat_pfx_discard++;
+ return BGP_ATTR_PARSE_PROCEED;
}
bool route_matches_soo(struct bgp_path_info *pi, struct ecommunity *soo)
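The change above splits the RFC 7606 accounting: a discarded attribute is counted at the point of decision, while treat-as-withdraw is counted once by the caller (bgp_update_receive(), per the added comment). A minimal standalone sketch of that counting split; the types and harness here are illustrative, not FRR's:

enum parse_ret { PARSE_PROCEED, PARSE_WITHDRAW };

struct peer_stats {
	unsigned long stat_pfx_discard;
	unsigned long stat_pfx_withdraw;
};

/* Decision point: only the "discard" outcome is counted here. */
static enum parse_ret attr_ignore(struct peer_stats *st, int withdraw)
{
	if (withdraw)
		return PARSE_WITHDRAW; /* caller counts this one */

	st->stat_pfx_discard++;
	return PARSE_PROCEED;
}

/* Single counting site for treat-as-withdraw, so it is never counted
 * twice when several attributes in one UPDATE trigger it. */
static void update_receive(struct peer_stats *st, int withdraw)
{
	if (attr_ignore(st, withdraw) == PARSE_WITHDRAW)
		st->stat_pfx_withdraw++;
}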
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
index 2e3a0388d0..acc49cac94 100644
--- a/bgpd/bgp_bmp.c
+++ b/bgpd/bgp_bmp.c
@@ -275,37 +275,33 @@ static inline int bmp_get_peer_type(struct peer *peer)
return bmp_get_peer_type_vrf(peer->bgp->vrf_id);
}
-static inline int bmp_get_peer_distinguisher(struct bmp *bmp, afi_t afi,
- uint8_t peer_type,
+static inline int bmp_get_peer_distinguisher(struct bgp *bgp, afi_t afi, uint8_t peer_type,
uint64_t *result_ref)
{
-
- /* remove this check when the other peer types get correct peer dist.
- *(RFC7854) impl.
- * for now, always return no error and 0 peer distinguisher as before
- */
- if (peer_type != BMP_PEER_TYPE_LOC_RIB_INSTANCE)
- return (*result_ref = 0);
+ /* use RD if set in VRF config */
+ struct prefix_rd *prd;
/* sending vrf_id or rd could be turned into an option at some point */
- struct bgp *bgp = bmp->targets->bgp;
+ if (peer_type == BMP_PEER_TYPE_LOCAL_INSTANCE || bgp->vrf_id == VRF_UNKNOWN)
+ return 1;
/* vrf default => ok, distinguisher 0 */
if (bgp->inst_type == VRF_DEFAULT)
return (*result_ref = 0);
- /* use RD if set in VRF config for this AFI */
- struct prefix_rd *prd = &bgp->vpn_policy[afi].tovpn_rd;
-
- if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_RD_SET)) {
+ prd = &bgp->vpn_policy[AFI_IP].tovpn_rd;
+ if ((afi == AFI_IP || afi == AFI_UNSPEC) &&
+ CHECK_FLAG(bgp->vpn_policy[AFI_IP].flags, BGP_VPN_POLICY_TOVPN_RD_SET)) {
memcpy(result_ref, prd->val, sizeof(prd->val));
return 0;
}
- /* VRF has no id => error => message should be skipped */
- if (bgp->vrf_id == VRF_UNKNOWN)
- return 1;
+ prd = &bgp->vpn_policy[AFI_IP6].tovpn_rd;
+ if ((afi == AFI_IP6 || afi == AFI_UNSPEC) &&
+ CHECK_FLAG(bgp->vpn_policy[AFI_IP6].flags, BGP_VPN_POLICY_TOVPN_RD_SET)) {
+ memcpy(result_ref, prd->val, sizeof(prd->val));
+ return 0;
+ }
/* use VRF id converted to ::vrf_id 64bits format */
*result_ref = ((uint64_t)htonl(bgp->vrf_id)) << 32;
@@ -467,13 +463,23 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down)
struct stream *s;
size_t len;
struct timeval uptime, uptime_real;
+ uint8_t peer_type;
+ bool is_locrib = false;
+ uint64_t peer_distinguisher = 0;
uptime.tv_sec = peer->uptime;
uptime.tv_usec = 0;
monotime_to_realtime(&uptime, &uptime_real);
- uint8_t peer_type = bmp_get_peer_type(peer);
- bool is_locrib = peer_type == BMP_PEER_TYPE_LOC_RIB_INSTANCE;
+ peer_type = bmp_get_peer_type(peer);
+ if (peer_type == BMP_PEER_TYPE_LOC_RIB_INSTANCE)
+ is_locrib = true;
+
+ if (bmp_get_peer_distinguisher(peer->bgp, AFI_UNSPEC, peer_type, &peer_distinguisher)) {
+ zlog_warn("skipping bmp message for peer %s: can't get peer distinguisher",
+ peer->host);
+ return NULL;
+ }
#define BGP_BMP_MAX_PACKET_SIZE 1024
#define BMP_PEERUP_INFO_TYPE_STRING 0
@@ -484,9 +490,7 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down)
bmp_common_hdr(s, BMP_VERSION_3,
BMP_TYPE_PEER_UP_NOTIFICATION);
- bmp_per_peer_hdr(s, peer->bgp, peer, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, 0,
- &uptime_real);
+ bmp_per_peer_hdr(s, peer->bgp, peer, 0, peer_type, peer_distinguisher, &uptime_real);
/* Local Address (16 bytes) */
if (is_locrib)
@@ -548,9 +552,7 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down)
bmp_common_hdr(s, BMP_VERSION_3,
BMP_TYPE_PEER_DOWN_NOTIFICATION);
- bmp_per_peer_hdr(s, peer->bgp, peer, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, 0,
- &uptime_real);
+ bmp_per_peer_hdr(s, peer->bgp, peer, 0, peer_type, peer_distinguisher, &uptime_real);
type_pos = stream_get_endp(s);
stream_putc(s, 0); /* placeholder for down reason */
@@ -604,8 +606,10 @@ static int bmp_send_peerup(struct bmp *bmp)
/* Walk down all peers */
for (ALL_LIST_ELEMENTS_RO(bmp->targets->bgp->peer, node, peer)) {
s = bmp_peerstate(peer, false);
- pullwr_write_stream(bmp->pullwr, s);
- stream_free(s);
+ if (s) {
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+ }
}
return 0;
@@ -622,10 +626,10 @@ static int bmp_send_peerup_vrf(struct bmp *bmp)
bmp_bgp_update_vrf_status(bmpbgp, vrf_state_unknown);
s = bmp_peerstate(bmpbgp->bgp->peer_self, bmpbgp->vrf_state == vrf_state_down);
-
- pullwr_write_stream(bmp->pullwr, s);
- stream_free(s);
-
+ if (s) {
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+ }
return 0;
}
@@ -636,6 +640,9 @@ static void bmp_send_all(struct bmp_bgp *bmpbgp, struct stream *s)
struct bmp_targets *bt;
struct bmp *bmp;
+ if (!s)
+ return;
+
frr_each(bmp_targets, &bmpbgp->targets, bt)
frr_each(bmp_session, &bt->sessions, bmp)
pullwr_write_stream(bmp->pullwr, s);
@@ -644,6 +651,9 @@ static void bmp_send_all(struct bmp_bgp *bmpbgp, struct stream *s)
static void bmp_send_all_safe(struct bmp_bgp *bmpbgp, struct stream *s)
{
+ if (!s)
+ return;
+
if (!bmpbgp) {
stream_free(s);
return;
@@ -771,14 +781,24 @@ static void bmp_wrmirror_lost(struct bmp *bmp, struct pullwr *pullwr)
{
struct stream *s;
struct timeval tv;
+ uint8_t peer_type_flag;
+ uint64_t peer_distinguisher = 0;
gettimeofday(&tv, NULL);
+ peer_type_flag = bmp_get_peer_type_vrf(bmp->targets->bgp->vrf_id);
+
+ if (bmp_get_peer_distinguisher(bmp->targets->bgp, AFI_UNSPEC, peer_type_flag,
+ &peer_distinguisher)) {
+ zlog_warn("skipping bmp message for reason: can't get peer distinguisher");
+ return;
+ }
+
s = stream_new(BGP_MAX_PACKET_SIZE);
bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING);
- bmp_per_peer_hdr(s, bmp->targets->bgp, bmp->targets->bgp->peer_self, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, 0, &tv);
+ bmp_per_peer_hdr(s, bmp->targets->bgp, bmp->targets->bgp->peer_self, 0, peer_type_flag,
+ peer_distinguisher, &tv);
stream_putw(s, BMP_MIRROR_TLV_TYPE_INFO);
stream_putw(s, 2);
@@ -795,6 +815,8 @@ static bool bmp_wrmirror(struct bmp *bmp, struct pullwr *pullwr)
struct bmp_mirrorq *bmq;
struct peer *peer;
bool written = false;
+ uint8_t peer_type_flag;
+ uint64_t peer_distinguisher = 0;
if (bmp->mirror_lost) {
bmp_wrmirror_lost(bmp, pullwr);
@@ -812,12 +834,20 @@ static bool bmp_wrmirror(struct bmp *bmp, struct pullwr *pullwr)
goto out;
}
+ peer_type_flag = bmp_get_peer_type_vrf(bmp->targets->bgp->vrf_id);
+
+ if (bmp_get_peer_distinguisher(peer->bgp, AFI_UNSPEC, peer_type_flag, &peer_distinguisher)) {
+ zlog_warn("skipping bmp message for peer %s: can't get peer distinguisher",
+ peer->host);
+ goto out;
+ }
+
struct stream *s;
s = stream_new(BGP_MAX_PACKET_SIZE);
bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING);
- bmp_per_peer_hdr(s, bmp->targets->bgp, peer, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, 0, &bmq->tv);
+ bmp_per_peer_hdr(s, bmp->targets->bgp, peer, 0, peer_type_flag, peer_distinguisher,
+ &bmq->tv);
/* BMP Mirror TLV. */
stream_putw(s, BMP_MIRROR_TLV_TYPE_BGP_MESSAGE);
@@ -967,8 +997,7 @@ static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags,
uint64_t peer_distinguisher = 0;
/* skip this message if peer distinguisher is not available */
- if (bmp_get_peer_distinguisher(bmp, afi, peer_type_flag,
- &peer_distinguisher)) {
+ if (bmp_get_peer_distinguisher(peer->bgp, afi, peer_type_flag, &peer_distinguisher)) {
zlog_warn(
"skipping bmp message for reason: can't get peer distinguisher");
continue;
@@ -1096,8 +1125,7 @@ static void bmp_monitor(struct bmp *bmp, struct peer *peer, uint8_t flags,
uint64_t peer_distinguisher = 0;
/* skip this message if peer distinguisher is not available */
- if (bmp_get_peer_distinguisher(bmp, afi, peer_type_flag,
- &peer_distinguisher)) {
+ if (bmp_get_peer_distinguisher(peer->bgp, afi, peer_type_flag, &peer_distinguisher)) {
zlog_warn(
"skipping bmp message for reason: can't get peer distinguisher");
return;
@@ -1131,6 +1159,7 @@ static bool bmp_wrsync(struct bmp *bmp, struct pullwr *pullwr)
uint8_t bpi_num_labels, adjin_num_labels;
afi_t afi;
safi_t safi;
+ uint8_t peer_type_flag;
if (bmp->syncafi == AFI_MAX) {
FOREACH_AFI_SAFI (afi, safi) {
@@ -1173,6 +1202,8 @@ afibreak:
struct bgp_path_info *bpi = NULL, *bpiter;
struct bgp_adj_in *adjin = NULL, *adjiter;
+ peer_type_flag = bmp_get_peer_type_vrf(bmp->targets->bgp->vrf_id);
+
if ((afi == AFI_L2VPN && safi == SAFI_EVPN) ||
(safi == SAFI_MPLS_VPN)) {
/* initialize syncrdpos to the first
@@ -1227,10 +1258,8 @@ afibreak:
bmp->remote, afi2str(afi),
safi2str(safi));
- bmp_eor(bmp, afi, safi, BMP_PEER_FLAG_L,
- BMP_PEER_TYPE_GLOBAL_INSTANCE);
- bmp_eor(bmp, afi, safi, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE);
+ bmp_eor(bmp, afi, safi, BMP_PEER_FLAG_L, peer_type_flag);
+ bmp_eor(bmp, afi, safi, 0, peer_type_flag);
bmp_eor(bmp, afi, safi, 0,
BMP_PEER_TYPE_LOC_RIB_INSTANCE);
@@ -1314,19 +1343,20 @@ afibreak:
bpi_num_labels);
}
+ if (bpi)
+ peer_type_flag = bmp_get_peer_type(bpi->peer);
+
if (bpi && CHECK_FLAG(bpi->flags, BGP_PATH_VALID) &&
CHECK_FLAG(bmp->targets->afimon[afi][safi], BMP_MON_POSTPOLICY))
- bmp_monitor(bmp, bpi->peer, BMP_PEER_FLAG_L,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, bn_p, prd, bpi->attr,
+ bmp_monitor(bmp, bpi->peer, BMP_PEER_FLAG_L, peer_type_flag, bn_p, prd, bpi->attr,
afi, safi, bpi->uptime,
- bpi_num_labels ? bpi->extra->labels->label : NULL,
- bpi_num_labels);
+ bpi_num_labels ? bpi->extra->labels->label : NULL, bpi_num_labels);
if (adjin) {
adjin_num_labels = adjin->labels ? adjin->labels->num_labels : 0;
- bmp_monitor(bmp, adjin->peer, 0, BMP_PEER_TYPE_GLOBAL_INSTANCE, bn_p, prd,
- adjin->attr, afi, safi, adjin->uptime,
- adjin_num_labels ? &adjin->labels->label[0] : NULL, adjin_num_labels);
+ bmp_monitor(bmp, adjin->peer, 0, peer_type_flag, bn_p, prd, adjin->attr, afi, safi,
+ adjin->uptime, adjin_num_labels ? &adjin->labels->label[0] : NULL,
+ adjin_num_labels);
}
if (bn)
@@ -1465,6 +1495,7 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
struct bgp_dest *bn = NULL;
bool written = false;
uint8_t bpi_num_labels, adjin_num_labels;
+ uint8_t peer_type_flag;
bqe = bmp_pull(bmp);
if (!bqe)
@@ -1505,6 +1536,8 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
bn = bgp_safi_node_lookup(bmp->targets->bgp->rib[afi][safi], safi,
&bqe->p, prd);
+ peer_type_flag = bmp_get_peer_type(peer);
+
if (CHECK_FLAG(bmp->targets->afimon[afi][safi], BMP_MON_POSTPOLICY)) {
struct bgp_path_info *bpi;
@@ -1518,12 +1551,9 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
bpi_num_labels = BGP_PATH_INFO_NUM_LABELS(bpi);
- bmp_monitor(bmp, peer, BMP_PEER_FLAG_L,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, &bqe->p, prd,
- bpi ? bpi->attr : NULL, afi, safi,
- bpi ? bpi->uptime : monotime(NULL),
- bpi_num_labels ? bpi->extra->labels->label : NULL,
- bpi_num_labels);
+ bmp_monitor(bmp, peer, BMP_PEER_FLAG_L, peer_type_flag, &bqe->p, prd,
+ bpi ? bpi->attr : NULL, afi, safi, bpi ? bpi->uptime : monotime(NULL),
+ bpi_num_labels ? bpi->extra->labels->label : NULL, bpi_num_labels);
written = true;
}
@@ -1536,9 +1566,8 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
break;
}
adjin_num_labels = adjin && adjin->labels ? adjin->labels->num_labels : 0;
- bmp_monitor(bmp, peer, 0, BMP_PEER_TYPE_GLOBAL_INSTANCE, &bqe->p, prd,
- adjin ? adjin->attr : NULL, afi, safi,
- adjin ? adjin->uptime : monotime(NULL),
+ bmp_monitor(bmp, peer, 0, peer_type_flag, &bqe->p, prd, adjin ? adjin->attr : NULL,
+ afi, safi, adjin ? adjin->uptime : monotime(NULL),
adjin_num_labels ? &adjin->labels->label[0] : NULL, adjin_num_labels);
written = true;
}
@@ -1704,6 +1733,8 @@ static void bmp_stats(struct event *thread)
struct peer *peer;
struct listnode *node;
struct timeval tv;
+ uint8_t peer_type_flag;
+ uint64_t peer_distinguisher = 0;
if (bt->stat_msec)
event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
@@ -1720,8 +1751,14 @@ static void bmp_stats(struct event *thread)
s = stream_new(BGP_MAX_PACKET_SIZE);
bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_STATISTICS_REPORT);
- bmp_per_peer_hdr(s, bt->bgp, peer, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, 0, &tv);
+ peer_type_flag = bmp_get_peer_type(peer);
+ if (bmp_get_peer_distinguisher(peer->bgp, AFI_UNSPEC, peer_type_flag,
+ &peer_distinguisher)) {
+ zlog_warn("skipping bmp message for peer %s: can't get peer distinguisher",
+ peer->host);
+ continue;
+ }
+ bmp_per_peer_hdr(s, bt->bgp, peer, 0, peer_type_flag, peer_distinguisher, &tv);
count_pos = stream_get_endp(s);
stream_putl(s, 0);
@@ -1736,8 +1773,7 @@ static void bmp_stats(struct event *thread)
peer->stat_pfx_cluster_loop);
bmp_stat_put_u32(s, &count, BMP_STATS_PFX_DUP_WITHDRAW,
peer->stat_pfx_dup_withdraw);
- bmp_stat_put_u32(s, &count, BMP_STATS_UPD_7606_WITHDRAW,
- peer->stat_upd_7606);
+ bmp_stat_put_u32(s, &count, BMP_STATS_UPD_7606_WITHDRAW, peer->stat_pfx_withdraw);
if (bt->stats_send_experimental)
bmp_stat_put_u32(s, &count, BMP_STATS_FRR_NH_INVALID,
peer->stat_pfx_nh_invalid);
@@ -2593,8 +2629,11 @@ DEFPY(bmp_connect,
}
ba = bmp_active_get(bt, hostname, port);
- if (srcif)
+ if (srcif) {
+ if (ba->ifsrc)
+ XFREE(MTYPE_TMP, ba->ifsrc);
ba->ifsrc = XSTRDUP(MTYPE_TMP, srcif);
+ }
if (min_retry_str)
ba->minretry = min_retry;
if (max_retry_str)
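Taken together, the bgp_bmp.c hunks stop hard-coding BMP_PEER_TYPE_GLOBAL_INSTANCE and resolve a per-peer type plus peer distinguisher everywhere (the last hunk also fixes a string leak on repeated "bmp connect ... source-interface"). A hedged sketch of the selection order the reworked bmp_get_peer_distinguisher() implements, using simplified stand-in types instead of FRR's structures:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define RD_BYTES 8

struct vrf_ctx {
	int local_instance;       /* BMP_PEER_TYPE_LOCAL_INSTANCE */
	int default_vrf;
	uint32_t vrf_id;          /* UINT32_MAX stands in for VRF_UNKNOWN */
	int rd_set;               /* per-AFI "rd vpn export" configured */
	uint8_t rd_val[RD_BYTES];
};

/* Returns nonzero when the BMP message must be skipped. */
static int peer_distinguisher(const struct vrf_ctx *v, uint64_t *out)
{
	if (v->local_instance || v->vrf_id == UINT32_MAX)
		return 1;                  /* no usable distinguisher */

	if (v->default_vrf) {
		*out = 0;                  /* default VRF => 0 */
		return 0;
	}

	if (v->rd_set) {                   /* configured export RD wins */
		memcpy(out, v->rd_val, RD_BYTES);
		return 0;
	}

	*out = ((uint64_t)htonl(v->vrf_id)) << 32; /* ::vrf_id encoding */
	return 0;
}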
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index f173bd01f2..488f635b81 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -79,6 +79,8 @@ static void bgp_evpn_remote_ip_hash_unlink_nexthop(struct hash_bucket *bucket,
void *args);
static struct in_addr zero_vtep_ip;
+static void bgp_evpn_local_l3vni_del_post_processing(struct bgp *bgp_vrf);
+
/*
* Private functions.
*/
@@ -1669,9 +1671,18 @@ static int update_evpn_type5_route_entry(struct bgp *bgp_evpn,
/* attribute changed */
*route_changed = 1;
+ /* if the asn values are different, copy the asn of
+ * source vrf to the target (evpn) vrf entry.
+ */
+ if (bgp_vrf->as != bgp_evpn->as) {
+ new_aspath = aspath_dup(static_attr.aspath);
+ new_aspath = aspath_add_seq(new_aspath, bgp_vrf->as);
+ static_attr.aspath = new_aspath;
+ }
/* The attribute has changed. */
/* Add (or update) attribute to hash. */
- attr_new = bgp_attr_intern(attr);
+ attr_new = bgp_attr_intern(&static_attr);
+ bgp_attr_flush(&static_attr);
bgp_path_info_set_flag(dest, tmp_pi,
BGP_PATH_ATTR_CHANGED);
@@ -3882,14 +3893,6 @@ int bgp_evpn_route_entry_install_if_vrf_match(struct bgp *bgp_vrf,
const struct prefix_evpn *evp =
(const struct prefix_evpn *)bgp_dest_get_prefix(pi->net);
- /* Consider "valid" remote routes applicable for
- * this VRF.
- */
- if (!(CHECK_FLAG(pi->flags, BGP_PATH_VALID)
- && pi->type == ZEBRA_ROUTE_BGP
- && pi->sub_type == BGP_ROUTE_NORMAL))
- return 0;
-
if (is_route_matching_for_vrf(bgp_vrf, pi)) {
if (bgp_evpn_route_rmac_self_check(bgp_vrf, evp, pi))
return 0;
@@ -3916,26 +3919,66 @@ int bgp_evpn_route_entry_install_if_vrf_match(struct bgp *bgp_vrf,
return ret;
}
+#define BGP_PROC_L3VNI_LIMIT 10
+static int install_uninstall_evpn_remote_route_per_l3vni(struct bgp_path_info *pi,
+ const struct prefix_evpn *evp)
+{
+ int ret = 0;
+ uint8_t vni_iter = 0;
+ bool is_install = false;
+ struct bgp *bgp_to_proc = NULL;
+ struct bgp *bgp_to_proc_next = NULL;
+
+ for (bgp_to_proc = zebra_l3_vni_first(&bm->zebra_l3_vni_head);
+ bgp_to_proc && vni_iter < BGP_PROC_L3VNI_LIMIT; bgp_to_proc = bgp_to_proc_next) {
+ bgp_to_proc_next = zebra_l3_vni_next(&bm->zebra_l3_vni_head, bgp_to_proc);
+ vni_iter++;
+ is_install = !!CHECK_FLAG(bgp_to_proc->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL);
+
+ ret = bgp_evpn_route_entry_install_if_vrf_match(bgp_to_proc, pi, is_install);
+ if (ret) {
+ flog_err(EC_BGP_EVPN_FAIL,
+ "%u: Failed to %s EVPN %s route in L3VNI %u during BP",
+ bgp_to_proc->vrf_id, is_install ? "install" : "uninstall",
+ bgp_evpn_route_type_str[evp->prefix.route_type].str,
+ bgp_to_proc->l3vni);
+ zebra_l3_vni_del(&bm->zebra_l3_vni_head, bgp_to_proc);
+ if (!is_install)
+ bgp_evpn_local_l3vni_del_post_processing(bgp_to_proc);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
/*
* Install or uninstall mac-ip routes are appropriate for this
* particular VRF.
*/
-static int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, bool install)
+int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, bool install)
{
afi_t afi;
safi_t safi;
struct bgp_dest *rd_dest, *dest;
struct bgp_table *table;
struct bgp_path_info *pi;
- int ret;
+ int ret = 0;
struct bgp *bgp_evpn = NULL;
+ uint8_t count = 0;
afi = AFI_L2VPN;
safi = SAFI_EVPN;
bgp_evpn = bgp_get_evpn();
- if (!bgp_evpn)
+ if (!bgp_evpn) {
+ zlog_warn("%s: No BGP EVPN instance found...", __func__);
+
return -1;
+ }
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Total %u L3VNI BGP-VRFs pending to be processed for remote route installation",
+ __func__, (uint32_t)zebra_l3_vni_count(&bm->zebra_l3_vni_head));
/* Walk entire global routing table and evaluate routes which could be
* imported into this VRF. Note that we need to loop through all global
* routes to determine which route matches the import rt on vrf
@@ -3952,30 +3995,109 @@ static int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, bool install)
(const struct prefix_evpn *)bgp_dest_get_prefix(
dest);
- /* if not mac-ip route skip this route */
- if (!(evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE
- || evp->prefix.route_type
- == BGP_EVPN_IP_PREFIX_ROUTE))
- continue;
-
- /* if not a mac+ip route skip this route */
- if (!(is_evpn_prefix_ipaddr_v4(evp)
- || is_evpn_prefix_ipaddr_v6(evp)))
+ /* Proceed only for MAC-IP and IP-Prefix routes */
+ switch (evp->prefix.route_type) {
+ case BGP_EVPN_MAC_IP_ROUTE:
+ case BGP_EVPN_IP_PREFIX_ROUTE:
+ if (!(is_evpn_prefix_ipaddr_v4(evp) ||
+ is_evpn_prefix_ipaddr_v6(evp)))
+ continue;
+ break;
+ case BGP_EVPN_AD_ROUTE:
+ case BGP_EVPN_IMET_ROUTE:
+ case BGP_EVPN_ES_ROUTE:
continue;
+ }
for (pi = bgp_dest_get_bgp_path_info(dest); pi;
pi = pi->next) {
- ret = bgp_evpn_route_entry_install_if_vrf_match(
- bgp_vrf, pi, install);
- if (ret) {
- bgp_dest_unlock_node(rd_dest);
- bgp_dest_unlock_node(dest);
- return ret;
+ /* Consider "valid" remote routes applicable for
+ * this VRF */
+ if (!(CHECK_FLAG(pi->flags, BGP_PATH_VALID) &&
+ pi->type == ZEBRA_ROUTE_BGP &&
+ pi->sub_type == BGP_ROUTE_NORMAL))
+ continue;
+
+ if (!bgp_vrf) {
+ ret = install_uninstall_evpn_remote_route_per_l3vni(pi, evp);
+ if (ret) {
+ bgp_dest_unlock_node(rd_dest);
+ bgp_dest_unlock_node(dest);
+
+ return ret;
+ }
+ } else {
+ ret = bgp_evpn_route_entry_install_if_vrf_match(bgp_vrf, pi,
+ install);
+ if (ret) {
+ flog_err(EC_BGP_EVPN_FAIL,
+ "%u: Failed to %s EVPN %s route in L3VNI %u",
+ bgp_vrf->vrf_id,
+ install ? "install" : "uninstall",
+ bgp_evpn_route_type_str[evp->prefix.route_type]
+ .str,
+ bgp_vrf->l3vni);
+ bgp_dest_unlock_node(rd_dest);
+ bgp_dest_unlock_node(dest);
+
+ return ret;
+ }
}
}
}
}
+ if (!bgp_vrf) {
+ while (count < BGP_PROC_L3VNI_LIMIT) {
+ struct bgp *bgp_to_proc = zebra_l3_vni_pop(&bm->zebra_l3_vni_head);
+
+ if (!bgp_to_proc)
+ return 0;
+
+ if (CHECK_FLAG(bgp_to_proc->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE))
+ bgp_evpn_local_l3vni_del_post_processing(bgp_to_proc);
+
+ UNSET_FLAG(bgp_to_proc->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL);
+ count++;
+ }
+ }
+
+ return 0;
+}
+
+#define BGP_PROC_L2VNI_LIMIT 10
+static int install_evpn_remote_route_per_l2vni(struct bgp *bgp, struct bgp_path_info *pi,
+ const struct prefix_evpn *evp)
+{
+ int ret = 0;
+ uint8_t vni_iter = 0;
+ struct bgpevpn *t_vpn = NULL;
+ struct bgpevpn *t_vpn_next = NULL;
+
+ for (t_vpn = zebra_l2_vni_first(&bm->zebra_l2_vni_head);
+ t_vpn && vni_iter < BGP_PROC_L2VNI_LIMIT; t_vpn = t_vpn_next) {
+ t_vpn_next = zebra_l2_vni_next(&bm->zebra_l2_vni_head, t_vpn);
+ vni_iter++;
+ /*
+	 * Skip install/uninstall if the route entry does not need to
+	 * be imported into the VNI, i.e. the RTs don't match
+ */
+ if (!is_route_matching_for_vni(bgp, t_vpn, pi))
+ continue;
+
+ ret = install_evpn_route_entry(bgp, t_vpn, evp, pi);
+
+ if (ret) {
+ flog_err(EC_BGP_EVPN_FAIL,
+ "%u: Failed to install EVPN %s route in VNI %u during BP",
+ bgp->vrf_id, bgp_evpn_route_type_str[evp->prefix.route_type].str,
+ t_vpn->vni);
+ zebra_l2_vni_del(&bm->zebra_l2_vni_head, t_vpn);
+
+ return ret;
+ }
+ }
+
return 0;
}
@@ -3983,26 +4105,40 @@ static int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, bool install)
* Install or uninstall routes of specified type that are appropriate for this
* particular VNI.
*/
-static int install_uninstall_routes_for_vni(struct bgp *bgp,
- struct bgpevpn *vpn, bool install)
+int install_uninstall_routes_for_vni(struct bgp *bgp, struct bgpevpn *vpn, bool install)
{
afi_t afi;
safi_t safi;
struct bgp_dest *rd_dest, *dest;
struct bgp_table *table;
struct bgp_path_info *pi;
- int ret;
+ int ret = 0;
+ uint8_t count = 0;
+ bool walk_fifo = false;
afi = AFI_L2VPN;
safi = SAFI_EVPN;
- /* Walk entire global routing table and evaluate routes which could be
+ if (!bgp) {
+ walk_fifo = true;
+ bgp = bgp_get_evpn();
+ if (!bgp) {
+ zlog_warn("%s: No BGP EVPN instance found...", __func__);
+
+ return -1;
+ }
+ }
+
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Total %u L2VNI VPNs pending to be processed for remote route installation",
+ __func__, (uint32_t)zebra_l2_vni_count(&bm->zebra_l2_vni_head));
+ /*
+ * Walk entire global routing table and evaluate routes which could be
* imported into this VPN. Note that we cannot just look at the routes
- * for
- * the VNI's RD - remote routes applicable for this VNI could have any
- * RD.
+ * for the VNI's RD - remote routes applicable for this VNI could have
+ * any RD.
+ * Note: EVPN routes are a 2-level table.
*/
- /* EVPN routes are a 2-level table. */
for (rd_dest = bgp_table_top(bgp->rib[afi][safi]); rd_dest;
rd_dest = bgp_route_next(rd_dest)) {
table = bgp_dest_get_bgp_table_info(rd_dest);
@@ -4015,54 +4151,80 @@ static int install_uninstall_routes_for_vni(struct bgp *bgp,
(const struct prefix_evpn *)bgp_dest_get_prefix(
dest);
- if (evp->prefix.route_type != BGP_EVPN_IMET_ROUTE &&
- evp->prefix.route_type != BGP_EVPN_AD_ROUTE &&
- evp->prefix.route_type != BGP_EVPN_MAC_IP_ROUTE)
+ /* Proceed only for AD, MAC_IP and IMET routes */
+ switch (evp->prefix.route_type) {
+ case BGP_EVPN_AD_ROUTE:
+ case BGP_EVPN_MAC_IP_ROUTE:
+ case BGP_EVPN_IMET_ROUTE:
+ break;
+ case BGP_EVPN_ES_ROUTE:
+ case BGP_EVPN_IP_PREFIX_ROUTE:
continue;
+ }
for (pi = bgp_dest_get_bgp_path_info(dest); pi;
pi = pi->next) {
- /* Consider "valid" remote routes applicable for
- * this VNI. */
- if (!(CHECK_FLAG(pi->flags, BGP_PATH_VALID)
- && pi->type == ZEBRA_ROUTE_BGP
- && pi->sub_type == BGP_ROUTE_NORMAL))
- continue;
-
- if (!is_route_matching_for_vni(bgp, vpn, pi))
+ /*
+ * Skip install/uninstall if
+				 * - Not a valid remote route
+				 * - Install & evpn route matches macvrf SOO
+ */
+ if (!(CHECK_FLAG(pi->flags, BGP_PATH_VALID) &&
+ pi->type == ZEBRA_ROUTE_BGP &&
+ pi->sub_type == BGP_ROUTE_NORMAL) ||
+ (install && bgp_evpn_route_matches_macvrf_soo(pi, evp)))
continue;
- if (install) {
- if (bgp_evpn_route_matches_macvrf_soo(
- pi, evp))
+ if (walk_fifo) {
+ ret = install_evpn_remote_route_per_l2vni(bgp, pi, evp);
+ if (ret) {
+ bgp_dest_unlock_node(rd_dest);
+ bgp_dest_unlock_node(dest);
+ return ret;
+ }
+ } else {
+ /*
+					 * Skip install/uninstall if the route
+					 * entry does not need to be imported
+					 * into the VNI, i.e. the RTs don't match
+ */
+ if (!is_route_matching_for_vni(bgp, vpn, pi))
continue;
- ret = install_evpn_route_entry(bgp, vpn,
- evp, pi);
- } else
- ret = uninstall_evpn_route_entry(
- bgp, vpn, evp, pi);
-
- if (ret) {
- flog_err(EC_BGP_EVPN_FAIL,
- "%u: Failed to %s EVPN %s route in VNI %u",
- bgp->vrf_id,
- install ? "install"
- : "uninstall",
- evp->prefix.route_type ==
- BGP_EVPN_MAC_IP_ROUTE
- ? "MACIP"
- : "IMET",
- vpn->vni);
-
- bgp_dest_unlock_node(rd_dest);
- bgp_dest_unlock_node(dest);
- return ret;
+ if (install)
+ ret = install_evpn_route_entry(bgp, vpn, evp, pi);
+ else
+ ret = uninstall_evpn_route_entry(bgp, vpn, evp, pi);
+
+ if (ret) {
+ flog_err(EC_BGP_EVPN_FAIL,
+ "%u: Failed to %s EVPN %s route in VNI %u",
+ bgp->vrf_id,
+ install ? "install" : "uninstall",
+ bgp_evpn_route_type_str[evp->prefix.route_type]
+ .str,
+ vpn->vni);
+
+ bgp_dest_unlock_node(rd_dest);
+ bgp_dest_unlock_node(dest);
+ return ret;
+ }
}
}
}
}
+ if (walk_fifo) {
+ while (count < BGP_PROC_L2VNI_LIMIT) {
+ vpn = zebra_l2_vni_pop(&bm->zebra_l2_vni_head);
+ if (!vpn)
+ return 0;
+
+ UNSET_FLAG(vpn->flags, VNI_FLAG_ADD);
+ count++;
+ }
+ }
+
return 0;
}
@@ -4212,9 +4374,7 @@ static int bgp_evpn_install_uninstall_table(struct bgp *bgp, afi_t afi,
assert(attr);
- /* Only type-1, type-2, type-3, type-4 and type-5
- * are supported currently
- */
+ /* Only EVPN route-types 1-5 are supported currently */
if (!(evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE
|| evp->prefix.route_type == BGP_EVPN_IMET_ROUTE
|| evp->prefix.route_type == BGP_EVPN_ES_ROUTE
@@ -4271,26 +4431,28 @@ static int bgp_evpn_install_uninstall_table(struct bgp *bgp, afi_t afi,
bgp_evpn_attr_get_esi(pi->attr));
/*
- * macip routes (type-2) are imported into VNI and VRF tables.
- * IMET route is imported into VNI table.
- * prefix routes are imported into VRF table.
+ * AD/IMET routes (type-1/3) are imported into VNI table.
+ * MACIP routes (type-2) are imported into VNI and VRF tables.
+ * Prefix routes (type 5) are imported into VRF table.
*/
if (evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE ||
evp->prefix.route_type == BGP_EVPN_IMET_ROUTE ||
evp->prefix.route_type == BGP_EVPN_AD_ROUTE ||
evp->prefix.route_type == BGP_EVPN_IP_PREFIX_ROUTE) {
+ if (evp->prefix.route_type != BGP_EVPN_IP_PREFIX_ROUTE) {
+ irt = in_vni_rt ? lookup_import_rt(bgp, eval) : NULL;
+ if (irt)
+ install_uninstall_route_in_vnis(bgp, afi, safi, evp, pi,
+ irt->vnis, import);
+ }
- irt = in_vni_rt ? lookup_import_rt(bgp, eval) : NULL;
- if (irt)
- install_uninstall_route_in_vnis(
- bgp, afi, safi, evp, pi, irt->vnis,
- import);
-
- vrf_irt = in_vrf_rt ? lookup_vrf_import_rt(eval) : NULL;
- if (vrf_irt)
- install_uninstall_route_in_vrfs(
- bgp, afi, safi, evp, pi, vrf_irt->vrfs,
- import);
+ if (evp->prefix.route_type != BGP_EVPN_AD_ROUTE &&
+ evp->prefix.route_type != BGP_EVPN_IMET_ROUTE) {
+ vrf_irt = in_vrf_rt ? lookup_vrf_import_rt(eval) : NULL;
+ if (vrf_irt)
+ install_uninstall_route_in_vrfs(bgp, afi, safi, evp, pi,
+ vrf_irt->vrfs, import);
+ }
/* Also check for non-exact match.
* In this, we mask out the AS and
@@ -6780,6 +6942,53 @@ static void link_l2vni_hash_to_l3vni(struct hash_bucket *bucket,
bgpevpn_link_to_l3vni(vpn);
}
+static void bgp_evpn_l3vni_remote_route_processing(struct bgp *bgp, bool install)
+{
+ /*
+ * Anytime BGP gets a Bulk of L3 VNI ADD/DEL from zebra,
+ * - Walking the entire global routing table per VNI is very expensive.
+ * - The next read (say of another VNI ADD/DEL) from the socket does
+ * not proceed unless this walk is complete.
+ * This results in huge output buffer FIFO growth spiking up the
+ * memory in zebra.
+ *
+	 * To avoid this, the idea is to hook the BGP-VRF onto the struct
+	 * bgp_master and maintain a struct bgp FIFO list which is processed
+	 * later, walking a chunk of BGP-VRFs at a time to do the remote route
+	 * install/uninstall.
+ */
+ if (!CHECK_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL) &&
+ !CHECK_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE))
+ zebra_l3_vni_add_tail(&bm->zebra_l3_vni_head, bgp);
+
+ if (install) {
+ SET_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL);
+ UNSET_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE);
+ } else {
+ SET_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE);
+ UNSET_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL);
+ }
+
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("Scheduling L3VNI %s to be processed later for %s VNI %u",
+ install ? "ADD" : "DEL", bgp->name_pretty, bgp->l3vni);
+ /*
+	 * If there are no BGP-VRFs in the bm L3VNI FIFO list, i.e. an update
+	 * for an already processed L3VNI comes in, schedule the remote route
+	 * install immediately.
+	 *
+	 * In all other cases, it is OK to schedule the remote route un/install
+	 * after a small sleep, to allow more L3VNI events to arrive and be
+	 * batched together.
+ */
+ if (zebra_l3_vni_count(&bm->zebra_l3_vni_head))
+ event_add_timer_msec(bm->master, bgp_zebra_process_remote_routes_for_l3vrf, NULL,
+ 20, &bm->t_bgp_zebra_l3_vni);
+ else
+ event_add_event(bm->master, bgp_zebra_process_remote_routes_for_l3vrf, NULL, 0,
+ &bm->t_bgp_zebra_l3_vni);
+}
+
int bgp_evpn_local_l3vni_add(vni_t l3vni, vrf_id_t vrf_id,
struct ethaddr *svi_rmac,
struct ethaddr *vrr_rmac,
@@ -6925,52 +7134,36 @@ int bgp_evpn_local_l3vni_add(vni_t l3vni, vrf_id_t vrf_id,
/* advertise type-5 routes if needed */
update_advertise_vrf_routes(bgp_vrf);
- /* install all remote routes belonging to this l3vni into correspondng
- * vrf */
- install_routes_for_vrf(bgp_vrf);
+ bgp_evpn_l3vni_remote_route_processing(bgp_vrf, true);
return 0;
}
-int bgp_evpn_local_l3vni_del(vni_t l3vni, vrf_id_t vrf_id)
+static void bgp_evpn_local_l3vni_del_post_processing(struct bgp *bgp_vrf)
{
- struct bgp *bgp_vrf = NULL; /* bgp vrf instance */
struct bgp *bgp_evpn = NULL; /* EVPN bgp instance */
struct listnode *node = NULL;
struct listnode *next = NULL;
struct bgpevpn *vpn = NULL;
- bgp_vrf = bgp_lookup_by_vrf_id(vrf_id);
- if (!bgp_vrf) {
- flog_err(
- EC_BGP_NO_DFLT,
- "Cannot process L3VNI %u Del - Could not find BGP instance",
- l3vni);
- return -1;
- }
-
bgp_evpn = bgp_get_evpn();
if (!bgp_evpn) {
- flog_err(
- EC_BGP_NO_DFLT,
- "Cannot process L3VNI %u Del - Could not find EVPN BGP instance",
- l3vni);
- return -1;
+ flog_err(EC_BGP_NO_DFLT,
+ "Cannot process L3VNI %u Del - Could not find EVPN BGP instance",
+ bgp_vrf->l3vni);
+ return;
}
if (CHECK_FLAG(bgp_evpn->flags, BGP_FLAG_DELETE_IN_PROGRESS)) {
flog_err(EC_BGP_NO_DFLT,
- "Cannot process L3VNI %u ADD - EVPN BGP instance is shutting down",
- l3vni);
- return -1;
+			 "Cannot process L3VNI %u Del - EVPN BGP instance is shutting down",
+ bgp_vrf->l3vni);
+ return;
}
- /* Remove remote routes from BGT VRF even if BGP_VRF_AUTO is configured,
- * bgp_delete would not remove/decrement bgp_path_info of the ip_prefix
- * routes. This will uninstalling the routes from zebra and decremnt the
- * bgp info count.
- */
- uninstall_routes_for_vrf(bgp_vrf);
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("In %s for L3VNI %u after remote route installation", __func__,
+ bgp_vrf->l3vni);
/* delete/withdraw all type-5 routes */
delete_withdraw_vrf_routes(bgp_vrf);
@@ -7016,14 +7209,95 @@ int bgp_evpn_local_l3vni_del(vni_t l3vni, vrf_id_t vrf_id)
bgpevpn_unlink_from_l3vni(vpn);
UNSET_FLAG(bgp_vrf->vrf_flags, BGP_VRF_L3VNI_PREFIX_ROUTES_ONLY);
+ UNSET_FLAG(bgp_vrf->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE);
/* Delete the instance if it was autocreated */
if (CHECK_FLAG(bgp_vrf->vrf_flags, BGP_VRF_AUTO))
bgp_delete(bgp_vrf);
+}
+
+int bgp_evpn_local_l3vni_del(vni_t l3vni, vrf_id_t vrf_id)
+{
+ struct bgp *bgp_evpn = NULL; /* EVPN bgp instance */
+ struct bgp *bgp_vrf = NULL; /* bgp vrf instance */
+
+ bgp_vrf = bgp_lookup_by_vrf_id(vrf_id);
+ if (!bgp_vrf) {
+ flog_err(EC_BGP_NO_DFLT,
+ "Cannot process L3VNI %u Del - Could not find BGP instance", l3vni);
+ return -1;
+ }
+
+ bgp_evpn = bgp_get_evpn();
+ if (!bgp_evpn) {
+ flog_err(EC_BGP_NO_DFLT,
+ "Cannot process L3VNI %u Del - Could not find EVPN BGP instance", l3vni);
+ return -1;
+ }
+
+ if (CHECK_FLAG(bgp_evpn->flags, BGP_FLAG_DELETE_IN_PROGRESS)) {
+ flog_err(EC_BGP_NO_DFLT,
+			 "Cannot process L3VNI %u Del - EVPN BGP instance is shutting down", l3vni);
+ return -1;
+ }
+
+ if (!bgp_vrf->l3vni) {
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("Returning from %s since VNI %u is already deleted", __func__,
+ l3vni);
+
+ return -1;
+ }
+
+ /*
+ * Move all the l3vni_delete operation post the remote route
+ * installation processing i.e. add the L3VNI DELETE item on the
+ * BGP-VRFs FIFO and move on.
+ */
+ bgp_evpn_l3vni_remote_route_processing(bgp_vrf, false);
return 0;
}
+static void bgp_evpn_l2vni_remote_route_processing(struct bgpevpn *vpn)
+{
+ /*
+	 * Anytime BGP gets a bulk of L2VNI ADD/UPDs from zebra,
+ * - Walking the entire global routing table per VNI is very expensive.
+ * - The next read (say of another VNI ADD/UPD) from the socket does
+ * not proceed unless this walk is complete.
+ * This results in huge output buffer FIFO growth spiking up the
+ * memory in zebra.
+ *
+	 * To avoid this, the idea is to hook the VPN onto the struct
+	 * bgp_master and maintain a VPN FIFO list which is processed later,
+	 * walking a chunk of VPNs at a time to do the remote route install.
+ */
+ if (!CHECK_FLAG(vpn->flags, VNI_FLAG_ADD)) {
+ zebra_l2_vni_add_tail(&bm->zebra_l2_vni_head, vpn);
+ SET_FLAG(vpn->flags, VNI_FLAG_ADD);
+ }
+
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("Scheduling L2VNI ADD to be processed later for VNI %u", vpn->vni);
+
+ /*
+	 * If there are no VNIs in the bgp VPN FIFO list, i.e. an update
+	 * for an already processed VNI comes in, schedule the remote
+	 * route install immediately.
+	 *
+	 * In all other cases, it is OK to schedule the remote route install
+	 * after a small sleep, to allow more L2VNI ADD events to arrive and
+	 * be batched together.
+ */
+ if (zebra_l2_vni_count(&bm->zebra_l2_vni_head))
+ event_add_timer_msec(bm->master, bgp_zebra_process_remote_routes_for_l2vni, NULL,
+ 20, &bm->t_bgp_zebra_l2_vni);
+ else
+ event_add_event(bm->master, bgp_zebra_process_remote_routes_for_l2vni, NULL, 0,
+ &bm->t_bgp_zebra_l2_vni);
+}
+
/*
* When bgp instance goes down also clean up what might have been left over
* from evpn.
@@ -7047,6 +7321,10 @@ int bgp_evpn_local_vni_del(struct bgp *bgp, vni_t vni)
if (!vpn)
return 0;
+ /* Remove the VPN from the bgp VPN FIFO (if exists) */
+ UNSET_FLAG(vpn->flags, VNI_FLAG_ADD);
+ zebra_l2_vni_del(&bm->zebra_l2_vni_head, vpn);
+
/* Remove all local EVPN routes and schedule for processing (to
* withdraw from peers).
*/
@@ -7203,12 +7481,6 @@ int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
}
}
- /* If we have learnt and retained remote routes (VTEPs, MACs) for this
- * VNI,
- * install them.
- */
- install_routes_for_vni(bgp, vpn);
-
/* If we are advertising gateway mac-ip
It needs to be conveyed again to zebra */
bgp_zebra_advertise_gw_macip(bgp, vpn->advertise_gw_macip, vpn->vni);
@@ -7216,6 +7488,8 @@ int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
/* advertise svi mac-ip knob to zebra */
bgp_zebra_advertise_svi_macip(bgp, vpn->advertise_svi_macip, vpn->vni);
+ bgp_evpn_l2vni_remote_route_processing(vpn);
+
return 0;
}
@@ -7245,8 +7519,17 @@ void bgp_evpn_flood_control_change(struct bgp *bgp)
*/
void bgp_evpn_cleanup_on_disable(struct bgp *bgp)
{
- hash_iterate(bgp->vnihash, (void (*)(struct hash_bucket *,
- void *))cleanup_vni_on_disable,
+ struct bgpevpn *vpn = NULL;
+ uint32_t vni_count = zebra_l2_vni_count(&bm->zebra_l2_vni_head);
+
+ /* Cleanup VNI FIFO list from this bgp instance */
+ while (vni_count) {
+ vpn = zebra_l2_vni_pop(&bm->zebra_l2_vni_head);
+ UNSET_FLAG(vpn->flags, VNI_FLAG_ADD);
+ vni_count--;
+ }
+
+ hash_iterate(bgp->vnihash, (void (*)(struct hash_bucket *, void *))cleanup_vni_on_disable,
bgp);
}
diff --git a/bgpd/bgp_evpn.h b/bgpd/bgp_evpn.h
index 1a333a5a09..8bbc5d3c37 100644
--- a/bgpd/bgp_evpn.h
+++ b/bgpd/bgp_evpn.h
@@ -200,4 +200,6 @@ bool bgp_evpn_skip_vrf_import_of_local_es(struct bgp *bgp_vrf, const struct pref
int uninstall_evpn_route_entry_in_vrf(struct bgp *bgp_vrf, const struct prefix_evpn *evp,
struct bgp_path_info *parent_pi);
extern void bgp_zebra_evpn_pop_items_from_announce_fifo(struct bgpevpn *vpn);
+extern int install_uninstall_routes_for_vni(struct bgp *bgp, struct bgpevpn *vpn, bool install);
+extern int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, bool install);
#endif /* _QUAGGA_BGP_EVPN_H */
diff --git a/bgpd/bgp_evpn_private.h b/bgpd/bgp_evpn_private.h
index b05df3d82a..568d3d45ee 100644
--- a/bgpd/bgp_evpn_private.h
+++ b/bgpd/bgp_evpn_private.h
@@ -32,6 +32,13 @@
#define BGP_EVPN_TYPE4_V4_PSIZE 23
#define BGP_EVPN_TYPE4_V6_PSIZE 34
+static const struct message bgp_evpn_route_type_str[] = { { BGP_EVPN_AD_ROUTE, "AD" },
+ { BGP_EVPN_MAC_IP_ROUTE, "MACIP" },
+ { BGP_EVPN_IMET_ROUTE, "IMET" },
+ { BGP_EVPN_ES_ROUTE, "ES" },
+ { BGP_EVPN_IP_PREFIX_ROUTE, "IP-PREFIX" },
+ { 0 } };
+
RB_HEAD(bgp_es_evi_rb_head, bgp_evpn_es_evi);
RB_PROTOTYPE(bgp_es_evi_rb_head, bgp_evpn_es_evi, rb_node,
bgp_es_evi_rb_cmp);
@@ -53,8 +60,9 @@ struct bgpevpn {
#define VNI_FLAG_RD_CFGD 0x4 /* RD is user configured. */
#define VNI_FLAG_IMPRT_CFGD 0x8 /* Import RT is user configured */
#define VNI_FLAG_EXPRT_CFGD 0x10 /* Export RT is user configured */
-#define VNI_FLAG_USE_TWO_LABELS 0x20 /* Attach both L2-VNI and L3-VNI if
- needed for this VPN */
+/* Attach both L2-VNI and L3-VNI if needed for this VPN */
+#define VNI_FLAG_USE_TWO_LABELS 0x20
+#define VNI_FLAG_ADD 0x40 /* L2VNI Add */
struct bgp *bgp_vrf; /* back pointer to the vrf instance */
@@ -108,11 +116,15 @@ struct bgpevpn {
/* List of local ESs */
struct list *local_es_evi_list;
+ struct zebra_l2_vni_item zl2vni;
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(bgpevpn);
+DECLARE_LIST(zebra_l2_vni, struct bgpevpn, zl2vni);
+
/* Mapping of Import RT to VNIs.
* The Import RTs of all VNIs are maintained in a hash table with each
* RT linking to all VNIs that will import routes matching this RT.
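Since bgp_evpn_route_type_str is a standard struct message table, it can also be consulted through lib's lookup_msg(), which matches on the key value rather than on array position. A small hedged usage sketch (evpn_type_name() is a hypothetical helper, not part of the patch):

#include "log.h" /* lookup_msg() */

static const char *evpn_type_name(int route_type)
{
	/* lookup_msg() scans for a matching key, so it does not depend
	 * on the route-type constants being zero-based array indexes. */
	return lookup_msg(bgp_evpn_route_type_str, route_type, "unknown");
}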
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index 958a9c6492..d9dfc4c5eb 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -764,10 +764,9 @@ static void bgp_evpn_show_routes_mac_ip_es(struct vty *vty, esi_t *esi,
json_path = json_object_new_array();
if (detail)
- route_vty_out_detail(
- vty, bgp, bd, bgp_dest_get_prefix(bd),
- pi, AFI_L2VPN, SAFI_EVPN,
- RPKI_NOT_BEING_USED, json_path);
+ route_vty_out_detail(vty, bgp, bd, bgp_dest_get_prefix(bd), pi,
+ AFI_L2VPN, SAFI_EVPN, RPKI_NOT_BEING_USED,
+ json_path, NULL);
else
route_vty_out(vty, &bd->rn->p, pi, 0, SAFI_EVPN,
json_path, false);
@@ -892,10 +891,9 @@ static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn,
json_path = json_object_new_array();
if (detail)
- route_vty_out_detail(vty, bgp, dest, &tmp_p, pi,
- AFI_L2VPN, SAFI_EVPN,
- RPKI_NOT_BEING_USED,
- json_path);
+ route_vty_out_detail(vty, bgp, dest, &tmp_p, pi, AFI_L2VPN,
+ SAFI_EVPN, RPKI_NOT_BEING_USED, json_path,
+ NULL);
else
route_vty_out(vty, &tmp_p, pi, 0, SAFI_EVPN,
@@ -2570,9 +2568,8 @@ static void evpn_show_route_vni_multicast(struct vty *vty, struct bgp *bgp,
if (json)
json_path = json_object_new_array();
- route_vty_out_detail(vty, bgp, dest, bgp_dest_get_prefix(dest),
- pi, afi, safi, RPKI_NOT_BEING_USED,
- json_path);
+ route_vty_out_detail(vty, bgp, dest, bgp_dest_get_prefix(dest), pi, afi, safi,
+ RPKI_NOT_BEING_USED, json_path, NULL);
if (json)
json_object_array_add(json_paths, json_path);
@@ -2699,9 +2696,8 @@ static void evpn_show_route_vni_macip(struct vty *vty, struct bgp *bgp,
NULL /* ip */);
}
- route_vty_out_detail(vty, bgp, dest, (struct prefix *)&tmp_p,
- pi, afi, safi, RPKI_NOT_BEING_USED,
- json_path);
+ route_vty_out_detail(vty, bgp, dest, (struct prefix *)&tmp_p, pi, afi, safi,
+ RPKI_NOT_BEING_USED, json_path, NULL);
if (json)
json_object_array_add(json_paths, json_path);
@@ -2810,9 +2806,8 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp,
if (json)
json_path = json_object_new_array();
- route_vty_out_detail(vty, bgp, dest, bgp_dest_get_prefix(dest),
- pi, afi, safi, RPKI_NOT_BEING_USED,
- json_path);
+ route_vty_out_detail(vty, bgp, dest, bgp_dest_get_prefix(dest), pi, afi, safi,
+ RPKI_NOT_BEING_USED, json_path, NULL);
if (json)
json_object_array_add(json_paths, json_path);
@@ -2923,9 +2918,8 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp,
if (json)
json_path = json_object_new_array();
- route_vty_out_detail(
- vty, bgp, dest, bgp_dest_get_prefix(dest), pi,
- afi, safi, RPKI_NOT_BEING_USED, json_path);
+ route_vty_out_detail(vty, bgp, dest, bgp_dest_get_prefix(dest), pi, afi,
+ safi, RPKI_NOT_BEING_USED, json_path, NULL);
if (json)
json_object_array_add(json_paths, json_path);
@@ -3060,9 +3054,8 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp,
if (json)
json_path = json_object_new_array();
- route_vty_out_detail(vty, bgp, dest, p, pi, AFI_L2VPN,
- SAFI_EVPN, RPKI_NOT_BEING_USED,
- json_path);
+ route_vty_out_detail(vty, bgp, dest, p, pi, AFI_L2VPN, SAFI_EVPN,
+ RPKI_NOT_BEING_USED, json_path, NULL);
if (json)
json_object_array_add(json_paths, json_path);
@@ -3223,11 +3216,10 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
json_path = json_object_new_array();
if (detail) {
- route_vty_out_detail(
- vty, bgp, dest,
- bgp_dest_get_prefix(dest), pi,
- AFI_L2VPN, SAFI_EVPN,
- RPKI_NOT_BEING_USED, json_path);
+ route_vty_out_detail(vty, bgp, dest,
+ bgp_dest_get_prefix(dest), pi,
+ AFI_L2VPN, SAFI_EVPN,
+ RPKI_NOT_BEING_USED, json_path, NULL);
} else
route_vty_out(vty, p, pi, 0, SAFI_EVPN,
json_path, false);
@@ -3469,7 +3461,9 @@ static void evpn_process_default_originate_cmd(struct bgp *bgp_vrf,
BGP_L2VPN_EVPN_DEFAULT_ORIGINATE_IPV6);
}
- bgp_evpn_install_uninstall_default_route(bgp_vrf, afi, safi, add);
+ if (is_l3vni_live(bgp_vrf))
+ bgp_evpn_install_uninstall_default_route(bgp_vrf,
+ afi, safi, add);
}
/*
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index cadef39974..3d02214ca9 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -491,11 +491,14 @@ static void bgp_connect_timer(struct event *thread)
assert(!connection->t_read);
if (bgp_debug_neighbor_events(peer))
- zlog_debug("%s [FSM] Timer (connect timer expire)", peer->host);
+ zlog_debug("%s [FSM] Timer (connect timer (%us) expire)", peer->host,
+ peer->v_connect);
if (CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER))
bgp_stop(connection);
else {
+ if (!peer->connect)
+ peer->v_connect = MIN(BGP_MAX_CONNECT_RETRY, peer->v_connect * 2);
EVENT_VAL(thread) = ConnectRetry_timer_expired;
bgp_event(thread); /* bgp_event unlocks peer */
}
@@ -662,7 +665,7 @@ static void bgp_llgr_stale_timer_expire(struct event *thread)
static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi)
{
struct bgp_dest *dest;
- struct bgp_path_info *pi;
+ struct bgp_path_info *pi, *next;
struct bgp_table *table;
struct attr attr;
@@ -677,8 +680,8 @@ static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi)
for (rm = bgp_table_top(table); rm;
rm = bgp_route_next(rm))
- for (pi = bgp_dest_get_bgp_path_info(rm); pi;
- pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(rm);
+ (pi != NULL) && (next = pi->next, 1); pi = next) {
if (pi->peer != peer)
continue;
@@ -709,8 +712,8 @@ static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi)
} else {
for (dest = bgp_table_top(peer->bgp->rib[afi][safi]); dest;
dest = bgp_route_next(dest))
- for (pi = bgp_dest_get_bgp_path_info(dest); pi;
- pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(dest);
+ (pi != NULL) && (next = pi->next, 1); pi = next) {
if (pi->peer != peer)
continue;
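The two loops above switch to a delete-safe traversal: the successor is captured in the loop condition before the body runs, so the body may unlink or free the current path info without derailing the walk. The comma-expression form in the patch is equivalent to this more conventional sketch (generic types, illustration only):

struct node {
	struct node *next;
};

static void walk_delete_safe(struct node *head, void (*visit)(struct node *))
{
	struct node *n, *next;

	for (n = head; n != NULL; n = next) {
		next = n->next; /* saved before visit() may free n */
		visit(n);
	}
}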
@@ -1224,9 +1227,14 @@ void bgp_fsm_change_status(struct peer_connection *connection,
peer_count = bgp->established_peers;
- if (status == Established)
+ if (status == Established) {
bgp->established_peers++;
- else if ((peer_established(connection)) && (status != Established))
+ /* Reset the retry timer if we already established */
+ if (peer->connect)
+ peer->v_connect = peer->connect;
+ else
+ peer->v_connect = peer->bgp->default_connect_retry;
+ } else if ((peer_established(connection)) && (status != Established))
bgp->established_peers--;
if (bgp_debug_neighbor_events(peer)) {
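The connect-timer and FSM-status hunks in this file add exponential backoff to connect retries: every expiry doubles v_connect up to BGP_MAX_CONNECT_RETRY unless the operator pinned the timer with a configured connect time, and reaching Established resets it to the base value. A compact sketch of that policy; the cap value and harness below are placeholders following the patch, not verified constants:

#include <stdint.h>

#define MAX_CONNECT_RETRY 120 /* placeholder cap, in seconds */

struct conn_timers {
	uint32_t connect;       /* operator-configured retry, 0 = unset */
	uint32_t default_retry; /* per-instance default */
	uint32_t v_connect;     /* current effective timer */
};

static void on_connect_timeout(struct conn_timers *c)
{
	/* back off only when the timer is not pinned by configuration */
	if (!c->connect)
		c->v_connect = c->v_connect * 2 < MAX_CONNECT_RETRY
				       ? c->v_connect * 2
				       : MAX_CONNECT_RETRY;
}

static void on_established(struct conn_timers *c)
{
	/* snap back so a later flap starts from the base interval */
	c->v_connect = c->connect ? c->connect : c->default_retry;
}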
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index 23e0c191dc..54a966e191 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -1125,7 +1125,6 @@ static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
struct bgp_path_info *path;
struct bgp *bgp_path;
struct bgp_table *table;
- time_t tbuf;
vty_out(vty, "Current BGP label nexthop cache for %s, VRF %s\n",
afi2str(afi), bgp->name_pretty);
@@ -1146,8 +1145,7 @@ static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
vty_out(vty, " if %s\n",
ifindex2ifname(iter->nh->ifindex,
iter->nh->vrf_id));
- tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
- vty_out(vty, " Last update: %s", ctime_r(&tbuf, buf));
+ vty_out(vty, " Last update: %s", time_to_string(iter->last_update, buf));
if (!detail)
continue;
vty_out(vty, " Paths:\n");
diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c
index 535d2fc5f4..9ca20c949a 100644
--- a/bgpd/bgp_main.c
+++ b/bgpd/bgp_main.c
@@ -207,6 +207,8 @@ static __attribute__((__noreturn__)) void bgp_exit(int status)
bgp_nhg_finish();
zebra_announce_fini(&bm->zebra_announce_head);
+ zebra_l2_vni_fini(&bm->zebra_l2_vni_head);
+ zebra_l3_vni_fini(&bm->zebra_l3_vni_head);
/* reverse bgp_dump_init */
bgp_dump_finish();
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index ca7f73dde9..b96c287f86 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -2167,6 +2167,8 @@ static void vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
struct interface *ifp = NULL;
char rd_buf[RD_ADDRSTRLEN];
struct aspath *new_aspath;
+ int32_t aspath_loop_count = 0;
+ struct peer *peer = path_vpn->peer;
int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
@@ -2227,7 +2229,9 @@ static void vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
bn = bgp_afi_node_get(to_bgp->rib[afi][safi], afi, safi, p, NULL);
/* Check if leaked route has our asn. If so, don't import it. */
- if (aspath_loop_check(path_vpn->attr->aspath, to_bgp->as)) {
+ if (CHECK_FLAG(peer->af_flags[afi][SAFI_MPLS_VPN], PEER_FLAG_ALLOWAS_IN))
+ aspath_loop_count = peer->allowas_in[afi][SAFI_MPLS_VPN];
+ if (aspath_loop_check(path_vpn->attr->aspath, to_bgp->as) > aspath_loop_count) {
for (bpi = bgp_dest_get_bgp_path_info(bn); bpi;
bpi = bpi->next) {
if (bpi->extra && bpi->extra->vrfleak &&
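With this hunk, a leaked route carrying the local ASN is rejected only when the ASN appears more times than the peer's allowas-in setting permits. A standalone sketch of that decision; the counting helper and the configured values are illustrative:

#include <stdbool.h>
#include <stdio.h>

static int aspath_count_asn(const unsigned int *aspath, int len, unsigned int asn)
{
	int n = 0;

	for (int i = 0; i < len; i++)
		if (aspath[i] == asn)
			n++;
	return n;
}

int main(void)
{
	unsigned int aspath[] = { 65001, 65000, 65002 };
	unsigned int our_asn = 65000;
	int allowas_in = 1; /* assumed: peer configured "allowas-in 1" */
	bool reject = aspath_count_asn(aspath, 3, our_asn) > allowas_in;

	printf("import %s\n", reject ? "rejected" : "allowed"); /* allowed */
	return 0;
}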
@@ -2513,11 +2517,12 @@ void vpn_leak_to_vrf_update(struct bgp *from_bgp,
{
struct listnode *mnode, *mnnode;
struct bgp *bgp;
+ const struct prefix *p = bgp_dest_get_prefix(path_vpn->net);
int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
if (debug)
- zlog_debug("%s: start (path_vpn=%p)", __func__, path_vpn);
+ zlog_debug("%s: start (path_vpn=%p, prefix=%pFX)", __func__, path_vpn, p);
/* Loop over VRFs */
for (ALL_LIST_ELEMENTS(bm->bgp, mnode, mnnode, bgp)) {
@@ -4074,6 +4079,35 @@ void bgp_vpn_leak_export(struct bgp *from_bgp)
}
}
+/* Release the label previously assigned from the labelpool and, depending
+ * on the reset argument, clear the auto-label flag.
+ * This is also used from the vty to release the label and to change the allocation mode.
+ */
+void bgp_vpn_release_label(struct bgp *bgp, afi_t afi, bool reset)
+{
+ if (!CHECK_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_LABEL_AUTO))
+ return;
+ /*
+ * label has previously been automatically
+ * assigned by labelpool: release it
+ *
+ * NB if tovpn_label == MPLS_LABEL_NONE it
+ * means the automatic assignment is in flight
+ * and therefore the labelpool callback must
+ * detect that the auto label is not needed.
+ */
+ if (bgp->vpn_policy[afi].tovpn_label == MPLS_LABEL_NONE)
+ return;
+ if (CHECK_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
+ return;
+
+ bgp_lp_release(LP_TYPE_VRF, &bgp->vpn_policy[afi], bgp->vpn_policy[afi].tovpn_label);
+ bgp->vpn_policy[afi].tovpn_label = MPLS_LABEL_NONE;
+
+ if (reset)
+ UNSET_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
+}
+
/* The nexthops values are compared to
* find in the tree the appropriate cache entry
*/
@@ -4390,7 +4424,6 @@ static void show_bgp_mplsvpn_nh_label_bind_internal(struct vty *vty,
struct bgp_path_info *path;
struct bgp *bgp_path;
struct bgp_table *table;
- time_t tbuf;
char buf[32];
vty_out(vty, "Current BGP mpls-vpn nexthop label bind cache, %s\n",
@@ -4408,8 +4441,7 @@ static void show_bgp_mplsvpn_nh_label_bind_internal(struct vty *vty,
vty_out(vty, " interface %s\n",
ifindex2ifname(iter->nh->ifindex,
iter->nh->vrf_id));
- tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
- vty_out(vty, " Last update: %s", ctime_r(&tbuf, buf));
+ vty_out(vty, " Last update: %s", time_to_string(iter->last_update, buf));
if (!detail)
continue;
vty_out(vty, " Paths:\n");
diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index 39fed66781..18639fc69b 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -352,6 +352,7 @@ extern void vpn_handle_router_id_update(struct bgp *bgp, bool withdraw,
bool is_config);
extern void bgp_vpn_leak_unimport(struct bgp *from_bgp);
extern void bgp_vpn_leak_export(struct bgp *from_bgp);
+extern void bgp_vpn_release_label(struct bgp *bgp, afi_t afi, bool reset);
extern bool bgp_mplsvpn_path_uses_valid_mpls_label(struct bgp_path_info *pi);
extern int
diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c
index 1ef90a8e38..5fda5701f3 100644
--- a/bgpd/bgp_nexthop.c
+++ b/bgpd/bgp_nexthop.c
@@ -1079,14 +1079,14 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
json_last_update = json_object_new_object();
json_object_int_add(json_last_update, "epoch", tbuf);
json_object_string_add(json_last_update, "string",
- ctime_r(&tbuf, timebuf));
+ time_to_string_json(bnc->last_update, timebuf));
json_object_object_add(json_nexthop, "lastUpdate",
json_last_update);
} else {
json_object_int_add(json_nexthop, "lastUpdate", tbuf);
}
} else {
- vty_out(vty, " Last update: %s", ctime_r(&tbuf, timebuf));
+ vty_out(vty, " Last update: %s", time_to_string(bnc->last_update, timebuf));
}
/* show paths dependent on nexthop, if needed. */
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index e9cc52449b..c5e390b045 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -2411,7 +2411,7 @@ static int bgp_update_receive(struct peer_connection *connection,
sizeof(peer->rcvd_attr_str));
if (attr_parse_ret == BGP_ATTR_PARSE_WITHDRAW) {
- peer->stat_upd_7606++;
+ peer->stat_pfx_withdraw++;
flog_err(
EC_BGP_UPDATE_RCV,
"%pBP rcvd UPDATE with errors in attr(s)!! Withdrawing route.",
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 5feda71837..0f899d9617 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -78,6 +78,9 @@
#include "bgpd/bgp_route_clippy.c"
+DEFINE_MTYPE_STATIC(BGPD, BGP_EOIU_MARKER_INFO, "BGP EOIU Marker info");
+DEFINE_MTYPE_STATIC(BGPD, BGP_METAQ, "BGP MetaQ");
+
DEFINE_HOOK(bgp_snmp_update_stats,
(struct bgp_dest *rn, struct bgp_path_info *pi, bool added),
(rn, pi, added));
@@ -2587,12 +2590,11 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
if (ret == RMAP_DENYMATCH) {
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
- zlog_debug(
- "%pBP [Update:SEND] %pFX is filtered by route-map '%s'",
- peer, p,
- bgp_path_suppressed(pi)
- ? UNSUPPRESS_MAP_NAME(filter)
- : ROUTE_MAP_OUT_NAME(filter));
+ zlog_debug("%pBP [Update:SEND] %pFX is filtered by route-map (%s) '%s'",
+ peer, p,
+ bgp_path_suppressed(pi) ? "unsuppress-map" : "out",
+ bgp_path_suppressed(pi) ? UNSUPPRESS_MAP_NAME(filter)
+ : ROUTE_MAP_OUT_NAME(filter));
bgp_attr_flush(rmap_path.attr);
return false;
}
@@ -3007,7 +3009,10 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
old_select = NULL;
pi = bgp_dest_get_bgp_path_info(dest);
- while (pi && CHECK_FLAG(pi->flags, BGP_PATH_UNSORTED)) {
+ while (pi && (CHECK_FLAG(pi->flags, BGP_PATH_UNSORTED) ||
+ (pi->peer != bgp->peer_self &&
+ !CHECK_FLAG(pi->peer->sflags, PEER_STATUS_NSF_WAIT) &&
+ !peer_established(pi->peer->connection)))) {
struct bgp_path_info *next = pi->next;
if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED))
@@ -3101,6 +3106,30 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
continue;
}
+ if (first->peer && first->peer != bgp->peer_self &&
+ !CHECK_FLAG(first->peer->sflags, PEER_STATUS_NSF_WAIT) &&
+ !peer_established(first->peer->connection)) {
+ if (debug)
+ zlog_debug("%s: %pBD(%s) pi %p from %s is not in established state",
+ __func__, dest, bgp->name_pretty, first,
+ first->peer->host);
+
+ /*
+ * The peer is not in Established state, so we cannot sort this
+ * item yet; hold it to the side until the peer comes up.
+ */
+ if (unsorted_holddown) {
+ first->next = unsorted_holddown;
+ unsorted_holddown->prev = first;
+ unsorted_holddown = first;
+ } else
+ unsorted_holddown = first;
+
+ UNSET_FLAG(first->flags, BGP_PATH_UNSORTED);
+
+ continue;
+ }
+
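The block above parks path infos from peers that are not yet Established on a side list instead of sorting them. A generic standalone sketch of this hold-aside pattern, not FRR's own list handling:

#include <stdbool.h>
#include <stdio.h>

struct path {
	const char *name;
	bool established;
	struct path *next;
};

int main(void)
{
	struct path c = { "c", true, NULL };
	struct path b = { "b", false, &c };
	struct path a = { "a", true, &b };
	struct path *list = &a, *hold = NULL, **pp = &list;

	while (*pp) {
		struct path *p = *pp;

		if (!p->established) {
			*pp = p->next;  /* unlink from the working list */
			p->next = hold; /* park it on the hold-aside list */
			hold = p;
		} else
			pp = &p->next;
	}
	for (struct path *p = hold; p; p = p->next)
		printf("held: %s\n", p->name); /* b */
	return 0;
}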
bgp_path_info_unset_flag(dest, first, BGP_PATH_DMED_CHECK);
worse = NULL;
@@ -3462,14 +3491,6 @@ bool bgp_zebra_has_route_changed(struct bgp_path_info *selected)
return false;
}
-struct bgp_process_queue {
- struct bgp *bgp;
- STAILQ_HEAD(, bgp_dest) pqueue;
-#define BGP_PROCESS_QUEUE_EOIU_MARKER (1 << 0)
- unsigned int flags;
- unsigned int queued;
-};
-
static void bgp_process_evpn_route_injection(struct bgp *bgp, afi_t afi,
safi_t safi, struct bgp_dest *dest,
struct bgp_path_info *new_select,
@@ -4017,43 +4038,286 @@ void bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
&bgp->gr_info[afi][safi].t_route_select);
}
-static wq_item_status bgp_process_wq(struct work_queue *wq, void *data)
+static const char *subqueue2str(enum meta_queue_indexes index)
{
- struct bgp_process_queue *pqnode = data;
- struct bgp *bgp = pqnode->bgp;
- struct bgp_table *table;
- struct bgp_dest *dest;
+ switch (index) {
+ case META_QUEUE_EARLY_ROUTE:
+ return "Early Route";
+ case META_QUEUE_OTHER_ROUTE:
+ return "Other Route";
+ case META_QUEUE_EOIU_MARKER:
+ return "EOIU Marker";
+ }
+
+ return "Unknown";
+}
+
+/*
+ * Process a node from the Early route subqueue.
+ */
+static void process_subq_early_route(struct bgp_dest *dest)
+{
+ struct bgp_table *table = bgp_dest_table(dest);
+
+ if (bgp_debug_bestpath(dest))
+ zlog_debug("%s dequeued from sub-queue %s", bgp_dest_get_prefix_str(dest),
+ subqueue2str(META_QUEUE_EARLY_ROUTE));
+
+ /* note, new DESTs may be added as part of processing */
+ bgp_process_main_one(table->bgp, dest, table->afi, table->safi);
+ bgp_dest_unlock_node(dest);
+ bgp_table_unlock(table);
+}
+
+/*
+ * Process a node from the other subqueue.
+ */
+static void process_subq_other_route(struct bgp_dest *dest)
+{
+ struct bgp_table *table = bgp_dest_table(dest);
+
+ if (bgp_debug_bestpath(dest))
+ zlog_debug("%s dequeued from sub-queue %s", bgp_dest_get_prefix_str(dest),
+ subqueue2str(META_QUEUE_OTHER_ROUTE));
+
+ /* note, new DESTs may be added as part of processing */
+ bgp_process_main_one(table->bgp, dest, table->afi, table->safi);
+ bgp_dest_unlock_node(dest);
+ bgp_table_unlock(table);
+}
+
+/*
+ * Process a node from the eoiu marker subqueue.
+ */
+static void process_eoiu_marker(struct bgp_dest *dest)
+{
+ struct bgp_eoiu_info *info = bgp_dest_get_bgp_eoiu_info(dest);
+
+ if (!info || !info->bgp) {
+ zlog_err("Unable to retrieve BGP instance, can't process EOIU marker");
+ return;
+ }
+
+ if (BGP_DEBUG(update, UPDATE_IN))
+ zlog_debug("EOIU Marker dequeued from sub-queue %s",
+ subqueue2str(META_QUEUE_EOIU_MARKER));
+
+ bgp_process_main_one(info->bgp, NULL, 0, 0);
+}
+
+/*
+ * Examine the specified subqueue; process one entry and return 1 if
+ * there is a node, return 0 otherwise.
+ */
+static unsigned int process_subq(struct bgp_dest_queue *subq, enum meta_queue_indexes qindex)
+{
+ struct bgp_dest *dest = STAILQ_FIRST(subq);
+
+ if (!dest)
+ return 0;
+
+ STAILQ_REMOVE_HEAD(subq, pq);
+ STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
+
+ switch (qindex) {
+ case META_QUEUE_EARLY_ROUTE:
+ process_subq_early_route(dest);
+ break;
+ case META_QUEUE_OTHER_ROUTE:
+ process_subq_other_route(dest);
+ break;
+ case META_QUEUE_EOIU_MARKER:
+ process_eoiu_marker(dest);
+ }
+
+ return 1;
+}
+
+/* Dispatch the meta queue by picking and processing the next node from
+ * the highest-priority (lowest-index) non-empty sub-queue. wq is
+ * bgp->process_queue and data points to the meta queue structure.
+ */
+static wq_item_status meta_queue_process(struct work_queue *dummy, void *data)
+{
+ struct meta_queue *mq = data;
+ uint32_t i;
+
+ for (i = 0; i < MQ_SIZE; i++)
+ if (process_subq(mq->subq[i], i)) {
+ mq->size--;
+ break;
+ }
+ return mq->size ? WQ_REQUEUE : WQ_SUCCESS;
+}
+
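meta_queue_process() drains exactly one element per work-queue pass, always from the first non-empty sub-queue, and requeues itself while anything remains. A toy model of that dispatch order under those assumptions:

#include <stdio.h>

#define MQ 3

int main(void)
{
	int depth[MQ] = { 2, 3, 1 }; /* early, other, eoiu-marker */
	int size = 6;

	while (size) {
		for (int i = 0; i < MQ; i++) {
			if (depth[i]) {
				printf("process one item from subq %d\n", i);
				depth[i]--;
				size--;
				break; /* WQ_REQUEUE: return for the next item */
			}
		}
	}
	return 0;
}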
+static int early_route_meta_queue_add(struct meta_queue *mq, void *data)
+{
+ uint8_t qindex = META_QUEUE_EARLY_ROUTE;
+ struct bgp_dest *dest = data;
+
+ if (bgp_debug_bestpath(dest))
+ zlog_debug("%s queued into sub-queue %s", bgp_dest_get_prefix_str(dest),
+ subqueue2str(qindex));
+
+ assert(STAILQ_NEXT(dest, pq) == NULL);
+ STAILQ_INSERT_TAIL(mq->subq[qindex], dest, pq);
+ mq->size++;
+ return 0;
+}
+
+static int other_route_meta_queue_add(struct meta_queue *mq, void *data)
+{
+ uint8_t qindex = META_QUEUE_OTHER_ROUTE;
+ struct bgp_dest *dest = data;
+
+ if (bgp_debug_bestpath(dest))
+ zlog_debug("%s queued into sub-queue %s", bgp_dest_get_prefix_str(dest),
+ subqueue2str(qindex));
+
+ assert(STAILQ_NEXT(dest, pq) == NULL);
+ STAILQ_INSERT_TAIL(mq->subq[qindex], dest, pq);
+ mq->size++;
+ return 0;
+}
+
+static int eoiu_marker_meta_queue_add(struct meta_queue *mq, void *data)
+{
+ uint8_t qindex = META_QUEUE_EOIU_MARKER;
+ struct bgp_dest *dest = data;
+
+ if (BGP_DEBUG(update, UPDATE_IN))
+ zlog_debug("EOIU Marker queued into sub-queue %s", subqueue2str(qindex));
+
+ assert(STAILQ_NEXT(dest, pq) == NULL);
+ STAILQ_INSERT_TAIL(mq->subq[qindex], dest, pq);
+ mq->size++;
+ return 0;
+}
+
+static int mq_add_handler(struct bgp *bgp, void *data,
+ int (*mq_add_func)(struct meta_queue *mq, void *data))
+{
+ if (bgp->process_queue == NULL) {
+ zlog_err("%s: work_queue does not exist!", __func__);
+ return -1;
+ }
+
+ if (work_queue_empty(bgp->process_queue))
+ work_queue_add(bgp->process_queue, bgp->mq);
+
+ return mq_add_func(bgp->mq, data);
+}
+
+int early_route_process(struct bgp *bgp, struct bgp_dest *dest)
+{
+ if (!dest) {
+ zlog_err("%s: early route dest is NULL!", __func__);
+ return -1;
+ }
+
+ return mq_add_handler(bgp, dest, early_route_meta_queue_add);
+}
- /* eoiu marker */
- if (CHECK_FLAG(pqnode->flags, BGP_PROCESS_QUEUE_EOIU_MARKER)) {
- bgp_process_main_one(bgp, NULL, 0, 0);
- /* should always have dedicated wq call */
- assert(STAILQ_FIRST(&pqnode->pqueue) == NULL);
- return WQ_SUCCESS;
+int other_route_process(struct bgp *bgp, struct bgp_dest *dest)
+{
+ if (!dest) {
+ zlog_err("%s: other route dest is NULL!", __func__);
+ return -1;
}
- while (!STAILQ_EMPTY(&pqnode->pqueue)) {
- dest = STAILQ_FIRST(&pqnode->pqueue);
- STAILQ_REMOVE_HEAD(&pqnode->pqueue, pq);
+ return mq_add_handler(bgp, dest, other_route_meta_queue_add);
+}
+
+int eoiu_marker_process(struct bgp *bgp, struct bgp_dest *dest)
+{
+ if (!dest) {
+ zlog_err("%s: eoiu marker dest is NULL!", __func__);
+ return -1;
+ }
+
+ return mq_add_handler(bgp, dest, eoiu_marker_meta_queue_add);
+}
+
+/* Create new meta queue.
+ * A destructor function doesn't seem to be necessary here.
+ */
+static struct meta_queue *meta_queue_new(void)
+{
+ struct meta_queue *new;
+ uint32_t i;
+
+ new = XCALLOC(MTYPE_BGP_METAQ, sizeof(struct meta_queue));
+
+ for (i = 0; i < MQ_SIZE; i++) {
+ new->subq[i] = XCALLOC(MTYPE_BGP_METAQ, sizeof(*(new->subq[i])));
+ assert(new->subq[i]);
+ STAILQ_INIT(new->subq[i]);
+ }
+
+ return new;
+}
+
+/* Clean up the early meta-queue list */
+static void early_meta_queue_free(struct meta_queue *mq, struct bgp_dest_queue *l)
+{
+ struct bgp_dest *dest;
+
+ while (!STAILQ_EMPTY(l)) {
+ dest = STAILQ_FIRST(l);
+ STAILQ_REMOVE_HEAD(l, pq);
STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
- table = bgp_dest_table(dest);
- /* note, new DESTs may be added as part of processing */
- bgp_process_main_one(bgp, dest, table->afi, table->safi);
+ mq->size--;
+ }
+}
- bgp_dest_unlock_node(dest);
- bgp_table_unlock(table);
+/* Clean up the other meta-queue list */
+static void other_meta_queue_free(struct meta_queue *mq, struct bgp_dest_queue *l)
+{
+ struct bgp_dest *dest;
+
+ while (!STAILQ_EMPTY(l)) {
+ dest = STAILQ_FIRST(l);
+ STAILQ_REMOVE_HEAD(l, pq);
+ STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
+ mq->size--;
}
+}
- return WQ_SUCCESS;
+/* Clean up the eoiu marker meta-queue list */
+static void eoiu_marker_queue_free(struct meta_queue *mq, struct bgp_dest_queue *l)
+{
+ struct bgp_dest *dest;
+
+ while (!STAILQ_EMPTY(l)) {
+ dest = STAILQ_FIRST(l);
+ XFREE(MTYPE_BGP_EOIU_MARKER_INFO, dest->info);
+ STAILQ_REMOVE_HEAD(l, pq);
+ STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
+ mq->size--;
+ }
}
-static void bgp_processq_del(struct work_queue *wq, void *data)
+void bgp_meta_queue_free(struct meta_queue *mq)
{
- struct bgp_process_queue *pqnode = data;
+ enum meta_queue_indexes i;
+
+ for (i = 0; i < MQ_SIZE; i++) {
+ switch (i) {
+ case META_QUEUE_EARLY_ROUTE:
+ early_meta_queue_free(mq, mq->subq[i]);
+ break;
+ case META_QUEUE_OTHER_ROUTE:
+ other_meta_queue_free(mq, mq->subq[i]);
+ break;
+ case META_QUEUE_EOIU_MARKER:
+ eoiu_marker_queue_free(mq, mq->subq[i]);
+ break;
+ }
- bgp_unlock(pqnode->bgp);
+ XFREE(MTYPE_BGP_METAQ, mq->subq[i]);
+ }
- XFREE(MTYPE_BGP_PROCESS_QUEUE, pqnode);
+ XFREE(MTYPE_BGP_METAQ, mq);
}
void bgp_process_queue_init(struct bgp *bgp)
@@ -4065,37 +4329,19 @@ void bgp_process_queue_init(struct bgp *bgp)
bgp->process_queue = work_queue_new(bm->master, name);
}
- bgp->process_queue->spec.workfunc = &bgp_process_wq;
- bgp->process_queue->spec.del_item_data = &bgp_processq_del;
+ bgp->process_queue->spec.workfunc = &meta_queue_process;
bgp->process_queue->spec.max_retries = 0;
bgp->process_queue->spec.hold = 50;
/* Use a higher yield value of 50ms for main queue processing */
bgp->process_queue->spec.yield = 50 * 1000L;
-}
-static struct bgp_process_queue *bgp_processq_alloc(struct bgp *bgp)
-{
- struct bgp_process_queue *pqnode;
-
- pqnode = XCALLOC(MTYPE_BGP_PROCESS_QUEUE,
- sizeof(struct bgp_process_queue));
-
- /* unlocked in bgp_processq_del */
- pqnode->bgp = bgp_lock(bgp);
- STAILQ_INIT(&pqnode->pqueue);
-
- return pqnode;
+ bgp->mq = meta_queue_new();
}
static void bgp_process_internal(struct bgp *bgp, struct bgp_dest *dest,
struct bgp_path_info *pi, afi_t afi,
safi_t safi, bool early_process)
{
-#define ARBITRARY_PROCESS_QLEN 10000
- struct work_queue *wq = bgp->process_queue;
- struct bgp_process_queue *pqnode;
- int pqnode_reuse = 0;
-
/*
* Indicate that *this* pi is in an unsorted
* situation, even if the node is already
@@ -4145,39 +4391,16 @@ static void bgp_process_internal(struct bgp *bgp, struct bgp_dest *dest,
return;
}
- if (wq == NULL)
- return;
-
- /* Add route nodes to an existing work queue item until reaching the
- limit only if is from the same BGP view and it's not an EOIU marker
- */
- if (work_queue_item_count(wq)) {
- struct work_queue_item *item = work_queue_last_item(wq);
- pqnode = item->data;
-
- if (CHECK_FLAG(pqnode->flags, BGP_PROCESS_QUEUE_EOIU_MARKER) ||
- (pqnode->queued >= ARBITRARY_PROCESS_QLEN && !early_process))
- pqnode = bgp_processq_alloc(bgp);
- else
- pqnode_reuse = 1;
- } else
- pqnode = bgp_processq_alloc(bgp);
- /* all unlocked in bgp_process_wq */
+ /* all unlocked in process_subq_xxx functions */
bgp_table_lock(bgp_dest_table(dest));
SET_FLAG(dest->flags, BGP_NODE_PROCESS_SCHEDULED);
bgp_dest_lock_node(dest);
- /* can't be enqueued twice */
- assert(STAILQ_NEXT(dest, pq) == NULL);
if (early_process)
- STAILQ_INSERT_HEAD(&pqnode->pqueue, dest, pq);
+ early_route_process(bgp, dest);
else
- STAILQ_INSERT_TAIL(&pqnode->pqueue, dest, pq);
- pqnode->queued++;
-
- if (!pqnode_reuse)
- work_queue_add(wq, pqnode);
+ other_route_process(bgp, dest);
return;
}
@@ -4196,15 +4419,18 @@ void bgp_process_early(struct bgp *bgp, struct bgp_dest *dest,
void bgp_add_eoiu_mark(struct bgp *bgp)
{
- struct bgp_process_queue *pqnode;
-
- if (bgp->process_queue == NULL)
- return;
+ /*
+ * Create a dummy dest, as the meta queue expects all its elements to
+ * be dests.
+ */
+ struct bgp_dest *dummy_dest = XCALLOC(MTYPE_BGP_NODE, sizeof(struct bgp_dest));
- pqnode = bgp_processq_alloc(bgp);
+ struct bgp_eoiu_info *eoiu_info = XCALLOC(MTYPE_BGP_EOIU_MARKER_INFO,
+ sizeof(struct bgp_eoiu_info));
+ eoiu_info->bgp = bgp;
- SET_FLAG(pqnode->flags, BGP_PROCESS_QUEUE_EOIU_MARKER);
- work_queue_add(bgp->process_queue, pqnode);
+ bgp_dest_set_bgp_eoiu_info(dummy_dest, eoiu_info);
+ eoiu_marker_process(bgp, dummy_dest);
}
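bgp_add_eoiu_mark() carries the owning instance through the queue by parking it in the dummy dest's generic info pointer, which process_eoiu_marker() later reads back. A minimal standalone sketch of that pattern:

#include <stdio.h>
#include <stdlib.h>

struct node {
	void *info; /* generic per-node slot, like dest->info */
};

struct ctx {
	const char *name;
};

int main(void)
{
	struct node *dummy = calloc(1, sizeof(*dummy));
	struct ctx *c = malloc(sizeof(*c));

	c->name = "bgp-instance";
	dummy->info = c; /* enqueue side: stash the owning context */

	struct ctx *got = dummy->info; /* dequeue side: recover it */

	printf("marker context: %s\n", got->name);
	free(c);
	free(dummy);
	return 0;
}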
static void bgp_maximum_prefix_restart_timer(struct event *thread)
@@ -7411,7 +7637,7 @@ static void bgp_purge_af_static_redist_routes(struct bgp *bgp, afi_t afi,
{
struct bgp_table *table;
struct bgp_dest *dest;
- struct bgp_path_info *pi;
+ struct bgp_path_info *pi, *next;
/* Do not install the aggregate route if BGP is in the
* process of termination.
@@ -7422,7 +7648,8 @@ static void bgp_purge_af_static_redist_routes(struct bgp *bgp, afi_t afi,
table = bgp->rib[afi][safi];
for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
- for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(dest); (pi != NULL) && (next = pi->next, 1);
+ pi = next) {
if (pi->peer == bgp->peer_self
&& ((pi->type == ZEBRA_ROUTE_BGP
&& pi->sub_type == BGP_ROUTE_STATIC)
@@ -7922,7 +8149,7 @@ void bgp_aggregate_toggle_suppressed(struct bgp_aggregate *aggregate,
struct bgp_table *table = bgp->rib[afi][safi];
const struct prefix *dest_p;
struct bgp_dest *dest, *top;
- struct bgp_path_info *pi;
+ struct bgp_path_info *pi, *next;
/* We've found a different MED we must revert any suppressed routes. */
top = bgp_node_get(table, p);
@@ -7932,7 +8159,8 @@ void bgp_aggregate_toggle_suppressed(struct bgp_aggregate *aggregate,
if (dest_p->prefixlen <= p->prefixlen)
continue;
- for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(dest); (pi != NULL) && (next = pi->next, 1);
+ pi = next) {
if (BGP_PATH_HOLDDOWN(pi))
continue;
if (pi->sub_type == BGP_ROUTE_AGGREGATE)
@@ -8007,7 +8235,7 @@ bool bgp_aggregate_route(struct bgp *bgp, const struct prefix *p, afi_t afi,
struct community *community = NULL;
struct ecommunity *ecommunity = NULL;
struct lcommunity *lcommunity = NULL;
- struct bgp_path_info *pi;
+ struct bgp_path_info *pi, *next;
uint8_t atomic_aggregate = 0;
/* If the bgp instance is being deleted or self peer is deleted
@@ -8057,7 +8285,8 @@ bool bgp_aggregate_route(struct bgp *bgp, const struct prefix *p, afi_t afi,
if (!bgp_check_advertise(bgp, dest, safi))
continue;
- for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(dest); (pi != NULL) && (next = pi->next, 1);
+ pi = next) {
if (BGP_PATH_HOLDDOWN(pi))
continue;
@@ -8214,7 +8443,7 @@ void bgp_aggregate_delete(struct bgp *bgp, const struct prefix *p, afi_t afi,
struct bgp_table *table;
struct bgp_dest *top;
struct bgp_dest *dest;
- struct bgp_path_info *pi;
+ struct bgp_path_info *pi, *next;
table = bgp->rib[afi][safi];
@@ -8227,7 +8456,8 @@ void bgp_aggregate_delete(struct bgp *bgp, const struct prefix *p, afi_t afi,
if (dest_p->prefixlen <= p->prefixlen)
continue;
- for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(dest); (pi != NULL) && (next = pi->next, 1);
+ pi = next) {
if (BGP_PATH_HOLDDOWN(pi))
continue;
@@ -9331,9 +9561,18 @@ static void route_vty_short_status_out(struct vty *vty,
const struct prefix *p,
json_object *json_path)
{
- enum rpki_states rpki_state = RPKI_NOT_BEING_USED;
+ enum rpki_states rpki_state;
+
+ /* RPKI validation state */
+ rpki_state = hook_call(bgp_rpki_prefix_status, path->peer, path->attr, p);
if (json_path) {
+ if (rpki_state == RPKI_VALID)
+ json_object_boolean_true_add(json_path, "rpkiValid");
+ else if (rpki_state == RPKI_INVALID)
+ json_object_boolean_true_add(json_path, "rpkiInvalid");
+ else if (rpki_state == RPKI_NOTFOUND)
+ json_object_boolean_true_add(json_path, "rpkiNotFound");
/* Route status display. */
if (CHECK_FLAG(path->flags, BGP_PATH_REMOVED))
@@ -9381,10 +9620,6 @@ static void route_vty_short_status_out(struct vty *vty,
return;
}
- /* RPKI validation state */
- rpki_state =
- hook_call(bgp_rpki_prefix_status, path->peer, path->attr, p);
-
if (rpki_state == RPKI_VALID)
vty_out(vty, "V");
else if (rpki_state == RPKI_INVALID)
@@ -10522,14 +10757,13 @@ static void route_vty_out_detail_es_info(struct vty *vty,
}
void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
- const struct prefix *p, struct bgp_path_info *path,
- afi_t afi, safi_t safi,
- enum rpki_states rpki_curr_state,
- json_object *json_paths)
+ const struct prefix *p, struct bgp_path_info *path, afi_t afi,
+ safi_t safi, enum rpki_states rpki_curr_state, json_object *json_paths,
+ struct attr *pattr)
{
char buf[INET6_ADDRSTRLEN];
char vni_buf[30] = {};
- struct attr *attr = path->attr;
+ struct attr *attr = pattr ? pattr : path->attr;
time_t tbuf;
char timebuf[32];
json_object *json_bestpath = NULL;
@@ -11254,6 +11488,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
json_path, "community",
bgp_attr_get_community(attr)->json);
} else {
+ if (!bgp_attr_get_community(attr)->str)
+ community_str(bgp_attr_get_community(attr), true, true);
vty_out(vty, " Community: %s\n",
bgp_attr_get_community(attr)->str);
}
@@ -11261,6 +11497,9 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
/* Line 5 display Extended-community */
if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) {
+ if (!bgp_attr_get_ecommunity(attr)->str)
+ ecommunity_str(bgp_attr_get_ecommunity(attr));
+
if (json_paths) {
json_ext_community = json_object_new_object();
json_object_string_add(
@@ -11275,6 +11514,9 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
}
if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_IPV6_EXT_COMMUNITIES))) {
+ if (!bgp_attr_get_ipv6_ecommunity(attr)->str)
+ ecommunity_str(bgp_attr_get_ipv6_ecommunity(attr));
+
if (json_paths) {
json_ext_ipv6_community = json_object_new_object();
json_object_string_add(json_ext_ipv6_community, "string",
@@ -11300,6 +11542,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
json_path, "largeCommunity",
bgp_attr_get_lcommunity(attr)->json);
} else {
+ if (!bgp_attr_get_lcommunity(attr)->str)
+ lcommunity_str(bgp_attr_get_lcommunity(attr), true, true);
vty_out(vty, " Large Community: %s\n",
bgp_attr_get_lcommunity(attr)->str);
}
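The three hunks above apply the same fix: build the community's cached string rendering on first use when an earlier code path skipped it. A generic sketch of this lazy-caching pattern (the struct and format are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct comm {
	int val;
	char *str; /* NULL until first rendered */
};

static const char *comm_str(struct comm *c)
{
	if (!c->str) {
		c->str = malloc(16);
		snprintf(c->str, 16, "65000:%d", c->val);
	}
	return c->str;
}

int main(void)
{
	struct comm c = { 80, NULL };

	printf("Community: %s\n", comm_str(&c)); /* built on first use */
	printf("Community: %s\n", comm_str(&c)); /* reuses the cache */
	free(c.str);
	return 0;
}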
@@ -11482,11 +11726,11 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
json_last_update = json_object_new_object();
json_object_int_add(json_last_update, "epoch", tbuf);
json_object_string_add(json_last_update, "string",
- ctime_r(&tbuf, timebuf));
+ time_to_string_json(path->uptime, timebuf));
json_object_object_add(json_path, "lastUpdate",
json_last_update);
} else
- vty_out(vty, " Last update: %s", ctime_r(&tbuf, timebuf));
+ vty_out(vty, " Last update: %s", time_to_string(path->uptime, timebuf));
/* Line 10 display PMSI tunnel attribute, if present */
if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL))) {
@@ -11731,14 +11975,13 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t sa
continue;
}
- if (type == bgp_show_type_rpki) {
- if (dest_p->family == AF_INET
- || dest_p->family == AF_INET6)
- rpki_curr_state = hook_call(
- bgp_rpki_prefix_status,
- pi->peer, pi->attr, dest_p);
- if (rpki_target_state != RPKI_NOT_BEING_USED
- && rpki_curr_state != rpki_target_state)
+ if ((dest_p->family == AF_INET || dest_p->family == AF_INET6) &&
+ (detail_routes || detail_json || type == bgp_show_type_rpki)) {
+ rpki_curr_state = hook_call(bgp_rpki_prefix_status, pi->peer,
+ pi->attr, dest_p);
+ if (type == bgp_show_type_rpki &&
+ rpki_target_state != RPKI_NOT_BEING_USED &&
+ rpki_curr_state != rpki_target_state)
continue;
}
@@ -11967,11 +12210,9 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t sa
prd, table->afi, safi,
NULL, false, false);
- route_vty_out_detail(
- vty, bgp, dest, dest_p, pi,
- family2afi(dest_p->family),
- safi, RPKI_NOT_BEING_USED,
- json_paths);
+ route_vty_out_detail(vty, bgp, dest, dest_p, pi,
+ family2afi(dest_p->family), safi,
+ rpki_curr_state, json_paths, NULL);
} else {
route_vty_out(vty, dest_p, pi, display,
safi, json_paths, wide);
@@ -12084,8 +12325,13 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t sa
}
if (is_last) {
unsigned long i;
- for (i = 0; i < *json_header_depth; ++i)
+ for (i = 0; i < *json_header_depth; ++i) {
vty_out(vty, " } ");
+ /* Put this information before closing the last `}` */
+ if (i == *json_header_depth - 2)
+ vty_out(vty, ", \"totalRoutes\": %ld, \"totalPaths\": %ld",
+ output_count, total_count);
+ }
if (!all)
vty_out(vty, "\n");
}
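The loop change above splices the route and path totals in just before the final closing brace, so they land in the outermost JSON object. A standalone sketch of the same splice:

#include <stdio.h>

int main(void)
{
	unsigned long depth = 3, routes = 10, paths = 12;

	for (unsigned long i = 0; i < depth; ++i) {
		printf(" } ");
		/* splice the totals in before the last closing brace */
		if (i == depth - 2)
			printf(", \"totalRoutes\": %lu, \"totalPaths\": %lu",
			       routes, paths);
	}
	printf("\n");
	return 0;
}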
@@ -12481,11 +12727,10 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp,
}
}
-static void bgp_show_path_info(const struct prefix_rd *pfx_rd,
- struct bgp_dest *bgp_node, struct vty *vty,
- struct bgp *bgp, afi_t afi, safi_t safi,
- json_object *json, enum bgp_path_type pathtype,
- int *display, enum rpki_states rpki_target_state)
+static void bgp_show_path_info(const struct prefix_rd *pfx_rd, struct bgp_dest *bgp_node,
+ struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
+ json_object *json, enum bgp_path_type pathtype, int *display,
+ enum rpki_states rpki_target_state, struct attr *attr)
{
struct bgp_path_info *pi;
int header = 1;
@@ -12528,10 +12773,8 @@ static void bgp_show_path_info(const struct prefix_rd *pfx_rd,
|| (pathtype == BGP_PATH_SHOW_MULTIPATH
&& (CHECK_FLAG(pi->flags, BGP_PATH_MULTIPATH)
|| CHECK_FLAG(pi->flags, BGP_PATH_SELECTED))))
- route_vty_out_detail(vty, bgp, bgp_node,
- bgp_dest_get_prefix(bgp_node), pi,
- afi, safi, rpki_curr_state,
- json_paths);
+ route_vty_out_detail(vty, bgp, bgp_node, bgp_dest_get_prefix(bgp_node), pi,
+ afi, safi, rpki_curr_state, json_paths, attr);
}
if (json && json_paths) {
@@ -12618,9 +12861,8 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp,
continue;
}
- bgp_show_path_info((struct prefix_rd *)dest_p, rm, vty,
- bgp, afi, safi, json, pathtype,
- &display, rpki_target_state);
+ bgp_show_path_info((struct prefix_rd *)dest_p, rm, vty, bgp, afi, safi,
+ json, pathtype, &display, rpki_target_state, NULL);
bgp_dest_unlock_node(rm);
}
@@ -12679,9 +12921,8 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp,
rm = longest_pfx;
bgp_dest_lock_node(rm);
- bgp_show_path_info((struct prefix_rd *)dest_p, rm, vty,
- bgp, afi, safi, json, pathtype,
- &display, rpki_target_state);
+ bgp_show_path_info((struct prefix_rd *)dest_p, rm, vty, bgp, afi, safi,
+ json, pathtype, &display, rpki_target_state, NULL);
bgp_dest_unlock_node(rm);
}
@@ -12707,9 +12948,8 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp,
const struct prefix *dest_p = bgp_dest_get_prefix(dest);
if (!prefix_check
|| dest_p->prefixlen == match.prefixlen) {
- bgp_show_path_info(NULL, dest, vty, bgp, afi,
- safi, json, pathtype,
- &display, rpki_target_state);
+ bgp_show_path_info(NULL, dest, vty, bgp, afi, safi, json, pathtype,
+ &display, rpki_target_state, NULL);
}
bgp_dest_unlock_node(dest);
@@ -14603,10 +14843,8 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
if (use_json)
json_net = json_object_new_object();
- bgp_show_path_info(NULL /* prefix_rd */, dest, vty, bgp,
- afi, safi, json_net,
- BGP_PATH_SHOW_ALL, &display,
- RPKI_NOT_BEING_USED);
+ bgp_show_path_info(NULL /* prefix_rd */, dest, vty, bgp, afi, safi, json_net,
+ BGP_PATH_SHOW_ALL, &display, RPKI_NOT_BEING_USED, NULL);
if (use_json)
json_object_object_addf(json_ar, json_net,
"%pFX", rn_p);
@@ -14740,11 +14978,9 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
pass_in = &buildit;
} else
pass_in = dest;
- bgp_show_path_info(
- NULL, pass_in, vty, bgp, afi,
- safi, json_net,
- BGP_PATH_SHOW_ALL, &display,
- RPKI_NOT_BEING_USED);
+ bgp_show_path_info(NULL, pass_in, vty, bgp, afi, safi,
+ json_net, BGP_PATH_SHOW_ALL, &display,
+ RPKI_NOT_BEING_USED, NULL);
if (use_json)
json_object_object_addf(
json_ar, json_net,
@@ -14770,9 +15006,8 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
bgp_dest_get_prefix(dest);
attr = *adj->attr;
- ret = bgp_output_modifier(
- peer, rn_p, &attr, afi, safi,
- rmap_name);
+ ret = bgp_output_modifier(peer, rn_p, &attr, afi, safi,
+ rmap_name);
if (ret == RMAP_DENY) {
(*filtered_count)++;
@@ -14796,7 +15031,8 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
json_net = json_object_new_object();
bgp_show_path_info(NULL, dest, vty, bgp, afi, safi,
json_net, BGP_PATH_SHOW_ALL,
- &display, RPKI_NOT_BEING_USED);
+ &display, RPKI_NOT_BEING_USED,
+ adj->attr);
if (use_json)
json_object_object_addf(json_ar, json_net,
"%pFX", rn_p);
@@ -14809,7 +15045,7 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
*/
if (use_json) {
route_vty_out_tmp(vty, bgp, dest, rn_p,
- &attr, safi, use_json,
+ adj->attr, safi, use_json,
json_ar, wide);
} else {
for (bpi = bgp_dest_get_bgp_path_info(dest);
@@ -14842,11 +15078,9 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
if (use_json)
json_net =
json_object_new_object();
- bgp_show_path_info(
- NULL /* prefix_rd */, dest, vty,
- bgp, afi, safi, json_net,
- BGP_PATH_SHOW_BESTPATH,
- &display, RPKI_NOT_BEING_USED);
+ bgp_show_path_info(NULL /* prefix_rd */, dest, vty, bgp, afi,
+ safi, json_net, BGP_PATH_SHOW_BESTPATH,
+ &display, RPKI_NOT_BEING_USED, NULL);
if (use_json)
json_object_object_addf(
json_ar, json_net,
@@ -15543,6 +15777,28 @@ static int bgp_distance_unset(struct vty *vty, const char *distance_str,
return CMD_SUCCESS;
}
+void bgp_address_family_distance_delete(void)
+{
+ afi_t afi = AFI_UNSPEC;
+ safi_t safi = SAFI_UNSPEC;
+ struct bgp_dest *dest = NULL;
+ struct bgp_distance *bdistance = NULL;
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ for (dest = bgp_table_top(bgp_distance_table[afi][safi]); dest;
+ dest = bgp_route_next(dest)) {
+ if (!bgp_dest_has_bgp_path_info_data(dest))
+ continue;
+ bdistance = bgp_dest_get_bgp_distance_info(dest);
+ XFREE(MTYPE_AS_LIST, bdistance->access_list);
+ bgp_distance_free(bdistance);
+
+ bgp_dest_set_bgp_distance_info(dest, NULL);
+ bgp_dest_unlock_node(dest);
+ }
+ }
+}
+
/* Apply BGP information to distance method. */
uint8_t bgp_distance_apply(const struct prefix *p, struct bgp_path_info *pinfo,
afi_t afi, safi_t safi, struct bgp *bgp)
diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h
index 1df0ffd300..474e229575 100644
--- a/bgpd/bgp_route.h
+++ b/bgpd/bgp_route.h
@@ -589,6 +589,42 @@ enum bgp_path_type {
BGP_PATH_SHOW_MULTIPATH
};
+/* meta-queue structure:
+ * sub-queue 0: early routes
+ * sub-queue 1: other routes
+ * sub-queue 2: EOIU marker
+ */
+#define MQ_SIZE 3
+
+/* For checking whether an object has already been queued in some sub-queue */
+#define MQ_BIT_MASK ((1 << MQ_SIZE) - 1)
+
+struct meta_queue {
+ STAILQ_HEAD(bgp_dest_queue, bgp_dest) * subq[MQ_SIZE];
+ uint32_t size; /* sum of lengths of all subqueues */
+};
+
+/*
+ * When the update-delay expires, BGP inserts an EOIU (End-Of-Initial-Update) marker
+ * into the META_QUEUE_EOIU_MARKER sub-queue. This meta queue holds only
+ * bgp_dest structures. To process the EOIU marker, we need to call bgp_process_main_one()
+ * on the corresponding BGP instance. Since the marker itself isn't a real route
+ * (a dummy dest is created for it) and doesn't inherently carry the BGP instance pointer,
+ * we store the struct bgp pointer in the dest->info field. This ensures that, when processing
+ * the EOIU marker, we have the necessary context (the relevant BGP instance) available.
+ */
+struct bgp_eoiu_info {
+ struct bgp *bgp;
+};
+
+/*
+ * Meta-queue sub-queue names
+ */
+enum meta_queue_indexes {
+ META_QUEUE_EARLY_ROUTE,
+ META_QUEUE_OTHER_ROUTE,
+ META_QUEUE_EOIU_MARKER,
+};
+
static inline void bgp_bump_version(struct bgp_dest *dest)
{
dest->version = bgp_table_next_version(bgp_dest_table(dest));
@@ -795,6 +831,7 @@ extern void bgp_redistribute_withdraw(struct bgp *, afi_t, int, unsigned short);
extern void bgp_static_add(struct bgp *);
extern void bgp_static_delete(struct bgp *);
+extern void bgp_address_family_distance_delete(void);
extern void bgp_static_redo_import_check(struct bgp *);
extern void bgp_purge_static_redist_routes(struct bgp *bgp);
extern void bgp_static_update(struct bgp *bgp, const struct prefix *p,
@@ -932,11 +969,10 @@ extern void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp,
safi_t safi, json_object *json,
bool incremental_print,
bool local_table);
-extern void route_vty_out_detail(struct vty *vty, struct bgp *bgp,
- struct bgp_dest *bn, const struct prefix *p,
- struct bgp_path_info *path, afi_t afi,
- safi_t safi, enum rpki_states,
- json_object *json_paths);
+extern void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
+ const struct prefix *p, struct bgp_path_info *path, afi_t afi,
+ safi_t safi, enum rpki_states, json_object *json_paths,
+ struct attr *attr);
extern int bgp_show_table_rd(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
struct bgp_table *table, struct prefix_rd *prd,
enum bgp_show_type type, void *output_arg,
@@ -973,4 +1009,8 @@ extern int bgp_path_info_cmp(struct bgp *bgp, struct bgp_path_info *new,
#define bgp_path_info_add(A, B) \
bgp_path_info_add_with_caller(__func__, (A), (B))
#define bgp_path_info_free(B) bgp_path_info_free_with_caller(__func__, (B))
+extern void bgp_meta_queue_free(struct meta_queue *mq);
+extern int early_route_process(struct bgp *bgp, struct bgp_dest *dest);
+extern int other_route_process(struct bgp *bgp, struct bgp_dest *dest);
+extern int eoiu_marker_process(struct bgp *bgp, struct bgp_dest *dest);
#endif /* _QUAGGA_BGP_ROUTE_H */
diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c
index 347c5d02a1..04a709b350 100644
--- a/bgpd/bgp_rpki.c
+++ b/bgpd/bgp_rpki.c
@@ -155,7 +155,6 @@ static enum route_map_cmd_result_t route_match(void *rule,
void *object);
static void *route_match_compile(const char *arg);
static void revalidate_bgp_node(struct bgp_dest *dest, afi_t afi, safi_t safi);
-static void revalidate_all_routes(struct rpki_vrf *rpki_vrf);
static bool rpki_debug_conf, rpki_debug_term;
@@ -586,48 +585,10 @@ static void rpki_revalidate_prefix(struct event *thread)
XFREE(MTYPE_BGP_RPKI_REVALIDATE, rrp);
}
-static void bgpd_sync_callback(struct event *thread)
+static void revalidate_single_prefix(struct vrf *vrf, struct prefix prefix, afi_t afi)
{
struct bgp *bgp;
struct listnode *node;
- struct prefix prefix;
- struct pfx_record rec;
- struct rpki_vrf *rpki_vrf = EVENT_ARG(thread);
- struct vrf *vrf = NULL;
-
- event_add_read(bm->master, bgpd_sync_callback, rpki_vrf,
- rpki_vrf->rpki_sync_socket_bgpd, NULL);
-
- if (atomic_load_explicit(&rpki_vrf->rtr_update_overflow,
- memory_order_seq_cst)) {
- while (read(rpki_vrf->rpki_sync_socket_bgpd, &rec,
- sizeof(struct pfx_record)) != -1)
- ;
-
- atomic_store_explicit(&rpki_vrf->rtr_update_overflow, 0,
- memory_order_seq_cst);
- revalidate_all_routes(rpki_vrf);
- return;
- }
-
- int retval = read(rpki_vrf->rpki_sync_socket_bgpd, &rec,
- sizeof(struct pfx_record));
- if (retval != sizeof(struct pfx_record)) {
- RPKI_DEBUG("Could not read from rpki_sync_socket_bgpd");
- return;
- }
- pfx_record_to_prefix(&rec, &prefix);
-
- afi_t afi = (rec.prefix.ver == LRTR_IPV4) ? AFI_IP : AFI_IP6;
-
- if (rpki_vrf->vrfname) {
- vrf = vrf_lookup_by_name(rpki_vrf->vrfname);
- if (!vrf) {
- zlog_err("%s(): vrf for rpki %s not found", __func__,
- rpki_vrf->vrfname);
- return;
- }
- }
for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
safi_t safi;
@@ -655,101 +616,76 @@ static void bgpd_sync_callback(struct event *thread)
}
}
-static void revalidate_bgp_node(struct bgp_dest *bgp_dest, afi_t afi,
- safi_t safi)
+static void bgpd_sync_callback(struct event *thread)
{
- struct bgp_adj_in *ain;
- mpls_label_t *label;
- uint8_t num_labels;
-
- for (ain = bgp_dest->adj_in; ain; ain = ain->next) {
- struct bgp_path_info *path =
- bgp_dest_get_bgp_path_info(bgp_dest);
-
- num_labels = BGP_PATH_INFO_NUM_LABELS(path);
- label = num_labels ? path->extra->labels->label : NULL;
-
- (void)bgp_update(ain->peer, bgp_dest_get_prefix(bgp_dest),
- ain->addpath_rx_id, ain->attr, afi, safi,
- ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL, label,
- num_labels, 1, NULL);
- }
-}
-
-/*
- * The act of a soft reconfig in revalidation is really expensive
- * coupled with the fact that the download of a full rpki state
- * from a rpki server can be expensive, let's break up the revalidation
- * to a point in time in the future to allow other bgp events
- * to take place too.
- */
-struct rpki_revalidate_peer {
+ struct prefix prefix;
+ struct pfx_record rec;
+ struct rpki_vrf *rpki_vrf = EVENT_ARG(thread);
+ struct vrf *vrf = NULL;
afi_t afi;
- safi_t safi;
- struct peer *peer;
-};
+ int retval;
-static void bgp_rpki_revalidate_peer(struct event *thread)
-{
- struct rpki_revalidate_peer *rvp = EVENT_ARG(thread);
-
- /*
- * Here's the expensive bit of gnomish deviousness
- */
- bgp_soft_reconfig_in(rvp->peer, rvp->afi, rvp->safi);
-
- XFREE(MTYPE_BGP_RPKI_REVALIDATE, rvp);
-}
-
-static void revalidate_all_routes(struct rpki_vrf *rpki_vrf)
-{
- struct bgp *bgp;
- struct listnode *node;
- struct vrf *vrf = NULL;
+ event_add_read(bm->master, bgpd_sync_callback, rpki_vrf, rpki_vrf->rpki_sync_socket_bgpd,
+ NULL);
if (rpki_vrf->vrfname) {
vrf = vrf_lookup_by_name(rpki_vrf->vrfname);
if (!vrf) {
- zlog_err("%s(): vrf for rpki %s not found", __func__,
- rpki_vrf->vrfname);
+ zlog_err("%s(): vrf for rpki %s not found", __func__, rpki_vrf->vrfname);
return;
}
}
- for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
- struct peer *peer;
- struct listnode *peer_listnode;
+ if (atomic_load_explicit(&rpki_vrf->rtr_update_overflow, memory_order_seq_cst)) {
+ ssize_t size = 0;
- if (!vrf && bgp->vrf_id != VRF_DEFAULT)
- continue;
- if (vrf && bgp->vrf_id != vrf->vrf_id)
- continue;
+ retval = read(rpki_vrf->rpki_sync_socket_bgpd, &rec, sizeof(struct pfx_record));
+ while (retval != -1) {
+ if (retval != sizeof(struct pfx_record))
+ break;
- for (ALL_LIST_ELEMENTS_RO(bgp->peer, peer_listnode, peer)) {
- afi_t afi;
- safi_t safi;
+ size += retval;
+ pfx_record_to_prefix(&rec, &prefix);
+ afi = (rec.prefix.ver == LRTR_IPV4) ? AFI_IP : AFI_IP6;
+ revalidate_single_prefix(vrf, prefix, afi);
- FOREACH_AFI_SAFI (afi, safi) {
- struct rpki_revalidate_peer *rvp;
+ retval = read(rpki_vrf->rpki_sync_socket_bgpd, &rec,
+ sizeof(struct pfx_record));
+ }
- if (!bgp->rib[afi][safi])
- continue;
+ RPKI_DEBUG("Socket overflow detected (%zu), revalidating affected prefixes", size);
- if (!peer_established(peer->connection))
- continue;
+ atomic_store_explicit(&rpki_vrf->rtr_update_overflow, 0, memory_order_seq_cst);
+ return;
+ }
- rvp = XCALLOC(MTYPE_BGP_RPKI_REVALIDATE,
- sizeof(*rvp));
- rvp->peer = peer;
- rvp->afi = afi;
- rvp->safi = safi;
+ retval = read(rpki_vrf->rpki_sync_socket_bgpd, &rec, sizeof(struct pfx_record));
+ if (retval != sizeof(struct pfx_record)) {
+ RPKI_DEBUG("Could not read from rpki_sync_socket_bgpd");
+ return;
+ }
+ pfx_record_to_prefix(&rec, &prefix);
- event_add_event(
- bm->master, bgp_rpki_revalidate_peer,
- rvp, 0,
- &peer->t_revalidate_all[afi][safi]);
- }
- }
+ afi = (rec.prefix.ver == LRTR_IPV4) ? AFI_IP : AFI_IP6;
+
+ revalidate_single_prefix(vrf, prefix, afi);
+}
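On overflow, the rewritten callback drains the sync socket record by record and revalidates each prefix as it goes, instead of flushing the socket and revalidating everything. A standalone sketch of such a fixed-size-record drain, using a pipe in place of the RTR sync socket:

#include <stdio.h>
#include <unistd.h>

struct rec {
	int v;
};

static void drain(int fd)
{
	struct rec r;

	/* stop on EOF, error, or a short read, as the loop above does */
	while (read(fd, &r, sizeof(r)) == (ssize_t)sizeof(r))
		printf("revalidate record %d\n", r.v);
}

int main(void)
{
	int fds[2];
	struct rec r1 = { 1 }, r2 = { 2 };

	if (pipe(fds) != 0)
		return 1;
	write(fds[1], &r1, sizeof(r1));
	write(fds[1], &r2, sizeof(r2));
	close(fds[1]); /* read() then returns 0 and the drain ends */
	drain(fds[0]);
	return 0;
}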
+
+static void revalidate_bgp_node(struct bgp_dest *bgp_dest, afi_t afi, safi_t safi)
+{
+ struct bgp_adj_in *ain;
+ mpls_label_t *label;
+ uint8_t num_labels;
+
+ for (ain = bgp_dest->adj_in; ain; ain = ain->next) {
+ struct bgp_path_info *path = bgp_dest_get_bgp_path_info(bgp_dest);
+
+ num_labels = BGP_PATH_INFO_NUM_LABELS(path);
+ label = num_labels ? path->extra->labels->label : NULL;
+
+ (void)bgp_update(ain->peer, bgp_dest_get_prefix(bgp_dest), ain->addpath_rx_id,
+ ain->attr, afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL,
+ label, num_labels, 1, NULL);
}
}
diff --git a/bgpd/bgp_table.h b/bgpd/bgp_table.h
index 130f5ca749..88276de848 100644
--- a/bgpd/bgp_table.h
+++ b/bgpd/bgp_table.h
@@ -391,6 +391,16 @@ static inline void bgp_dest_set_bgp_path_info(struct bgp_dest *dest,
dest->info = bi;
}
+static inline struct bgp_eoiu_info *bgp_dest_get_bgp_eoiu_info(struct bgp_dest *dest)
+{
+ return dest ? dest->info : NULL;
+}
+
+static inline void bgp_dest_set_bgp_eoiu_info(struct bgp_dest *dest, struct bgp_eoiu_info *eoiu_info)
+{
+ dest->info = eoiu_info;
+}
+
static inline struct bgp_table *
bgp_dest_get_bgp_table_info(struct bgp_dest *dest)
{
@@ -419,7 +429,6 @@ static inline unsigned int bgp_dest_get_lock_count(const struct bgp_dest *dest)
}
#ifdef _FRR_ATTRIBUTE_PRINTFRR
-#pragma FRR printfrr_ext "%pRN" (struct bgp_node *)
#pragma FRR printfrr_ext "%pBD" (struct bgp_dest *)
#endif
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index ef03606707..35ddfc34ff 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -757,7 +757,7 @@ static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
json_time = json_object_new_object();
json_object_int_add(json_time, "epoch", epoch_tbuf);
json_object_string_add(json_time, "epochString",
- ctime_r(&epoch_tbuf, timebuf));
+ time_to_string_json(updgrp->uptime, timebuf));
json_object_object_add(json_updgrp, "groupCreateTime",
json_time);
json_object_string_add(json_updgrp, "afi",
@@ -766,8 +766,7 @@ static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
safi2str(updgrp->safi));
} else {
vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
- vty_out(vty, " Created: %s",
- timestamp_string(updgrp->uptime, timebuf));
+ vty_out(vty, " Created: %s", time_to_string(updgrp->uptime, timebuf));
}
filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
@@ -835,15 +834,14 @@ static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
json_object_int_add(json_subgrp_time, "epoch",
epoch_tbuf);
json_object_string_add(json_subgrp_time, "epochString",
- ctime_r(&epoch_tbuf, timebuf));
+ time_to_string_json(subgrp->uptime, timebuf));
json_object_object_add(json_subgrp, "groupCreateTime",
json_subgrp_time);
} else {
vty_out(vty, "\n");
vty_out(vty, " Update-subgroup %" PRIu64 ":\n",
subgrp->id);
- vty_out(vty, " Created: %s",
- timestamp_string(subgrp->uptime, timebuf));
+ vty_out(vty, " Created: %s", time_to_string(subgrp->uptime, timebuf));
}
if (subgrp->split_from.update_group_id
diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c
index 1a66df59fc..a1bf9a4c61 100644
--- a/bgpd/bgp_updgrp_adv.c
+++ b/bgpd/bgp_updgrp_adv.c
@@ -228,6 +228,12 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
afi2str(afi), safi2str(safi), ctx->dest);
UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
+ /* Withdraw stale addpath routes without waiting for the coalesce timer
+ * to expire. Otherwise, since adj->addpath_tx_id is overwritten, the
+ * code never notices that it still has to do a withdrawal.
+ */
+ if (addpath_capable)
+ subgrp_withdraw_stale_addpath(ctx, subgrp);
/*
* Skip the subgroups that have coalesce timer running. We will
* walk the entire prefix table for those subgroups when the
@@ -237,8 +243,6 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
/* An update-group that uses addpath */
if (addpath_capable) {
- subgrp_withdraw_stale_addpath(ctx, subgrp);
-
subgrp_announce_addpath_best_selected(ctx->dest,
subgrp);
@@ -582,7 +586,7 @@ bool bgp_adj_out_set_subgroup(struct bgp_dest *dest,
bgp_dump_attr(attr, attr_str, sizeof(attr_str));
- zlog_debug("%s suppress UPDATE w/ attr: %s", peer->host,
+ zlog_debug("%s suppress UPDATE %pBD w/ attr: %s", peer->host, dest,
attr_str);
}
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index bb0c69ca56..550adf93db 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -95,15 +95,15 @@ FRR_CFG_DEFAULT_BOOL(BGP_DETERMINISTIC_MED,
);
FRR_CFG_DEFAULT_ULONG(BGP_CONNECT_RETRY,
{ .val_ulong = 10, .match_profile = "datacenter", },
- { .val_ulong = 120 },
+ { .val_ulong = BGP_DEFAULT_CONNECT_RETRY },
);
FRR_CFG_DEFAULT_ULONG(BGP_HOLDTIME,
{ .val_ulong = 9, .match_profile = "datacenter", },
- { .val_ulong = 180 },
+ { .val_ulong = BGP_DEFAULT_HOLDTIME },
);
FRR_CFG_DEFAULT_ULONG(BGP_KEEPALIVE,
{ .val_ulong = 3, .match_profile = "datacenter", },
- { .val_ulong = 60 },
+ { .val_ulong = BGP_DEFAULT_KEEPALIVE },
);
FRR_CFG_DEFAULT_BOOL(BGP_EBGP_REQUIRES_POLICY,
{ .val_bool = false, .match_profile = "datacenter", },
@@ -1696,8 +1696,13 @@ DEFUN (no_router_bgp,
}
if (bgp->l3vni) {
- vty_out(vty, "%% Please unconfigure l3vni %u\n",
- bgp->l3vni);
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE))
+ vty_out(vty,
+ "%% L3VNI %u is scheduled to be deleted. Please give it few secs and retry the command\n",
+ bgp->l3vni);
+ else
+ vty_out(vty, "%% Please unconfigure l3vni %u\n", bgp->l3vni);
+
return CMD_WARNING_CONFIG_FAILED;
}
@@ -9801,6 +9806,8 @@ DEFPY (af_rd_vpn_export,
bgp_get_default(), bgp);
if (yes) {
+ if (bgp->vpn_policy[afi].tovpn_rd_pretty)
+ XFREE(MTYPE_BGP_NAME, bgp->vpn_policy[afi].tovpn_rd_pretty);
bgp->vpn_policy[afi].tovpn_rd_pretty = XSTRDUP(MTYPE_BGP_NAME,
rd_str);
bgp->vpn_policy[afi].tovpn_rd = prd;
@@ -9941,26 +9948,9 @@ DEFPY (af_label_vpn_export,
UNSET_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG);
- } else if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) {
+ } else if (CHECK_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_LABEL_AUTO))
/* release any previous auto label */
- if (bgp->vpn_policy[afi].tovpn_label != MPLS_LABEL_NONE) {
-
- /*
- * label has previously been automatically
- * assigned by labelpool: release it
- *
- * NB if tovpn_label == MPLS_LABEL_NONE it
- * means the automatic assignment is in flight
- * and therefore the labelpool callback must
- * detect that the auto label is not needed.
- */
-
- bgp_lp_release(LP_TYPE_VRF,
- &bgp->vpn_policy[afi],
- bgp->vpn_policy[afi].tovpn_label);
- }
- }
+ bgp_vpn_release_label(bgp, afi, false);
if (yes) {
if (label_auto) {
@@ -15474,9 +15464,12 @@ CPP_NOTICE("Remove `gracefulRestartCapability` JSON field")
if (use_json) {
json_object *json_stat = NULL;
+ json_object *json_pfx_stat = NULL;
+
json_stat = json_object_new_object();
- /* Packet counts. */
+ json_pfx_stat = json_object_new_object();
+ /* Packet counts. */
atomic_size_t outq_count, inq_count;
outq_count = atomic_load_explicit(&p->connection->obuf->count,
memory_order_relaxed);
@@ -15526,6 +15519,16 @@ CPP_NOTICE("Remove `gracefulRestartCapability` JSON field")
json_object_int_add(json_stat, "totalSent", PEER_TOTAL_TX(p));
json_object_int_add(json_stat, "totalRecv", PEER_TOTAL_RX(p));
json_object_object_add(json_neigh, "messageStats", json_stat);
+
+ /* Prefix statistics */
+ json_object_int_add(json_pfx_stat, "inboundFiltered", p->stat_pfx_filter);
+ json_object_int_add(json_pfx_stat, "aspathLoop", p->stat_pfx_aspath_loop);
+ json_object_int_add(json_pfx_stat, "originatorLoop", p->stat_pfx_originator_loop);
+ json_object_int_add(json_pfx_stat, "clusterLoop", p->stat_pfx_cluster_loop);
+ json_object_int_add(json_pfx_stat, "invalidNextHop", p->stat_pfx_nh_invalid);
+ json_object_int_add(json_pfx_stat, "withdrawn", p->stat_pfx_withdraw);
+ json_object_int_add(json_pfx_stat, "attributesDiscarded", p->stat_pfx_discard);
+ json_object_object_add(json_neigh, "prefixStats", json_pfx_stat);
} else {
atomic_size_t outq_count, inq_count, open_out, open_in,
notify_out, notify_in, update_out, update_in,
@@ -15577,8 +15580,18 @@ CPP_NOTICE("Remove `gracefulRestartCapability` JSON field")
refresh_in);
vty_out(vty, " Capability: %10zu %10zu\n",
dynamic_cap_out, dynamic_cap_in);
- vty_out(vty, " Total: %10u %10u\n",
- (uint32_t)PEER_TOTAL_TX(p), (uint32_t)PEER_TOTAL_RX(p));
+ vty_out(vty, " Total: %10u %10u\n\n", (uint32_t)PEER_TOTAL_TX(p),
+ (uint32_t)PEER_TOTAL_RX(p));
+
+ /* Prefix statistics */
+ vty_out(vty, " Prefix statistics:\n");
+ vty_out(vty, " Inbound filtered: %u\n", p->stat_pfx_filter);
+ vty_out(vty, " AS-PATH loop: %u\n", p->stat_pfx_aspath_loop);
+ vty_out(vty, " Originator loop: %u\n", p->stat_pfx_originator_loop);
+ vty_out(vty, " Cluster loop: %u\n", p->stat_pfx_cluster_loop);
+ vty_out(vty, " Invalid next-hop: %u\n", p->stat_pfx_nh_invalid);
+ vty_out(vty, " Withdrawn: %u\n", p->stat_pfx_withdraw);
+ vty_out(vty, " Attributes discarded: %u\n\n", p->stat_pfx_discard);
}
if (use_json) {
@@ -18776,7 +18789,11 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
/* enforce-first-as */
if (CHECK_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS)) {
- if (!peergroup_flag_check(peer, PEER_FLAG_ENFORCE_FIRST_AS))
+ /* The `no` form is printed because this enforcement is enabled
+ * by default, thus the configuration must be printed inverted.
+ * See peer_new().
+ */
+ if (peergroup_flag_check(peer, PEER_FLAG_ENFORCE_FIRST_AS))
vty_out(vty, " no neighbor %s enforce-first-as\n", addr);
} else {
if (peergroup_flag_check(peer, PEER_FLAG_ENFORCE_FIRST_AS))
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 688dfacaa0..e3465feda8 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -421,11 +421,10 @@ static int bgp_interface_address_delete(ZAPI_CALLBACK_ARGS)
if (addr->family == AF_INET)
continue;
- if (!IN6_IS_ADDR_LINKLOCAL(&addr->u.prefix6)
- && memcmp(&peer->nexthop.v6_global,
- &addr->u.prefix6, 16)
- == 0) {
- memset(&peer->nexthop.v6_global, 0, 16);
+ if (!IN6_IS_ADDR_LINKLOCAL(&addr->u.prefix6) &&
+ memcmp(&peer->nexthop.v6_global, &addr->u.prefix6, IPV6_MAX_BYTELEN) ==
+ 0) {
+ memset(&peer->nexthop.v6_global, 0, IPV6_MAX_BYTELEN);
FOREACH_AFI_SAFI (afi, safi)
bgp_announce_route(peer, afi, safi,
true);
@@ -744,6 +743,7 @@ bool bgp_zebra_nexthop_set(union sockunion *local, union sockunion *remote,
int ret = 0;
struct interface *ifp = NULL;
bool v6_ll_avail = true;
+ bool shared_network_original = peer->shared_network;
memset(nexthop, 0, sizeof(struct bgp_nexthop));
@@ -838,9 +838,9 @@ bool bgp_zebra_nexthop_set(union sockunion *local, union sockunion *remote,
if (!v6_ll_avail && !peer->conf_if)
v6_ll_avail = true;
if (if_lookup_by_ipv4(&remote->sin.sin_addr, peer->bgp->vrf_id))
- peer->shared_network = 1;
+ peer->shared_network = true;
else
- peer->shared_network = 0;
+ peer->shared_network = false;
}
/* IPv6 connection, fetch and store IPv4 local address if any. */
@@ -903,11 +903,14 @@ bool bgp_zebra_nexthop_set(union sockunion *local, union sockunion *remote,
|| if_lookup_by_ipv6(&remote->sin6.sin6_addr,
remote->sin6.sin6_scope_id,
peer->bgp->vrf_id))
- peer->shared_network = 1;
+ peer->shared_network = true;
else
- peer->shared_network = 0;
+ peer->shared_network = false;
}
+ if (shared_network_original != peer->shared_network)
+ bgp_peer_bfd_update_source(peer);
+
/* KAME stack specific treatment. */
#ifdef KAME
if (IN6_IS_ADDR_LINKLOCAL(&nexthop->v6_global)
@@ -1187,9 +1190,10 @@ static bool update_ipv6nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp,
ifindex =
pi->peer->nexthop.ifp->ifindex;
if (!ifindex) {
- if (pi->peer->conf_if)
- ifindex = pi->peer->ifp->ifindex;
- else if (pi->peer->ifname)
+ if (pi->peer->conf_if) {
+ if (pi->peer->ifp)
+ ifindex = pi->peer->ifp->ifindex;
+ } else if (pi->peer->ifname)
ifindex = ifname2ifindex(
pi->peer->ifname,
pi->peer->bgp->vrf_id);
@@ -3025,6 +3029,48 @@ static void bgp_zebra_connected(struct zclient *zclient)
BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(bgp, bgp->peer);
}
+void bgp_zebra_process_remote_routes_for_l2vni(struct event *e)
+{
+ /*
+ * If we have learnt and retained remote routes (VTEPs, MACs)
+ * for this VNI, install them.
+ */
+ install_uninstall_routes_for_vni(NULL, NULL, true);
+
+ /*
+ * If there are VNIs still pending to be processed, schedule them
+ * after a small sleep so that the CPU can be used for other purposes.
+ */
+ if (zebra_l2_vni_count(&bm->zebra_l2_vni_head))
+ event_add_timer_msec(bm->master, bgp_zebra_process_remote_routes_for_l2vni, NULL,
+ 20, &bm->t_bgp_zebra_l2_vni);
+}
+
+void bgp_zebra_process_remote_routes_for_l3vrf(struct event *e)
+{
+ /*
+ * Install/uninstall all remote routes belonging to the l3vni.
+ *
+ * NOTE:
+ * - At this point it does not matter whether we call
+ * install_routes_for_vrf/uninstall_routes_for_vrf.
+ * - Since we pass struct bgp as NULL,
+ * * we iterate the bm FIFO list
+ * * the second argument (true) is ignored and is instead
+ * derived from the BGP VRF's flags for ADD/DELETE.
+ */
+ install_uninstall_routes_for_vrf(NULL, true);
+
+ /*
+ * If there are L3VNIs still pending to be processed, schedule them
+ * after a small sleep so that the CPU can be used for other purposes.
+ */
+ if (zebra_l3_vni_count(&bm->zebra_l3_vni_head)) {
+ event_add_timer_msec(bm->master, bgp_zebra_process_remote_routes_for_l3vrf, NULL,
+ 20, &bm->t_bgp_zebra_l3_vni);
+ }
+}
+
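Both handlers above process one batch and, if VNIs remain queued, re-arm a 20 ms timer so the event loop can service other work in between. A toy model of that self-rescheduling shape, where a direct callback stands in for event_add_timer_msec():

#include <stdio.h>

static int pending = 5;

static void process_batch(void);

/* stand-in for event_add_timer_msec(): invokes the callback directly */
static void schedule_timer_msec(void (*fn)(void), int msec)
{
	(void)msec;
	fn();
}

static void process_batch(void)
{
	printf("processed one VNI batch, %d left\n", --pending);
	if (pending)
		schedule_timer_msec(process_batch, 20);
}

int main(void)
{
	process_batch();
	return 0;
}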
static int bgp_zebra_process_local_es_add(ZAPI_CALLBACK_ARGS)
{
esi_t esi;
diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h
index 8deecba747..7e9d57cb85 100644
--- a/bgpd/bgp_zebra.h
+++ b/bgpd/bgp_zebra.h
@@ -135,4 +135,6 @@ extern void bgp_zebra_release_label_range(uint32_t start, uint32_t end);
extern enum zclient_send_status
bgp_zebra_withdraw_actual(struct bgp_dest *dest, struct bgp_path_info *info,
struct bgp *bgp);
+extern void bgp_zebra_process_remote_routes_for_l2vni(struct event *e);
+extern void bgp_zebra_process_remote_routes_for_l3vrf(struct event *e);
#endif /* _QUAGGA_BGP_ZEBRA_H */
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index dccac3eceb..2f234e3a5a 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -1227,8 +1227,6 @@ static void peer_free(struct peer *peer)
bgp_reads_off(peer->connection);
bgp_writes_off(peer->connection);
event_cancel_event_ready(bm->master, peer->connection);
- FOREACH_AFI_SAFI (afi, safi)
- EVENT_OFF(peer->t_revalidate_all[afi][safi]);
assert(!peer->connection->t_write);
assert(!peer->connection->t_read);
@@ -1562,8 +1560,13 @@ struct peer *peer_new(struct bgp *bgp)
SET_FLAG(peer->sflags, PEER_STATUS_CAPABILITY_OPEN);
- if (CHECK_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS))
- peer_flag_set(peer, PEER_FLAG_ENFORCE_FIRST_AS);
+ /* By default this is enabled, thus we need to mark it as
+ * inverted in order to display correctly in the configuration.
+ */
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS)) {
+ SET_FLAG(peer->flags_invert, PEER_FLAG_ENFORCE_FIRST_AS);
+ SET_FLAG(peer->flags, PEER_FLAG_ENFORCE_FIRST_AS);
+ }
if (CHECK_FLAG(bgp->flags, BGP_FLAG_SOFT_VERSION_CAPABILITY))
peer_flag_set(peer, PEER_FLAG_CAPABILITY_SOFT_VERSION);
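A hedged sketch of how the inverted flag then renders in the configuration (illustrative only, not the actual bgp_config_write() logic; neighbor_str holding the peer's address is an assumption):

	bool set = CHECK_FLAG(peer->flags, PEER_FLAG_ENFORCE_FIRST_AS);
	bool inv = CHECK_FLAG(peer->flags_invert, PEER_FLAG_ENFORCE_FIRST_AS);

	/* With the invert bit set, a config line is emitted only when the
	 * runtime flag differs from the inverted (enabled) default. */
	if (inv && !set)
		vty_out(vty, " no neighbor %s enforce-first-as\n", neighbor_str);
	else if (!inv && set)
		vty_out(vty, " neighbor %s enforce-first-as\n", neighbor_str);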
@@ -2158,8 +2161,7 @@ int peer_remote_as(struct bgp *bgp, union sockunion *su, const char *conf_if,
/* When this peer is a member of peer-group. */
if (peer->group) {
/* peer-group already has AS number/internal/external */
- if (peer->group->conf->as
- || peer->group->conf->as_type) {
+ if (peer->group->conf->as || peer->group->conf->as_type != AS_UNSPECIFIED) {
/* Return peer group's AS number. */
*as = peer->group->conf->as;
return BGP_ERR_PEER_GROUP_MEMBER;
@@ -2677,8 +2679,6 @@ int peer_delete(struct peer *peer)
bgp_reads_off(peer->connection);
bgp_writes_off(peer->connection);
event_cancel_event_ready(bm->master, peer->connection);
- FOREACH_AFI_SAFI (afi, safi)
- EVENT_OFF(peer->t_revalidate_all[afi][safi]);
assert(!CHECK_FLAG(peer->connection->thread_flags,
PEER_THREAD_WRITES_ON));
assert(!CHECK_FLAG(peer->connection->thread_flags,
@@ -3967,11 +3967,16 @@ int bgp_delete(struct bgp *bgp)
afi_t afi;
safi_t safi;
int i;
+ uint32_t vni_count;
+ struct bgpevpn *vpn = NULL;
struct bgp_dest *dest = NULL;
struct bgp_dest *dest_next = NULL;
struct bgp_table *dest_table = NULL;
struct graceful_restart_info *gr_info;
- uint32_t cnt_before, cnt_after;
+ uint32_t b_ann_cnt = 0, b_l2_cnt = 0, b_l3_cnt = 0;
+ uint32_t a_ann_cnt = 0, a_l2_cnt = 0, a_l3_cnt = 0;
+ struct bgp *bgp_to_proc = NULL;
+ struct bgp *bgp_to_proc_next = NULL;
assert(bgp);
@@ -3979,7 +3984,7 @@ int bgp_delete(struct bgp *bgp)
 * Iterate the pending dest list and remove all the dests pertaining to
* the bgp under delete.
*/
- cnt_before = zebra_announce_count(&bm->zebra_announce_head);
+ b_ann_cnt = zebra_announce_count(&bm->zebra_announce_head);
for (dest = zebra_announce_first(&bm->zebra_announce_head); dest;
dest = dest_next) {
dest_next = zebra_announce_next(&bm->zebra_announce_head, dest);
@@ -3991,10 +3996,36 @@ int bgp_delete(struct bgp *bgp)
}
}
- cnt_after = zebra_announce_count(&bm->zebra_announce_head);
- if (BGP_DEBUG(zebra, ZEBRA))
- zlog_debug("Zebra Announce Fifo cleanup count before %u and after %u during BGP %s deletion",
- cnt_before, cnt_after, bgp->name_pretty);
+ /*
+	 * Pop all VPNs still awaiting remote-route installation if the
+	 * bgp-evpn instance is being deleted.
+ */
+ if (bgp == bgp_get_evpn()) {
+ b_l2_cnt = zebra_l2_vni_count(&bm->zebra_l2_vni_head);
+ vni_count = b_l2_cnt;
+ while (vni_count) {
+ vpn = zebra_l2_vni_pop(&bm->zebra_l2_vni_head);
+ UNSET_FLAG(vpn->flags, VNI_FLAG_ADD);
+ vni_count--;
+ }
+ }
+
+ b_l3_cnt = zebra_l3_vni_count(&bm->zebra_l3_vni_head);
+ for (bgp_to_proc = zebra_l3_vni_first(&bm->zebra_l3_vni_head); bgp_to_proc;
+ bgp_to_proc = bgp_to_proc_next) {
+ bgp_to_proc_next = zebra_l3_vni_next(&bm->zebra_l3_vni_head, bgp_to_proc);
+ if (bgp_to_proc == bgp)
+ zebra_l3_vni_del(&bm->zebra_l3_vni_head, bgp_to_proc);
+ }
+
+ if (BGP_DEBUG(zebra, ZEBRA)) {
+ a_ann_cnt = zebra_announce_count(&bm->zebra_announce_head);
+ a_l2_cnt = zebra_l2_vni_count(&bm->zebra_l2_vni_head);
+ a_l3_cnt = zebra_l3_vni_count(&bm->zebra_l3_vni_head);
+		zlog_debug("BGP %s deletion FIFO cnt Zebra_Ann before %u after %u, L2_VNI before %u after %u, L3_VNI before %u after %u",
+ bgp->name_pretty, b_ann_cnt, a_ann_cnt, b_l2_cnt, a_l2_cnt, b_l3_cnt,
+ a_l3_cnt);
+ }
bgp_soft_reconfig_table_task_cancel(bgp, NULL, NULL);
@@ -4006,6 +4037,9 @@ int bgp_delete(struct bgp *bgp)
bgp_vpn_leak_unimport(bgp);
+ bgp_vpn_release_label(bgp, AFI_IP, true);
+ bgp_vpn_release_label(bgp, AFI_IP6, true);
+
hook_call(bgp_inst_delete, bgp);
FOREACH_AFI_SAFI (afi, safi)
@@ -4206,6 +4240,14 @@ int bgp_delete(struct bgp *bgp)
}
}
+ /* Clean BGP address family parameters */
+ bgp_mh_info->ead_evi_rx = BGP_EVPN_MH_EAD_EVI_RX_DEF;
+ bgp_evpn_switch_ead_evi_rx();
+ bgp_mh_info->ead_evi_tx = BGP_EVPN_MH_EAD_EVI_TX_DEF;
+ bgp_mh_info->evi_per_es_frag = BGP_EVPN_MAX_EVI_PER_ES_FRAG;
+
+ bgp_address_family_distance_delete();
+
return 0;
}
@@ -4287,6 +4329,9 @@ void bgp_free(struct bgp *bgp)
XFREE(MTYPE_BGP_NAME, bgp->snmp_stats);
XFREE(MTYPE_BGP_CONFED_LIST, bgp->confed_peers);
+ bgp_meta_queue_free(bgp->mq);
+ bgp->mq = NULL;
+
XFREE(MTYPE_BGP, bgp);
}
@@ -8493,6 +8538,8 @@ void bgp_master_init(struct event_loop *master, const int buffer_size,
bm = &bgp_master;
zebra_announce_init(&bm->zebra_announce_head);
+ zebra_l2_vni_init(&bm->zebra_l2_vni_head);
+ zebra_l3_vni_init(&bm->zebra_l3_vni_head);
bm->bgp = list_new();
bm->listen_sockets = list_new();
bm->port = BGP_PORT_DEFAULT;
@@ -8516,6 +8563,8 @@ void bgp_master_init(struct event_loop *master, const int buffer_size,
bm->stalepath_time = BGP_DEFAULT_STALEPATH_TIME;
bm->select_defer_time = BGP_DEFAULT_SELECT_DEFERRAL_TIME;
bm->rib_stale_time = BGP_DEFAULT_RIB_STALE_TIME;
+ bm->t_bgp_zebra_l2_vni = NULL;
+ bm->t_bgp_zebra_l3_vni = NULL;
bgp_mac_init();
/* init the rd id space.
@@ -8763,6 +8812,8 @@ void bgp_terminate(void)
EVENT_OFF(bm->t_bgp_sync_label_manager);
EVENT_OFF(bm->t_bgp_start_label_manager);
EVENT_OFF(bm->t_bgp_zebra_route);
+ EVENT_OFF(bm->t_bgp_zebra_l2_vni);
+ EVENT_OFF(bm->t_bgp_zebra_l3_vni);
bgp_mac_finish();
}
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index df55d879e7..47214e52e5 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -19,6 +19,8 @@
#include "asn.h"
PREDECL_LIST(zebra_announce);
+PREDECL_LIST(zebra_l2_vni);
+PREDECL_LIST(zebra_l3_vni);
/* For union sockunion. */
#include "queue.h"
@@ -204,6 +206,14 @@ struct bgp_master {
/* To preserve ordering of installations into zebra across all Vrfs */
struct zebra_announce_head zebra_announce_head;
+ struct event *t_bgp_zebra_l2_vni;
+ /* To preserve ordering of processing of L2 VNIs in BGP */
+ struct zebra_l2_vni_head zebra_l2_vni_head;
+
+ struct event *t_bgp_zebra_l3_vni;
+ /* To preserve ordering of processing of BGP-VRFs for L3 VNIs */
+ struct zebra_l3_vni_head zebra_l3_vni_head;
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(bgp_master);
@@ -554,6 +564,8 @@ struct bgp {
#define BGP_FLAG_INSTANCE_HIDDEN (1ULL << 39)
/* Prohibit BGP from enabling IPv6 RA on interfaces */
#define BGP_FLAG_IPV6_NO_AUTO_RA (1ULL << 40)
+#define BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL (1ULL << 41)
+#define BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE (1ULL << 42)
/* BGP default address-families.
* New peers inherit enabled afi/safis from bgp instance.
@@ -830,6 +842,9 @@ struct bgp {
/* Process Queue for handling routes */
struct work_queue *process_queue;
+ /* Meta Queue Information */
+ struct meta_queue *mq;
+
bool fast_convergence;
/* BGP Conditional advertisement */
@@ -868,10 +883,14 @@ struct bgp {
uint64_t node_already_on_queue;
uint64_t node_deferred_on_queue;
+ struct zebra_l3_vni_item zl3vni;
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(bgp);
+DECLARE_LIST(zebra_l3_vni, struct bgp, zl3vni);
+
struct bgp_interface {
#define BGP_INTERFACE_MPLS_BGP_FORWARDING (1 << 0)
/* L3VPN multi domain switching */
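The DECLARE_LIST() above generates the typesafe accessors used earlier (zebra_l3_vni_first/next/count/pop); a minimal, illustrative sketch of walking the queued BGP-VRFs with the generic iterator:

	struct bgp *b;

	frr_each (zebra_l3_vni, &bm->zebra_l3_vni_head, b)
		zlog_debug("L3VNI processing pending for %s", b->name_pretty);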
@@ -1330,7 +1349,7 @@ struct peer {
union sockunion *su_local; /* Sockunion of local address. */
union sockunion *su_remote; /* Sockunion of remote address. */
- int shared_network; /* Is this peer shared same network. */
+ bool shared_network; /* Is this peer shared same network. */
struct bgp_nexthop nexthop; /* Nexthop */
/* Roles in bgp session */
@@ -1654,7 +1673,6 @@ struct peer {
/* Threads. */
struct event *t_llgr_stale[AFI_MAX][SAFI_MAX];
- struct event *t_revalidate_all[AFI_MAX][SAFI_MAX];
struct event *t_refresh_stalepath;
/* Thread flags. */
@@ -1710,7 +1728,8 @@ struct peer {
uint32_t stat_pfx_cluster_loop;
uint32_t stat_pfx_nh_invalid;
uint32_t stat_pfx_dup_withdraw;
- uint32_t stat_upd_7606; /* RFC7606: treat-as-withdraw */
+ uint32_t stat_pfx_withdraw; /* RFC7606: treat-as-withdraw */
+ uint32_t stat_pfx_discard; /* The number of prefixes with discarded attributes */
uint64_t stat_pfx_loc_rib; /* RFC7854 : Number of routes in Loc-RIB */
uint64_t stat_pfx_adj_rib_in; /* RFC7854 : Number of routes in Adj-RIBs-In */
@@ -2105,7 +2124,8 @@ struct bgp_nlri {
*/
#define BGP_DEFAULT_HOLDTIME 180
#define BGP_DEFAULT_KEEPALIVE 60
-#define BGP_DEFAULT_CONNECT_RETRY 120
+#define BGP_DEFAULT_CONNECT_RETRY 30
+#define BGP_MAX_CONNECT_RETRY 120
#define BGP_DEFAULT_EBGP_ROUTEADV 0
#define BGP_DEFAULT_IBGP_ROUTEADV 0
@@ -2695,14 +2715,6 @@ static inline int peer_group_af_configured(struct peer_group *group)
return 0;
}
-static inline char *timestamp_string(time_t ts, char *timebuf)
-{
- time_t tbuf;
-
- tbuf = time(NULL) - (monotime(NULL) - ts);
- return ctime_r(&tbuf, timebuf);
-}
-
static inline bool peer_established(struct peer_connection *connection)
{
return connection->status == Established;
diff --git a/doc/developer/grpc.rst b/doc/developer/grpc.rst
index 4e81adf8b2..62d1594f4c 100644
--- a/doc/developer/grpc.rst
+++ b/doc/developer/grpc.rst
@@ -149,7 +149,6 @@ Below is how to compile and run the program, with the example output:
]
},
"frr-zebra:zebra": {
- "mcast-rpf-lookup": "mrib-then-urib",
"workqueue-hold-timer": 10,
"zapi-packets": 1000,
"import-kernel-table": {
diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst
index 5e22c4cb72..45142f7d83 100644
--- a/doc/developer/workflow.rst
+++ b/doc/developer/workflow.rst
@@ -167,15 +167,7 @@ as early as possible, i.e. the first 2-week window.
For reference, the expected release schedule according to the above is:
-+---------+------------+------------+------------+
-| Release | 2024-03-12 | 2024-07-02 | 2024-11-05 |
-+---------+------------+------------+------------+
-| RC | 2024-02-27 | 2024-06-18 | 2024-10-22 |
-+---------+------------+------------+------------+
-| dev/X.Y | 2024-02-13 | 2024-06-04 | 2024-10-08 |
-+---------+------------+------------+------------+
-| freeze | 2024-01-30 | 2024-05-21 | 2024-09-24 |
-+---------+------------+------------+------------+
+.. graphviz:: ../figures/releases.dot
Here is the hint on how to get the dates easily:
diff --git a/doc/figures/releases.dot b/doc/figures/releases.dot
new file mode 100644
index 0000000000..57d35987f8
--- /dev/null
+++ b/doc/figures/releases.dot
@@ -0,0 +1,44 @@
+digraph ReleaseTimeline {
+ rankdir=LR;
+ node [shape=box, style=rounded, fontsize=10, width=1.5, fontname="Helvetica"];
+
+ subgraph cluster_dev {
+ label="Development";
+ style=dashed;
+ color=blue;
+ node [fillcolor=lightblue, style=filled];
+ "dev/X.Y";
+ }
+
+ subgraph cluster_rc {
+ label="Release Candidate";
+ style=dashed;
+ color=orange;
+ node [fillcolor=orange, style=filled];
+ "RC";
+ }
+
+ subgraph cluster_stable {
+ label="Stable Release";
+ style=dashed;
+ color=green;
+ node [fillcolor=lightgreen, style=filled];
+ "release";
+ }
+
+ // Release steps with actions
+ "freeze" [label="Freeze", shape=ellipse, style=dotted, fontcolor=red];
+ "dev/X.Y" [label="dev/X.Y\n(Development)", fillcolor=lightblue];
+ "RC" [label="RC\n(Release Candidate)", fillcolor=orange];
+ "release" [label="Release\n(Final)", fillcolor=lightgreen];
+
+ // Connect the steps with actions
+ "freeze" -> "dev/X.Y" [label=" "];
+ "dev/X.Y" -> "RC" [label=" "];
+ "RC" -> "release" [label=" "];
+
+ // Date connections (freeze -> dev/X.Y -> RC -> release)
+ "2025-01-21" -> "2025-02-04" -> "2025-02-18" -> "2025-03-04";
+ "2025-05-20" -> "2025-06-03" -> "2025-06-17" -> "2025-07-01";
+ "2025-09-23" -> "2025-10-07" -> "2025-10-21" -> "2025-11-04";
+}
diff --git a/doc/user/filter.rst b/doc/user/filter.rst
index c1146e50aa..be63095166 100644
--- a/doc/user/filter.rst
+++ b/doc/user/filter.rst
@@ -9,9 +9,7 @@ defined, it can be applied in any direction.
IP Access List
==============
-.. clicmd:: access-list NAME [seq (1-4294967295)] permit IPV4-NETWORK
-
-.. clicmd:: access-list NAME [seq (1-4294967295)] deny IPV4-NETWORK
+.. clicmd:: access-list NAME [seq (1-4294967295)] <permit|deny> <A.B.C.D/M [exact-match]|any>
seq
seq `number` can be set either automatically or manually. In the
@@ -35,6 +33,29 @@ IP Access List
access-list filter permit 10.0.0.0/8
access-list filter seq 13 permit 10.0.0.0/7
+.. clicmd:: access-list NAME [seq (1-4294967295)] <deny|permit> ip <A.B.C.D A.B.C.D|host A.B.C.D|any> <A.B.C.D A.B.C.D|host A.B.C.D|any>
+
+   The extended access-list syntax enables filtering on both source and
+   destination IP addresses (or source and group, if used for multicast
+   boundaries). The source address comes first in the command.
+
+   If providing a mask, note that access-lists use wildcard masks (the
+   inverse matching logic of subnet masks); for example, ``10.0.20.0
+   0.0.0.255`` matches the same addresses as ``10.0.20.0/24``. If
+   specifying ``host``, only the single address given will be matched.
+
+ A basic example is as follows:
+
+ .. code-block:: frr
+
+ access-list filter seq 5 permit ip host 10.0.20.2 232.1.1.0 0.0.0.128
+ access-list filter seq 10 deny ip 10.0.20.0 0.0.0.255 232.1.1.0 0.0.0.255
+ access-list filter seq 15 permit ip any any
+
+   .. note::
+
+ If an access-list is specified but no match is found, the default verdict
+ is deny.
+
.. clicmd:: show <ip|ipv6> access-list [json]
Display all IPv4 or IPv6 access lists.
diff --git a/doc/user/pim.rst b/doc/user/pim.rst
index 05418da5a9..ff45f21b56 100644
--- a/doc/user/pim.rst
+++ b/doc/user/pim.rst
@@ -6,9 +6,9 @@ PIM
PIM -- Protocol Independent Multicast
-*pimd* supports pim-sm as well as igmp v2 and v3. pim is
-vrf aware and can work within the context of vrf's in order to
-do S,G mrouting. Additionally PIM can be used in the EVPN underlay
+*pimd* supports PIM-SM as well as IGMP v2 and v3. PIM is
+VRF aware and can work within the context of VRFs in order to
+do S,G mrouting. Additionally, PIM can be used in the EVPN underlay
network for optimizing forwarding of overlay BUM traffic.
.. note::
@@ -217,32 +217,47 @@ PIM Routers
never do SM over. This command is vrf aware, to configure for a vrf, specify
the vrf in the router pim block.
-Global Multicast
-----------------
+.. clicmd:: rpf-lookup-mode MODE
-These commands are valid at the top-level of the configuration (or also per
-vrf where indicated), instead of under the 'router pim' submode.
+ MODE sets the method used to perform RPF lookups. Supported modes:
-.. clicmd:: ip multicast rpf-lookup-mode WORD
+ urib-only
+ Performs the lookup on the Unicast RIB. The Multicast RIB is never used.
- Modify how PIM does RPF lookups in the zebra routing table. You can use
- these choices:
+ mrib-only
+ Performs the lookup on the Multicast RIB. The Unicast RIB is never used.
- longer-prefix
- Lookup the RPF in both tables using the longer prefix as a match
+ mrib-then-urib
+ Tries to perform the lookup on the Multicast RIB. If any route is found,
+ that route is used. Otherwise, the Unicast RIB is tried.
lower-distance
- Lookup the RPF in both tables using the lower distance as a match
+ Performs a lookup on the Multicast RIB and Unicast RIB each. The result
+ with the lower administrative distance is used; if they're equal, the
+ Multicast RIB takes precedence.
- mrib-only
- Lookup in the Multicast RIB only
+ longer-prefix
+ Performs a lookup on the Multicast RIB and Unicast RIB each. The result
+ with the longer prefix length is used; if they're equal, the
+ Multicast RIB takes precedence.
- mrib-then-urib
- Lookup in the Multicast RIB then the Unicast Rib, returning first found.
- This is the default value for lookup if this command is not entered
+ The ``mrib-then-urib`` setting is the default behavior if nothing is
+ configured. If this is the desired behavior, it should be explicitly
+ configured to make the configuration immune against possible changes in
+ what the default behavior is.
- urib-only
- Lookup in the Unicast Rib only.
+.. warning::
+
+ Unreachable routes do not receive special treatment and do not cause
+ fallback to a second lookup.
+
+.. _pim-global-configuration:
+
+Global Multicast
+================
+
+These commands are valid at the top-level of the configuration (or also per
+vrf where indicated), instead of under the 'router pim' submode.
.. clicmd:: ip igmp generate-query-once [version (2-3)]
@@ -257,6 +272,70 @@ vrf where indicated), instead of under the 'router pim' submode.
'no' form of the command disables the warning generation. This command is
vrf aware. To configure per vrf, enter vrf submode.
+
+.. _pim-multicast-rib:
+
+Multicast RIB Commands
+----------------------
+
+The Multicast RIB provides a separate table of unicast destinations which
+is used for Multicast Reverse Path Forwarding decisions. Lookups are keyed
+by a multicast source's IP address, hence the table contains unicast
+addresses rather than multicast group addresses.
+
+This table is fully separate from the default unicast table. However,
+RPF lookup can include the unicast table.
+
+.. clicmd:: ip mroute PREFIX NEXTHOP [DISTANCE]
+
+ Adds a static route entry to the Multicast RIB. This performs exactly as the
+ ``ip route`` command, except that it inserts the route in the Multicast RIB
+ instead of the Unicast RIB.
+ These routes are only used for RPF lookup and will not be used by zebra for
+ insertion into the kernel *or* for normal rib processing. As such it is
+ possible to create weird states with these commands. Use with caution. Most
+ of the time this will not be necessary.
+
+.. clicmd:: show [ip|ipv6] rpf
+
+ Prints the entire Multicast RIB. Note that this is independent of the
+   configured RPF lookup mode; the Multicast RIB may be printed yet not
+ used at all.
+
+.. clicmd:: show [ip|ipv6] rpf ADDR
+
+ Performs a Multicast RPF lookup using the Multicast RIB only.
+ ADDR specifies the multicast source address to look up. Note that this is
+ independent of the configured RPF lookup mode.
+
+ ::
+
+ > show ip rpf 192.0.2.1
+ Routing entry for 192.0.2.0/24 using Multicast RIB
+ Known via "kernel", distance 0, metric 0, best
+ * 198.51.100.1, via eth0
+
+
+ Indicates that a multicast source lookup for 192.0.2.1 against the
+ Multicast RIB would use an entry for 192.0.2.0/24 with a gateway of
+ 198.51.100.1.
+
+.. clicmd:: show ip pim [vrf NAME] nexthop-lookup ADDR [GROUP]
+
+ Performs a nexthop lookup according to the configured RPF lookup mode.
+   The lookup is performed for a given source address, and optionally with
+   a group address, which may affect the nexthop decision.
+
+ ::
+
+ > show ip pim nexthop-lookup 192.0.2.1
+ (192.0.2.1, *) --- Nexthop 198.10.10.1 Interface eth1
+
+
+   Indicates that a source lookup for 192.0.2.1 according to the configured
+   RPF lookup mode would use the gateway address 198.10.10.1 on interface
+   eth1.
+
+
.. _pim-interface-configuration:
PIM Interface Configuration
@@ -348,10 +427,46 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
.. clicmd:: ip multicast boundary oil WORD
- Set a pim multicast boundary, based upon the WORD prefix-list. If a pim join
- or IGMP report is received on this interface and the Group is denied by the
+ Set a PIM multicast boundary, based upon the WORD prefix-list. If a PIM join
+ or IGMP report is received on this interface and the group is denied by the
prefix-list, PIM will ignore the join or report.
+ .. code-block:: frr
+
+ prefix-list multicast-acl seq 5 permit 232.1.1.1/32
+ prefix-list multicast-acl seq 10 deny 232.1.1.0/24
+ prefix-list multicast-acl seq 15 permit any
+ !
+ interface r1-eth0
+ ip pim
+ ip igmp
+ ip multicast boundary oil multicast-acl
+ exit
+
+.. clicmd:: ip multicast boundary ACCESS-LIST
+
+ Set a PIM multicast boundary, based upon the ACCESS-LIST. If a PIM join
+ or IGMP report is received on this interface and the (S,G) tuple is denied by the
+ access-list, PIM will ignore the join or report.
+
+ To filter on both source and group, the extended access-list syntax must be used.
+
+ If both a prefix-list and access-list are configured for multicast boundaries,
+ the prefix-list will be evaluated first (and must have a terminating "permit any"
+ in order to also evaluate against the access-list).
+
+ .. code-block:: frr
+
+ access-list multicast-acl seq 5 permit ip host 10.0.20.2 host 232.1.1.1
+ access-list multicast-acl seq 10 deny ip 10.0.20.0 0.0.0.255 232.1.1.0 0.0.0.255
+ access-list multicast-acl seq 15 permit ip any any
+ !
+ interface r1-eth0
+ ip pim
+ ip igmp
+ ip multicast boundary pim-acl
+ exit
+
.. clicmd:: ip igmp last-member-query-count (1-255)
Set the IGMP last member query count. The default value is 2. 'no' form of
@@ -374,29 +489,6 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
:ref:`bfd-pim-peer-config`
-
-.. _pim-multicast-rib:
-
-PIM Multicast RIB
-=================
-
-In order to influence Multicast RPF lookup, it is possible to insert
-into zebra routes for the Multicast RIB. These routes are only
-used for RPF lookup and will not be used by zebra for insertion
-into the kernel *or* for normal rib processing. As such it is
-possible to create weird states with these commands. Use with
-caution. Most of the time this will not be necessary.
-
-.. clicmd:: ip mroute A.B.C.D/M A.B.C.D (1-255)
-
- Insert into the Multicast Rib Route A.B.C.D/M with specified nexthop. The
- distance can be specified as well if desired.
-
-.. clicmd:: ip mroute A.B.C.D/M INTERFACE (1-255)
-
- Insert into the Multicast Rib Route A.B.C.D/M using the specified INTERFACE.
- The distance can be specified as well if desired.
-
.. _msdp-configuration:
Multicast Source Discovery Protocol (MSDP) Configuration
@@ -467,6 +559,10 @@ Commands available for MSDP
The filtering will only take effect starting from the command
application.
+.. clicmd:: msdp peer A.B.C.D sa-limit <AMOUNT>
+
+   Configure the maximum number of SAs to learn from the peer.
+
.. clicmd:: msdp peer A.B.C.D password WORD
Use MD5 authentication to connect with the remote peer.
@@ -478,6 +574,10 @@ Commands available for MSDP
To apply it immediately call `clear ip msdp peer A.B.C.D`.
+.. clicmd:: msdp originator-id A.B.C.D
+
+   Use the specified address as the originator ID in SA messages instead
+   of the RP address.
+
.. clicmd:: msdp shutdown
Shutdown the MSDP sessions in this PIM instance.
@@ -734,7 +834,7 @@ cause great confusion.
.. seealso::
- :ref:`multicast-rib-commands`
+ :ref:`pim-multicast-rib`
PIM Debug Commands
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index b862ba9f50..ac29b1c7d4 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -526,16 +526,6 @@ commands in relationship to VRF. Here is an extract of some of those commands:
The network administrator can however decide to provision this command in
configuration file to provide more clarity about the intended configuration.
-.. clicmd:: netns NAMESPACE
-
- This command is based on VRF configuration mode. This command is available
- when *Zebra* is run in :option:`-n` mode. This command reflects which *Linux
- network namespace* is to be mapped with *Zebra* VRF. It is to be noted that
- *Zebra* creates and detects added/suppressed VRFs from the Linux environment
- (in fact, those managed with iproute2). The network administrator can however
- decide to provision this command in configuration file to provide more clarity
- about the intended configuration.
-
.. clicmd:: show ip route vrf VRF
The show command permits dumping the routing table associated to the VRF. If
@@ -936,7 +926,7 @@ and this section also helps that case.
Create a new locator. If the name of an existing locator is specified,
 move to the specified locator's configuration node to change its settings.
-.. clicmd:: prefix X:X::X:X/M [func-bits (0-64)] [block-len 40] [node-len 24]
+.. clicmd:: prefix X:X::X:X/M [block-len (16-64)] [node-len (16-64)] [func-bits (0-64)]
Set the ipv6 prefix block of the locator. SRv6 locator is defined by
RFC8986. The actual routing protocol specifies the locator and allocates a
@@ -1123,88 +1113,6 @@ and this section also helps that case.
!
...
-.. _multicast-rib-commands:
-
-Multicast RIB Commands
-======================
-
-The Multicast RIB provides a separate table of unicast destinations which
-is used for Multicast Reverse Path Forwarding decisions. It is used with
-a multicast source's IP address, hence contains not multicast group
-addresses but unicast addresses.
-
-This table is fully separate from the default unicast table. However,
-RPF lookup can include the unicast table.
-
-WARNING: RPF lookup results are non-responsive in this version of FRR,
-i.e. multicast routing does not actively react to changes in underlying
-unicast topology!
-
-.. clicmd:: ip multicast rpf-lookup-mode MODE
-
-
- MODE sets the method used to perform RPF lookups. Supported modes:
-
- urib-only
- Performs the lookup on the Unicast RIB. The Multicast RIB is never used.
-
- mrib-only
- Performs the lookup on the Multicast RIB. The Unicast RIB is never used.
-
- mrib-then-urib
- Tries to perform the lookup on the Multicast RIB. If any route is found,
- that route is used. Otherwise, the Unicast RIB is tried.
-
- lower-distance
- Performs a lookup on the Multicast RIB and Unicast RIB each. The result
- with the lower administrative distance is used; if they're equal, the
- Multicast RIB takes precedence.
-
- longer-prefix
- Performs a lookup on the Multicast RIB and Unicast RIB each. The result
- with the longer prefix length is used; if they're equal, the
- Multicast RIB takes precedence.
-
- The ``mrib-then-urib`` setting is the default behavior if nothing is
- configured. If this is the desired behavior, it should be explicitly
- configured to make the configuration immune against possible changes in
- what the default behavior is.
-
-.. warning::
-
- Unreachable routes do not receive special treatment and do not cause
- fallback to a second lookup.
-
-.. clicmd:: show [ip|ipv6] rpf ADDR
-
- Performs a Multicast RPF lookup, as configured with ``ip multicast
- rpf-lookup-mode MODE``. ADDR specifies the multicast source address to look
- up.
-
- ::
-
- > show ip rpf 192.0.2.1
- Routing entry for 192.0.2.0/24 using Unicast RIB
- Known via "kernel", distance 0, metric 0, best
- * 198.51.100.1, via eth0
-
-
- Indicates that a multicast source lookup for 192.0.2.1 would use an
- Unicast RIB entry for 192.0.2.0/24 with a gateway of 198.51.100.1.
-
-.. clicmd:: show [ip|ipv6] rpf
-
- Prints the entire Multicast RIB. Note that this is independent of the
- configured RPF lookup mode, the Multicast RIB may be printed yet not
- used at all.
-
-.. clicmd:: ip mroute PREFIX NEXTHOP [DISTANCE]
-
-
- Adds a static route entry to the Multicast RIB. This performs exactly as the
- ``ip route`` command, except that it inserts the route in the Multicast RIB
- instead of the Unicast RIB.
-
.. _zebra-route-filtering:
zebra Route Filtering
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 057edb33dc..9efc42382f 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -1,16 +1,319 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_IF_PACKET_H
#define __LINUX_IF_PACKET_H
+#include <asm/byteorder.h>
#include <linux/types.h>
+struct sockaddr_pkt {
+ unsigned short spkt_family;
+ unsigned char spkt_device[14];
+ __be16 spkt_protocol;
+};
+
struct sockaddr_ll {
- unsigned short sll_family;
- __be16 sll_protocol;
- int sll_ifindex;
- unsigned short sll_hatype;
- unsigned char sll_pkttype;
- unsigned char sll_halen;
- unsigned char sll_addr[8];
+ unsigned short sll_family;
+ __be16 sll_protocol;
+ int sll_ifindex;
+ unsigned short sll_hatype;
+ unsigned char sll_pkttype;
+ unsigned char sll_halen;
+ unsigned char sll_addr[8];
+};
+
+/* Packet types */
+
+#define PACKET_HOST 0 /* To us */
+#define PACKET_BROADCAST 1 /* To all */
+#define PACKET_MULTICAST 2 /* To group */
+#define PACKET_OTHERHOST 3 /* To someone else */
+#define PACKET_OUTGOING 4 /* Outgoing of any type */
+#define PACKET_LOOPBACK 5 /* MC/BRD frame looped back */
+#define PACKET_USER 6 /* To user space */
+#define PACKET_KERNEL 7 /* To kernel space */
+/* Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space */
+#define PACKET_FASTROUTE 6 /* Fastrouted frame */
+
+/* Packet socket options */
+
+#define PACKET_ADD_MEMBERSHIP 1
+#define PACKET_DROP_MEMBERSHIP 2
+#define PACKET_RECV_OUTPUT 3
+/* Value 4 is still used by obsolete turbo-packet. */
+#define PACKET_RX_RING 5
+#define PACKET_STATISTICS 6
+#define PACKET_COPY_THRESH 7
+#define PACKET_AUXDATA 8
+#define PACKET_ORIGDEV 9
+#define PACKET_VERSION 10
+#define PACKET_HDRLEN 11
+#define PACKET_RESERVE 12
+#define PACKET_TX_RING 13
+#define PACKET_LOSS 14
+#define PACKET_VNET_HDR 15
+#define PACKET_TX_TIMESTAMP 16
+#define PACKET_TIMESTAMP 17
+#define PACKET_FANOUT 18
+#define PACKET_TX_HAS_OFF 19
+#define PACKET_QDISC_BYPASS 20
+#define PACKET_ROLLOVER_STATS 21
+#define PACKET_FANOUT_DATA 22
+#define PACKET_IGNORE_OUTGOING 23
+#define PACKET_VNET_HDR_SZ 24
+
+#define PACKET_FANOUT_HASH 0
+#define PACKET_FANOUT_LB 1
+#define PACKET_FANOUT_CPU 2
+#define PACKET_FANOUT_ROLLOVER 3
+#define PACKET_FANOUT_RND 4
+#define PACKET_FANOUT_QM 5
+#define PACKET_FANOUT_CBPF 6
+#define PACKET_FANOUT_EBPF 7
+#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
+#define PACKET_FANOUT_FLAG_UNIQUEID 0x2000
+#define PACKET_FANOUT_FLAG_IGNORE_OUTGOING 0x4000
+#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
+
+struct tpacket_stats {
+ unsigned int tp_packets;
+ unsigned int tp_drops;
+};
+
+struct tpacket_stats_v3 {
+ unsigned int tp_packets;
+ unsigned int tp_drops;
+ unsigned int tp_freeze_q_cnt;
+};
+
+struct tpacket_rollover_stats {
+ __aligned_u64 tp_all;
+ __aligned_u64 tp_huge;
+ __aligned_u64 tp_failed;
+};
+
+union tpacket_stats_u {
+ struct tpacket_stats stats1;
+ struct tpacket_stats_v3 stats3;
+};
+
+struct tpacket_auxdata {
+ __u32 tp_status;
+ __u32 tp_len;
+ __u32 tp_snaplen;
+ __u16 tp_mac;
+ __u16 tp_net;
+ __u16 tp_vlan_tci;
+ __u16 tp_vlan_tpid;
+};
+
+/* Rx ring - header status */
+#define TP_STATUS_KERNEL 0
+#define TP_STATUS_USER (1 << 0)
+#define TP_STATUS_COPY (1 << 1)
+#define TP_STATUS_LOSING (1 << 2)
+#define TP_STATUS_CSUMNOTREADY (1 << 3)
+#define TP_STATUS_VLAN_VALID (1 << 4) /* auxdata has valid tp_vlan_tci */
+#define TP_STATUS_BLK_TMO (1 << 5)
+#define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */
+#define TP_STATUS_CSUM_VALID (1 << 7)
+#define TP_STATUS_GSO_TCP (1 << 8)
+
+/* Tx ring - header status */
+#define TP_STATUS_AVAILABLE 0
+#define TP_STATUS_SEND_REQUEST (1 << 0)
+#define TP_STATUS_SENDING (1 << 1)
+#define TP_STATUS_WRONG_FORMAT (1 << 2)
+
+/* Rx and Tx ring - header status */
+#define TP_STATUS_TS_SOFTWARE (1 << 29)
+#define TP_STATUS_TS_SYS_HARDWARE (1 << 30) /* deprecated, never set */
+#define TP_STATUS_TS_RAW_HARDWARE (1U << 31)
+
+/* Rx ring - feature request bits */
+#define TP_FT_REQ_FILL_RXHASH 0x1
+
+struct tpacket_hdr {
+ unsigned long tp_status;
+ unsigned int tp_len;
+ unsigned int tp_snaplen;
+ unsigned short tp_mac;
+ unsigned short tp_net;
+ unsigned int tp_sec;
+ unsigned int tp_usec;
+};
+
+#define TPACKET_ALIGNMENT 16
+#define TPACKET_ALIGN(x) (((x)+TPACKET_ALIGNMENT-1)&~(TPACKET_ALIGNMENT-1))
+#define TPACKET_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket_hdr)) + sizeof(struct sockaddr_ll))
+
+struct tpacket2_hdr {
+ __u32 tp_status;
+ __u32 tp_len;
+ __u32 tp_snaplen;
+ __u16 tp_mac;
+ __u16 tp_net;
+ __u32 tp_sec;
+ __u32 tp_nsec;
+ __u16 tp_vlan_tci;
+ __u16 tp_vlan_tpid;
+ __u8 tp_padding[4];
+};
+
+struct tpacket_hdr_variant1 {
+ __u32 tp_rxhash;
+ __u32 tp_vlan_tci;
+ __u16 tp_vlan_tpid;
+ __u16 tp_padding;
+};
+
+struct tpacket3_hdr {
+ __u32 tp_next_offset;
+ __u32 tp_sec;
+ __u32 tp_nsec;
+ __u32 tp_snaplen;
+ __u32 tp_len;
+ __u32 tp_status;
+ __u16 tp_mac;
+ __u16 tp_net;
+ /* pkt_hdr variants */
+ union {
+ struct tpacket_hdr_variant1 hv1;
+ };
+ __u8 tp_padding[8];
+};
+
+struct tpacket_bd_ts {
+ unsigned int ts_sec;
+ union {
+ unsigned int ts_usec;
+ unsigned int ts_nsec;
+ };
+};
+
+struct tpacket_hdr_v1 {
+ __u32 block_status;
+ __u32 num_pkts;
+ __u32 offset_to_first_pkt;
+
+ /* Number of valid bytes (including padding)
+ * blk_len <= tp_block_size
+ */
+ __u32 blk_len;
+
+ /*
+ * Quite a few uses of sequence number:
+ * 1. Make sure cache flush etc worked.
+ * Well, one can argue - why not use the increasing ts below?
+ * But look at 2. below first.
+ * 2. When you pass around blocks to other user space decoders,
+ * you can see which blk[s] is[are] outstanding etc.
+ * 3. Validate kernel code.
+ */
+ __aligned_u64 seq_num;
+
+ /*
+ * ts_last_pkt:
+ *
+ * Case 1. Block has 'N'(N >=1) packets and TMO'd(timed out)
+ * ts_last_pkt == 'time-stamp of last packet' and NOT the
+ * time when the timer fired and the block was closed.
+ * By providing the ts of the last packet we can absolutely
+ * guarantee that time-stamp wise, the first packet in the
+ * next block will never precede the last packet of the
+ * previous block.
+ * Case 2. Block has zero packets and TMO'd
+ * ts_last_pkt = time when the timer fired and the block
+ * was closed.
+ * Case 3. Block has 'N' packets and NO TMO.
+ * ts_last_pkt = time-stamp of the last pkt in the block.
+ *
+ * ts_first_pkt:
+ * Is always the time-stamp when the block was opened.
+ * Case a) ZERO packets
+ * No packets to deal with but at least you know the
+ * time-interval of this block.
+ * Case b) Non-zero packets
+ * Use the ts of the first packet in the block.
+ *
+ */
+ struct tpacket_bd_ts ts_first_pkt, ts_last_pkt;
+};
+
+union tpacket_bd_header_u {
+ struct tpacket_hdr_v1 bh1;
+};
+
+struct tpacket_block_desc {
+ __u32 version;
+ __u32 offset_to_priv;
+ union tpacket_bd_header_u hdr;
+};
+
+#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
+#define TPACKET3_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket3_hdr)) + sizeof(struct sockaddr_ll))
+
+enum tpacket_versions {
+ TPACKET_V1,
+ TPACKET_V2,
+ TPACKET_V3
};
+/*
+ Frame structure:
+
+ - Start. Frame must be aligned to TPACKET_ALIGNMENT=16
+ - struct tpacket_hdr
+ - pad to TPACKET_ALIGNMENT=16
+ - struct sockaddr_ll
+ - Gap, chosen so that packet data (Start+tp_net) aligns to TPACKET_ALIGNMENT=16
+ - Start+tp_mac: [ Optional MAC header ]
+ - Start+tp_net: Packet data, aligned to TPACKET_ALIGNMENT=16.
+ - Pad to align to TPACKET_ALIGNMENT=16
+ */
+
+struct tpacket_req {
+ unsigned int tp_block_size; /* Minimal size of contiguous block */
+ unsigned int tp_block_nr; /* Number of blocks */
+ unsigned int tp_frame_size; /* Size of frame */
+ unsigned int tp_frame_nr; /* Total number of frames */
+};
+
+struct tpacket_req3 {
+ unsigned int tp_block_size; /* Minimal size of contiguous block */
+ unsigned int tp_block_nr; /* Number of blocks */
+ unsigned int tp_frame_size; /* Size of frame */
+ unsigned int tp_frame_nr; /* Total number of frames */
+ unsigned int tp_retire_blk_tov; /* timeout in msecs */
+ unsigned int tp_sizeof_priv; /* offset to private data area */
+ unsigned int tp_feature_req_word;
+};
+
+union tpacket_req_u {
+ struct tpacket_req req;
+ struct tpacket_req3 req3;
+};
+
+struct packet_mreq {
+ int mr_ifindex;
+ unsigned short mr_type;
+ unsigned short mr_alen;
+ unsigned char mr_address[8];
+};
+
+struct fanout_args {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 id;
+ __u16 type_flags;
+#else
+ __u16 type_flags;
+ __u16 id;
+#endif
+ __u32 max_num_members;
+};
+
+#define PACKET_MR_MULTICAST 0
+#define PACKET_MR_PROMISC 1
+#define PACKET_MR_ALLMULTI 2
+#define PACKET_MR_UNICAST 3
+
#endif
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index 9a967bc1e3..9ea2cfd0a1 100644
--- a/isisd/isis_circuit.c
+++ b/isisd/isis_circuit.c
@@ -1008,45 +1008,40 @@ void isis_circuit_print_json(struct isis_circuit *circuit,
circuit_t2string(level));
if (circuit->area->newmetric)
json_object_int_add(level_json, "metric",
- circuit->te_metric[0]);
+ circuit->te_metric[level - 1]);
else
json_object_int_add(level_json, "metric",
- circuit->metric[0]);
+ circuit->metric[level - 1]);
if (!circuit->is_passive) {
- json_object_int_add(level_json,
- "active-neighbors",
- circuit->upadjcount[0]);
- json_object_int_add(level_json,
- "hello-interval",
- circuit->hello_interval[0]);
+ json_object_int_add(level_json, "active-neighbors",
+ circuit->upadjcount[level - 1]);
+ json_object_int_add(level_json, "hello-interval",
+ circuit->hello_interval[level - 1]);
hold_json = json_object_new_object();
json_object_object_add(level_json, "holddown",
hold_json);
- json_object_int_add(
- hold_json, "count",
- circuit->hello_multiplier[0]);
+ json_object_int_add(hold_json, "count",
+ circuit->hello_multiplier[level - 1]);
json_object_string_add(
hold_json, "pad",
isis_hello_padding2string(
circuit->pad_hellos));
json_object_int_add(level_json, "cnsp-interval",
- circuit->csnp_interval[0]);
+ circuit->csnp_interval[level - 1]);
json_object_int_add(level_json, "psnp-interval",
- circuit->psnp_interval[0]);
+ circuit->psnp_interval[level - 1]);
if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
lan_prio_json =
json_object_new_object();
json_object_object_add(level_json,
"lan",
lan_prio_json);
- json_object_int_add(
- lan_prio_json, "priority",
- circuit->priority[0]);
- json_object_string_add(
- lan_prio_json, "is-dis",
- (circuit->u.bc.is_dr[0]
- ? "yes"
- : "no"));
+ json_object_int_add(lan_prio_json, "priority",
+ circuit->priority[level - 1]);
+ json_object_string_add(lan_prio_json, "is-dis",
+ (circuit->u.bc.is_dr[level - 1]
+ ? "yes"
+ : "no"));
}
}
json_object_array_add(levels_json, level_json);
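The switch to [level - 1] indexing assumes the usual IS-IS convention that level is 1-based while the per-level configuration arrays are 0-based; an illustrative sketch:

	/* Illustrative only: IS_LEVEL_1 == 1 and IS_LEVEL_2 == 2, so the
	 * 0-based per-level array slot is level - 1. */
	assert(level == IS_LEVEL_1 || level == IS_LEVEL_2);
	json_object_int_add(level_json, "metric",
			    circuit->te_metric[level - 1]);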
diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c
index 93f7bbf753..652efee89a 100644
--- a/isisd/isis_cli.c
+++ b/isisd/isis_cli.c
@@ -2065,12 +2065,6 @@ void cli_show_isis_srv6_locator(struct vty *vty, const struct lyd_node *dnode,
vty_out(vty, " locator %s\n", yang_dnode_get_string(dnode, NULL));
}
-void cli_show_isis_srv6_locator_end(struct vty *vty,
- const struct lyd_node *dnode)
-{
- vty_out(vty, " exit\n");
-}
-
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/enabled
*/
@@ -2118,6 +2112,11 @@ void cli_show_isis_srv6_enabled(struct vty *vty, const struct lyd_node *dnode,
vty_out(vty, " segment-routing srv6\n");
}
+void cli_show_isis_srv6_end(struct vty *vty, const struct lyd_node *dnode)
+{
+ vty_out(vty, " exit\n");
+}
+
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/msd/node-msd
*/
@@ -2248,6 +2247,11 @@ void cli_show_isis_srv6_node_msd(struct vty *vty, const struct lyd_node *dnode,
yang_dnode_get_uint8(dnode, "max-end-d"));
}
+void cli_show_isis_srv6_node_msd_end(struct vty *vty, const struct lyd_node *dnode)
+{
+ vty_out(vty, " exit\n");
+}
+
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/interface
*/
diff --git a/isisd/isis_nb.c b/isisd/isis_nb.c
index 8608d2b9bd..3024bb57ea 100644
--- a/isisd/isis_nb.c
+++ b/isisd/isis_nb.c
@@ -861,6 +861,12 @@ const struct frr_yang_module_info frr_isisd_info = {
},
},
{
+ .xpath = "/frr-isisd:isis/instance/segment-routing-srv6",
+ .cbs = {
+ .cli_show_end = cli_show_isis_srv6_end,
+ },
+ },
+ {
.xpath = "/frr-isisd:isis/instance/segment-routing-srv6/enabled",
.cbs = {
.modify = isis_instance_segment_routing_srv6_enabled_modify,
@@ -873,7 +879,6 @@ const struct frr_yang_module_info frr_isisd_info = {
.modify = isis_instance_segment_routing_srv6_locator_modify,
.destroy = isis_instance_segment_routing_srv6_locator_destroy,
.cli_show = cli_show_isis_srv6_locator,
- .cli_show_end = cli_show_isis_srv6_locator_end,
},
},
{
@@ -904,6 +909,7 @@ const struct frr_yang_module_info frr_isisd_info = {
.xpath = "/frr-isisd:isis/instance/segment-routing-srv6/msd/node-msd",
.cbs = {
.cli_show = cli_show_isis_srv6_node_msd,
+ .cli_show_end = cli_show_isis_srv6_node_msd_end,
},
},
{
diff --git a/isisd/isis_nb.h b/isisd/isis_nb.h
index 1bf95e3db3..10b3bd4009 100644
--- a/isisd/isis_nb.h
+++ b/isisd/isis_nb.h
@@ -322,6 +322,7 @@ int isis_instance_flex_algo_affinity_mapping_value_modify(
struct nb_cb_modify_args *args);
int isis_instance_flex_algo_affinity_mapping_value_destroy(
struct nb_cb_destroy_args *args);
+void cli_show_isis_srv6_end(struct vty *vty, const struct lyd_node *dnode);
int isis_instance_segment_routing_srv6_enabled_modify(
struct nb_cb_modify_args *args);
void cli_show_isis_srv6_enabled(struct vty *vty, const struct lyd_node *dnode,
@@ -332,8 +333,6 @@ int isis_instance_segment_routing_srv6_locator_destroy(
struct nb_cb_destroy_args *args);
void cli_show_isis_srv6_locator(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults);
-void cli_show_isis_srv6_locator_end(struct vty *vty,
- const struct lyd_node *dnode);
int isis_instance_segment_routing_srv6_msd_node_msd_max_segs_left_modify(
struct nb_cb_modify_args *args);
int isis_instance_segment_routing_srv6_msd_node_msd_max_end_pop_modify(
@@ -344,6 +343,7 @@ int isis_instance_segment_routing_srv6_msd_node_msd_max_end_d_modify(
struct nb_cb_modify_args *args);
void cli_show_isis_srv6_node_msd(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults);
+void cli_show_isis_srv6_node_msd_end(struct vty *vty, const struct lyd_node *dnode);
int isis_instance_segment_routing_srv6_interface_modify(
struct nb_cb_modify_args *args);
void cli_show_isis_srv6_interface(struct vty *vty, const struct lyd_node *dnode,
diff --git a/isisd/isis_pfpacket.c b/isisd/isis_pfpacket.c
index af69fac1cd..634bdca7cb 100644
--- a/isisd/isis_pfpacket.c
+++ b/isisd/isis_pfpacket.c
@@ -10,7 +10,7 @@
#include <zebra.h>
#if ISIS_METHOD == ISIS_METHOD_PFPACKET
#include <net/ethernet.h> /* the L2 protocols */
-#include <netpacket/packet.h>
+#include "linux/if_packet.h"
#include <linux/filter.h>
@@ -134,6 +134,13 @@ static int open_packet_socket(struct isis_circuit *circuit)
return ISIS_WARNING;
}
+ int val = 1;
+ if (setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &val, sizeof(val)) == -1 &&
+ errno != ENOPROTOOPT) {
+ zlog_warn("%s: PACKET_AUXDATA failed: %s", __func__,
+ safe_strerror(errno));
+ }
+
if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf))) {
zlog_warn("%s: SO_ATTACH_FILTER failed: %s", __func__,
safe_strerror(errno));
@@ -284,13 +291,54 @@ int isis_recv_pdu_bcast(struct isis_circuit *circuit, uint8_t *ssnpa)
? circuit->interface->mtu
: circuit->interface->mtu6;
uint8_t temp_buff[max_size];
- bytesread =
- recvfrom(circuit->fd, temp_buff, max_size, MSG_DONTWAIT,
- (struct sockaddr *)&s_addr, (socklen_t *)&addr_len);
+
+ union {
+ struct cmsghdr cmsg;
+ char buf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
+ } cmsg_buf;
+ struct iovec iov;
+ struct msghdr msg;
+ memset(&cmsg_buf, 0x00, sizeof(cmsg_buf));
+ memset(&iov, 0x00, sizeof(iov));
+ memset(&msg, 0x00, sizeof(msg));
+
+ iov.iov_base = temp_buff;
+ iov.iov_len = max_size;
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ msg.msg_name = &s_addr;
+ msg.msg_namelen = addr_len;
+
+ msg.msg_control = &cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ bytesread = recvmsg(circuit->fd, &msg, MSG_DONTWAIT);
if (bytesread < 0) {
-		zlog_warn("%s: recvfrom() failed", __func__);
+		zlog_warn("%s: recvmsg() failed", __func__);
return ISIS_WARNING;
}
+
+ bool vlan_packet = false;
+
+ for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); cmsg;
+ cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ if (cmsg->cmsg_len >= CMSG_LEN(sizeof(struct tpacket_auxdata)) &&
+ cmsg->cmsg_level == SOL_PACKET &&
+ cmsg->cmsg_type == PACKET_AUXDATA) {
+ struct tpacket_auxdata *aux =
+ (struct tpacket_auxdata *)CMSG_DATA(cmsg);
+
+ if (aux && (aux->tp_status & TP_STATUS_VLAN_VALID))
+ vlan_packet = true;
+ break;
+ }
+ }
+
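+	/* Frames that arrived VLAN-tagged (reported via PACKET_AUXDATA)
+	 * belong to a VLAN subinterface rather than to this circuit's
+	 * untagged packet socket, so they are ignored instead of being
+	 * parsed as IS-IS PDUs. */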
+ if (vlan_packet)
+ return ISIS_WARNING;
+
/* then we lose the LLC */
stream_write(circuit->rcv_stream, temp_buff + LLC_LEN,
bytesread - LLC_LEN);
diff --git a/lib/darr.c b/lib/darr.c
index 7a01274104..0cffa64425 100644
--- a/lib/darr.c
+++ b/lib/darr.c
@@ -8,6 +8,7 @@
#include <zebra.h>
#include "darr.h"
#include "memory.h"
+#include "printfrr.h"
DEFINE_MTYPE(LIB, DARR, "Dynamic Array");
DEFINE_MTYPE(LIB, DARR_STR, "Dynamic Array String");
@@ -70,7 +71,7 @@ char *__darr_in_vsprintf(char **sp, bool concat, const char *fmt, va_list ap)
*darr_append(*sp) = 0;
again:
va_copy(ap_copy, ap);
- len = vsnprintf(darr_last(*sp), darr_avail(*sp) + 1, fmt, ap_copy);
+ len = vsnprintfrr(darr_last(*sp), darr_avail(*sp) + 1, fmt, ap_copy);
va_end(ap_copy);
if (len < 0)
darr_in_strcat(*sp, fmt);
diff --git a/lib/darr.h b/lib/darr.h
index 2b9a0a0c02..121e3dd14e 100644
--- a/lib/darr.h
+++ b/lib/darr.h
@@ -272,10 +272,10 @@ void *__darr_resize(void *a, uint count, size_t esize, struct memtype *mt);
*/
#define darr_ensure_avail_mt(A, S, MT) \
({ \
- ssize_t need = (ssize_t)(S) - \
- (ssize_t)(darr_cap(A) - darr_len(A)); \
- if (need > 0) \
- _darr_resize_mt((A), darr_cap(A) + need, MT); \
+ ssize_t __dea_need = (ssize_t)(S) - \
+ (ssize_t)(darr_cap(A) - darr_len(A)); \
+ if (__dea_need > 0) \
+ _darr_resize_mt((A), darr_cap(A) + __dea_need, MT); \
(A); \
})
#define darr_ensure_avail(A, S) darr_ensure_avail_mt(A, S, MTYPE_DARR)
@@ -301,9 +301,9 @@ void *__darr_resize(void *a, uint count, size_t esize, struct memtype *mt);
#define darr_ensure_cap_mt(A, C, MT) \
({ \
/* Cast to avoid warning when C == 0 */ \
- uint _c = (C) > 0 ? (C) : 1; \
- if ((size_t)darr_cap(A) < _c) \
- _darr_resize_mt((A), _c, MT); \
+ uint __dec_c = (C) > 0 ? (C) : 1; \
+ if ((size_t)darr_cap(A) < __dec_c) \
+ _darr_resize_mt((A), __dec_c, MT); \
(A); \
})
#define darr_ensure_cap(A, C) darr_ensure_cap_mt(A, C, MTYPE_DARR)
@@ -428,12 +428,12 @@ void *__darr_resize(void *a, uint count, size_t esize, struct memtype *mt);
#define _darr_append_n(A, N, Z, MT) \
({ \
- uint __len = darr_len(A); \
- darr_ensure_cap_mt(A, __len + (N), MT); \
- _darr_len(A) = __len + (N); \
+ uint __da_len = darr_len(A); \
+ darr_ensure_cap_mt(A, __da_len + (N), MT); \
+ _darr_len(A) = __da_len + (N); \
if (Z) \
- memset(&(A)[__len], 0, (N)*_darr_esize(A)); \
- &(A)[__len]; \
+ memset(&(A)[__da_len], 0, (N)*_darr_esize(A)); \
+ &(A)[__da_len]; \
})
/**
* Extending the array's length by N.
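The variable renames in these macros (mirrored in lib/mgmt_msg_native.h below) guard against shadowing when the macros expand inside caller code; a contrived sketch of the hazard the __da_-style prefixes avoid, assuming the pre-rename macro bodies:

	uint __len = 4;		   /* caller variable, unluckily named */
	darr_append_n(buf, __len); /* the old body declared its own
				    * `uint __len`, shadowing the caller's
				    * value inside the expansion */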
diff --git a/lib/log.c b/lib/log.c
index 2b049cebe4..bc1ed5c5cc 100644
--- a/lib/log.c
+++ b/lib/log.c
@@ -358,7 +358,7 @@ static const struct zebra_desc_table command_types[] = {
DESC_ENTRY(ZEBRA_BFD_CLIENT_DEREGISTER),
DESC_ENTRY(ZEBRA_INTERFACE_ENABLE_RADV),
DESC_ENTRY(ZEBRA_INTERFACE_DISABLE_RADV),
- DESC_ENTRY(ZEBRA_NEXTHOP_LOOKUP_MRIB),
+ DESC_ENTRY(ZEBRA_NEXTHOP_LOOKUP),
DESC_ENTRY(ZEBRA_INTERFACE_LINK_PARAMS),
DESC_ENTRY(ZEBRA_MPLS_LABELS_ADD),
DESC_ENTRY(ZEBRA_MPLS_LABELS_DELETE),
diff --git a/lib/mgmt_msg_native.h b/lib/mgmt_msg_native.h
index ef03b66edc..587a002801 100644
--- a/lib/mgmt_msg_native.h
+++ b/lib/mgmt_msg_native.h
@@ -554,8 +554,8 @@ extern int vmgmt_msg_native_send_error(struct msg_conn *conn,
*/
#define mgmt_msg_native_alloc_msg(msg_type, var_len, mem_type) \
({ \
- uint8_t *buf = NULL; \
- (msg_type *)darr_append_nz_mt(buf, \
+ uint8_t *__nam_buf = NULL; \
+ (msg_type *)darr_append_nz_mt(__nam_buf, \
sizeof(msg_type) + (var_len), \
mem_type); \
})
@@ -590,10 +590,10 @@ extern int vmgmt_msg_native_send_error(struct msg_conn *conn,
*/
#define mgmt_msg_native_append(msg, data, len) \
({ \
- uint8_t **darrp = mgmt_msg_native_get_darrp(msg); \
- uint8_t *p = darr_append_n(*darrp, len); \
- memcpy(p, data, len); \
- p; \
+ uint8_t **__na_darrp = mgmt_msg_native_get_darrp(msg); \
+ uint8_t *__na_p = darr_append_n(*__na_darrp, len); \
+ memcpy(__na_p, data, len); \
+ __na_p; \
})
/**
@@ -611,8 +611,8 @@ extern int vmgmt_msg_native_send_error(struct msg_conn *conn,
*/
#define mgmt_msg_native_add_str(msg, s) \
do { \
- int __len = strlen(s) + 1; \
- mgmt_msg_native_append(msg, s, __len); \
+ int __nas_len = strlen(s) + 1; \
+ mgmt_msg_native_append(msg, s, __nas_len); \
} while (0)
/**
diff --git a/lib/monotime.h b/lib/monotime.h
index f7ae1bbbe1..5e1bfe754e 100644
--- a/lib/monotime.h
+++ b/lib/monotime.h
@@ -129,6 +129,22 @@ static inline char *time_to_string(time_t ts, char *buf)
return ctime_r(&tbuf, buf);
}
+/* A wrapper for time_to_string() which removes newline at the end.
+ * This is needed for JSON outputs, where newline is not expected.
+ */
+static inline char *time_to_string_json(time_t ts, char *buf)
+{
+ size_t len;
+
+ time_to_string(ts, buf);
+ len = strlen(buf);
+
+ if (len && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ return buf;
+}
+
/* Convert interval to human-friendly string, used in cli output e.g. */
static inline const char *frrtime_to_interval(time_t t, char *buf,
size_t buflen)
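A hypothetical caller of time_to_string_json(), showing why the stripped newline matters for JSON values (json and ts are assumed to exist; the buffer sizing follows the 26-byte ctime_r() requirement):

	char buf[32]; /* ctime_r()-style output needs at least 26 bytes */

	json_object_string_add(json, "lastUpdate",
			       time_to_string_json(ts, buf));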
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index f9794bee3c..b199dd61f8 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -83,6 +83,7 @@ static int nb_cli_classic_commit(struct vty *vty)
static void nb_cli_pending_commit_clear(struct vty *vty)
{
vty->pending_commit = 0;
+ vty->buffer_cmd_count = 0;
XFREE(MTYPE_TMP, vty->pending_cmds_buf);
vty->pending_cmds_buflen = 0;
vty->pending_cmds_bufpos = 0;
@@ -102,12 +103,19 @@ int nb_cli_pending_commit_check(struct vty *vty)
static int nb_cli_schedule_command(struct vty *vty)
{
- /* Append command to dynamically sized buffer of scheduled commands. */
+	/* Append command to dynamically sized buffer of scheduled commands.
+	 * vty->buf - incoming config command
+	 * vty->pending_cmds_buf - pending buffer where incoming commands
+	 *   are accumulated for later processing
+	 * vty->pending_cmds_bufpos - length of the pending buffer
+	 */
if (!vty->pending_cmds_buf) {
vty->pending_cmds_buflen = 4096;
vty->pending_cmds_buf =
XCALLOC(MTYPE_TMP, vty->pending_cmds_buflen);
}
+
if ((strlen(vty->buf) + 3)
> (vty->pending_cmds_buflen - vty->pending_cmds_bufpos)) {
vty->pending_cmds_buflen *= 2;
@@ -121,6 +129,9 @@ static int nb_cli_schedule_command(struct vty *vty)
/* Schedule the commit operation. */
vty->pending_commit = 1;
+ vty->buffer_cmd_count++;
+ if (vty->buffer_cmd_count == NB_CMD_BATCH_SIZE)
+ nb_cli_pending_commit_check(vty);
return CMD_SUCCESS;
}
diff --git a/lib/northbound_cli.h b/lib/northbound_cli.h
index 4c8dc50bd2..43c40f49e1 100644
--- a/lib/northbound_cli.h
+++ b/lib/northbound_cli.h
@@ -20,6 +20,9 @@ enum nb_cfg_format {
NB_CFG_FMT_XML,
};
+/* Maximum config commands in a batch */
+#define NB_CMD_BATCH_SIZE 5000
+
extern struct nb_config *vty_shared_candidate_config;
/*
diff --git a/lib/plist.c b/lib/plist.c
index 2cfaa7d81d..6950ab5761 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -1136,8 +1136,10 @@ static int vty_show_prefix_list_prefix(struct vty *vty, afi_t afi,
match = 0;
if (type == normal_display || type == first_match_display)
- if (prefix_same(&p, &pentry->prefix))
+ if (prefix_list_entry_match(pentry, &p, false)) {
+ pentry->hitcnt++;
match = 1;
+ }
if (type == longer_display) {
if ((p.family == pentry->prefix.family)
diff --git a/lib/vty.h b/lib/vty.h
index be54159aa9..c6f9f5a3a7 100644
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -149,6 +149,7 @@ struct vty {
struct nb_config *candidate_config_base;
/* Dynamic transaction information. */
+ size_t buffer_cmd_count;
bool pending_allowed;
bool pending_commit;
char *pending_cmds_buf;
diff --git a/lib/zclient.h b/lib/zclient.h
index 6da9558aa5..2385a8a219 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -131,7 +131,7 @@ typedef enum {
ZEBRA_BFD_CLIENT_DEREGISTER,
ZEBRA_INTERFACE_ENABLE_RADV,
ZEBRA_INTERFACE_DISABLE_RADV,
- ZEBRA_NEXTHOP_LOOKUP_MRIB,
+ ZEBRA_NEXTHOP_LOOKUP,
ZEBRA_INTERFACE_LINK_PARAMS,
ZEBRA_MPLS_LABELS_ADD,
ZEBRA_MPLS_LABELS_DELETE,
diff --git a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c
index 93c9bcac44..45e154d83b 100644
--- a/mgmtd/mgmt_be_adapter.c
+++ b/mgmtd/mgmt_be_adapter.c
@@ -83,7 +83,7 @@ static const char *const zebra_oper_xpaths[] = {
NULL,
};
-#if HAVE_RIPD
+#ifdef HAVE_RIPD
static const char *const ripd_config_xpaths[] = {
"/frr-filter:lib",
"/frr-interface:lib/interface",
@@ -104,7 +104,7 @@ static const char *const ripd_rpc_xpaths[] = {
};
#endif
-#if HAVE_RIPNGD
+#ifdef HAVE_RIPNGD
static const char *const ripngd_config_xpaths[] = {
"/frr-filter:lib",
"/frr-interface:lib/interface",
@@ -123,7 +123,7 @@ static const char *const ripngd_rpc_xpaths[] = {
};
#endif
-#if HAVE_STATICD
+#ifdef HAVE_STATICD
static const char *const staticd_config_xpaths[] = {
"/frr-vrf:lib",
"/frr-interface:lib",
diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c
index 32f28a5774..7f7a5d9a8e 100644
--- a/mgmtd/mgmt_fe_adapter.c
+++ b/mgmtd/mgmt_fe_adapter.c
@@ -190,7 +190,7 @@ static void mgmt_fe_cleanup_session(struct mgmt_fe_session_ctx **sessionp)
assert(session->adapter->refcount > 1);
mgmt_fe_adapter_unlock(&session->adapter);
}
-
+ darr_free_free(session->notify_xpaths);
hash_release(mgmt_fe_sessions, session);
XFREE(MTYPE_MGMTD_FE_SESSION, session);
*sessionp = NULL;
diff --git a/pathd/path_pcep_debug.c b/pathd/path_pcep_debug.c
index 7bff9c7b9c..89e7574324 100644
--- a/pathd/path_pcep_debug.c
+++ b/pathd/path_pcep_debug.c
@@ -1321,8 +1321,7 @@ void _format_pcep_event(int ps, pcep_event *event)
PATHD_FORMAT("\n");
PATHD_FORMAT("%*sevent_type: %s\n", ps2, "",
pcep_event_type_name(event->event_type));
- PATHD_FORMAT("%*sevent_time: %s", ps2, "",
- ctime_r(&event->event_time, buf));
+ PATHD_FORMAT("%*sevent_time: %s", ps2, "", time_to_string(event->event_time, buf));
if (event->session == NULL) {
PATHD_FORMAT("%*ssession: NULL\n", ps2, "");
} else {
diff --git a/pbrd/pbr_map.c b/pbrd/pbr_map.c
index 8f7a46377c..ea0e2e4eaf 100644
--- a/pbrd/pbr_map.c
+++ b/pbrd/pbr_map.c
@@ -732,6 +732,14 @@ void pbr_map_schedule_policy_from_nhg(const char *nh_group, bool installed)
pbr_map_check(pbrms, false);
}
+
+ /*
+ * vrf_unchanged pbrms have no nhg but their
+ * installation is contingent on other sequences which
+ * may...
+ */
+ if (pbrms->vrf_unchanged)
+ pbr_map_check(pbrms, false);
}
}
}
diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c
index b44169c522..acfb0c3af3 100644
--- a/pimd/pim6_mld.c
+++ b/pimd/pim6_mld.c
@@ -449,7 +449,7 @@ static void gm_sg_update(struct gm_sg *sg, bool has_expired)
* this data structure.
*/
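+	/* pim_channel_oil_del() may free the OIL and returns the (possibly
+	 * NULL) new pointer, so assign it back rather than leaving sg->oil
+	 * dangling. */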
if (sg->oil)
- pim_channel_oil_del(sg->oil, __func__);
+ sg->oil = pim_channel_oil_del(sg->oil, __func__);
/* multiple paths can lead to the last state going away;
* t_sg_expire can still be running if we're arriving from
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
index 75104141ae..6c4d649235 100644
--- a/pimd/pim_bsm.c
+++ b/pimd/pim_bsm.c
@@ -480,9 +480,7 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
pend = bsm_rpinfos_first(bsgrp_node->partial_bsrp_list);
- if (!pim_get_all_mcast_group(&group_all))
- return;
-
+ pim_get_all_mcast_group(&group_all);
rp_all = pim_rp_find_match_group(pim, &group_all);
rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
@@ -727,11 +725,9 @@ void pim_bsm_clear(struct pim_instance *pim)
__func__, &nht_p);
}
- pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
-
- if (!pim_get_all_mcast_group(&g_all))
- return;
+ pim_nht_delete_tracked(pim, nht_p, NULL, rp_info);
+ pim_get_all_mcast_group(&g_all);
rp_all = pim_rp_find_match_group(pim, &g_all);
if (rp_all == rp_info) {
diff --git a/pimd/pim_bsr_rpdb.c b/pimd/pim_bsr_rpdb.c
index 6e93b65f4b..02e7a69ff1 100644
--- a/pimd/pim_bsr_rpdb.c
+++ b/pimd/pim_bsr_rpdb.c
@@ -413,11 +413,11 @@ void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc)
struct bsr_crp_rp *rp, ref;
bool ok;
- ref.addr = pnc->rpf.rpf_addr;
+ ref.addr = pnc->addr;
rp = bsr_crp_rps_find(scope->ebsr_rps, &ref);
assertf(rp, "addr=%pPA", &ref.addr);
- ok = CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID);
+ ok = pim_nht_pnc_is_valid(pim, pnc);
if (ok == rp->nht_ok)
return;
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index f4c25ea81e..a34fb344fe 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -2914,7 +2914,7 @@ DEFPY (show_ip_pim_nexthop,
DEFPY (show_ip_pim_nexthop_lookup,
show_ip_pim_nexthop_lookup_cmd,
- "show ip pim [vrf NAME] nexthop-lookup A.B.C.D$source A.B.C.D$group",
+ "show ip pim [vrf NAME] nexthop-lookup A.B.C.D$source [A.B.C.D$group]",
SHOW_STR
IP_STR
PIM_STR
@@ -2926,6 +2926,14 @@ DEFPY (show_ip_pim_nexthop_lookup,
return pim_show_nexthop_lookup_cmd_helper(vrf, vty, source, group);
}
+ALIAS_DEPRECATED (show_ip_pim_nexthop_lookup,
+ show_ip_rpf_source_cmd,
+ "show ip rpf A.B.C.D$source",
+ SHOW_STR
+ IP_STR
+ "Display RPF information for multicast source\n"
+ "Nexthop lookup for specific source address\n");
+
DEFPY (show_ip_pim_interface_traffic,
show_ip_pim_interface_traffic_cmd,
"show ip pim [vrf NAME] interface traffic [WORD$if_name] [json$json]",
@@ -3288,7 +3296,7 @@ DEFUN (show_ip_rib,
return CMD_WARNING;
}
- if (!pim_nexthop_lookup(vrf->info, &nexthop, addr, 0)) {
+ if (!pim_nht_lookup(vrf->info, &nexthop, addr, 0)) {
vty_out(vty,
"Failure querying RIB nexthop for unicast address %s\n",
addr_str);
@@ -5871,6 +5879,21 @@ DEFUN(interface_no_ip_pim_boundary_oil,
return pim_process_no_ip_pim_boundary_oil_cmd(vty);
}
+DEFPY_YANG(interface_ip_pim_boundary_acl,
+ interface_ip_pim_boundary_acl_cmd,
+ "[no] ip multicast boundary ACCESSLIST4_NAME$name",
+ NO_STR
+ IP_STR
+ "Generic multicast configuration options\n"
+ "Define multicast boundary\n"
+ "Access-list to filter OIL with by source and group\n")
+{
+ nb_cli_enqueue_change(vty, "./multicast-boundary-acl",
+ (!!no ? NB_OP_DESTROY : NB_OP_MODIFY), name);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH, FRR_PIM_AF_XPATH_VAL);
+}
+
DEFUN (interface_ip_mroute,
interface_ip_mroute_cmd,
"ip mroute INTERFACE A.B.C.D [A.B.C.D]",
@@ -7578,6 +7601,47 @@ DEFPY(msdp_shutdown,
return nb_cli_apply_changes(vty, NULL);
}
+DEFPY(msdp_peer_sa_limit, msdp_peer_sa_limit_cmd,
+ "[no] msdp peer A.B.C.D$peer sa-limit ![(1-4294967294)$sa_limit]",
+ NO_STR
+ CFG_MSDP_STR
+ "Configure MSDP peer\n"
+ "MSDP peer address\n"
+ "Limit amount of SA\n"
+ "Maximum number of SA\n")
+{
+ const struct lyd_node *peer_node;
+ char xpath[XPATH_MAXLEN + 24];
+
+ snprintf(xpath, sizeof(xpath), "%s/msdp-peer[peer-ip='%s']", VTY_CURR_XPATH, peer_str);
+ peer_node = yang_dnode_get(vty->candidate_config->dnode, xpath);
+ if (peer_node == NULL) {
+ vty_out(vty, "%% MSDP peer %s not yet configured\n", peer_str);
+ return CMD_SUCCESS;
+ }
+
+ nb_cli_enqueue_change(vty, "./sa-limit", NB_OP_MODIFY, sa_limit_str);
+ return nb_cli_apply_changes(vty, "%s", xpath);
+}
+
+DEFPY(msdp_originator_id, msdp_originator_id_cmd,
+ "[no] msdp originator-id ![A.B.C.D$originator_id]",
+ NO_STR
+ CFG_MSDP_STR
+ "Configure MSDP RP originator\n"
+ "MSDP RP originator identifier\n")
+{
+ char xpath_value[XPATH_MAXLEN];
+
+ snprintf(xpath_value, sizeof(xpath_value), "./msdp/originator-id");
+ if (no)
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
+ else
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, originator_id_str);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
static void ip_msdp_show_mesh_group(struct vty *vty, struct pim_msdp_mg *mg,
struct json_object *json)
{
@@ -8813,6 +8877,24 @@ done:
return ret;
}
+DEFPY_YANG(pim_rpf_lookup_mode, pim_rpf_lookup_mode_cmd,
+ "[no] rpf-lookup-mode ![urib-only|mrib-only|mrib-then-urib|lower-distance|longer-prefix]$mode",
+ NO_STR
+ "RPF lookup behavior\n"
+ "Lookup in unicast RIB only\n"
+ "Lookup in multicast RIB only\n"
+ "Try multicast RIB first, fall back to unicast RIB\n"
+ "Lookup both, use entry with lower distance\n"
+ "Lookup both, use entry with longer prefix\n")
+{
+ if (no)
+ nb_cli_enqueue_change(vty, "./mcast-rpf-lookup", NB_OP_DESTROY, NULL);
+ else
+ nb_cli_enqueue_change(vty, "./mcast-rpf-lookup", NB_OP_MODIFY, mode);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
struct cmd_node pim_node = {
.name = "pim",
.node = PIM_NODE,
@@ -8973,11 +9055,15 @@ void pim_cmd_init(void)
install_element(PIM_NODE, &msdp_log_neighbor_changes_cmd);
install_element(PIM_NODE, &msdp_log_sa_changes_cmd);
install_element(PIM_NODE, &msdp_shutdown_cmd);
+ install_element(PIM_NODE, &msdp_peer_sa_limit_cmd);
+ install_element(PIM_NODE, &msdp_originator_id_cmd);
install_element(PIM_NODE, &pim_bsr_candidate_rp_cmd);
install_element(PIM_NODE, &pim_bsr_candidate_rp_group_cmd);
install_element(PIM_NODE, &pim_bsr_candidate_bsr_cmd);
+ install_element(PIM_NODE, &pim_rpf_lookup_mode_cmd);
+
install_element(INTERFACE_NODE, &interface_ip_igmp_cmd);
install_element(INTERFACE_NODE, &interface_no_ip_igmp_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_join_cmd);
@@ -9018,6 +9104,7 @@ void pim_cmd_init(void)
install_element(INTERFACE_NODE, &interface_no_ip_pim_hello_cmd);
install_element(INTERFACE_NODE, &interface_ip_pim_boundary_oil_cmd);
install_element(INTERFACE_NODE, &interface_no_ip_pim_boundary_oil_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_pim_boundary_acl_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_query_generate_cmd);
// Static mroutes NEB
@@ -9100,6 +9187,7 @@ void pim_cmd_init(void)
install_element(VIEW_NODE, &show_ip_ssmpingd_cmd);
install_element(VIEW_NODE, &show_ip_pim_nexthop_cmd);
install_element(VIEW_NODE, &show_ip_pim_nexthop_lookup_cmd);
+ install_element(VIEW_NODE, &show_ip_rpf_source_cmd);
install_element(VIEW_NODE, &show_ip_pim_bsrp_cmd);
install_element(VIEW_NODE, &show_ip_pim_bsm_db_cmd);
install_element(VIEW_NODE, &show_ip_pim_bsr_rpinfo_cmd);
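Several of the new commands above rely on the DEFPY "![...]" marker, which makes the bracketed argument mandatory in the positive form while keeping it optional after "no". A minimal hypothetical sketch of that idiom combined with the northbound enqueue/apply pattern used throughout this file:

	/* Hypothetical command: "example limit 42" requires the value,
	 * "no example limit" does not. */
	DEFPY(example_limit, example_limit_cmd,
	      "[no] example limit ![(1-100)$limit]",
	      NO_STR
	      "Example feature\n"
	      "Configure a limit\n"
	      "Limit value\n")
	{
		if (no)
			nb_cli_enqueue_change(vty, "./limit", NB_OP_DESTROY, NULL);
		else
			nb_cli_enqueue_change(vty, "./limit", NB_OP_MODIFY, limit_str);

		return nb_cli_apply_changes(vty, NULL);
	}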
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index 1476845a5d..8aebce7d27 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -2825,31 +2825,39 @@ static int pim_print_vty_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg)
struct vty *vty = cwd->vty;
struct pim_instance *pim = cwd->pim;
struct nexthop *nh_node = NULL;
- ifindex_t first_ifindex;
struct interface *ifp = NULL;
struct ttable *tt = NULL;
char *table = NULL;
/* Prepare table. */
tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
- ttable_add_row(tt, "Address|Interface|Nexthop");
+ ttable_add_row(tt, "Address|Interface|Nexthop|Table");
tt->style.cell.rpad = 2;
tt->style.corner = '+';
ttable_restyle(tt);
- for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
- first_ifindex = nh_node->ifindex;
-
- ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
+ for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) {
+ ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+#if PIM_IPV == 4
+ ttable_add_row(tt, "%pPA|%s|%pI4|%s", &pnc->addr, ifp ? ifp->name : "NULL",
+ &nh_node->gate.ipv4, "MRIB");
+#else
+ ttable_add_row(tt, "%pPA|%s|%pI6|%s", &pnc->addr, ifp ? ifp->name : "NULL",
+ &nh_node->gate.ipv6, "MRIB");
+#endif
+ }
+ for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) {
+ ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
#if PIM_IPV == 4
- ttable_add_row(tt, "%pPA|%s|%pI4", &pnc->rpf.rpf_addr,
- ifp ? ifp->name : "NULL", &nh_node->gate.ipv4);
+ ttable_add_row(tt, "%pPA|%s|%pI4|%s", &pnc->addr, ifp ? ifp->name : "NULL",
+ &nh_node->gate.ipv4, "URIB");
#else
- ttable_add_row(tt, "%pPA|%s|%pI6", &pnc->rpf.rpf_addr,
- ifp ? ifp->name : "NULL", &nh_node->gate.ipv6);
+ ttable_add_row(tt, "%pPA|%s|%pI6|%s", &pnc->addr, ifp ? ifp->name : "NULL",
+ &nh_node->gate.ipv6, "URIB");
#endif
}
+
/* Dump the generated table. */
table = ttable_dump(tt, "\n");
vty_out(vty, "%s\n", table);
@@ -2859,56 +2867,58 @@ static int pim_print_vty_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg)
return CMD_SUCCESS;
}
-static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet,
- void *arg)
+static void pim_print_json_nexthop(json_object *json_obj, struct nexthop *nh_node,
+ struct interface *ifp, char *addr_str, const char *type)
{
- struct pim_nexthop_cache *pnc = backet->data;
- struct json_pnc_cache_walk_data *cwd = arg;
- struct pim_instance *pim = cwd->pim;
- struct nexthop *nh_node = NULL;
- ifindex_t first_ifindex;
- struct interface *ifp = NULL;
- char addr_str[PIM_ADDRSTRLEN];
json_object *json_row = NULL;
json_object *json_ifp = NULL;
json_object *json_arr = NULL;
struct pim_interface *pim_ifp = NULL;
- bool pim_enable = false;
-
- for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
- first_ifindex = nh_node->ifindex;
- ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
- snprintfrr(addr_str, sizeof(addr_str), "%pPA",
- &pnc->rpf.rpf_addr);
- json_object_object_get_ex(cwd->json_obj, addr_str, &json_row);
- if (!json_row) {
- json_row = json_object_new_object();
- json_object_string_addf(json_row, "address", "%pPA",
- &pnc->rpf.rpf_addr);
- json_object_object_addf(cwd->json_obj, json_row, "%pPA",
- &pnc->rpf.rpf_addr);
- json_arr = json_object_new_array();
- json_object_object_add(json_row, "nexthops", json_arr);
- }
- json_ifp = json_object_new_object();
- json_object_string_add(json_ifp, "interface",
- ifp ? ifp->name : "NULL");
- if (ifp)
- pim_ifp = ifp->info;
+ if (ifp)
+ pim_ifp = ifp->info;
- if (pim_ifp && pim_ifp->pim_enable)
- pim_enable = true;
+ json_object_object_get_ex(json_obj, addr_str, &json_row);
- json_object_boolean_add(json_ifp, "pimEnabled", pim_enable);
+ if (!json_row) {
+ json_row = json_object_new_object();
+ json_object_string_addf(json_row, "address", "%s", addr_str);
+ json_object_object_addf(json_obj, json_row, "%s", addr_str);
+ json_arr = json_object_new_array();
+ json_object_object_add(json_row, "nexthops", json_arr);
+ }
+
+ json_ifp = json_object_new_object();
+ json_object_string_add(json_ifp, "interface", ifp ? ifp->name : "NULL");
+ json_object_boolean_add(json_ifp, "pimEnabled", (pim_ifp && pim_ifp->pim_enable));
#if PIM_IPV == 4
- json_object_string_addf(json_ifp, "nexthop", "%pI4",
- &nh_node->gate.ipv4);
+ json_object_string_addf(json_ifp, "nexthop", "%pI4", &nh_node->gate.ipv4);
#else
- json_object_string_addf(json_ifp, "nexthop", "%pI6",
- &nh_node->gate.ipv6);
+ json_object_string_addf(json_ifp, "nexthop", "%pI6", &nh_node->gate.ipv6);
#endif
- json_object_array_add(json_arr, json_ifp);
+ json_object_string_add(json_ifp, "table", type);
+ json_object_array_add(json_arr, json_ifp);
+}
+
+static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet, void *arg)
+{
+ struct pim_nexthop_cache *pnc = backet->data;
+ struct json_pnc_cache_walk_data *cwd = arg;
+ json_object *json_obj = cwd->json_obj;
+ struct pim_instance *pim = cwd->pim;
+ char addr_str[PIM_ADDRSTRLEN];
+ struct nexthop *nh_node = NULL;
+ struct interface *ifp = NULL;
+
+ snprintfrr(addr_str, sizeof(addr_str), "%pPA", &pnc->addr);
+ for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) {
+ ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+ pim_print_json_nexthop(json_obj, nh_node, ifp, addr_str, "MRIB");
+ }
+
+ for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) {
+ ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+ pim_print_json_nexthop(json_obj, nh_node, ifp, addr_str, "URIB");
}
return CMD_SUCCESS;
}
@@ -2916,7 +2926,6 @@ static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet,
int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
pim_addr source, pim_addr group)
{
- int result = 0;
pim_addr vif_source;
struct prefix grp;
struct pim_nexthop nexthop;
@@ -2929,34 +2938,36 @@ int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
#if PIM_IPV == 4
if (pim_is_group_224_4(source)) {
- vty_out(vty,
- "Invalid argument. Expected Valid Source Address.\n");
+ vty_out(vty, "Invalid argument. Expected Valid Source Address.\n");
return CMD_WARNING;
}
-
- if (!pim_is_group_224_4(group)) {
- vty_out(vty,
- "Invalid argument. Expected Valid Multicast Group Address.\n");
+ /* Only require group if source is not provided */
+ if (pim_addr_is_any(source) && !pim_is_group_224_4(group)) {
+ vty_out(vty, "Invalid argument. Expected Valid Multicast Group Address.\n");
return CMD_WARNING;
}
#endif
- if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group))
+	/* This call sets vif_source to source if source is not ANY. Otherwise
+	 * vif_source is set to the RP address for the group. If no RP is
+	 * configured for the group, it returns 0 and sets vif_source to ANY.
+	 */
+ if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group)) {
+ vty_out(vty, "(%pPAs, %pPA) --- Nexthop Lookup failed, no RP.\n", &source, &group);
return CMD_SUCCESS;
+ }
+
pim_addr_to_prefix(&grp, group);
memset(&nexthop, 0, sizeof(nexthop));
- result =
- pim_ecmp_nexthop_lookup(v->info, &nexthop, vif_source, &grp, 0);
-
- if (!result) {
- vty_out(vty,
- "Nexthop Lookup failed, no usable routes returned.\n");
+ if (!pim_nht_lookup_ecmp(v->info, &nexthop, vif_source, &grp, false)) {
+ vty_out(vty, "(%pPAs, %pPA) --- Nexthop Lookup failed, no usable routes returned.\n",
+ &source, &group);
return CMD_SUCCESS;
}
- vty_out(vty, "Group %pFXh --- Nexthop %pPAs Interface %s\n", &grp,
+ vty_out(vty, "(%pPAs, %pPAs) --- Nexthop %pPAs Interface %s\n", &source, &group,
&nexthop.mrib_nexthop_addr, nexthop.interface->name);
return CMD_SUCCESS;
@@ -2985,19 +2996,16 @@ void pim_show_nexthop(struct pim_instance *pim, struct vty *vty, bool uj)
cwd.pim = pim;
jcwd.pim = pim;
- if (uj) {
+ if (uj)
jcwd.json_obj = json_object_new_object();
- } else {
- vty_out(vty, "Number of registered addresses: %lu\n",
- pim->rpf_hash->count);
- }
+ else
+ vty_out(vty, "Number of registered addresses: %lu\n", pim->nht_hash->count);
if (uj) {
- hash_walk(pim->rpf_hash, pim_print_json_pnc_cache_walkcb,
- &jcwd);
+ hash_walk(pim->nht_hash, pim_print_json_pnc_cache_walkcb, &jcwd);
vty_json(vty, jcwd.json_obj);
} else
- hash_walk(pim->rpf_hash, pim_print_vty_pnc_cache_walkcb, &cwd);
+ hash_walk(pim->nht_hash, pim_print_vty_pnc_cache_walkcb, &cwd);
}
int pim_show_neighbors_cmd_helper(const char *vrf, struct vty *vty,
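The reworked show helpers above iterate pim->nht_hash with hash_walk(). A stripped-down sketch of that lib/hash walk pattern, with hypothetical callback and argument names:

	/* hash_walk() calls this once per bucket with the opaque arg;
	 * returning HASHWALK_CONTINUE keeps the iteration going. */
	static int example_nht_walkcb(struct hash_bucket *bucket, void *arg)
	{
		struct pim_nexthop_cache *pnc = bucket->data;
		struct vty *vty = arg;

		vty_out(vty, "tracked address: %pPA\n", &pnc->addr);
		return HASHWALK_CONTINUE;
	}

	/* usage: hash_walk(pim->nht_hash, example_nht_walkcb, vty); */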
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 20e3ba184b..9316cebc0a 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -38,6 +38,7 @@
#include "pim_igmp_join.h"
#include "pim_vxlan.h"
#include "pim_tib.h"
+#include "pim_util.h"
#include "pim6_mld.h"
@@ -215,7 +216,6 @@ void pim_if_delete(struct interface *ifp)
if (pim_ifp->bfd_config.profile)
XFREE(MTYPE_TMP, pim_ifp->bfd_config.profile);
- XFREE(MTYPE_PIM_INTERFACE, pim_ifp->boundary_oil_plist);
XFREE(MTYPE_PIM_INTERFACE, pim_ifp);
ifp->info = NULL;
@@ -601,26 +601,13 @@ void pim_if_addr_add(struct connected *ifc)
ifp->name);
}
}
- struct pim_nexthop_cache *pnc = NULL;
- struct pim_rpf rpf;
- struct zclient *zclient = NULL;
-
- zclient = pim_zebra_zclient_get();
- /* RP config might come prior to (local RP's interface)
- IF UP event.
- In this case, pnc would not have pim enabled
- nexthops.
- Once Interface is UP and pim info is available,
- reregister
- with RNH address to receive update and add the
- interface as nexthop. */
- memset(&rpf, 0, sizeof(struct pim_rpf));
- rpf.rpf_addr = pim_addr_from_prefix(ifc->address);
- pnc = pim_nexthop_cache_find(pim_ifp->pim, &rpf);
- if (pnc)
- pim_sendmsg_zebra_rnh(pim_ifp->pim, zclient,
- pnc,
- ZEBRA_NEXTHOP_REGISTER);
+
+			/* RP config might arrive before the local RP's interface UP
+			 * event. In that case the pnc would not have PIM-enabled
+			 * nexthops. Once the interface is up and PIM info is
+			 * available, re-register the RNH address to receive updates
+			 * and add the interface as a nexthop.
+			 */
+ pim_nht_get(pim_ifp->pim, pim_addr_from_prefix(ifc->address));
}
} /* pim */
@@ -1258,6 +1245,14 @@ static int gm_join_sock(const char *ifname, ifindex_t ifindex,
{
int join_fd;
+ if (pim_is_group_filtered(pim_ifp, &group_addr, &source_addr)) {
+ if (PIM_DEBUG_GM_EVENTS) {
+ zlog_debug("%s: join failed for (S,G)=(%pPAs,%pPAs) due to multicast boundary filtering",
+ __func__, &source_addr, &group_addr);
+ }
+ return -1;
+ }
+
pim_ifp->igmp_ifstat_joins_sent++;
join_fd = pim_socket_raw(IPPROTO_GM);
@@ -1464,8 +1459,7 @@ static void pim_if_gm_join_del_all(struct interface *ifp)
return;
for (ALL_LIST_ELEMENTS(pim_ifp->gm_join_list, node, nextnode, ij))
- pim_if_gm_join_del(ifp, ij->group_addr, ij->source_addr,
- GM_JOIN_STATIC);
+ pim_if_gm_join_del(ifp, ij->group_addr, ij->source_addr, ij->join_type);
}
ferr_r pim_if_static_group_add(struct interface *ifp, pim_addr group_addr,
@@ -2036,7 +2030,7 @@ void pim_pim_interface_delete(struct interface *ifp)
* pim_ifp->pim_neighbor_list.
*/
pim_sock_delete(ifp, "pim unconfigured on interface");
- pim_upstream_nh_if_update(pim_ifp->pim, ifp);
+ pim_nht_upstream_if_update(pim_ifp->pim, ifp);
if (!pim_ifp->gm_enable) {
pim_if_addr_del_all(ifp);
diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h
index 95bac084d2..90a81a21d0 100644
--- a/pimd/pim_iface.h
+++ b/pimd/pim_iface.h
@@ -133,8 +133,10 @@ struct pim_interface {
uint32_t pim_dr_priority; /* config */
int pim_dr_num_nondrpri_neighbors; /* neighbors without dr_pri */
- /* boundary prefix-list */
- char *boundary_oil_plist;
+ /* boundary prefix-list (group) */
+ struct prefix_list *boundary_oil_plist;
+ /* boundary access-list (source and group) */
+ struct access_list *boundary_acl;
/* Turn on Active-Active for this interface */
bool activeactive;
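With the boundary fields holding resolved prefix-list and access-list pointers instead of a name string, the combined check -- group against the prefix-list, optional (S,G) against the access-list -- might look like the sketch below. The real logic lives in pim_is_group_filtered() in pim_util.c (its hunks are elsewhere in this diff), so treat this shape as an assumption:

	static bool example_boundary_filtered(struct pim_interface *pim_ifp,
					      struct in_addr *grp,
					      struct in_addr *src)
	{
		struct prefix p;

		if (pim_ifp->boundary_oil_plist) {
			p.family = AF_INET;
			p.prefixlen = IPV4_MAX_BITLEN;
			p.u.prefix4 = *grp;
			if (prefix_list_apply(pim_ifp->boundary_oil_plist, &p) ==
			    PREFIX_DENY)
				return true;
		}

		if (src && pim_ifp->boundary_acl &&
		    pim_access_list_apply(pim_ifp->boundary_acl, src, grp) ==
			    FILTER_DENY)
			return true;

		return false;
	}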
diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c
index 1ba9bc45a2..12f424248f 100644
--- a/pimd/pim_igmp.c
+++ b/pimd/pim_igmp.c
@@ -666,7 +666,7 @@ static int igmp_v1_recv_report(struct gm_sock *igmp, struct in_addr from,
memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
- if (pim_is_group_filtered(ifp->info, &group_addr))
+ if (pim_is_group_filtered(ifp->info, &group_addr, NULL))
return -1;
/* non-existent group is created as INCLUDE {empty} */
diff --git a/pimd/pim_igmp_mtrace.c b/pimd/pim_igmp_mtrace.c
index 309da138d2..ad6f265101 100644
--- a/pimd/pim_igmp_mtrace.c
+++ b/pimd/pim_igmp_mtrace.c
@@ -16,6 +16,7 @@
#include "pim_oil.h"
#include "pim_ifchannel.h"
#include "pim_macro.h"
+#include "pim_nht.h"
#include "pim_igmp_mtrace.h"
static struct in_addr mtrace_primary_address(struct interface *ifp)
@@ -58,14 +59,14 @@ static bool mtrace_fwd_info_weak(struct pim_instance *pim,
memset(&nexthop, 0, sizeof(nexthop));
- if (!pim_nexthop_lookup(pim, &nexthop, mtracep->src_addr, 1)) {
+ if (!pim_nht_lookup(pim, &nexthop, mtracep->src_addr, 1)) {
if (PIM_DEBUG_MTRACE)
zlog_debug("mtrace not found neighbor");
return false;
}
if (PIM_DEBUG_MTRACE)
- zlog_debug("mtrace pim_nexthop_lookup OK");
+ zlog_debug("mtrace pim_nht_lookup OK");
if (PIM_DEBUG_MTRACE)
zlog_debug("mtrace next_hop=%pPAs", &nexthop.mrib_nexthop_addr);
@@ -353,7 +354,7 @@ static int mtrace_un_forward_packet(struct pim_instance *pim, struct ip *ip_hdr,
if (interface == NULL) {
memset(&nexthop, 0, sizeof(nexthop));
- if (!pim_nexthop_lookup(pim, &nexthop, ip_hdr->ip_dst, 0)) {
+ if (!pim_nht_lookup(pim, &nexthop, ip_hdr->ip_dst, 0)) {
if (PIM_DEBUG_MTRACE)
zlog_debug(
"Dropping mtrace packet, no route to destination");
@@ -535,7 +536,7 @@ static int mtrace_send_response(struct pim_instance *pim,
} else {
memset(&nexthop, 0, sizeof(nexthop));
/* TODO: should use unicast rib lookup */
- if (!pim_nexthop_lookup(pim, &nexthop, mtracep->rsp_addr, 1)) {
+ if (!pim_nht_lookup(pim, &nexthop, mtracep->rsp_addr, 1)) {
if (PIM_DEBUG_MTRACE)
zlog_debug(
"Dropped response qid=%ud, no route to response address",
diff --git a/pimd/pim_igmpv2.c b/pimd/pim_igmpv2.c
index 944dffdc33..720a4944fe 100644
--- a/pimd/pim_igmpv2.c
+++ b/pimd/pim_igmpv2.c
@@ -134,6 +134,9 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from,
ifp->name, group_str);
}
+ if (pim_is_group_filtered(pim_ifp, &group_addr, NULL))
+ return -1;
+
/*
* RFC 4604
* section 2.2.1
diff --git a/pimd/pim_igmpv3.c b/pimd/pim_igmpv3.c
index 2c5ad4d44b..7348d8130f 100644
--- a/pimd/pim_igmpv3.c
+++ b/pimd/pim_igmpv3.c
@@ -9,6 +9,8 @@
#include "memory.h"
#include "if.h"
#include "lib_errors.h"
+#include "plist.h"
+#include "plist_int.h"
#include "pimd.h"
#include "pim_instance.h"
@@ -507,6 +509,8 @@ static void allow(struct gm_sock *igmp, struct in_addr from,
struct in_addr *src_addr;
src_addr = sources + i;
+ if (pim_is_group_filtered(igmp->interface->info, &group_addr, src_addr))
+ continue;
source = igmp_get_source_by_addr(group, *src_addr, NULL);
if (!source)
@@ -646,7 +650,7 @@ void igmpv3_report_isex(struct gm_sock *igmp, struct in_addr from,
on_trace(__func__, ifp, from, group_addr, num_sources, sources);
- if (pim_is_group_filtered(ifp->info, &group_addr))
+ if (pim_is_group_filtered(ifp->info, &group_addr, NULL))
return;
/* non-existent group is created as INCLUDE {empty} */
@@ -1809,12 +1813,14 @@ static bool igmp_pkt_grp_addr_ok(struct interface *ifp, const char *from_str,
pim_ifp = ifp->info;
/* determine filtering status for group */
- if (pim_is_group_filtered(pim_ifp, &grp)) {
+ if (pim_is_group_filtered(pim_ifp, &grp, NULL)) {
if (PIM_DEBUG_GM_PACKETS) {
- zlog_debug(
- "Filtering IGMPv3 group record %pI4 from %s on %s per prefix-list %s",
- &grp.s_addr, from_str, ifp->name,
- pim_ifp->boundary_oil_plist);
+ zlog_debug("Filtering IGMPv3 group record %pI4 from %s on %s per prefix-list %s or access-list %s",
+ &grp.s_addr, from_str, ifp->name,
+ (pim_ifp->boundary_oil_plist ? pim_ifp->boundary_oil_plist->name
+ : "(not found)"),
+ (pim_ifp->boundary_acl ? pim_ifp->boundary_acl->name
+ : "(not found)"));
}
return false;
}
@@ -1943,11 +1949,9 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
sizeof(struct in_addr));
if (PIM_DEBUG_GM_PACKETS) {
- zlog_debug(
- " Recv IGMP report v3 from %s on %s: record=%d type=%d auxdatalen=%d sources=%d group=%pI4",
- from_str, ifp->name, i, rec_type,
- rec_auxdatalen, rec_num_sources,
- &rec_group);
+ zlog_debug(" Recv IGMP report v3 (type %d) from %s on %s: record=%d type=%d auxdatalen=%d sources=%d group=%pI4",
+ rec_type, from_str, ifp->name, i, rec_type, rec_auxdatalen,
+ rec_num_sources, &rec_group);
}
/* Scan sources */
diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c
index 4e4e5a6ce8..3945c5923d 100644
--- a/pimd/pim_instance.c
+++ b/pimd/pim_instance.c
@@ -15,6 +15,7 @@
#include "pim_ssm.h"
#include "pim_rpf.h"
#include "pim_rp.h"
+#include "pim_nht.h"
#include "pim_mroute.h"
#include "pim_oil.h"
#include "pim_static.h"
@@ -46,14 +47,15 @@ static void pim_instance_terminate(struct pim_instance *pim)
pim_bsm_proc_free(pim);
- /* Traverse and cleanup rpf_hash */
- hash_clean_and_free(&pim->rpf_hash, (void *)pim_rp_list_hash_clean);
+ pim_nht_terminate(pim);
pim_if_terminate(pim);
pim_oil_terminate(pim);
+#if PIM_IPV == 4
pim_msdp_exit(pim);
+#endif /* PIM_IPV == 4 */
close(pim->reg_sock);
@@ -73,7 +75,6 @@ static void pim_instance_terminate(struct pim_instance *pim)
static struct pim_instance *pim_instance_init(struct vrf *vrf)
{
struct pim_instance *pim;
- char hash_name[64];
pim = XCALLOC(MTYPE_PIM_PIM_INSTANCE, sizeof(struct pim_instance));
@@ -91,15 +92,12 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf)
pim->spt.switchover = PIM_SPT_IMMEDIATE;
pim->spt.plist = NULL;
+#if PIM_IPV == 4
pim_msdp_init(pim, router->master);
+#endif /* PIM_IPV == 4 */
pim_vxlan_init(pim);
- snprintf(hash_name, sizeof(hash_name), "PIM %s RPF Hash", vrf->name);
- pim->rpf_hash = hash_create_size(256, pim_rpf_hash_key, pim_rpf_equal,
- hash_name);
-
- if (PIM_DEBUG_ZEBRA)
- zlog_debug("%s: NHT rpf hash init ", __func__);
+ pim_nht_init(pim);
pim->ssm_info = pim_ssm_init();
@@ -126,11 +124,6 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf)
if (pim->reg_sock < 0)
assert(0);
- /* MSDP global timer defaults. */
- pim->msdp.hold_time = PIM_MSDP_PEER_HOLD_TIME;
- pim->msdp.keep_alive = PIM_MSDP_PEER_KA_TIME;
- pim->msdp.connection_retry = PIM_MSDP_PEER_CONNECT_RETRY_TIME;
-
#if PIM_IPV == 4
pim_autorp_init(pim);
#endif
diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h
index dab7ed2698..7f022111bc 100644
--- a/pimd/pim_instance.h
+++ b/pimd/pim_instance.h
@@ -115,7 +115,8 @@ struct pim_instance {
/* The name of the register-accept prefix-list */
char *register_plist;
- struct hash *rpf_hash;
+ struct hash *nht_hash;
+ enum pim_rpf_lookup_mode rpf_mode;
void *ssm_info; /* per-vrf SSM configuration */
@@ -150,7 +151,9 @@ struct pim_instance {
struct rb_pim_oil_head channel_oil_head;
+#if PIM_IPV == 4
struct pim_msdp msdp;
+#endif /* PIM_IPV == 4 */
struct pim_vxlan_instance vxlan;
struct pim_autorp *autorp;
@@ -225,7 +228,4 @@ extern struct pim_router *router;
struct pim_instance *pim_get_pim_instance(vrf_id_t vrf_id);
-extern bool pim_msdp_log_neighbor_events(const struct pim_instance *pim);
-extern bool pim_msdp_log_sa_events(const struct pim_instance *pim);
-
#endif
diff --git a/pimd/pim_join.c b/pimd/pim_join.c
index 2feafabb4d..7796e8b951 100644
--- a/pimd/pim_join.c
+++ b/pimd/pim_join.c
@@ -245,7 +245,7 @@ int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
uint16_t msg_num_pruned_sources;
int source;
struct pim_ifchannel *starg_ch = NULL, *sg_ch = NULL;
- bool filtered = false;
+ bool group_filtered = false;
memset(&sg, 0, sizeof(sg));
addr_offset = pim_parse_addr_group(&sg, buf, pastend - buf);
@@ -275,7 +275,7 @@ int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
&src_addr, ifp->name);
/* boundary check */
- filtered = pim_is_group_filtered(pim_ifp, &sg.grp);
+ group_filtered = pim_is_group_filtered(pim_ifp, &sg.grp, NULL);
/* Scan joined sources */
for (source = 0; source < msg_num_joined_sources; ++source) {
@@ -287,8 +287,8 @@ int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
buf += addr_offset;
- /* if we are filtering this group, skip the join */
- if (filtered)
+ /* if we are filtering this group or (S,G), skip the join */
+ if (group_filtered || pim_is_group_filtered(pim_ifp, &sg.grp, &sg.src))
continue;
recv_join(ifp, neigh, msg_holdtime, msg_upstream_addr,
@@ -312,10 +312,6 @@ int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
buf += addr_offset;
- /* if we are filtering this group, skip the prune */
- if (filtered)
- continue;
-
recv_prune(ifp, neigh, msg_holdtime, msg_upstream_addr,
&sg, msg_source_flags);
/*
@@ -361,7 +357,7 @@ int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
}
}
}
- if (starg_ch && !filtered)
+ if (starg_ch && !group_filtered)
pim_ifchannel_set_star_g_join_state(starg_ch, 1, 0);
starg_ch = NULL;
} /* scan groups */
diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c
index 9d290c3c6f..93bdd8dac9 100644
--- a/pimd/pim_mroute.c
+++ b/pimd/pim_mroute.c
@@ -35,6 +35,8 @@
#include "pim_sock.h"
#include "pim_vxlan.h"
#include "pim_msg.h"
+#include "pim_util.h"
+#include "pim_nht.h"
static void mroute_read_on(struct pim_instance *pim);
static int pim_upstream_mroute_update(struct channel_oil *c_oil,
@@ -271,7 +273,9 @@ int pim_mroute_msg_nocache(int fd, struct interface *ifp, const kernmsg *msg)
*oil_incoming_vif(up->channel_oil) >= MAXVIFS) {
pim_upstream_mroute_iif_update(up->channel_oil, __func__);
}
- pim_register_join(up);
+
+ if (!pim_is_group_filtered(pim_ifp, &sg.grp, &sg.src))
+ pim_register_join(up);
/* if we have receiver, inherit from parent */
pim_upstream_inherited_olist_decide(pim_ifp->pim, up);
@@ -563,8 +567,7 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
* setting the SPTBIT to true
*/
if (!(pim_addr_is_any(up->upstream_register)) &&
- pim_nexthop_lookup(pim_ifp->pim, &source,
- up->upstream_register, 0)) {
+ pim_nht_lookup(pim_ifp->pim, &source, up->upstream_register, 0)) {
pim_register_stop_send(source.interface, &sg,
pim_ifp->primary_address,
up->upstream_register);
@@ -577,9 +580,7 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
__func__);
} else {
if (I_am_RP(pim_ifp->pim, up->sg.grp)) {
- if (pim_nexthop_lookup(pim_ifp->pim, &source,
- up->upstream_register,
- 0))
+ if (pim_nht_lookup(pim_ifp->pim, &source, up->upstream_register, 0))
pim_register_stop_send(
source.interface, &sg,
pim_ifp->primary_address,
@@ -632,7 +633,8 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
pim_upstream_keep_alive_timer_start(
up, pim_ifp->pim->keep_alive_time);
up->channel_oil->cc.pktcnt++;
- pim_register_join(up);
+ if (!pim_is_group_filtered(pim_ifp, &sg.grp, &sg.src))
+ pim_register_join(up);
pim_upstream_inherited_olist(pim_ifp->pim, up);
if (!up->channel_oil->installed)
pim_upstream_mroute_add(up->channel_oil, __func__);
diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c
index ae887b2482..5e5ee5e91f 100644
--- a/pimd/pim_msdp.c
+++ b/pimd/pim_msdp.c
@@ -26,13 +26,12 @@
#include "pim_time.h"
#include "pim_upstream.h"
#include "pim_oil.h"
+#include "pim_nht.h"
#include "pim_msdp.h"
#include "pim_msdp_packet.h"
#include "pim_msdp_socket.h"
-// struct pim_msdp pim_msdp, *msdp = &pim_msdp;
-
static void pim_msdp_peer_listen(struct pim_msdp_peer *mp);
static void pim_msdp_peer_cr_timer_setup(struct pim_msdp_peer *mp, bool start);
static void pim_msdp_peer_ka_timer_setup(struct pim_msdp_peer *mp, bool start);
@@ -46,6 +45,26 @@ static void pim_msdp_sa_deref(struct pim_msdp_sa *sa,
static int pim_msdp_mg_mbr_comp(const void *p1, const void *p2);
static void pim_msdp_mg_mbr_free(struct pim_msdp_mg_mbr *mbr);
+void pim_msdp_originator_id(struct pim_instance *pim, const struct prefix *group,
+ struct in_addr *originator_id)
+{
+ struct rp_info *rp_info;
+
+ originator_id->s_addr = INADDR_ANY;
+
+ /* Originator ID was configured, use it. */
+ if (pim->msdp.originator_id.s_addr != INADDR_ANY) {
+ *originator_id = pim->msdp.originator_id;
+ return;
+ }
+
+ rp_info = pim_rp_find_match_group(pim, group);
+ if (rp_info) {
+ *originator_id = rp_info->rp.rpf_addr;
+ return;
+ }
+}
+
/************************ SA cache management ******************************/
/* RFC-3618:Sec-5.1 - global active source advertisement timer */
static void pim_msdp_sa_adv_timer_cb(struct event *t)
@@ -356,9 +375,17 @@ void pim_msdp_sa_ref(struct pim_instance *pim, struct pim_msdp_peer *mp,
pim_sgaddr *sg, struct in_addr rp)
{
struct pim_msdp_sa *sa;
- struct rp_info *rp_info;
struct prefix grp;
+ /* Check peer SA limit. */
+ if (mp && mp->sa_limit && mp->sa_cnt >= mp->sa_limit) {
+ if (pim_msdp_log_sa_events(pim))
+ zlog_debug("MSDP peer %pI4 reject SA (%pI4, %pI4): SA limit %u of %u",
+ &mp->peer, &sg->src, &sg->grp, mp->sa_cnt, mp->sa_limit);
+
+ return;
+ }
+
sa = pim_msdp_sa_add(pim, sg, rp);
if (!sa) {
return;
@@ -388,12 +415,7 @@ void pim_msdp_sa_ref(struct pim_instance *pim, struct pim_msdp_peer *mp,
/* send an immediate SA update to peers */
pim_addr_to_prefix(&grp, sa->sg.grp);
- rp_info = pim_rp_find_match_group(pim, &grp);
- if (rp_info) {
- sa->rp = rp_info->rp.rpf_addr;
- } else {
- sa->rp = pim->msdp.originator_id;
- }
+ pim_msdp_originator_id(pim, &grp, &sa->rp);
pim_msdp_pkt_sa_tx_one(sa);
}
sa->flags &= ~PIM_MSDP_SAF_STALE;
@@ -684,7 +706,7 @@ bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp)
}
/* check if the MSDP peer is the nexthop for the RP */
- if (pim_nexthop_lookup(mp->pim, &nexthop, rp, 0) &&
+ if (pim_nht_lookup(mp->pim, &nexthop, rp, 0) &&
nexthop.mrib_nexthop_addr.s_addr == mp->peer.s_addr) {
return true;
}
@@ -1006,8 +1028,6 @@ struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim,
mp->peer = *peer;
pim_inet4_dump("<peer?>", mp->peer, mp->key_str, sizeof(mp->key_str));
mp->local = *local;
- /* XXX: originator_id setting needs to move to the mesh group */
- pim->msdp.originator_id = *local;
if (mesh_group_name)
mp->mesh_group_name =
XSTRDUP(MTYPE_PIM_MSDP_MG_NAME, mesh_group_name);
@@ -1263,10 +1283,21 @@ int pim_msdp_config_write(struct pim_instance *pim, struct vty *vty)
char src_str[INET_ADDRSTRLEN];
int count = 0;
+ if (pim->msdp.hold_time != PIM_MSDP_PEER_HOLD_TIME ||
+ pim->msdp.keep_alive != PIM_MSDP_PEER_KA_TIME ||
+ pim->msdp.connection_retry != PIM_MSDP_PEER_CONNECT_RETRY_TIME) {
+ vty_out(vty, " msdp timers %u %u", pim->msdp.hold_time, pim->msdp.keep_alive);
+ if (pim->msdp.connection_retry != PIM_MSDP_PEER_CONNECT_RETRY_TIME)
+ vty_out(vty, " %u", pim->msdp.connection_retry);
+ vty_out(vty, "\n");
+ }
+
if (pim_msdp_log_neighbor_events(pim))
vty_out(vty, " msdp log neighbor-events\n");
if (pim_msdp_log_sa_events(pim))
vty_out(vty, " msdp log sa-events\n");
+ if (pim->msdp.shutdown)
+ vty_out(vty, " msdp shutdown\n");
if (SLIST_EMPTY(&pim->msdp.mglist))
return count;
@@ -1316,9 +1347,15 @@ bool pim_msdp_peer_config_write(struct vty *vty, struct pim_instance *pim)
vty_out(vty, " msdp peer %pI4 sa-filter %s out\n",
&mp->peer, mp->acl_out);
+ if (mp->sa_limit)
+ vty_out(vty, " msdp peer %pI4 sa-limit %u\n", &mp->peer, mp->sa_limit);
+
written = true;
}
+ if (pim->msdp.originator_id.s_addr != INADDR_ANY)
+ vty_out(vty, " msdp originator-id %pI4\n", &pim->msdp.originator_id);
+
if (pim->msdp.shutdown)
vty_out(vty, " msdp shutdown\n");
@@ -1361,6 +1398,11 @@ void pim_msdp_init(struct pim_instance *pim, struct event_loop *master)
pim->msdp.sa_list = list_new();
pim->msdp.sa_list->del = (void (*)(void *))pim_msdp_sa_free;
pim->msdp.sa_list->cmp = (int (*)(void *, void *))pim_msdp_sa_comp;
+
+ /* MSDP global timer defaults. */
+ pim->msdp.hold_time = PIM_MSDP_PEER_HOLD_TIME;
+ pim->msdp.keep_alive = PIM_MSDP_PEER_KA_TIME;
+ pim->msdp.connection_retry = PIM_MSDP_PEER_CONNECT_RETRY_TIME;
}
/* counterpart to MSDP init; XXX: unused currently */
@@ -1443,6 +1485,25 @@ struct pim_msdp_mg_mbr *pim_msdp_mg_mbr_add(struct pim_instance *pim,
return mbr;
}
+/* MSDP on RP needs to know if a source is registerable to this RP */
+static void pim_upstream_msdp_reg_timer(struct event *t)
+{
+ struct pim_upstream *up = EVENT_ARG(t);
+ struct pim_instance *pim = up->channel_oil->pim;
+
+ /* source is no longer active - pull the SA from MSDP's cache */
+ pim_msdp_sa_local_del(pim, &up->sg);
+}
+
+void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up)
+{
+ EVENT_OFF(up->t_msdp_reg_timer);
+ event_add_timer(router->master, pim_upstream_msdp_reg_timer, up, PIM_MSDP_REG_RXED_PERIOD,
+ &up->t_msdp_reg_timer);
+
+ pim_msdp_sa_local_update(up);
+}
+
void pim_msdp_shutdown(struct pim_instance *pim, bool state)
{
struct pim_msdp_peer *peer;
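pim_upstream_msdp_reg_timer_start() above uses the common cancel-then-rearm event idiom. A self-contained restatement with hypothetical names:

	/* EVENT_OFF() cancels any pending instance; event_add_timer() arms a
	 * fresh one; EVENT_ARG() recovers the context in the handler. */
	static void example_reg_expire(struct event *t)
	{
		struct pim_upstream *up = EVENT_ARG(t);

		/* the source stopped registering; withdraw its local SA */
		pim_msdp_sa_local_del(up->channel_oil->pim, &up->sg);
	}

	static void example_reg_restart(struct pim_upstream *up)
	{
		EVENT_OFF(up->t_msdp_reg_timer);
		event_add_timer(router->master, example_reg_expire, up,
				PIM_MSDP_REG_RXED_PERIOD, &up->t_msdp_reg_timer);
	}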
diff --git a/pimd/pim_msdp.h b/pimd/pim_msdp.h
index d0aa83d997..4edb6e6166 100644
--- a/pimd/pim_msdp.h
+++ b/pimd/pim_msdp.h
@@ -152,6 +152,9 @@ struct pim_msdp_peer {
char *acl_in;
/** SA output access list name. */
char *acl_out;
+
+ /** SA maximum amount. */
+ uint32_t sa_limit;
};
struct pim_msdp_mg_mbr {
@@ -232,8 +235,6 @@ struct pim_msdp {
#define PIM_MSDP_PEER_READ_OFF(mp) event_cancel(&mp->t_read)
#define PIM_MSDP_PEER_WRITE_OFF(mp) event_cancel(&mp->t_write)
-#if PIM_IPV != 6
-// struct pim_msdp *msdp;
struct pim_instance;
void pim_msdp_init(struct pim_instance *pim, struct event_loop *master);
void pim_msdp_exit(struct pim_instance *pim);
@@ -260,6 +261,8 @@ void pim_msdp_up_del(struct pim_instance *pim, pim_sgaddr *sg);
enum pim_msdp_err pim_msdp_mg_del(struct pim_instance *pim,
const char *mesh_group_name);
+extern void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up);
+
/**
* Allocates a new mesh group data structure under PIM instance.
*/
@@ -338,53 +341,17 @@ void pim_msdp_peer_restart(struct pim_msdp_peer *mp);
*/
void pim_msdp_shutdown(struct pim_instance *pim, bool state);
-#else /* PIM_IPV == 6 */
-static inline void pim_msdp_init(struct pim_instance *pim,
- struct event_loop *master)
-{
-}
-
-static inline void pim_msdp_exit(struct pim_instance *pim)
-{
-}
-
-static inline void pim_msdp_i_am_rp_changed(struct pim_instance *pim)
-{
-}
-
-static inline void pim_msdp_up_join_state_changed(struct pim_instance *pim,
- struct pim_upstream *xg_up)
-{
-}
-
-static inline void pim_msdp_up_del(struct pim_instance *pim, pim_sgaddr *sg)
-{
-}
-
-static inline void pim_msdp_sa_local_update(struct pim_upstream *up)
-{
-}
-
-static inline void pim_msdp_sa_local_del(struct pim_instance *pim,
- pim_sgaddr *sg)
-{
-}
-
-static inline int pim_msdp_config_write(struct pim_instance *pim,
- struct vty *vty)
-{
- return 0;
-}
-
-static inline bool pim_msdp_peer_config_write(struct vty *vty,
- struct pim_instance *pim)
-{
- return false;
-}
-
-static inline void pim_msdp_shutdown(struct pim_instance *pim, bool state)
-{
-}
-#endif /* PIM_IPV == 6 */
+/**
+ * Get the configured originator ID for the SA RP field or the RP for the group.
+ *
+ * \param[in] pim PIM instance that MSDP connection belongs to.
+ * \param[in] group Multicast group.
+ * \param[out] originator_id Originator output value.
+ */
+void pim_msdp_originator_id(struct pim_instance *pim, const struct prefix *group,
+ struct in_addr *originator_id);
+
+extern bool pim_msdp_log_neighbor_events(const struct pim_instance *pim);
+extern bool pim_msdp_log_sa_events(const struct pim_instance *pim);
#endif
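Typical use of the new helper, mirroring the call site in pim_msdp_pkt_sa_gen() later in this diff (the local declarations are added here only to make the fragment self-contained):

	struct prefix group_all;
	struct in_addr rp;

	/* Prefer the configured originator-id; otherwise fall back to the
	 * RP matching 224.0.0.0/4. */
	pim_get_all_mcast_group(&group_all);
	pim_msdp_originator_id(pim, &group_all, &rp);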
diff --git a/pimd/pim_msdp_packet.c b/pimd/pim_msdp_packet.c
index f66a941ee3..8c821cb5e5 100644
--- a/pimd/pim_msdp_packet.c
+++ b/pimd/pim_msdp_packet.c
@@ -367,53 +367,6 @@ static void pim_msdp_pkt_sa_fill_one(struct pim_msdp_sa *sa)
stream_put_ipv4(sa->pim->msdp.work_obuf, sa->sg.src.s_addr);
}
-static bool msdp_cisco_match(const struct filter *filter,
- const struct in_addr *source,
- const struct in_addr *group)
-{
- const struct filter_cisco *cfilter = &filter->u.cfilter;
- uint32_t source_addr;
- uint32_t group_addr;
-
- group_addr = group->s_addr & ~cfilter->mask_mask.s_addr;
-
- if (cfilter->extended) {
- source_addr = source->s_addr & ~cfilter->addr_mask.s_addr;
- if (group_addr == cfilter->mask.s_addr &&
- source_addr == cfilter->addr.s_addr)
- return true;
- } else if (group_addr == cfilter->addr.s_addr)
- return true;
-
- return false;
-}
-
-static enum filter_type msdp_access_list_apply(struct access_list *access,
- const struct in_addr *source,
- const struct in_addr *group)
-{
- struct filter *filter;
- struct prefix group_prefix;
-
- if (access == NULL)
- return FILTER_DENY;
-
- for (filter = access->head; filter; filter = filter->next) {
- if (filter->cisco) {
- if (msdp_cisco_match(filter, source, group))
- return filter->type;
- } else {
- group_prefix.family = AF_INET;
- group_prefix.prefixlen = IPV4_MAX_BITLEN;
- group_prefix.u.prefix4.s_addr = group->s_addr;
- if (access_list_apply(access, &group_prefix))
- return filter->type;
- }
- }
-
- return FILTER_DENY;
-}
-
bool msdp_peer_sa_filter(const struct pim_msdp_peer *mp,
const struct pim_msdp_sa *sa)
{
@@ -425,7 +378,7 @@ bool msdp_peer_sa_filter(const struct pim_msdp_peer *mp,
/* Find access list and test it. */
acl = access_list_lookup(AFI_IP, mp->acl_out);
- if (msdp_access_list_apply(acl, &sa->sg.src, &sa->sg.grp) == FILTER_DENY)
+ if (pim_access_list_apply(acl, &sa->sg.src, &sa->sg.grp) == FILTER_DENY)
return true;
return false;
@@ -456,7 +409,6 @@ static void pim_msdp_pkt_sa_gen(struct pim_instance *pim,
{
struct listnode *sanode;
struct pim_msdp_sa *sa;
- struct rp_info *rp_info;
struct prefix group_all;
struct in_addr rp;
int sa_count;
@@ -467,14 +419,8 @@ static void pim_msdp_pkt_sa_gen(struct pim_instance *pim,
zlog_debug(" sa gen %d", local_cnt);
}
- rp = pim->msdp.originator_id;
- if (pim_get_all_mcast_group(&group_all)) {
- rp_info = pim_rp_find_match_group(pim, &group_all);
- if (rp_info) {
- rp = rp_info->rp.rpf_addr;
- }
- }
-
+ pim_get_all_mcast_group(&group_all);
+ pim_msdp_originator_id(pim, &group_all, &rp);
local_cnt = pim_msdp_pkt_sa_fill_hdr(pim, local_cnt, rp);
for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
@@ -504,8 +450,7 @@ static void pim_msdp_pkt_sa_gen(struct pim_instance *pim,
zlog_debug(" sa gen for remainder %d",
local_cnt);
}
- local_cnt = pim_msdp_pkt_sa_fill_hdr(
- pim, local_cnt, rp);
+ local_cnt = pim_msdp_pkt_sa_fill_hdr(pim, local_cnt, rp);
}
}
@@ -641,7 +586,7 @@ static void pim_msdp_pkt_sa_rx_one(struct pim_msdp_peer *mp, struct in_addr rp)
/* Filter incoming SA with configured access list. */
if (mp->acl_in) {
acl = access_list_lookup(AFI_IP, mp->acl_in);
- if (msdp_access_list_apply(acl, &sg.src, &sg.grp) == FILTER_DENY) {
+ if (pim_access_list_apply(acl, &sg.src, &sg.grp) == FILTER_DENY) {
if (pim_msdp_log_sa_events(mp->pim))
zlog_info("MSDP peer %pI4 filter SA in (%pI4, %pI4)", &mp->peer,
&sg.src, &sg.grp);
diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c
index 4a5ad87942..b55541b810 100644
--- a/pimd/pim_nb.c
+++ b/pimd/pim_nb.c
@@ -142,6 +142,13 @@ const struct frr_yang_module_info frr_pim_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/originator-id",
+ .cbs = {
+ .modify = pim_msdp_originator_id_modify,
+ .destroy = pim_msdp_originator_id_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/shutdown",
.cbs = {
.modify = pim_msdp_shutdown_modify,
@@ -209,6 +216,13 @@ const struct frr_yang_module_info frr_pim_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer/sa-limit",
+ .cbs = {
+ .modify = pim_msdp_peer_sa_limit_modify,
+ .destroy = pim_msdp_peer_sa_limit_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag",
.cbs = {
.create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create,
@@ -250,6 +264,12 @@ const struct frr_yang_module_info frr_pim_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mcast-rpf-lookup",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify,
+ }
+ },
+ {
.xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family",
.cbs = {
.create = lib_interface_pim_address_family_create,
@@ -353,6 +373,13 @@ const struct frr_yang_module_info frr_pim_info = {
}
},
{
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/multicast-boundary-acl",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_multicast_boundary_acl_modify,
+ .destroy = lib_interface_pim_address_family_multicast_boundary_acl_destroy,
+ }
+ },
+ {
.xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/mroute",
.cbs = {
.create = lib_interface_pim_address_family_mroute_create,
diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h
index a9693c65d8..a5ef6ad60a 100644
--- a/pimd/pim_nb.h
+++ b/pimd/pim_nb.h
@@ -56,6 +56,8 @@ int pim_msdp_keep_alive_modify(struct nb_cb_modify_args *args);
int pim_msdp_connection_retry_modify(struct nb_cb_modify_args *args);
int pim_msdp_log_neighbor_events_modify(struct nb_cb_modify_args *args);
int pim_msdp_log_sa_events_modify(struct nb_cb_modify_args *args);
+int pim_msdp_originator_id_modify(struct nb_cb_modify_args *args);
+int pim_msdp_originator_id_destroy(struct nb_cb_destroy_args *args);
int pim_msdp_shutdown_modify(struct nb_cb_modify_args *args);
int pim_msdp_mesh_group_create(struct nb_cb_create_args *args);
int pim_msdp_mesh_group_destroy(struct nb_cb_destroy_args *args);
@@ -76,6 +78,8 @@ int pim_msdp_peer_sa_filter_out_destroy(struct nb_cb_destroy_args *args);
int pim_msdp_peer_authentication_type_modify(struct nb_cb_modify_args *args);
int pim_msdp_peer_authentication_key_modify(struct nb_cb_modify_args *args);
int pim_msdp_peer_authentication_key_destroy(struct nb_cb_destroy_args *args);
+int pim_msdp_peer_sa_limit_modify(struct nb_cb_modify_args *args);
+int pim_msdp_peer_sa_limit_destroy(struct nb_cb_destroy_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create(
struct nb_cb_create_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_destroy(
@@ -98,6 +102,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_re
struct nb_cb_modify_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_destroy(
struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify(
+ struct nb_cb_modify_args *args);
int lib_interface_pim_address_family_dr_priority_modify(
struct nb_cb_modify_args *args);
int lib_interface_pim_address_family_create(struct nb_cb_create_args *args);
@@ -140,6 +146,8 @@ int lib_interface_pim_address_family_multicast_boundary_oil_modify(
struct nb_cb_modify_args *args);
int lib_interface_pim_address_family_multicast_boundary_oil_destroy(
struct nb_cb_destroy_args *args);
+int lib_interface_pim_address_family_multicast_boundary_acl_modify(struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_multicast_boundary_acl_destroy(struct nb_cb_destroy_args *args);
int lib_interface_pim_address_family_mroute_create(
struct nb_cb_create_args *args);
int lib_interface_pim_address_family_mroute_destroy(
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 171614208f..b55d08bab9 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -157,7 +157,7 @@ static int pim_cmd_interface_add(struct interface *ifp)
pim_ifp->pim_enable = true;
pim_if_addr_add_all(ifp);
- pim_upstream_nh_if_update(pim_ifp->pim, ifp);
+ pim_nht_upstream_if_update(pim_ifp->pim, ifp);
pim_if_membership_refresh(ifp);
pim_if_create_pimreg(pim_ifp->pim);
@@ -1008,6 +1008,40 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ss
return NB_OK;
}
+pim6_msdp_err(pim_msdp_hold_time_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_keep_alive_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_connection_retry_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_mesh_group_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_mesh_group_create, nb_cb_create_args);
+pim6_msdp_err(pim_msdp_mesh_group_source_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_mesh_group_source_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_mesh_group_members_create, nb_cb_create_args);
+pim6_msdp_err(pim_msdp_mesh_group_members_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_peer_sa_filter_in_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_peer_sa_filter_in_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_peer_sa_filter_out_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_peer_sa_filter_out_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_peer_sa_limit_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_peer_sa_limit_destroy, nb_cb_destroy_args);
+pim6_msdp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify,
+ nb_cb_modify_args);
+pim6_msdp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_destroy,
+ nb_cb_destroy_args);
+pim6_msdp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_create,
+ nb_cb_create_args);
+pim6_msdp_err(pim_msdp_peer_authentication_type_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_peer_authentication_key_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_peer_authentication_key_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_log_neighbor_events_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_log_sa_events_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_originator_id_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_originator_id_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_shutdown_modify, nb_cb_modify_args);
+
+#if PIM_IPV != 6
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/hold-time
@@ -1081,26 +1115,6 @@ int pim_msdp_connection_retry_modify(struct nb_cb_modify_args *args)
return NB_OK;
}
-pim6_msdp_err(pim_msdp_mesh_group_destroy, nb_cb_destroy_args);
-pim6_msdp_err(pim_msdp_mesh_group_create, nb_cb_create_args);
-pim6_msdp_err(pim_msdp_mesh_group_source_modify, nb_cb_modify_args);
-pim6_msdp_err(pim_msdp_mesh_group_source_destroy, nb_cb_destroy_args);
-pim6_msdp_err(pim_msdp_mesh_group_members_create, nb_cb_create_args);
-pim6_msdp_err(pim_msdp_mesh_group_members_destroy, nb_cb_destroy_args);
-pim6_msdp_err(routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify,
- nb_cb_modify_args);
-pim6_msdp_err(routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_destroy,
- nb_cb_destroy_args);
-pim6_msdp_err(routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_create,
- nb_cb_create_args);
-pim6_msdp_err(pim_msdp_peer_authentication_type_modify, nb_cb_modify_args);
-pim6_msdp_err(pim_msdp_peer_authentication_key_modify, nb_cb_modify_args);
-pim6_msdp_err(pim_msdp_peer_authentication_key_destroy, nb_cb_destroy_args);
-pim6_msdp_err(pim_msdp_log_neighbor_events_modify, nb_cb_modify_args);
-pim6_msdp_err(pim_msdp_log_sa_events_modify, nb_cb_modify_args);
-pim6_msdp_err(pim_msdp_shutdown_modify, nb_cb_modify_args);
-
-#if PIM_IPV != 6
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/log-neighbor-events
@@ -1161,6 +1175,50 @@ int pim_msdp_log_sa_events_modify(struct nb_cb_modify_args *args)
/*
* XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/originator-id
+ */
+int pim_msdp_originator_id_modify(struct nb_cb_modify_args *args)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ yang_dnode_get_ipv4(&pim->msdp.originator_id, args->dnode, NULL);
+ break;
+ }
+
+ return NB_OK;
+}
+
+int pim_msdp_originator_id_destroy(struct nb_cb_destroy_args *args)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ pim->msdp.originator_id.s_addr = INADDR_ANY;
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/shutdown
*/
int pim_msdp_shutdown_modify(struct nb_cb_modify_args *args)
@@ -1489,7 +1547,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ms
return NB_OK;
}
-#endif /* PIM_IPV != 6 */
/*
* XPath:
@@ -1580,6 +1637,49 @@ int pim_msdp_peer_sa_filter_out_destroy(struct nb_cb_destroy_args *args)
}
/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer/sa-limit
+ */
+int pim_msdp_peer_sa_limit_modify(struct nb_cb_modify_args *args)
+{
+ struct pim_msdp_peer *mp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ case NB_EV_APPLY:
+ mp = nb_running_get_entry(args->dnode, NULL, true);
+ mp->sa_limit = yang_dnode_get_uint32(args->dnode, NULL);
+ break;
+ }
+
+ return NB_OK;
+}
+
+int pim_msdp_peer_sa_limit_destroy(struct nb_cb_destroy_args *args)
+{
+ struct pim_msdp_peer *mp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ case NB_EV_APPLY:
+ mp = nb_running_get_entry(args->dnode, NULL, true);
+ mp->sa_limit = 0;
+ break;
+ }
+
+ return NB_OK;
+}
+#endif /* PIM_IPV != 6 */
+
+/*
* XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag
*/
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create(
@@ -1793,6 +1893,39 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_re
}
/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mcast-rpf-lookup
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ enum pim_rpf_lookup_mode old_mode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ old_mode = pim->rpf_mode;
+ pim->rpf_mode = yang_dnode_get_enum(args->dnode, NULL);
+
+ if (pim->rpf_mode != old_mode &&
+ /* MCAST_MIX_MRIB_FIRST is the default if not configured */
+ (old_mode != MCAST_NO_CONFIG && pim->rpf_mode != MCAST_MIX_MRIB_FIRST)) {
+ pim_nht_mode_changed(pim);
+ }
+
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
* XPath: /frr-interface:lib/interface/frr-pim:pim/address-family
*/
int lib_interface_pim_address_family_create(struct nb_cb_create_args *args)
@@ -2390,7 +2523,6 @@ int lib_interface_pim_address_family_multicast_boundary_oil_modify(
{
struct interface *ifp;
struct pim_interface *pim_ifp;
- const char *plist;
const struct lyd_node *if_dnode;
switch (args->event) {
@@ -2398,7 +2530,12 @@ int lib_interface_pim_address_family_multicast_boundary_oil_modify(
if_dnode = yang_dnode_get_parent(args->dnode, "interface");
if (!is_pim_interface(if_dnode)) {
snprintf(args->errmsg, args->errmsg_len,
- "Pim not enabled on this interface");
+ "%% Enable PIM and/or IGMP on this interface first");
+ return NB_ERR_VALIDATION;
+ }
+ if (!prefix_list_lookup(AFI_IP, yang_dnode_get_string(args->dnode, NULL))) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Specified prefix-list not found");
return NB_ERR_VALIDATION;
}
break;
@@ -2408,13 +2545,8 @@ int lib_interface_pim_address_family_multicast_boundary_oil_modify(
case NB_EV_APPLY:
ifp = nb_running_get_entry(args->dnode, NULL, true);
pim_ifp = ifp->info;
- plist = yang_dnode_get_string(args->dnode, NULL);
-
- if (pim_ifp->boundary_oil_plist)
- XFREE(MTYPE_PIM_INTERFACE, pim_ifp->boundary_oil_plist);
-
pim_ifp->boundary_oil_plist =
- XSTRDUP(MTYPE_PIM_INTERFACE, plist);
+ prefix_list_lookup(AFI_IP, yang_dnode_get_string(args->dnode, NULL));
break;
}
@@ -2444,8 +2576,72 @@ int lib_interface_pim_address_family_multicast_boundary_oil_destroy(
case NB_EV_APPLY:
ifp = nb_running_get_entry(args->dnode, NULL, true);
pim_ifp = ifp->info;
- if (pim_ifp->boundary_oil_plist)
- XFREE(MTYPE_PIM_INTERFACE, pim_ifp->boundary_oil_plist);
+ pim_ifp->boundary_oil_plist = NULL;
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/multicast-boundary-acl
+ */
+int lib_interface_pim_address_family_multicast_boundary_acl_modify(struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Enable PIM and/or IGMP on this interface first");
+ return NB_ERR_VALIDATION;
+ }
+ if (!access_list_lookup(AFI_IP, yang_dnode_get_string(args->dnode, NULL))) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Specified access-list not found");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->boundary_acl =
+ access_list_lookup(AFI_IP, yang_dnode_get_string(args->dnode, NULL));
+ break;
+ }
+
+ return NB_OK;
+}
+
+int lib_interface_pim_address_family_multicast_boundary_acl_destroy(struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Enable PIM and/or IGMP on this interface first");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->boundary_acl = NULL;
break;
}
@@ -2549,9 +2745,8 @@ int lib_interface_pim_address_family_mroute_oif_modify(
#ifdef PIM_ENFORCE_LOOPFREE_MFC
iif = nb_running_get_entry(args->dnode, NULL, false);
- if (!iif) {
+ if (!iif)
return NB_OK;
- }
pim_iifp = iif->info;
pim = pim_iifp->pim;
@@ -2662,13 +2857,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
else if (yang_dnode_get(args->dnode, "prefix-list")) {
plist = yang_dnode_get_string(args->dnode,
"./prefix-list");
- if (!pim_get_all_mcast_group(&group)) {
- flog_err(
- EC_LIB_DEVELOPMENT,
- "Unable to convert 224.0.0.0/4 to prefix");
- return NB_ERR_INCONSISTENCY;
- }
-
+ pim_get_all_mcast_group(&group);
result = pim_no_rp_cmd_worker(pim, rp_addr, group,
plist, args->errmsg,
args->errmsg_len);
@@ -2760,11 +2949,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
pim = vrf->info;
plist = yang_dnode_get_string(args->dnode, NULL);
yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
- if (!pim_get_all_mcast_group(&group)) {
- flog_err(EC_LIB_DEVELOPMENT,
- "Unable to convert 224.0.0.0/4 to prefix");
- return NB_ERR_INCONSISTENCY;
- }
+ pim_get_all_mcast_group(&group);
return pim_rp_cmd_worker(pim, rp_addr, group, plist,
args->errmsg, args->errmsg_len);
}
@@ -2791,11 +2976,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
pim = vrf->info;
yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
plist = yang_dnode_get_string(args->dnode, NULL);
- if (!pim_get_all_mcast_group(&group)) {
- flog_err(EC_LIB_DEVELOPMENT,
- "Unable to convert 224.0.0.0/4 to prefix");
- return NB_ERR_INCONSISTENCY;
- }
+ pim_get_all_mcast_group(&group);
return pim_no_rp_cmd_worker(pim, rp_addr, group, plist,
args->errmsg, args->errmsg_len);
break;
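
Several hunks above drop the error handling around pim_get_all_mcast_group(), implying the helper is now infallible. A hedged sketch of the assumed shape (the actual definition lives outside this diff; constants and field names follow FRR's struct prefix):

    /* Assumed shape: fill in the constant all-multicast prefix; cannot fail. */
    void pim_get_all_mcast_group(struct prefix *prefix)
    {
            memset(prefix, 0, sizeof(*prefix));
    #if PIM_IPV == 4
            prefix->family = AF_INET;
            prefix->prefixlen = 4;
            prefix->u.prefix4.s_addr = htonl(0xe0000000); /* 224.0.0.0/4 */
    #else
            prefix->family = AF_INET6;
            prefix->prefixlen = 8;
            prefix->u.prefix6.s6_addr[0] = 0xff; /* ff00::/8 */
    #endif
    }
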
diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c
index f2dbfa9765..00ab46b4cd 100644
--- a/pimd/pim_nht.c
+++ b/pimd/pim_nht.c
@@ -38,118 +38,267 @@
* pim_sendmsg_zebra_rnh -- Format and send a nexthop register/Unregister
* command to Zebra.
*/
-void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
- struct pim_nexthop_cache *pnc, int command)
+static void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient, pim_addr addr,
+ int command)
{
struct prefix p;
int ret;
- pim_addr_to_prefix(&p, pnc->rpf.rpf_addr);
- ret = zclient_send_rnh(zclient, command, &p, SAFI_UNICAST, false, false,
- pim->vrf->vrf_id);
+ pim_addr_to_prefix(&p, addr);
+
+ /* Register to track nexthops from the MRIB */
+ ret = zclient_send_rnh(zclient, command, &p, SAFI_MULTICAST, false, false, pim->vrf->vrf_id);
+ if (ret == ZCLIENT_SEND_FAILURE)
+ zlog_warn(
+ "sendmsg_nexthop: zclient_send_message() failed registering MRIB tracking");
+
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: MRIB NHT %sregistered addr %pFX(%s) with Zebra ret:%d ", __func__,
+ (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p, pim->vrf->name,
+ ret);
+
+ /* Also register to track nexthops from the URIB */
+ ret = zclient_send_rnh(zclient, command, &p, SAFI_UNICAST, false, false, pim->vrf->vrf_id);
if (ret == ZCLIENT_SEND_FAILURE)
- zlog_warn("sendmsg_nexthop: zclient_send_message() failed");
+ zlog_warn(
+ "sendmsg_nexthop: zclient_send_message() failed registering URIB tracking");
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: NHT %sregistered addr %pFX(%s) with Zebra ret:%d ",
- __func__,
- (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p,
- pim->vrf->name, ret);
+ zlog_debug("%s: URIB NHT %sregistered addr %pFX(%s) with Zebra ret:%d ", __func__,
+ (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p, pim->vrf->name,
+ ret);
return;
}
-struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
- struct pim_rpf *rpf)
+static struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim, pim_addr addr)
{
struct pim_nexthop_cache *pnc = NULL;
struct pim_nexthop_cache lookup;
- lookup.rpf.rpf_addr = rpf->rpf_addr;
- pnc = hash_lookup(pim->rpf_hash, &lookup);
+ lookup.addr = addr;
+ pnc = hash_lookup(pim->nht_hash, &lookup);
return pnc;
}
-static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim,
- struct pim_rpf *rpf_addr)
+static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim, pim_addr addr)
{
struct pim_nexthop_cache *pnc;
char hash_name[64];
- pnc = XCALLOC(MTYPE_PIM_NEXTHOP_CACHE,
- sizeof(struct pim_nexthop_cache));
- pnc->rpf.rpf_addr = rpf_addr->rpf_addr;
+ /* This function is only ever called if we are unable to find an entry, so
+ * the hash_get should always add a new entry
+ */
+ pnc = XCALLOC(MTYPE_PIM_NEXTHOP_CACHE, sizeof(struct pim_nexthop_cache));
+ pnc->addr = addr;
- pnc = hash_get(pim->rpf_hash, pnc, hash_alloc_intern);
+ pnc = hash_get(pim->nht_hash, pnc, hash_alloc_intern);
pnc->rp_list = list_new();
pnc->rp_list->cmp = pim_rp_list_cmp;
- snprintfrr(hash_name, sizeof(hash_name), "PNC %pPA(%s) Upstream Hash",
- &pnc->rpf.rpf_addr, pim->vrf->name);
- pnc->upstream_hash = hash_create_size(8192, pim_upstream_hash_key,
- pim_upstream_equal, hash_name);
+ snprintfrr(hash_name, sizeof(hash_name), "PNC %pPA(%s) Upstream Hash", &pnc->addr,
+ pim->vrf->name);
+ pnc->upstream_hash = hash_create_size(32, pim_upstream_hash_key, pim_upstream_equal,
+ hash_name);
return pnc;
}
-static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
- pim_addr addr)
+static bool pim_nht_pnc_has_answer(struct pim_instance *pim, struct pim_nexthop_cache *pnc)
+{
+ switch (pim->rpf_mode) {
+ case MCAST_MRIB_ONLY:
+ return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_ANSWER_RECEIVED);
+
+ case MCAST_URIB_ONLY:
+ return CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_ANSWER_RECEIVED);
+
+ case MCAST_MIX_MRIB_FIRST:
+ case MCAST_NO_CONFIG:
+ case MCAST_MIX_DISTANCE:
+ case MCAST_MIX_PFXLEN:
+	 * This check determines whether we've received the answers necessary to make an NH decision.
+	 * For the mixed modes, where we may look up from either the MRIB or the URIB, let's require
+	 * an answer for both tables.
+ */
+ return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_ANSWER_RECEIVED) &&
+ CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_ANSWER_RECEIVED);
+
+ default:
+ break;
+ }
+ return false;
+}
+
+static struct pim_nexthop_cache_rib *pim_pnc_get_rib(struct pim_instance *pim,
+ struct pim_nexthop_cache *pnc)
+{
+ struct pim_nexthop_cache_rib *pnc_rib = NULL;
+
+ if (pim->rpf_mode == MCAST_MRIB_ONLY)
+ pnc_rib = &pnc->mrib;
+ else if (pim->rpf_mode == MCAST_URIB_ONLY)
+ pnc_rib = &pnc->urib;
+ else if (pim->rpf_mode == MCAST_MIX_MRIB_FIRST || pim->rpf_mode == MCAST_NO_CONFIG) {
+ if (pnc->mrib.nexthop_num > 0)
+ pnc_rib = &pnc->mrib;
+ else
+ pnc_rib = &pnc->urib;
+ } else if (pim->rpf_mode == MCAST_MIX_DISTANCE) {
+ if (pnc->mrib.distance <= pnc->urib.distance)
+ pnc_rib = &pnc->mrib;
+ else
+ pnc_rib = &pnc->urib;
+ } else if (pim->rpf_mode == MCAST_MIX_PFXLEN) {
+ if (pnc->mrib.prefix_len >= pnc->urib.prefix_len)
+ pnc_rib = &pnc->mrib;
+ else
+ pnc_rib = &pnc->urib;
+ }
+
+ return pnc_rib;
+}
+
+bool pim_nht_pnc_is_valid(struct pim_instance *pim, struct pim_nexthop_cache *pnc)
+{
+ switch (pim->rpf_mode) {
+ case MCAST_MRIB_ONLY:
+ return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_VALID);
+
+ case MCAST_URIB_ONLY:
+ return CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_VALID);
+
+ case MCAST_MIX_MRIB_FIRST:
+ case MCAST_NO_CONFIG:
+ case MCAST_MIX_DISTANCE:
+ case MCAST_MIX_PFXLEN:
+	 * The valid flag is only set if there are nexthops, so in the mixed modes the MRIB might
+	 * not have any; consider the entry valid if at least one RIB is valid.
+ */
+ return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_VALID) ||
+ CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_VALID);
+
+ default:
+ break;
+ }
+ return false;
+}
+
+struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim, pim_addr addr)
{
struct pim_nexthop_cache *pnc = NULL;
- struct pim_rpf rpf;
struct zclient *zclient = NULL;
zclient = pim_zebra_zclient_get();
- memset(&rpf, 0, sizeof(rpf));
- rpf.rpf_addr = addr;
+ pnc = pim_nexthop_cache_find(pim, addr);
- pnc = pim_nexthop_cache_find(pim, &rpf);
- if (!pnc) {
- pnc = pim_nexthop_cache_add(pim, &rpf);
- pim_sendmsg_zebra_rnh(pim, zclient, pnc,
- ZEBRA_NEXTHOP_REGISTER);
- if (PIM_DEBUG_PIM_NHT_DETAIL)
- zlog_debug(
- "%s: NHT cache and zebra notification added for %pPA(%s)",
- __func__, &addr, pim->vrf->name);
- }
+ if (pnc)
+ return pnc;
+
+ pnc = pim_nexthop_cache_add(pim, addr);
+ pim_sendmsg_zebra_rnh(pim, zclient, pnc->addr, ZEBRA_NEXTHOP_REGISTER);
+
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: NHT cache and zebra notification added for %pPA(%s)", __func__,
+ &addr, pim->vrf->name);
return pnc;
}
-/* TBD: this does several distinct things and should probably be split up.
- * (checking state vs. returning pnc vs. adding upstream vs. adding rp)
+void pim_nht_set_gateway(struct pim_instance *pim, struct pim_nexthop_cache *pnc, pim_addr addr,
+ struct interface *ifp)
+{
+ struct nexthop *nh_node = NULL;
+ struct interface *ifp1 = NULL;
+
+ for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) {
+ /* If the gateway is already set, then keep it */
+#if PIM_IPV == 4
+ if (!pim_addr_is_any(nh_node->gate.ipv4))
+ continue;
+#else
+ if (!pim_addr_is_any(nh_node->gate.ipv6))
+ continue;
+#endif
+
+ /* Only set gateway on the correct interface */
+ ifp1 = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+ if (ifp != ifp1)
+ continue;
+
+ /* Update the gateway address with the given address */
+#if PIM_IPV == 4
+ nh_node->gate.ipv4 = addr;
+#else
+ nh_node->gate.ipv6 = addr;
+#endif
+ if (PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("%s: addr %pPA new MRIB nexthop addr %pPAs interface %s",
+ __func__, &pnc->addr, &addr, ifp1->name);
+ }
+
+ /* Now do the same with URIB nexthop entries */
+ for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) {
+#if PIM_IPV == 4
+ if (!pim_addr_is_any(nh_node->gate.ipv4))
+ continue;
+#else
+ if (!pim_addr_is_any(nh_node->gate.ipv6))
+ continue;
+#endif
+
+ ifp1 = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+
+ if (ifp != ifp1)
+ continue;
+
+#if PIM_IPV == 4
+ nh_node->gate.ipv4 = addr;
+#else
+ nh_node->gate.ipv6 = addr;
+#endif
+ if (PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("%s: addr %pPA new URIB nexthop addr %pPAs interface %s",
+ __func__, &pnc->addr, &addr, ifp1->name);
+ }
+}
+
+/* Finds the nexthop cache entry for the given address; if no entry exists, add one for tracking.
+ * Up and/or rp may be given to register on the entry so that they get updates when the nexthop changes.
+ * If out_pnc is not null, then copy the nexthop cache entry to it.
+ * Return true if an entry was found and is valid.
*/
-int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
- struct pim_upstream *up, struct rp_info *rp,
- struct pim_nexthop_cache *out_pnc)
+bool pim_nht_find_or_track(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up,
+ struct rp_info *rp, struct pim_nexthop_cache *out_pnc)
{
struct pim_nexthop_cache *pnc;
struct listnode *ch_node = NULL;
+ /* This will find the entry and add it to tracking if not found */
pnc = pim_nht_get(pim, addr);
assertf(up || rp, "addr=%pPA", &addr);
+ /* Store the RP if provided and not currently in the list */
if (rp != NULL) {
ch_node = listnode_lookup(pnc->rp_list, rp);
if (ch_node == NULL)
listnode_add_sort(pnc->rp_list, rp);
}
+ /* Store the upstream if provided and not currently in the list */
if (up != NULL)
(void)hash_get(pnc->upstream_hash, up, hash_alloc_intern);
- if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID)) {
+ if (pim_nht_pnc_is_valid(pim, pnc)) {
if (out_pnc)
memcpy(out_pnc, pnc, sizeof(struct pim_nexthop_cache));
- return 1;
+ return true;
}
- return 0;
+ return false;
}
void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr)
@@ -157,7 +306,6 @@ void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr)
struct pim_nexthop_cache *pnc;
pnc = pim_nht_get(pim, addr);
-
pnc->bsr_count++;
}
@@ -166,47 +314,47 @@ bool pim_nht_candrp_add(struct pim_instance *pim, pim_addr addr)
struct pim_nexthop_cache *pnc;
pnc = pim_nht_get(pim, addr);
-
pnc->candrp_count++;
- return CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID);
+ return pim_nht_pnc_is_valid(pim, pnc);
}
-static void pim_nht_drop_maybe(struct pim_instance *pim,
- struct pim_nexthop_cache *pnc)
+static void pim_nht_drop_maybe(struct pim_instance *pim, struct pim_nexthop_cache *pnc)
{
if (PIM_DEBUG_PIM_NHT)
zlog_debug("%s: NHT %pPA(%s) rp_list count:%d upstream count:%ld BSR count:%u Cand-RP count:%u",
- __func__, &pnc->rpf.rpf_addr, pim->vrf->name,
- pnc->rp_list->count, pnc->upstream_hash->count,
- pnc->bsr_count, pnc->candrp_count);
+ __func__, &pnc->addr, pim->vrf->name, pnc->rp_list->count,
+ pnc->upstream_hash->count, pnc->bsr_count, pnc->candrp_count);
- if (pnc->rp_list->count == 0 && pnc->upstream_hash->count == 0 &&
- pnc->bsr_count == 0 && pnc->candrp_count == 0) {
+ if (pnc->rp_list->count == 0 && pnc->upstream_hash->count == 0 && pnc->bsr_count == 0 &&
+ pnc->candrp_count == 0) {
struct zclient *zclient = pim_zebra_zclient_get();
- pim_sendmsg_zebra_rnh(pim, zclient, pnc,
- ZEBRA_NEXTHOP_UNREGISTER);
+ pim_sendmsg_zebra_rnh(pim, zclient, pnc->addr, ZEBRA_NEXTHOP_UNREGISTER);
list_delete(&pnc->rp_list);
+
hash_free(pnc->upstream_hash);
+ hash_release(pim->nht_hash, pnc);
+
+ if (pnc->urib.nexthop)
+ nexthops_free(pnc->urib.nexthop);
+ if (pnc->mrib.nexthop)
+ nexthops_free(pnc->mrib.nexthop);
- hash_release(pim->rpf_hash, pnc);
- if (pnc->nexthop)
- nexthops_free(pnc->nexthop);
XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
}
}
-void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr,
- struct pim_upstream *up, struct rp_info *rp)
+void pim_nht_delete_tracked(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up,
+ struct rp_info *rp)
{
struct pim_nexthop_cache *pnc = NULL;
struct pim_nexthop_cache lookup;
struct pim_upstream *upstream = NULL;
/* Remove from RPF hash if it is the last entry */
- lookup.rpf.rpf_addr = addr;
- pnc = hash_lookup(pim->rpf_hash, &lookup);
+ lookup.addr = addr;
+ pnc = hash_lookup(pim->nht_hash, &lookup);
if (!pnc) {
zlog_warn("attempting to delete nonexistent NHT entry %pPA",
&addr);
@@ -251,9 +399,9 @@ void pim_nht_bsr_del(struct pim_instance *pim, pim_addr addr)
if (pim_addr_is_any(addr))
return;
- lookup.rpf.rpf_addr = addr;
+ lookup.addr = addr;
- pnc = hash_lookup(pim->rpf_hash, &lookup);
+ pnc = hash_lookup(pim->nht_hash, &lookup);
if (!pnc) {
zlog_warn("attempting to delete nonexistent NHT BSR entry %pPA",
@@ -272,9 +420,9 @@ void pim_nht_candrp_del(struct pim_instance *pim, pim_addr addr)
struct pim_nexthop_cache *pnc = NULL;
struct pim_nexthop_cache lookup;
- lookup.rpf.rpf_addr = addr;
+ lookup.addr = addr;
- pnc = hash_lookup(pim->rpf_hash, &lookup);
+ pnc = hash_lookup(pim->nht_hash, &lookup);
if (!pnc) {
zlog_warn("attempting to delete nonexistent NHT C-RP entry %pPA",
@@ -297,10 +445,10 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
struct nexthop *nh;
struct interface *ifp;
- lookup.rpf.rpf_addr = bsr_addr;
+ lookup.addr = bsr_addr;
- pnc = hash_lookup(pim->rpf_hash, &lookup);
- if (!pnc || !CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED)) {
+ pnc = hash_lookup(pim->nht_hash, &lookup);
+ if (!pnc || !pim_nht_pnc_has_answer(pim, pnc)) {
/* BSM from a new freshly registered BSR - do a synchronous
* zebra query since otherwise we'd drop the first packet,
* leading to additional delay in picking up BSM data
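
The synchronous fallback described in that comment can be pictured with the pim_zlookup API that appears later in this diff. A hedged sketch, assuming FRR headers; bsr_rpf_sync_lookup() is a hypothetical name, not the commit's code:

    static bool bsr_rpf_sync_lookup(struct pim_instance *pim, pim_addr bsr_addr,
                                    ifindex_t *out_ifindex)
    {
            struct pim_zlookup_nexthop nexthop_tab[router->multipath];
            int num_ifindex;

            /* Blocking query to zebra so the very first BSM is not dropped */
            memset(nexthop_tab, 0,
                   sizeof(struct pim_zlookup_nexthop) * router->multipath);
            num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab,
                                                 router->multipath, bsr_addr,
                                                 PIM_NEXTHOP_LOOKUP_MAX);
            if (num_ifindex < 1)
                    return false;

            /* The first returned path carries the RPF interface to check */
            *out_ifindex = nexthop_tab[0].ifindex;
            return true;
    }
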
@@ -359,91 +507,92 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
return false;
}
- if (!CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID))
- return false;
-
- /* if we accept BSMs from more than one ECMP nexthop, this will cause
- * BSM message "multiplication" for each ECMP hop. i.e. if you have
- * 4-way ECMP and 4 hops you end up with 256 copies of each BSM
- * message.
- *
- * so... only accept the first (IPv4) valid nexthop as source.
- */
+ if (pim_nht_pnc_is_valid(pim, pnc)) {
+ /* if we accept BSMs from more than one ECMP nexthop, this will cause
+ * BSM message "multiplication" for each ECMP hop. i.e. if you have
+ * 4-way ECMP and 4 hops you end up with 256 copies of each BSM
+ * message.
+ *
+ * so... only accept the first (IPv4) valid nexthop as source.
+ */
+ struct pim_nexthop_cache_rib *rib = pim_pnc_get_rib(pim, pnc);
- for (nh = pnc->nexthop; nh; nh = nh->next) {
- pim_addr nhaddr;
+ for (nh = rib->nexthop; nh; nh = nh->next) {
+ pim_addr nhaddr;
- switch (nh->type) {
+ switch (nh->type) {
#if PIM_IPV == 4
- case NEXTHOP_TYPE_IPV4:
- if (nh->ifindex == IFINDEX_INTERNAL)
- continue;
+ case NEXTHOP_TYPE_IPV4:
+ if (nh->ifindex == IFINDEX_INTERNAL)
+ continue;
- fallthrough;
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- nhaddr = nh->gate.ipv4;
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- continue;
-#else
- case NEXTHOP_TYPE_IPV6:
- if (nh->ifindex == IFINDEX_INTERNAL)
+ fallthrough;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ nhaddr = nh->gate.ipv4;
+ break;
+
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
continue;
+#else
+ case NEXTHOP_TYPE_IPV6:
+ if (nh->ifindex == IFINDEX_INTERNAL)
+ continue;
- fallthrough;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- nhaddr = nh->gate.ipv6;
- break;
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- continue;
+ fallthrough;
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ nhaddr = nh->gate.ipv6;
+ break;
+
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ continue;
#endif
- case NEXTHOP_TYPE_IFINDEX:
- nhaddr = bsr_addr;
- break;
+ case NEXTHOP_TYPE_IFINDEX:
+ nhaddr = bsr_addr;
+ break;
- case NEXTHOP_TYPE_BLACKHOLE:
- continue;
- }
+ case NEXTHOP_TYPE_BLACKHOLE:
+ continue;
+ }
- ifp = if_lookup_by_index(nh->ifindex, pim->vrf->vrf_id);
- if (!ifp || !ifp->info)
- continue;
+ ifp = if_lookup_by_index(nh->ifindex, pim->vrf->vrf_id);
+ if (!ifp || !ifp->info)
+ continue;
- if (if_is_loopback(ifp) && if_is_loopback(src_ifp))
- return true;
+ if (if_is_loopback(ifp) && if_is_loopback(src_ifp))
+ return true;
- /* MRIB (IGP) may be pointing at a router where PIM is down */
- nbr = pim_neighbor_find(ifp, nhaddr, true);
- if (!nbr)
- continue;
+ /* MRIB (IGP) may be pointing at a router where PIM is down */
+ nbr = pim_neighbor_find(ifp, nhaddr, true);
+ if (!nbr)
+ continue;
- /* Are we on the correct interface? */
- if (nh->ifindex == src_ifp->ifindex) {
- /* Do we have the correct NH ? */
- if (!pim_addr_cmp(nhaddr, src_ip))
- return true;
- /*
- * check If the packet came from the neighbor,
- * and the dst is a secondary address on the connected interface
- */
- return (!pim_addr_cmp(nbr->source_addr, src_ip) &&
- pim_if_connected_to_source(ifp, nhaddr));
+ /* Are we on the correct interface? */
+ if (nh->ifindex == src_ifp->ifindex) {
+ /* Do we have the correct NH ? */
+ if (!pim_addr_cmp(nhaddr, src_ip))
+ return true;
+ /*
+			 * check if the packet came from the neighbor,
+ * and the dst is a secondary address on the connected interface
+ */
+ return (!pim_addr_cmp(nbr->source_addr, src_ip) &&
+ pim_if_connected_to_source(ifp, nhaddr));
+ }
+ return false;
}
- return false;
}
return false;
}
-void pim_rp_nexthop_del(struct rp_info *rp_info)
+void pim_nht_rp_del(struct rp_info *rp_info)
{
rp_info->rp.source_nexthop.interface = NULL;
rp_info->rp.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY;
rp_info->rp.source_nexthop.mrib_metric_preference =
router->infinite_assert_metric.metric_preference;
- rp_info->rp.source_nexthop.mrib_route_metric =
- router->infinite_assert_metric.route_metric;
+ rp_info->rp.source_nexthop.mrib_route_metric = router->infinite_assert_metric.route_metric;
}
/* Update RP nexthop info based on Nexthop update received from Zebra.*/
@@ -461,10 +610,9 @@ static void pim_update_rp_nh(struct pim_instance *pim,
ifp = rp_info->rp.source_nexthop.interface;
// Compute PIM RPF using cached nexthop
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
- rp_info->rp.rpf_addr,
- &rp_info->group, 1))
- pim_rp_nexthop_del(rp_info);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, rp_info->rp.rpf_addr,
+ &rp_info->group, true))
+ pim_nht_rp_del(rp_info);
/*
* If we transition from no path to a path
@@ -544,33 +692,43 @@ static int pim_upstream_nh_if_update_helper(struct hash_bucket *bucket,
struct pim_instance *pim = pwd->pim;
struct interface *ifp = pwd->ifp;
struct nexthop *nh_node = NULL;
- ifindex_t first_ifindex;
- for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
- first_ifindex = nh_node->ifindex;
- if (ifp != if_lookup_by_index(first_ifindex, pim->vrf->vrf_id))
- continue;
+ /* This update happens when an interface is added to/removed from pim.
+ * So go through both MRIB and URIB and update any upstreams for any
+ * matching nexthop
+ */
+ for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) {
+ if (ifp->ifindex == nh_node->ifindex) {
+ if (pnc->upstream_hash->count) {
+ pim_update_upstream_nh(pim, pnc);
+ break;
+ }
+ }
+ }
- if (pnc->upstream_hash->count) {
- pim_update_upstream_nh(pim, pnc);
- break;
+ for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) {
+ if (ifp->ifindex == nh_node->ifindex) {
+ if (pnc->upstream_hash->count) {
+ pim_update_upstream_nh(pim, pnc);
+ break;
+ }
}
}
return HASHWALK_CONTINUE;
}
-void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp)
+void pim_nht_upstream_if_update(struct pim_instance *pim, struct interface *ifp)
{
struct pnc_hash_walk_data pwd;
pwd.pim = pim;
pwd.ifp = ifp;
- hash_walk(pim->rpf_hash, pim_upstream_nh_if_update_helper, &pwd);
+ hash_walk(pim->nht_hash, pim_upstream_nh_if_update_helper, &pwd);
}
-uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
+static uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
{
uint32_t hash_val;
@@ -583,47 +741,42 @@ uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
return hash_val;
}
-static int pim_ecmp_nexthop_search(struct pim_instance *pim,
- struct pim_nexthop_cache *pnc,
- struct pim_nexthop *nexthop, pim_addr src,
- struct prefix *grp, int neighbor_needed)
+static bool pim_ecmp_nexthop_search(struct pim_instance *pim, struct pim_nexthop_cache *pnc,
+ struct pim_nexthop *nexthop, pim_addr src, struct prefix *grp,
+ bool neighbor_needed)
{
- struct pim_neighbor *nbrs[router->multipath], *nbr = NULL;
- struct interface *ifps[router->multipath];
struct nexthop *nh_node = NULL;
- ifindex_t first_ifindex;
- struct interface *ifp = NULL;
- uint32_t hash_val = 0, mod_val = 0;
- uint16_t nh_iter = 0, found = 0;
- uint32_t i, num_nbrs = 0;
- struct pim_interface *pim_ifp;
-
- if (!pnc || !pnc->nexthop_num || !nexthop)
- return 0;
-
- pim_addr nh_addr = nexthop->mrib_nexthop_addr;
- pim_addr grp_addr = pim_addr_from_prefix(grp);
+ uint32_t hash_val = 0;
+ uint32_t mod_val = 0;
+ uint16_t nh_iter = 0;
+ bool found = false;
+ uint32_t num_nbrs = 0;
+ pim_addr nh_addr;
+ pim_addr grp_addr;
+ struct pim_nexthop_cache_rib *rib;
- memset(&nbrs, 0, sizeof(nbrs));
- memset(&ifps, 0, sizeof(ifps));
+ /* Early return if required parameters aren't provided */
+ if (!pim || !pnc || !pim_nht_pnc_is_valid(pim, pnc) || !nexthop || !grp)
+ return false;
+ nh_addr = nexthop->mrib_nexthop_addr;
+ grp_addr = pim_addr_from_prefix(grp);
+ rib = pim_pnc_get_rib(pim, pnc);
- // Current Nexthop is VALID, check to stay on the current path.
+ /* Current Nexthop is VALID, check to stay on the current path. */
if (nexthop->interface && nexthop->interface->info &&
(!pim_addr_is_any(nh_addr))) {
- /* User configured knob to explicitly switch
- to new path is disabled or current path
- metric is less than nexthop update.
+		/* The user-configured knob to explicitly switch to a new path is disabled, or
+		 * the current path metric is less than the nexthop update.
*/
+ if (!pim->ecmp_rebalance_enable) {
+ bool curr_route_valid = false;
- if (pim->ecmp_rebalance_enable == 0) {
- uint8_t curr_route_valid = 0;
- // Check if current nexthop is present in new updated
- // Nexthop list.
- // If the current nexthop is not valid, candidate to
- // choose new Nexthop.
- for (nh_node = pnc->nexthop; nh_node;
- nh_node = nh_node->next) {
+			/* Check if the current nexthop is present in the newly updated nexthop list.
+			 * If the current nexthop is no longer valid, it is a candidate for choosing a
+			 * new nexthop.
+ */
+ for (nh_node = rib->nexthop; nh_node; nh_node = nh_node->next) {
curr_route_valid = (nexthop->interface->ifindex
== nh_node->ifindex);
if (curr_route_valid)
@@ -633,9 +786,9 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
if (curr_route_valid &&
!pim_if_connected_to_source(nexthop->interface,
src)) {
- nbr = pim_neighbor_find(
- nexthop->interface,
- nexthop->mrib_nexthop_addr, true);
+ struct pim_neighbor *nbr =
+ pim_neighbor_find(nexthop->interface,
+ nexthop->mrib_nexthop_addr, true);
if (!nbr
&& !if_is_loopback(nexthop->interface)) {
if (PIM_DEBUG_PIM_NHT)
@@ -646,10 +799,8 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
/* update metric even if the upstream
* neighbor stays unchanged
*/
- nexthop->mrib_metric_preference =
- pnc->distance;
- nexthop->mrib_route_metric =
- pnc->metric;
+ nexthop->mrib_metric_preference = rib->distance;
+ nexthop->mrib_route_metric = rib->metric;
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s: (%pPA,%pPA)(%s) current nexthop %s is valid, skipping new path selection",
@@ -657,40 +808,39 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
&grp_addr,
pim->vrf->name,
nexthop->interface->name);
- return 1;
+ return true;
}
}
}
}
- /*
- * Look up all interfaces and neighbors,
- * store for later usage
- */
- for (nh_node = pnc->nexthop, i = 0; nh_node;
- nh_node = nh_node->next, i++) {
- ifps[i] =
- if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
- if (ifps[i]) {
+ /* Count the number of neighbors for ECMP */
+ for (nh_node = rib->nexthop; nh_node; nh_node = nh_node->next) {
+ struct pim_neighbor *nbr;
+ struct interface *ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+
+ if (!ifp)
+ continue;
+
#if PIM_IPV == 4
- pim_addr nhaddr = nh_node->gate.ipv4;
+ pim_addr nhaddr = nh_node->gate.ipv4;
#else
- pim_addr nhaddr = nh_node->gate.ipv6;
+ pim_addr nhaddr = nh_node->gate.ipv6;
#endif
- nbrs[i] = pim_neighbor_find(ifps[i], nhaddr, true);
- if (nbrs[i] || pim_if_connected_to_source(ifps[i], src))
- num_nbrs++;
- }
+ nbr = pim_neighbor_find(ifp, nhaddr, true);
+ if (nbr || pim_if_connected_to_source(ifp, src))
+ num_nbrs++;
}
+
if (pim->ecmp_enable) {
struct prefix src_pfx;
- uint32_t consider = pnc->nexthop_num;
+ uint32_t consider = rib->nexthop_num;
if (neighbor_needed && num_nbrs < consider)
consider = num_nbrs;
if (consider == 0)
- return 0;
+ return false;
// PIM ECMP flag is enable then choose ECMP path.
pim_addr_to_prefix(&src_pfx, src);
@@ -698,16 +848,16 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
mod_val = hash_val % consider;
}
- for (nh_node = pnc->nexthop; nh_node && (found == 0);
- nh_node = nh_node->next) {
- first_ifindex = nh_node->ifindex;
- ifp = ifps[nh_iter];
+ for (nh_node = rib->nexthop; nh_node && !found; nh_node = nh_node->next) {
+ struct pim_neighbor *nbr = NULL;
+ struct pim_interface *pim_ifp;
+ struct interface *ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+
if (!ifp) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s %s: could not find interface for ifindex %d (address %pPA(%s))",
- __FILE__, __func__, first_ifindex, &src,
- pim->vrf->name);
+ zlog_debug("%s %s: could not find interface for ifindex %d (address %pPA(%s))",
+ __FILE__, __func__, nh_node->ifindex, &src,
+ pim->vrf->name);
if (nh_iter == mod_val)
mod_val++; // Select nexthpath
nh_iter++;
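
The mod_val bookkeeping above is the core of the hash-based ECMP pick: the (S,G) hash modulo the usable candidate count selects a target index, and the target is bumped past candidates that turn out to be unusable so the walk still lands on a valid nexthop. A standalone model of just that selection logic (simplified sketch, not FRR code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Pick an index among 'n' candidates the same way the loops above do:
     * hash modulo the usable count, skipping unusable entries by advancing
     * the target index. Returns -1 if no candidate is usable.
     */
    static int ecmp_pick(const bool *usable, uint32_t n, uint32_t hash_val)
    {
            uint32_t consider = 0, mod_val, i;

            for (i = 0; i < n; i++)
                    if (usable[i])
                            consider++;
            if (consider == 0)
                    return -1;

            mod_val = hash_val % consider;
            for (i = 0; i < n; i++) {
                    if (!usable[i]) {
                            if (i == mod_val)
                                    mod_val++; /* skip past unusable candidate */
                            continue;
                    }
                    if (i == mod_val)
                            return (int)i;
            }
            return -1;
    }
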
@@ -718,10 +868,9 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
if (!pim_ifp || !pim_ifp->pim_enable) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
- __func__, ifp->name, pim->vrf->name,
- first_ifindex, &src);
+ zlog_debug("%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
+ __func__, ifp->name, pim->vrf->name, nh_node->ifindex,
+ &src);
if (nh_iter == mod_val)
mod_val++; // Select nexthpath
nh_iter++;
@@ -729,7 +878,12 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
}
if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) {
- nbr = nbrs[nh_iter];
+#if PIM_IPV == 4
+ nbr = pim_neighbor_find(ifp, nh_node->gate.ipv4, true);
+#else
+ nbr = pim_neighbor_find(ifp, nh_node->gate.ipv6, true);
+#endif
+
if (!nbr && !if_is_loopback(ifp)) {
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
@@ -750,12 +904,12 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
#else
nexthop->mrib_nexthop_addr = nh_node->gate.ipv6;
#endif
- nexthop->mrib_metric_preference = pnc->distance;
- nexthop->mrib_route_metric = pnc->metric;
+ nexthop->mrib_metric_preference = rib->distance;
+ nexthop->mrib_route_metric = rib->metric;
nexthop->last_lookup = src;
nexthop->last_lookup_time = pim_time_monotonic_usec();
nexthop->nbr = nbr;
- found = 1;
+ found = true;
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s: (%pPA,%pPA)(%s) selected nhop interface %s addr %pPAs mod_val %u iter %d ecmp %d",
@@ -766,260 +920,55 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
nh_iter++;
}
- if (found)
- return 1;
- else
- return 0;
+ return found;
}
-/* This API is used to parse Registered address nexthop update coming from Zebra
- */
-void pim_nexthop_update(struct vrf *vrf, struct prefix *match,
- struct zapi_route *nhr)
-{
- struct nexthop *nexthop;
- struct nexthop *nhlist_head = NULL;
- struct nexthop *nhlist_tail = NULL;
- int i;
- struct pim_rpf rpf;
- struct pim_nexthop_cache *pnc = NULL;
- struct interface *ifp = NULL;
- struct pim_instance *pim;
-
- pim = vrf->info;
-
- rpf.rpf_addr = pim_addr_from_prefix(match);
- pnc = pim_nexthop_cache_find(pim, &rpf);
- if (!pnc) {
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: Skipping NHT update, addr %pPA is not in local cached DB.",
- __func__, &rpf.rpf_addr);
- return;
- }
-
- pnc->last_update = pim_time_monotonic_usec();
-
- if (nhr->nexthop_num) {
- pnc->nexthop_num = 0;
-
- for (i = 0; i < nhr->nexthop_num; i++) {
- nexthop = nexthop_from_zapi_nexthop(&nhr->nexthops[i]);
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IFINDEX:
- /*
- * Connected route (i.e. no nexthop), use
- * RPF address from nexthop cache (i.e.
- * destination) as PIM nexthop.
- */
-#if PIM_IPV == 4
- nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- nexthop->gate.ipv4 = pnc->rpf.rpf_addr;
-#else
- nexthop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
- nexthop->gate.ipv6 = pnc->rpf.rpf_addr;
-#endif
- break;
-#if PIM_IPV == 4
- /* RFC5549 IPv4-over-IPv6 nexthop handling:
- * if we get an IPv6 nexthop in IPv4 PIM, hunt down a
- * PIM neighbor and use that instead.
- */
- case NEXTHOP_TYPE_IPV6_IFINDEX: {
- struct interface *ifp1 = NULL;
- struct pim_neighbor *nbr = NULL;
-
- ifp1 = if_lookup_by_index(nexthop->ifindex,
- pim->vrf->vrf_id);
-
- if (!ifp1)
- nbr = NULL;
- else
- /* FIXME: should really use nbr's
- * secondary address list here
- */
- nbr = pim_neighbor_find_if(ifp1);
-
- /* Overwrite with Nbr address as NH addr */
- if (nbr)
- nexthop->gate.ipv4 = nbr->source_addr;
- else
- // Mark nexthop address to 0 until PIM
- // Nbr is resolved.
- nexthop->gate.ipv4 = PIMADDR_ANY;
-
- break;
- }
-#else
- case NEXTHOP_TYPE_IPV6_IFINDEX:
-#endif
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- case NEXTHOP_TYPE_BLACKHOLE:
- /* nothing to do for the other nexthop types */
- break;
- }
-
- ifp = if_lookup_by_index(nexthop->ifindex,
- pim->vrf->vrf_id);
- if (!ifp) {
- if (PIM_DEBUG_PIM_NHT) {
- char buf[NEXTHOP_STRLEN];
- zlog_debug(
- "%s: could not find interface for ifindex %d(%s) (addr %s)",
- __func__, nexthop->ifindex,
- pim->vrf->name,
- nexthop2str(nexthop, buf,
- sizeof(buf)));
- }
- nexthop_free(nexthop);
- continue;
- }
-
- if (PIM_DEBUG_PIM_NHT) {
-#if PIM_IPV == 4
- pim_addr nhaddr = nexthop->gate.ipv4;
-#else
- pim_addr nhaddr = nexthop->gate.ipv6;
-#endif
- zlog_debug("%s: NHT addr %pFX(%s) %d-nhop via %pPA(%s) type %d distance:%u metric:%u ",
- __func__, match, pim->vrf->name,
- i + 1, &nhaddr, ifp->name,
- nexthop->type, nhr->distance,
- nhr->metric);
- }
-
- if (!ifp->info) {
- /*
- * Though Multicast is not enabled on this
- * Interface store it in database otheriwse we
- * may miss this update and this will not cause
- * any issue, because while choosing the path we
- * are ommitting the Interfaces which are not
- * multicast enabled
- */
- if (PIM_DEBUG_PIM_NHT) {
- char buf[NEXTHOP_STRLEN];
-
- zlog_debug(
- "%s: multicast not enabled on input interface %s(%s) (ifindex=%d, addr %s)",
- __func__, ifp->name,
- pim->vrf->name,
- nexthop->ifindex,
- nexthop2str(nexthop, buf,
- sizeof(buf)));
- }
- }
-
- if (nhlist_tail) {
- nhlist_tail->next = nexthop;
- nhlist_tail = nexthop;
- } else {
- nhlist_tail = nexthop;
- nhlist_head = nexthop;
- }
-
- // Keep track of all nexthops, even PIM-disabled ones.
- pnc->nexthop_num++;
- }
- /* Reset existing pnc->nexthop before assigning new list */
- nexthops_free(pnc->nexthop);
- pnc->nexthop = nhlist_head;
- if (pnc->nexthop_num) {
- pnc->flags |= PIM_NEXTHOP_VALID;
- pnc->distance = nhr->distance;
- pnc->metric = nhr->metric;
- }
- } else {
- pnc->flags &= ~PIM_NEXTHOP_VALID;
- pnc->nexthop_num = nhr->nexthop_num;
- nexthops_free(pnc->nexthop);
- pnc->nexthop = NULL;
- }
- SET_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED);
-
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug("%s: NHT Update for %pFX(%s) num_nh %d num_pim_nh %d vrf:%u up %ld rp %d",
- __func__, match, pim->vrf->name, nhr->nexthop_num,
- pnc->nexthop_num, vrf->vrf_id,
- pnc->upstream_hash->count, listcount(pnc->rp_list));
-
- pim_rpf_set_refresh_time(pim);
-
- if (listcount(pnc->rp_list))
- pim_update_rp_nh(pim, pnc);
- if (pnc->upstream_hash->count)
- pim_update_upstream_nh(pim, pnc);
-
- if (pnc->candrp_count)
- pim_crp_nht_update(pim, pnc);
-}
-
-int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
- struct pim_nexthop *nexthop, pim_addr src,
- struct prefix *grp, int neighbor_needed)
+bool pim_nht_lookup_ecmp(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr src,
+ struct prefix *grp, bool neighbor_needed)
{
struct pim_nexthop_cache *pnc;
struct pim_zlookup_nexthop nexthop_tab[router->multipath];
- struct pim_neighbor *nbrs[router->multipath], *nbr = NULL;
- struct pim_rpf rpf;
int num_ifindex;
- struct interface *ifps[router->multipath], *ifp;
- int first_ifindex;
- int found = 0;
+ bool found = false;
uint16_t i = 0;
- uint32_t hash_val = 0, mod_val = 0;
+ uint32_t hash_val = 0;
+ uint32_t mod_val = 0;
uint32_t num_nbrs = 0;
- struct pim_interface *pim_ifp;
if (PIM_DEBUG_PIM_NHT_DETAIL)
- zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld",
- __func__, &src, pim->vrf->name,
- nexthop->last_lookup_time);
-
- rpf.rpf_addr = src;
+ zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld", __func__, &src,
+ pim->vrf->name, nexthop->last_lookup_time);
- pnc = pim_nexthop_cache_find(pim, &rpf);
+ pnc = pim_nexthop_cache_find(pim, src);
if (pnc) {
- if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED))
- return pim_ecmp_nexthop_search(pim, pnc, nexthop, src, grp,
- neighbor_needed);
+ if (pim_nht_pnc_has_answer(pim, pnc))
+ return pim_ecmp_nexthop_search(pim, pnc, nexthop, src, grp, neighbor_needed);
}
- memset(nexthop_tab, 0,
- sizeof(struct pim_zlookup_nexthop) * router->multipath);
- num_ifindex =
- zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src,
- PIM_NEXTHOP_LOOKUP_MAX);
+ memset(nexthop_tab, 0, sizeof(struct pim_zlookup_nexthop) * router->multipath);
+ num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src,
+ PIM_NEXTHOP_LOOKUP_MAX);
if (num_ifindex < 1) {
if (PIM_DEBUG_PIM_NHT)
- zlog_warn(
- "%s: could not find nexthop ifindex for address %pPA(%s)",
- __func__, &src, pim->vrf->name);
- return 0;
+ zlog_warn("%s: could not find nexthop ifindex for address %pPA(%s)",
+ __func__, &src, pim->vrf->name);
+ return false;
}
- memset(&nbrs, 0, sizeof(nbrs));
- memset(&ifps, 0, sizeof(ifps));
-
- /*
- * Look up all interfaces and neighbors,
- * store for later usage
- */
+ /* Count the number of neighbors for ECMP computation */
for (i = 0; i < num_ifindex; i++) {
- ifps[i] = if_lookup_by_index(nexthop_tab[i].ifindex,
- pim->vrf->vrf_id);
- if (ifps[i]) {
- nbrs[i] = pim_neighbor_find(
- ifps[i], nexthop_tab[i].nexthop_addr, true);
-
- if (nbrs[i] || pim_if_connected_to_source(ifps[i], src))
- num_nbrs++;
- }
+ struct pim_neighbor *nbr;
+ struct interface *ifp = if_lookup_by_index(nexthop_tab[i].ifindex, pim->vrf->vrf_id);
+
+ if (!ifp)
+ continue;
+
+ nbr = pim_neighbor_find(ifp, nexthop_tab[i].nexthop_addr, true);
+ if (nbr || pim_if_connected_to_source(ifp, src))
+ num_nbrs++;
}
- // If PIM ECMP enable then choose ECMP path.
+	/* If PIM ECMP is enabled then choose an ECMP path. */
if (pim->ecmp_enable) {
struct prefix src_pfx;
uint32_t consider = num_ifindex;
@@ -1028,30 +977,27 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
consider = num_nbrs;
if (consider == 0)
- return 0;
+ return false;
pim_addr_to_prefix(&src_pfx, src);
hash_val = pim_compute_ecmp_hash(&src_pfx, grp);
mod_val = hash_val % consider;
if (PIM_DEBUG_PIM_NHT_DETAIL)
- zlog_debug("%s: hash_val %u mod_val %u", __func__,
- hash_val, mod_val);
+ zlog_debug("%s: hash_val %u mod_val %u", __func__, hash_val, mod_val);
}
- i = 0;
- while (!found && (i < num_ifindex)) {
- first_ifindex = nexthop_tab[i].ifindex;
+ for (i = 0; i < num_ifindex && !found; i++) {
+ struct pim_neighbor *nbr = NULL;
+ struct pim_interface *pim_ifp;
+ struct interface *ifp = if_lookup_by_index(nexthop_tab[i].ifindex, pim->vrf->vrf_id);
- ifp = ifps[i];
if (!ifp) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s %s: could not find interface for ifindex %d (address %pPA(%s))",
- __FILE__, __func__, first_ifindex, &src,
- pim->vrf->name);
+ zlog_debug("%s %s: could not find interface for ifindex %d (address %pPA(%s))",
+ __FILE__, __func__, nexthop_tab[i].ifindex, &src,
+ pim->vrf->name);
if (i == mod_val)
mod_val++;
- i++;
continue;
}
@@ -1059,99 +1005,431 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
if (!pim_ifp || !pim_ifp->pim_enable) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
- __func__, ifp->name, pim->vrf->name,
- first_ifindex, &src);
+ zlog_debug("%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
+ __func__, ifp->name, pim->vrf->name,
+ nexthop_tab[i].ifindex, &src);
if (i == mod_val)
mod_val++;
- i++;
continue;
}
+
if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) {
- nbr = nbrs[i];
+ nbr = pim_neighbor_find(ifp, nexthop_tab[i].nexthop_addr, true);
if (PIM_DEBUG_PIM_NHT_DETAIL)
- zlog_debug("ifp name: %s(%s), pim nbr: %p",
- ifp->name, pim->vrf->name, nbr);
+ zlog_debug("ifp name: %s(%s), pim nbr: %p", ifp->name,
+ pim->vrf->name, nbr);
if (!nbr && !if_is_loopback(ifp)) {
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: NBR (%pPA) not found on input interface %s(%s) (RPF for source %pPA)",
+ __func__, &nexthop_tab[i].nexthop_addr,
+ ifp->name, pim->vrf->name, &src);
if (i == mod_val)
mod_val++;
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: NBR (%pPA) not found on input interface %s(%s) (RPF for source %pPA)",
- __func__,
- &nexthop_tab[i].nexthop_addr,
- ifp->name, pim->vrf->name,
- &src);
- i++;
continue;
}
}
if (i == mod_val) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: found nhop %pPA for addr %pPA interface %s(%s) metric %d dist %d",
- __func__, &nexthop_tab[i].nexthop_addr,
- &src, ifp->name, pim->vrf->name,
- nexthop_tab[i].route_metric,
- nexthop_tab[i].protocol_distance);
+ zlog_debug("%s: found nhop %pPA for addr %pPA interface %s(%s) metric %d dist %d",
+ __func__, &nexthop_tab[i].nexthop_addr, &src, ifp->name,
+ pim->vrf->name, nexthop_tab[i].route_metric,
+ nexthop_tab[i].protocol_distance);
/* update nexthop data */
nexthop->interface = ifp;
- nexthop->mrib_nexthop_addr =
- nexthop_tab[i].nexthop_addr;
- nexthop->mrib_metric_preference =
- nexthop_tab[i].protocol_distance;
- nexthop->mrib_route_metric =
- nexthop_tab[i].route_metric;
+ nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr;
+ nexthop->mrib_metric_preference = nexthop_tab[i].protocol_distance;
+ nexthop->mrib_route_metric = nexthop_tab[i].route_metric;
nexthop->last_lookup = src;
nexthop->last_lookup_time = pim_time_monotonic_usec();
nexthop->nbr = nbr;
- found = 1;
+ found = true;
}
- i++;
}
- if (found)
- return 1;
- else
- return 0;
+ return found;
}
-int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src,
- struct prefix *grp)
+bool pim_nht_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr addr,
+ int neighbor_needed)
+{
+ struct pim_zlookup_nexthop nexthop_tab[router->multipath];
+ struct pim_neighbor *nbr = NULL;
+ int num_ifindex;
+ struct interface *ifp = NULL;
+ ifindex_t first_ifindex = 0;
+ bool found = false;
+ int i = 0;
+ struct pim_interface *pim_ifp;
+
+#if PIM_IPV == 4
+ /*
+	 * We should not attempt to look up a
+ * 255.255.255.255 address, since
+ * it will never work
+ */
+ if (pim_addr_is_any(addr))
+ return false;
+#endif
+
+ if ((!pim_addr_cmp(nexthop->last_lookup, addr)) &&
+ (nexthop->last_lookup_time > pim->last_route_change_time)) {
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: Using last lookup for %pPAs at %lld, %" PRId64 " addr %pPAs",
+ __func__, &addr, nexthop->last_lookup_time,
+ pim->last_route_change_time, &nexthop->mrib_nexthop_addr);
+ pim->nexthop_lookups_avoided++;
+ return true;
+ }
+
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: Looking up: %pPAs, last lookup time: %lld, %" PRId64, __func__,
+ &addr, nexthop->last_lookup_time, pim->last_route_change_time);
+
+ memset(nexthop_tab, 0, sizeof(struct pim_zlookup_nexthop) * router->multipath);
+ num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, addr,
+ PIM_NEXTHOP_LOOKUP_MAX);
+ if (num_ifindex < 1) {
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: could not find nexthop ifindex for address %pPAs", __func__,
+ &addr);
+ return false;
+ }
+
+ while (!found && (i < num_ifindex)) {
+ first_ifindex = nexthop_tab[i].ifindex;
+
+ ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
+ if (!ifp) {
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: could not find interface for ifindex %d (address %pPAs)",
+ __func__, first_ifindex, &addr);
+ i++;
+ continue;
+ }
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp || !pim_ifp->pim_enable) {
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: pim not enabled on input interface %s (ifindex=%d, RPF for source %pPAs)",
+ __func__, ifp->name, first_ifindex, &addr);
+ i++;
+ } else if (neighbor_needed && !pim_if_connected_to_source(ifp, addr)) {
+ nbr = pim_neighbor_find(ifp, nexthop_tab[i].nexthop_addr, true);
+ if (PIM_DEBUG_PIM_TRACE_DETAIL)
+ zlog_debug("ifp name: %s, pim nbr: %p", ifp->name, nbr);
+ if (!nbr && !if_is_loopback(ifp))
+ i++;
+ else
+ found = true;
+ } else
+ found = true;
+ }
+
+ if (found) {
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: found nexthop %pPAs for address %pPAs: interface %s ifindex=%d metric=%d pref=%d",
+ __func__, &nexthop_tab[i].nexthop_addr, &addr, ifp->name,
+ first_ifindex, nexthop_tab[i].route_metric,
+ nexthop_tab[i].protocol_distance);
+
+ /* update nexthop data */
+ nexthop->interface = ifp;
+ nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr;
+ nexthop->mrib_metric_preference = nexthop_tab[i].protocol_distance;
+ nexthop->mrib_route_metric = nexthop_tab[i].route_metric;
+ nexthop->last_lookup = addr;
+ nexthop->last_lookup_time = pim_time_monotonic_usec();
+ nexthop->nbr = nbr;
+ return true;
+ } else
+ return false;
+}
+
+int pim_nht_lookup_ecmp_if_vif_index(struct pim_instance *pim, pim_addr src, struct prefix *grp)
{
struct pim_nexthop nhop;
int vif_index;
ifindex_t ifindex;
memset(&nhop, 0, sizeof(nhop));
- if (!pim_ecmp_nexthop_lookup(pim, &nhop, src, grp, 1)) {
+ if (!pim_nht_lookup_ecmp(pim, &nhop, src, grp, true)) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: could not find nexthop ifindex for address %pPA(%s)",
- __func__, &src, pim->vrf->name);
+ zlog_debug("%s: could not find nexthop ifindex for address %pPA(%s)",
+ __func__, &src, pim->vrf->name);
return -1;
}
ifindex = nhop.interface->ifindex;
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: found nexthop ifindex=%d (interface %s(%s)) for address %pPA",
- __func__, ifindex,
- ifindex2ifname(ifindex, pim->vrf->vrf_id),
- pim->vrf->name, &src);
+ zlog_debug("%s: found nexthop ifindex=%d (interface %s(%s)) for address %pPA",
+ __func__, ifindex, ifindex2ifname(ifindex, pim->vrf->vrf_id),
+ pim->vrf->name, &src);
vif_index = pim_if_find_vifindex_by_ifindex(pim, ifindex);
if (vif_index < 0) {
if (PIM_DEBUG_PIM_NHT) {
- zlog_debug(
- "%s: low vif_index=%d(%s) < 1 nexthop for address %pPA",
- __func__, vif_index, pim->vrf->name, &src);
+ zlog_debug("%s: low vif_index=%d(%s) < 1 nexthop for address %pPA",
+ __func__, vif_index, pim->vrf->name, &src);
}
return -2;
}
return vif_index;
}
+
+/* This API is used to parse a registered-address nexthop update coming from Zebra.
+ */
+void pim_nexthop_update(struct vrf *vrf, struct prefix *match, struct zapi_route *nhr)
+{
+ struct nexthop *nhlist_head = NULL;
+ struct nexthop *nhlist_tail = NULL;
+ struct pim_nexthop_cache *pnc = NULL;
+ struct pim_nexthop_cache_rib *pnc_rib = NULL;
+ struct interface *ifp = NULL;
+ struct pim_instance *pim;
+ pim_addr addr;
+
+ pim = vrf->info;
+ addr = pim_addr_from_prefix(match);
+ pnc = pim_nexthop_cache_find(pim, addr);
+ if (!pnc) {
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: Skipping NHT update, addr %pPA is not in local cached DB.",
+ __func__, &addr);
+ return;
+ }
+
+ if (nhr->safi == SAFI_UNICAST)
+ pnc_rib = &pnc->urib;
+ else if (nhr->safi == SAFI_MULTICAST)
+ pnc_rib = &pnc->mrib;
+ else
+ return;
+
+ pnc_rib->last_update = pim_time_monotonic_usec();
+ SET_FLAG(pnc_rib->flags, PIM_NEXTHOP_ANSWER_RECEIVED);
+ UNSET_FLAG(pnc_rib->flags, PIM_NEXTHOP_VALID);
+ pnc_rib->nexthop_num = 0;
+	/* Free the existing nexthop list; it is rebuilt from any valid nexthops in the update */
+ nexthops_free(pnc_rib->nexthop);
+ pnc_rib->nexthop = NULL;
+
+ for (int i = 0; i < nhr->nexthop_num; i++) {
+ struct nexthop *nexthop = nexthop_from_zapi_nexthop(&nhr->nexthops[i]);
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IFINDEX:
+ /*
+ * Connected route (i.e. no nexthop), use
+ * RPF address from nexthop cache (i.e.
+ * destination) as PIM nexthop.
+ */
+#if PIM_IPV == 4
+ nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
+ nexthop->gate.ipv4 = pnc->addr;
+#else
+ nexthop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
+ nexthop->gate.ipv6 = pnc->addr;
+#endif
+ break;
+
+#if PIM_IPV == 4
+ /* RFC5549 IPv4-over-IPv6 nexthop handling:
+ * if we get an IPv6 nexthop in IPv4 PIM, hunt down a
+ * PIM neighbor and use that instead.
+ */
+ case NEXTHOP_TYPE_IPV6_IFINDEX: {
+ struct pim_neighbor *nbr = NULL;
+ struct interface *ifp1 = if_lookup_by_index(nexthop->ifindex,
+ pim->vrf->vrf_id);
+
+ if (ifp1)
+ /* FIXME: should really use nbr's
+ * secondary address list here
+ */
+ nbr = pim_neighbor_find_if(ifp1);
+
+ /* Overwrite with Nbr address as NH addr */
+ if (nbr)
+ nexthop->gate.ipv4 = nbr->source_addr;
+ else
+ /* Mark nexthop address to 0 until PIM Nbr is resolved. */
+ nexthop->gate.ipv4 = PIMADDR_ANY;
+
+ break;
+ }
+#else
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+#endif
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ case NEXTHOP_TYPE_BLACKHOLE:
+ /* nothing to do for the other nexthop types */
+ break;
+ }
+
+ ifp = if_lookup_by_index(nexthop->ifindex, pim->vrf->vrf_id);
+ if (!ifp) {
+ if (PIM_DEBUG_PIM_NHT) {
+ char buf[NEXTHOP_STRLEN];
+ zlog_debug("%s: could not find interface for ifindex %d(%s) (addr %s)",
+ __func__, nexthop->ifindex, pim->vrf->name,
+ nexthop2str(nexthop, buf, sizeof(buf)));
+ }
+ nexthop_free(nexthop);
+ continue;
+ }
+
+ if (PIM_DEBUG_PIM_NHT) {
+#if PIM_IPV == 4
+ pim_addr nhaddr = nexthop->gate.ipv4;
+#else
+ pim_addr nhaddr = nexthop->gate.ipv6;
+#endif
+ zlog_debug("%s: NHT addr %pFX(%s) %d-nhop via %pPA(%s) type %d distance:%u metric:%u ",
+ __func__, match, pim->vrf->name, i + 1, &nhaddr, ifp->name,
+ nexthop->type, nhr->distance, nhr->metric);
+ }
+
+ if (!ifp->info) {
+ /*
+			 * Though multicast is not enabled on this
+			 * interface, store it in the database; otherwise we
+			 * may miss this update. This will not cause
+			 * any issue, because while choosing the path we
+			 * are omitting the interfaces which are not
+			 * multicast enabled
+ */
+ if (PIM_DEBUG_PIM_NHT) {
+ char buf[NEXTHOP_STRLEN];
+
+ zlog_debug("%s: multicast not enabled on input interface %s(%s) (ifindex=%d, addr %s)",
+ __func__, ifp->name, pim->vrf->name, nexthop->ifindex,
+ nexthop2str(nexthop, buf, sizeof(buf)));
+ }
+ }
+
+ if (nhlist_tail) {
+ nhlist_tail->next = nexthop;
+ nhlist_tail = nexthop;
+ } else {
+ nhlist_tail = nexthop;
+ nhlist_head = nexthop;
+ }
+
+ /* Keep track of all nexthops, even PIM-disabled ones. */
+ pnc_rib->nexthop_num++;
+ } /* End for nexthops */
+
+ /* Assign the list if there are nexthops */
+ if (pnc_rib->nexthop_num) {
+ SET_FLAG(pnc_rib->flags, PIM_NEXTHOP_VALID);
+ pnc_rib->nexthop = nhlist_head;
+ pnc_rib->distance = nhr->distance;
+ pnc_rib->metric = nhr->metric;
+ pnc_rib->prefix_len = nhr->prefix.prefixlen;
+ }
+
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: NHT Update for %pFX(%s) num_nh %d num_pim_nh %d vrf:%u up %ld rp %d",
+ __func__, match, pim->vrf->name, nhr->nexthop_num, pnc_rib->nexthop_num,
+ vrf->vrf_id, pnc->upstream_hash->count, listcount(pnc->rp_list));
+
+ pim_rpf_set_refresh_time(pim);
+
+ if (listcount(pnc->rp_list))
+ pim_update_rp_nh(pim, pnc);
+ if (pnc->upstream_hash->count)
+ pim_update_upstream_nh(pim, pnc);
+
+ if (pnc->candrp_count)
+ pim_crp_nht_update(pim, pnc);
+}
+
+static int pim_nht_hash_mode_update_helper(struct hash_bucket *bucket, void *arg)
+{
+ struct pim_nexthop_cache *pnc = bucket->data;
+ struct pnc_hash_walk_data *pwd = arg;
+ struct pim_instance *pim = pwd->pim;
+
+ if (listcount(pnc->rp_list))
+ pim_update_rp_nh(pim, pnc);
+
+ if (pnc->upstream_hash->count)
+ pim_update_upstream_nh(pim, pnc);
+
+ if (pnc->candrp_count)
+ pim_crp_nht_update(pim, pnc);
+
+ return HASHWALK_CONTINUE;
+}
+
+void pim_nht_mode_changed(struct pim_instance *pim)
+{
+ struct pnc_hash_walk_data pwd;
+
+ /* Update the refresh time to force new lookups if needed */
+ pim_rpf_set_refresh_time(pim);
+
+ /* Force update the registered RP and upstreams for all cache entries */
+ pwd.pim = pim;
+ hash_walk(pim->nht_hash, pim_nht_hash_mode_update_helper, &pwd);
+}
+
+/* Clean up each node's data in pim->nht_hash */
+static void pim_nht_hash_clean(void *data)
+{
+ struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;
+
+ list_delete(&pnc->rp_list);
+ hash_clean_and_free(&pnc->upstream_hash, NULL);
+
+ if (pnc->mrib.nexthop)
+ nexthops_free(pnc->mrib.nexthop);
+
+ if (pnc->urib.nexthop)
+ nexthops_free(pnc->urib.nexthop);
+
+ XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
+}
+
+static unsigned int pim_nht_hash_key(const void *arg)
+{
+ const struct pim_nexthop_cache *r = arg;
+
+#if PIM_IPV == 4
+ return jhash_1word(r->addr.s_addr, 0);
+#else
+ return jhash2(r->addr.s6_addr32, array_size(r->addr.s6_addr32), 0);
+#endif
+}
+
+static bool pim_nht_equal(const void *arg1, const void *arg2)
+{
+ const struct pim_nexthop_cache *r1 = arg1;
+ const struct pim_nexthop_cache *r2 = arg2;
+
+ return (!pim_addr_cmp(r1->addr, r2->addr));
+}
+
+void pim_nht_init(struct pim_instance *pim)
+{
+ char hash_name[64];
+
+ snprintf(hash_name, sizeof(hash_name), "PIM %s NHT Hash", pim->vrf->name);
+ pim->nht_hash = hash_create_size(256, pim_nht_hash_key, pim_nht_equal, hash_name);
+
+ pim->rpf_mode = MCAST_NO_CONFIG;
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: NHT hash init: %s ", __func__, hash_name);
+}
+
+void pim_nht_terminate(struct pim_instance *pim)
+{
+ /* Traverse and cleanup nht_hash */
+ hash_clean_and_free(&pim->nht_hash, (void *)pim_nht_hash_clean);
+}
diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h
index d064f714a5..144139f406 100644
--- a/pimd/pim_nht.h
+++ b/pimd/pim_nht.h
@@ -17,11 +17,12 @@
#include "pim_rpf.h"
/* PIM nexthop cache value structure. */
-struct pim_nexthop_cache {
- struct pim_rpf rpf;
+struct pim_nexthop_cache_rib {
/* IGP route's metric. */
uint32_t metric;
uint32_t distance;
+ uint16_t prefix_len;
+
/* Nexthop number and nexthop linked list. */
uint16_t nexthop_num;
struct nexthop *nexthop;
@@ -29,6 +30,13 @@ struct pim_nexthop_cache {
uint16_t flags;
#define PIM_NEXTHOP_VALID (1 << 0)
#define PIM_NEXTHOP_ANSWER_RECEIVED (1 << 1)
+};
+
+struct pim_nexthop_cache {
+ pim_addr addr;
+
+ struct pim_nexthop_cache_rib mrib;
+ struct pim_nexthop_cache_rib urib;
struct list *rp_list;
struct hash *upstream_hash;
@@ -46,36 +54,74 @@ struct pnc_hash_walk_data {
struct interface *ifp;
};
-void pim_nexthop_update(struct vrf *vrf, struct prefix *match,
- struct zapi_route *nhr);
-int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
- struct pim_upstream *up, struct rp_info *rp,
- struct pim_nexthop_cache *out_pnc);
-void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr,
- struct pim_upstream *up, struct rp_info *rp);
-struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
- struct pim_rpf *rpf);
-uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp);
-int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
- struct pim_nexthop *nexthop, pim_addr src,
- struct prefix *grp, int neighbor_needed);
-void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
- struct pim_nexthop_cache *pnc, int command);
-int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src,
- struct prefix *grp);
-void pim_rp_nexthop_del(struct rp_info *rp_info);
-
-/* for RPF check on BSM message receipt */
+/* Verify that we have nexthop information in the cache entry */
+bool pim_nht_pnc_is_valid(struct pim_instance *pim, struct pim_nexthop_cache *pnc);
+
+/* Get (or add) the NH cache entry for the given address */
+struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim, pim_addr addr);
+
+/* Set the gateway address for all nexthops in the given cache entry to the given address
+ * unless the gateway is already set, and only if the nexthop is through the given interface.
+ */
+void pim_nht_set_gateway(struct pim_instance *pim, struct pim_nexthop_cache *pnc, pim_addr addr,
+ struct interface *ifp);
+
+/* Track a new addr; registers an upstream or RP for updates */
+bool pim_nht_find_or_track(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up,
+ struct rp_info *rp, struct pim_nexthop_cache *out_pnc);
+
+/* Track a new addr; increments the BSR count */
void pim_nht_bsr_add(struct pim_instance *pim, pim_addr bsr_addr);
-void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr);
-/* RPF(bsr_addr) == src_ip%src_ifp? */
-bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
- struct interface *src_ifp, pim_addr src_ip);
-void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp);
-/* wrappers for usage with Candidate RPs in BSMs */
+/* Track a new addr; increments the Cand RP count */
bool pim_nht_candrp_add(struct pim_instance *pim, pim_addr addr);
+
+/* Delete a tracked addr with a registered upstream or RP; if no one else is interested, stop tracking */
+void pim_nht_delete_tracked(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up,
+ struct rp_info *rp);
+
+/* Delete a tracked addr and decrement the BSR count; if no one else is interested, stop tracking */
+void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr);
+
+/* Delete a tracked addr and decrement the Cand RP count; if no one else is interested, stop tracking */
void pim_nht_candrp_del(struct pim_instance *pim, pim_addr addr);
-void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc);
+
+/* RPF(bsr_addr) == src_ip%src_ifp? */
+bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, struct interface *src_ifp,
+ pim_addr src_ip);
+
+/* Reset the rp.source_nexthop of the given RP */
+void pim_nht_rp_del(struct rp_info *rp_info);
+
+/* Walk the NH cache and update every nexthop that uses the given interface */
+void pim_nht_upstream_if_update(struct pim_instance *pim, struct interface *ifp);
+
+/* Look up nexthop information for src, returned in nexthop when the function returns true.
+ * Tries the cache first and does a synchronous lookup if the entry is not found there.
+ * If neighbor_needed is true, then the nexthop is only considered valid if it's to a pim
+ * neighbor.
+ * Providing the group only affects the ECMP decision, if ECMP is enabled.
+ */
+bool pim_nht_lookup_ecmp(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr src,
+ struct prefix *grp, bool neighbor_needed);
+
+/* Very similar to pim_nht_lookup_ecmp, but does not check the nht cache and only does
+ * a synchronous lookup. No ECMP decision is made.
+ */
+bool pim_nht_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr addr,
+ int neighbor_needed);
+
+/* Performs a pim_nht_lookup_ecmp and returns the mroute VIF index of the nexthop interface */
+int pim_nht_lookup_ecmp_if_vif_index(struct pim_instance *pim, pim_addr src, struct prefix *grp);
+
+/* Tracked nexthop update from zebra */
+void pim_nexthop_update(struct vrf *vrf, struct prefix *match, struct zapi_route *nhr);
+
+/* RPF lookup mode changed via configuration */
+void pim_nht_mode_changed(struct pim_instance *pim);
+
+/* NHT init and terminate functions */
+void pim_nht_init(struct pim_instance *pim);
+void pim_nht_terminate(struct pim_instance *pim);
#endif
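
A hedged usage sketch of the counter-based variants (bsm_session_start()/bsm_session_stop() are hypothetical callers, not from this commit): every add must be balanced by a del, and pim_nht_drop_maybe() only tears down the entry and its zebra registration once rp_list, the upstream hash, bsr_count and candrp_count are all empty.

    static void bsm_session_start(struct pim_instance *pim, pim_addr bsr_addr)
    {
            pim_nht_bsr_add(pim, bsr_addr); /* bumps bsr_count on the entry */
    }

    static void bsm_session_stop(struct pim_instance *pim, pim_addr bsr_addr)
    {
            pim_nht_bsr_del(pim, bsr_addr); /* may free the entry if last user */
    }
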
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
index b149b5a2a9..f776a59b7f 100644
--- a/pimd/pim_register.c
+++ b/pimd/pim_register.c
@@ -709,7 +709,10 @@ int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
// inherited_olist(S,G,rpt)
// This is taken care of by the kernel for us
}
+
+#if PIM_IPV == 4
pim_upstream_msdp_reg_timer_start(upstream);
+#endif /* PIM_IPV == 4 */
} else {
if (PIM_DEBUG_PIM_REG) {
if (!i_am_rp)
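MSDP (RFC 3618) is defined for IPv4 only, so this series wraps every MSDP call site in the same compile-time guard rather than stubbing the functions out for pim6d. The recurring pattern, shown once here (illustrative sketch of the hunk above):

#if PIM_IPV == 4
	/* IPv4-only: compiled out entirely in pim6d builds */
	pim_upstream_msdp_reg_timer_start(upstream);
#endif /* PIM_IPV == 4 */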
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
index e6de991a14..4fd19b5dbe 100644
--- a/pimd/pim_rp.c
+++ b/pimd/pim_rp.c
@@ -40,20 +40,6 @@
#include "pim_ssm.h"
#include "termtable.h"
-/* Cleanup pim->rpf_hash each node data */
-void pim_rp_list_hash_clean(void *data)
-{
- struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;
-
- list_delete(&pnc->rp_list);
-
- hash_clean_and_free(&pnc->upstream_hash, NULL);
- if (pnc->nexthop)
- nexthops_free(pnc->nexthop);
-
- XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
-}
-
static void pim_rp_info_free(struct rp_info *rp_info)
{
XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
@@ -97,14 +83,7 @@ void pim_rp_init(struct pim_instance *pim)
rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
- if (!pim_get_all_mcast_group(&rp_info->group)) {
- flog_err(EC_LIB_DEVELOPMENT,
- "Unable to convert all-multicast prefix");
- list_delete(&pim->rp_list);
- route_table_finish(pim->rp_table);
- XFREE(MTYPE_PIM_RP, rp_info);
- return;
- }
+ pim_get_all_mcast_group(&rp_info->group);
rp_info->rp.rpf_addr = PIMADDR_ANY;
listnode_add(pim->rp_list, rp_info);
@@ -343,7 +322,9 @@ struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
*/
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
+#if PIM_IPV == 4
pim_msdp_i_am_rp_changed(pim);
+#endif /* PIM_IPV == 4 */
pim_upstream_reeval_use_rpt(pim);
}
@@ -437,7 +418,7 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
zlog_debug(
"%s: Deregister upstream %s addr %pPA with Zebra NHT",
__func__, up->sg_str, &old_upstream_addr);
- pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
+ pim_nht_delete_tracked(pim, old_upstream_addr, up, NULL);
}
/* Update the upstream address */
@@ -522,11 +503,7 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
} else {
-
- if (!pim_get_all_mcast_group(&group_all)) {
- XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_GROUP_BAD_ADDRESS;
- }
+ pim_get_all_mcast_group(&group_all);
rp_all = pim_rp_find_match_group(pim, &group_all);
/*
@@ -592,12 +569,10 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
zlog_debug("new RP %pPA for %pFX is ourselves",
&rp_all->rp.rpf_addr, &rp_all->group);
pim_rp_refresh_group_to_rp_mapping(pim);
- pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
- NULL);
+ pim_nht_find_or_track(pim, nht_p, NULL, rp_all, NULL);
- if (!pim_ecmp_nexthop_lookup(pim,
- &rp_all->rp.source_nexthop,
- nht_p, &rp_all->group, 1))
+ if (!pim_nht_lookup_ecmp(pim, &rp_all->rp.source_nexthop, nht_p,
+ &rp_all->group, true))
return PIM_RP_NO_PATH;
return PIM_SUCCESS;
}
@@ -692,9 +667,8 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
__func__, &nht_p, &rp_info->group);
- pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
- &rp_info->group, 1))
+ pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group, true))
return PIM_RP_NO_PATH;
return PIM_SUCCESS;
@@ -706,9 +680,10 @@ void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
struct prefix group;
int result;
- if (group_range == NULL)
- result = pim_get_all_mcast_group(&group);
- else
+ if (group_range == NULL) {
+ result = 1; /* pim_get_all_mcast_group() cannot fail */
+ pim_get_all_mcast_group(&group);
+ } else
result = str2prefix(group_range, &group);
if (!result) {
@@ -785,11 +760,9 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
&nht_p);
- pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
-
- if (!pim_get_all_mcast_group(&g_all))
- return PIM_RP_BAD_ADDRESS;
+ pim_nht_delete_tracked(pim, nht_p, NULL, rp_info);
+ pim_get_all_mcast_group(&g_all);
rp_all = pim_rp_find_match_group(pim, &g_all);
if (rp_all == rp_info) {
@@ -919,10 +892,10 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
__func__, &nht_p);
- pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
+ pim_nht_delete_tracked(pim, nht_p, NULL, rp_info);
}
- pim_rp_nexthop_del(rp_info);
+ pim_nht_rp_del(rp_info);
listnode_delete(pim->rp_list, rp_info);
/* Update the new RP address*/
@@ -956,9 +929,8 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
__func__, &nht_p, &rp_info->group);
- pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
- &rp_info->group, 1)) {
+ pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group, true)) {
route_unlock_node(rn);
return PIM_RP_NO_PATH;
}
@@ -984,13 +956,14 @@ void pim_rp_setup(struct pim_instance *pim)
nht_p = rp_info->rp.rpf_addr;
- pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
- nht_p, &rp_info->group, 1)) {
+ pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group,
+ true)) {
if (PIM_DEBUG_PIM_NHT_RP)
- zlog_debug(
- "Unable to lookup nexthop for rp specified");
- pim_rp_nexthop_del(rp_info);
+ zlog_debug("%s: unable to lookup nexthop for rp %pPA", __func__,
+ &rp_info->rp.rpf_addr);
+
+ pim_nht_rp_del(rp_info);
}
}
}
@@ -1030,7 +1003,9 @@ void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
}
if (i_am_rp_changed) {
+#if PIM_IPV == 4
pim_msdp_i_am_rp_changed(pim);
+#endif /* PIM_IPV == 4 */
pim_upstream_reeval_use_rpt(pim);
}
}
@@ -1072,7 +1047,9 @@ void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
}
if (i_am_rp_changed) {
+#if PIM_IPV == 4
pim_msdp_i_am_rp_changed(pim);
+#endif /* PIM_IPV == 4 */
pim_upstream_reeval_use_rpt(pim);
}
}
@@ -1129,10 +1106,14 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
zlog_debug(
"%s: NHT Register RP addr %pPA grp %pFX with Zebra",
__func__, &nht_p, &rp_info->group);
- pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
+ pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL);
pim_rpf_set_refresh_time(pim);
- (void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
- nht_p, &rp_info->group, 1);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group,
+ true))
+ if (PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("%s: unable to lookup nexthop for rp %pPA", __func__,
+ &rp_info->rp.rpf_addr);
+
return (&rp_info->rp);
}
@@ -1337,7 +1318,6 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
{
struct listnode *node = NULL;
struct rp_info *rp_info = NULL;
- struct nexthop *nh_node = NULL;
pim_addr nht_p;
struct pim_nexthop_cache pnc;
@@ -1347,35 +1327,11 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
nht_p = rp_info->rp.rpf_addr;
memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
- if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc))
- continue;
- for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
-#if PIM_IPV == 4
- if (!pim_addr_is_any(nh_node->gate.ipv4))
- continue;
-#else
- if (!pim_addr_is_any(nh_node->gate.ipv6))
- continue;
-#endif
-
- struct interface *ifp1 = if_lookup_by_index(
- nh_node->ifindex, pim->vrf->vrf_id);
-
- if (nbr->interface != ifp1)
- continue;
+ if (!pim_nht_find_or_track(pim, nht_p, NULL, rp_info, &pnc))
+ continue;
-#if PIM_IPV == 4
- nh_node->gate.ipv4 = nbr->source_addr;
-#else
- nh_node->gate.ipv6 = nbr->source_addr;
-#endif
- if (PIM_DEBUG_PIM_NHT_RP)
- zlog_debug(
- "%s: addr %pPA new nexthop addr %pPAs interface %s",
- __func__, &nht_p, &nbr->source_addr,
- ifp1->name);
- }
+ pim_nht_set_gateway(pim, &pnc, nbr->source_addr, nbr->interface);
}
}
@@ -1540,9 +1496,9 @@ void pim_embedded_rp_new(struct pim_instance *pim, const pim_addr *group, const
zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra", __func__,
&rp_info->rp.rpf_addr, &rp_info->group);
- pim_find_or_track_nexthop(pim, rp_info->rp.rpf_addr, NULL, rp_info, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, rp_info->rp.rpf_addr,
- &rp_info->group, 1)) {
+ pim_nht_find_or_track(pim, rp_info->rp.rpf_addr, NULL, rp_info, NULL);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, rp_info->rp.rpf_addr,
+ &rp_info->group, true)) {
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: Embedded RP %pPA learned but no next hop", __func__,
&rp_info->rp.rpf_addr);
@@ -1582,7 +1538,7 @@ void pim_embedded_rp_free(struct pim_instance *pim, struct rp_info *rp_info)
if (PIM_DEBUG_TRACE)
zlog_debug("delete embedded RP %pPA", &rp_info->rp.rpf_addr);
- pim_delete_tracked_nexthop(pim, rp_info->rp.rpf_addr, NULL, rp_info);
+ pim_nht_delete_tracked(pim, rp_info->rp.rpf_addr, NULL, rp_info);
listnode_delete(pim->rp_list, rp_info);
XFREE(MTYPE_PIM_EMBEDDED_RP_ENTRY, rp_info);
}
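The pim_resolve_rp_nh() hunk above replaces the open-coded walk over pnc.nexthop with the new pim_nht_set_gateway() helper declared in pim_nht.h. A hedged sketch of a caller (hypothetical function; field names follow the lines above):

/* On neighbor up: fill unresolved (PIMADDR_ANY) gateways on the
 * neighbor's interface with the neighbor's address. */
static void example_nbr_up_fixup(struct pim_instance *pim, struct pim_neighbor *nbr,
				 struct rp_info *rp_info)
{
	struct pim_nexthop_cache pnc;

	memset(&pnc, 0, sizeof(pnc));
	if (!pim_nht_find_or_track(pim, rp_info->rp.rpf_addr, NULL, rp_info, &pnc))
		return;

	pim_nht_set_gateway(pim, &pnc, nbr->source_addr, nbr->interface);
}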
diff --git a/pimd/pim_rp.h b/pimd/pim_rp.h
index 9da059f8be..a7818a9d39 100644
--- a/pimd/pim_rp.h
+++ b/pimd/pim_rp.h
@@ -42,8 +42,6 @@ struct rp_info {
void pim_rp_init(struct pim_instance *pim);
void pim_rp_free(struct pim_instance *pim);
-void pim_rp_list_hash_clean(void *data);
-
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
const char *plist, enum rp_source rp_src_flag);
void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c
index d18ec4943a..75e9213825 100644
--- a/pimd/pim_rpf.c
+++ b/pimd/pim_rpf.c
@@ -38,120 +38,6 @@ void pim_rpf_set_refresh_time(struct pim_instance *pim)
pim->last_route_change_time);
}
-bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
- pim_addr addr, int neighbor_needed)
-{
- struct pim_zlookup_nexthop nexthop_tab[router->multipath];
- struct pim_neighbor *nbr = NULL;
- int num_ifindex;
- struct interface *ifp = NULL;
- ifindex_t first_ifindex = 0;
- int found = 0;
- int i = 0;
- struct pim_interface *pim_ifp;
-
-#if PIM_IPV == 4
- /*
- * We should not attempt to lookup a
- * 255.255.255.255 address, since
- * it will never work
- */
- if (pim_addr_is_any(addr))
- return false;
-#endif
-
- if ((!pim_addr_cmp(nexthop->last_lookup, addr)) &&
- (nexthop->last_lookup_time > pim->last_route_change_time)) {
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: Using last lookup for %pPAs at %lld, %" PRId64
- " addr %pPAs",
- __func__, &addr, nexthop->last_lookup_time,
- pim->last_route_change_time,
- &nexthop->mrib_nexthop_addr);
- pim->nexthop_lookups_avoided++;
- return true;
- } else {
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: Looking up: %pPAs, last lookup time: %lld, %" PRId64,
- __func__, &addr, nexthop->last_lookup_time,
- pim->last_route_change_time);
- }
-
- memset(nexthop_tab, 0,
- sizeof(struct pim_zlookup_nexthop) * router->multipath);
- num_ifindex =
- zclient_lookup_nexthop(pim, nexthop_tab, router->multipath,
- addr, PIM_NEXTHOP_LOOKUP_MAX);
- if (num_ifindex < 1) {
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s %s: could not find nexthop ifindex for address %pPAs",
- __FILE__, __func__, &addr);
- return false;
- }
-
- while (!found && (i < num_ifindex)) {
- first_ifindex = nexthop_tab[i].ifindex;
-
- ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
- if (!ifp) {
- if (PIM_DEBUG_ZEBRA)
- zlog_debug(
- "%s %s: could not find interface for ifindex %d (address %pPAs)",
- __FILE__, __func__, first_ifindex,
- &addr);
- i++;
- continue;
- }
-
- pim_ifp = ifp->info;
- if (!pim_ifp || !pim_ifp->pim_enable) {
- if (PIM_DEBUG_ZEBRA)
- zlog_debug(
- "%s: pim not enabled on input interface %s (ifindex=%d, RPF for source %pPAs)",
- __func__, ifp->name, first_ifindex,
- &addr);
- i++;
- } else if (neighbor_needed &&
- !pim_if_connected_to_source(ifp, addr)) {
- nbr = pim_neighbor_find(
- ifp, nexthop_tab[i].nexthop_addr, true);
- if (PIM_DEBUG_PIM_TRACE_DETAIL)
- zlog_debug("ifp name: %s, pim nbr: %p",
- ifp->name, nbr);
- if (!nbr && !if_is_loopback(ifp))
- i++;
- else
- found = 1;
- } else
- found = 1;
- }
-
- if (found) {
- if (PIM_DEBUG_ZEBRA)
- zlog_debug(
- "%s %s: found nexthop %pPAs for address %pPAs: interface %s ifindex=%d metric=%d pref=%d",
- __FILE__, __func__,
- &nexthop_tab[i].nexthop_addr, &addr, ifp->name,
- first_ifindex, nexthop_tab[i].route_metric,
- nexthop_tab[i].protocol_distance);
-
- /* update nexthop data */
- nexthop->interface = ifp;
- nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr;
- nexthop->mrib_metric_preference =
- nexthop_tab[i].protocol_distance;
- nexthop->mrib_route_metric = nexthop_tab[i].route_metric;
- nexthop->last_lookup = addr;
- nexthop->last_lookup_time = pim_time_monotonic_usec();
- nexthop->nbr = nbr;
- return true;
- } else
- return false;
-}
-
static int nexthop_mismatch(const struct pim_nexthop *nh1,
const struct pim_nexthop *nh2)
{
@@ -221,9 +107,9 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
if ((pim_addr_is_any(up->sg.src) && I_am_RP(pim, up->sg.grp)) ||
PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
neigh_needed = false;
- pim_find_or_track_nexthop(pim, up->upstream_addr, up, NULL, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, src, &grp,
- neigh_needed)) {
+
+ pim_nht_find_or_track(pim, up->upstream_addr, up, NULL, NULL);
+ if (!pim_nht_lookup_ecmp(pim, &rpf->source_nexthop, src, &grp, neigh_needed)) {
/* Route is Deleted in Zebra, reset the stored NH data */
pim_upstream_rpf_clear(pim, up);
pim_rpf_cost_change(pim, up, saved_mrib_route_metric);
@@ -371,25 +257,3 @@ int pim_rpf_is_same(struct pim_rpf *rpf1, struct pim_rpf *rpf2)
return 0;
}
-
-unsigned int pim_rpf_hash_key(const void *arg)
-{
- const struct pim_nexthop_cache *r = arg;
-
-#if PIM_IPV == 4
- return jhash_1word(r->rpf.rpf_addr.s_addr, 0);
-#else
- return jhash2(r->rpf.rpf_addr.s6_addr32,
- array_size(r->rpf.rpf_addr.s6_addr32), 0);
-#endif
-}
-
-bool pim_rpf_equal(const void *arg1, const void *arg2)
-{
- const struct pim_nexthop_cache *r1 =
- (const struct pim_nexthop_cache *)arg1;
- const struct pim_nexthop_cache *r2 =
- (const struct pim_nexthop_cache *)arg2;
-
- return (!pim_addr_cmp(r1->rpf.rpf_addr, r2->rpf.rpf_addr));
-}
diff --git a/pimd/pim_rpf.h b/pimd/pim_rpf.h
index 7dae53f8fc..84d6b7f6c2 100644
--- a/pimd/pim_rpf.h
+++ b/pimd/pim_rpf.h
@@ -11,6 +11,7 @@
#include "pim_str.h"
struct pim_instance;
+struct pim_upstream;
/*
RFC 4601:
@@ -41,13 +42,17 @@ struct pim_rpf {
enum pim_rpf_result { PIM_RPF_OK = 0, PIM_RPF_CHANGED, PIM_RPF_FAILURE };
-struct pim_upstream;
-
-unsigned int pim_rpf_hash_key(const void *arg);
-bool pim_rpf_equal(const void *arg1, const void *arg2);
+/* RPF lookup behaviour */
+enum pim_rpf_lookup_mode {
+ MCAST_NO_CONFIG = 0, /* Same as MIX_MRIB_FIRST, but not shown in config write */
+ MCAST_MRIB_ONLY, /* MRIB only */
+ MCAST_URIB_ONLY, /* URIB only */
+ MCAST_MIX_MRIB_FIRST, /* MRIB, if nothing at all then URIB */
+ MCAST_MIX_DISTANCE, /* MRIB & URIB, lower distance wins */
+ MCAST_MIX_PFXLEN, /* MRIB & URIB, longer prefix wins */
+ /* on a tie, MRIB wins for the last two modes */
+};
-bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
- pim_addr addr, int neighbor_needed);
enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
struct pim_upstream *up,
struct pim_rpf *old, const char *caller);
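The new enum lines up one-to-one with the rpf-lookup-mode CLI keywords written out in the pim_vty.c hunk later in this diff. A small sketch of that mapping (keyword spellings taken from that hunk; the helper itself is hypothetical):

static const char *example_rpf_mode_str(enum pim_rpf_lookup_mode mode)
{
	switch (mode) {
	case MCAST_MRIB_ONLY:
		return "mrib-only";
	case MCAST_URIB_ONLY:
		return "urib-only";
	case MCAST_MIX_DISTANCE:
		return "lower-distance";
	case MCAST_MIX_PFXLEN:
		return "longer-prefix";
	case MCAST_NO_CONFIG:		/* default; hidden from config write */
	case MCAST_MIX_MRIB_FIRST:
	default:
		return "mrib-then-urib";
	}
}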
diff --git a/pimd/pim_tib.c b/pimd/pim_tib.c
index e21793b8ca..d067abf45a 100644
--- a/pimd/pim_tib.c
+++ b/pimd/pim_tib.c
@@ -34,16 +34,17 @@ tib_sg_oil_setup(struct pim_instance *pim, pim_sgaddr sg, struct interface *oif)
up = pim_upstream_find(pim, &sg);
if (up) {
- memcpy(&nexthop, &up->rpf.source_nexthop,
- sizeof(struct pim_nexthop));
- (void)pim_ecmp_nexthop_lookup(pim, &nexthop, vif_source, &grp,
- 0);
+ memcpy(&nexthop, &up->rpf.source_nexthop, sizeof(struct pim_nexthop));
+ if (!pim_nht_lookup_ecmp(pim, &nexthop, vif_source, &grp, false))
+ if (PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("%s: Nexthop Lookup failed vif_src:%pPA, sg.src:%pPA, sg.grp:%pPA",
+ __func__, &vif_source, &sg.src, &sg.grp);
+
if (nexthop.interface)
input_iface_vif_index = pim_if_find_vifindex_by_ifindex(
pim, nexthop.interface->ifindex);
} else
- input_iface_vif_index =
- pim_ecmp_fib_lookup_if_vif_index(pim, vif_source, &grp);
+ input_iface_vif_index = pim_nht_lookup_ecmp_if_vif_index(pim, vif_source, &grp);
if (PIM_DEBUG_ZEBRA)
zlog_debug("%s: NHT %pSG vif_source %pPAs vif_index:%d",
@@ -115,13 +116,8 @@ bool tib_sg_gm_join(struct pim_instance *pim, pim_sgaddr sg,
return false;
}
- if (!*oilp) {
+ if (!*oilp)
*oilp = tib_sg_oil_setup(pim, sg, oif);
-#if PIM_IPV == 6
- if (pim_embedded_rp_is_embedded(&sg.grp))
- (*oilp)->oil_ref_count--;
-#endif /* PIM_IPV == 6 */
- }
if (!*oilp)
return false;
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
index 7417f31137..c52119e43a 100644
--- a/pimd/pim_upstream.c
+++ b/pimd/pim_upstream.c
@@ -178,7 +178,9 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
{
struct listnode *node, *nnode;
struct pim_ifchannel *ch;
+#if PIM_IPV == 4
bool notify_msdp = false;
+#endif /* PIM_IPV == 4 */
if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
@@ -206,12 +208,14 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
if (up->join_state == PIM_UPSTREAM_JOINED) {
pim_jp_agg_single_upstream_send(&up->rpf, up, 0);
+#if PIM_IPV == 4
if (pim_addr_is_any(up->sg.src)) {
/* if a (*, G) entry in the joined state is being
* deleted we
* need to notify MSDP */
notify_msdp = true;
}
+#endif /* PIM_IPV == 4 */
}
join_timer_stop(up);
@@ -221,7 +225,9 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
if (!pim_addr_is_any(up->sg.src)) {
if (pim->upstream_sg_wheel)
wheel_remove_item(pim->upstream_sg_wheel, up);
+#if PIM_IPV == 4
notify_msdp = true;
+#endif /* PIM_IPV == 4 */
}
pim_mroute_del(up->channel_oil, __func__);
@@ -241,9 +247,11 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
rb_pim_upstream_del(&pim->upstream_head, up);
+#if PIM_IPV == 4
if (notify_msdp) {
pim_msdp_up_del(pim, &up->sg);
}
+#endif /* PIM_IPV == 4 */
/* When RP gets deleted, pim_rp_del() deregister addr with Zebra NHT
* and assign up->upstream_addr as INADDR_ANY.
@@ -257,7 +265,7 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
zlog_debug(
"%s: Deregister upstream %s addr %pPA with Zebra NHT",
__func__, up->sg_str, &up->upstream_addr);
- pim_delete_tracked_nexthop(pim, up->upstream_addr, up, NULL);
+ pim_nht_delete_tracked(pim, up->upstream_addr, up, NULL);
}
XFREE(MTYPE_PIM_UPSTREAM, up);
@@ -723,7 +731,9 @@ void pim_upstream_switch(struct pim_instance *pim, struct pim_upstream *up,
if (old_state != PIM_UPSTREAM_JOINED) {
int old_fhr = PIM_UPSTREAM_FLAG_TEST_FHR(up->flags);
+#if PIM_IPV == 4
pim_msdp_up_join_state_changed(pim, up);
+#endif /* PIM_IPV == 4 */
if (pim_upstream_could_register(up)) {
PIM_UPSTREAM_FLAG_SET_FHR(up->flags);
if (!old_fhr
@@ -753,8 +763,10 @@ void pim_upstream_switch(struct pim_instance *pim, struct pim_upstream *up,
if (!pim_addr_is_any(up->sg.src))
up->sptbit = PIM_UPSTREAM_SPTBIT_FALSE;
+#if PIM_IPV == 4
if (old_state == PIM_UPSTREAM_JOINED)
pim_msdp_up_join_state_changed(pim, up);
+#endif /* PIM_IPV == 4 */
if (old_state != new_state) {
old_use_rpt =
@@ -1424,8 +1436,10 @@ struct pim_upstream *pim_upstream_keep_alive_timer_proc(
*/
}
+#if PIM_IPV == 4
/* source is no longer active - pull the SA from MSDP's cache */
pim_msdp_sa_local_del(pim, &up->sg);
+#endif /* PIM_IPV == 4 */
/* JoinDesired can change when KAT is started or stopped */
pim_upstream_update_join_desired(pim, up);
@@ -1493,32 +1507,15 @@ void pim_upstream_keep_alive_timer_start(struct pim_upstream *up, uint32_t time)
event_add_timer(router->master, pim_upstream_keep_alive_timer, up, time,
&up->t_ka_timer);
+#if PIM_IPV == 4
/* any time keepalive is started against a SG we will have to
* re-evaluate our active source database */
pim_msdp_sa_local_update(up);
+#endif /* PIM_IPV == 4 */
/* JoinDesired can change when KAT is started or stopped */
pim_upstream_update_join_desired(up->pim, up);
}
-/* MSDP on RP needs to know if a source is registerable to this RP */
-static void pim_upstream_msdp_reg_timer(struct event *t)
-{
- struct pim_upstream *up = EVENT_ARG(t);
- struct pim_instance *pim = up->channel_oil->pim;
-
- /* source is no longer active - pull the SA from MSDP's cache */
- pim_msdp_sa_local_del(pim, &up->sg);
-}
-
-void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up)
-{
- EVENT_OFF(up->t_msdp_reg_timer);
- event_add_timer(router->master, pim_upstream_msdp_reg_timer, up,
- PIM_MSDP_REG_RXED_PERIOD, &up->t_msdp_reg_timer);
-
- pim_msdp_sa_local_update(up);
-}
-
/*
* 4.2.1 Last-Hop Switchover to the SPT
*
diff --git a/pimd/pim_upstream.h b/pimd/pim_upstream.h
index 8b4a35be39..1d4b2128a8 100644
--- a/pimd/pim_upstream.h
+++ b/pimd/pim_upstream.h
@@ -350,7 +350,6 @@ int pim_upstream_inherited_olist(struct pim_instance *pim,
int pim_upstream_empty_inherited_olist(struct pim_upstream *up);
void pim_upstream_find_new_rpf(struct pim_instance *pim);
-void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up);
void pim_upstream_init(struct pim_instance *pim);
void pim_upstream_terminate(struct pim_instance *pim);
diff --git a/pimd/pim_util.c b/pimd/pim_util.c
index 657e84ae50..0aea240587 100644
--- a/pimd/pim_util.c
+++ b/pimd/pim_util.c
@@ -9,7 +9,10 @@
#include "log.h"
#include "prefix.h"
#include "plist.h"
+#include "plist_int.h"
+#include "pimd.h"
+#include "pim_instance.h"
#include "pim_util.h"
/*
@@ -126,34 +129,105 @@ int pim_is_group_224_4(struct in_addr group_addr)
return prefix_match(&group_all, &group);
}
-bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp)
+static bool pim_cisco_match(const struct filter *filter, const struct in_addr *source,
+ const struct in_addr *group)
{
- struct prefix grp_pfx;
- struct prefix_list *pl;
+ const struct filter_cisco *cfilter = &filter->u.cfilter;
+ uint32_t source_addr;
+ uint32_t group_addr;
- if (!pim_ifp->boundary_oil_plist)
+ group_addr = group->s_addr & ~cfilter->mask_mask.s_addr;
+
+ if (cfilter->extended) {
+ source_addr = source->s_addr & ~cfilter->addr_mask.s_addr;
+ if (group_addr == cfilter->mask.s_addr && source_addr == cfilter->addr.s_addr)
+ return true;
+ } else if (group_addr == cfilter->addr.s_addr)
+ return true;
+
+ return false;
+}
+
+enum filter_type pim_access_list_apply(struct access_list *access, const struct in_addr *source,
+ const struct in_addr *group)
+{
+ struct filter *filter;
+ struct prefix group_prefix = {};
+
+ if (access == NULL)
+ return FILTER_DENY;
+
+ for (filter = access->head; filter; filter = filter->next) {
+ if (filter->cisco) {
+ if (pim_cisco_match(filter, source, group))
+ return filter->type;
+ }
+ }
+
+ group_prefix.family = AF_INET;
+ group_prefix.prefixlen = IPV4_MAX_BITLEN;
+ group_prefix.u.prefix4.s_addr = group->s_addr;
+ return access_list_apply(access, &group_prefix);
+}
+
+bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp, pim_addr *src)
+{
+ bool is_filtered = false;
+#if PIM_IPV == 4
+ struct prefix grp_pfx = {};
+ pim_addr any_src = PIMADDR_ANY;
+
+ if (!pim_ifp->boundary_oil_plist && !pim_ifp->boundary_acl)
return false;
pim_addr_to_prefix(&grp_pfx, *grp);
- pl = prefix_list_lookup(PIM_AFI, pim_ifp->boundary_oil_plist);
- return pl ? prefix_list_apply_ext(pl, NULL, &grp_pfx, true) ==
- PREFIX_DENY
- : false;
+ /* Filter if either the group or the (S,G) pair is denied */
+ if (pim_ifp->boundary_oil_plist) {
+ is_filtered = prefix_list_apply_ext(pim_ifp->boundary_oil_plist, NULL, &grp_pfx,
+ true) == PREFIX_DENY;
+ if (is_filtered && PIM_DEBUG_EVENTS) {
+ zlog_debug("Filtering group %pI4 per prefix-list %s", grp,
+ pim_ifp->boundary_oil_plist->name);
+ }
+ }
+ if (!is_filtered && pim_ifp->boundary_acl) {
+ /* If src not provided, treat it as "any" (*) */
+ if (!src)
+ src = &any_src;
+ /* S,G filtering using extended access-list syntax */
+ is_filtered = pim_access_list_apply(pim_ifp->boundary_acl, src, grp) == FILTER_DENY;
+ if (is_filtered && PIM_DEBUG_EVENTS) {
+ if (pim_addr_is_any(*src)) {
+ zlog_debug("Filtering (S,G)=(*, %pI4) per access-list %s", grp,
+ pim_ifp->boundary_acl->name);
+ } else {
+ zlog_debug("Filtering (S,G)=(%pI4, %pI4) per access-list %s", src,
+ grp, pim_ifp->boundary_acl->name);
+ }
+ }
+ }
+#endif
+ return is_filtered;
}
/* This function returns all multicast group */
-int pim_get_all_mcast_group(struct prefix *prefix)
+void pim_get_all_mcast_group(struct prefix *prefix)
{
+ memset(prefix, 0, sizeof(*prefix));
+
#if PIM_IPV == 4
- if (!str2prefix("224.0.0.0/4", prefix))
- return 0;
+ /* Precomputed version of: `str2prefix("224.0.0.0/4", prefix);` */
+ prefix->family = AF_INET;
+ prefix->prefixlen = 4;
+ prefix->u.prefix4.s_addr = htonl(0xe0000000);
#else
- if (!str2prefix("FF00::0/8", prefix))
- return 0;
+ /* Precomputed version of: `str2prefix("FF00::0/8", prefix)` */
+ prefix->family = AF_INET6;
+ prefix->prefixlen = 8;
+ prefix->u.prefix6.s6_addr[0] = 0xff;
#endif
- return 1;
}
bool pim_addr_is_multicast(pim_addr addr)
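pim_access_list_apply() above gives boundary filtering Cisco-style semantics: extended entries match the (S,G) pair, standard entries match the group alone, and anything unmatched falls through to access_list_apply() on the group prefix. A minimal caller sketch (hypothetical addresses and helper; assumes <arpa/inet.h>):

static bool example_sg_blocked(struct pim_interface *pim_ifp)
{
	struct in_addr src, grp;

	inet_pton(AF_INET, "10.0.0.1", &src);  /* hypothetical source */
	inet_pton(AF_INET, "232.1.1.1", &grp); /* hypothetical group  */

	return pim_access_list_apply(pim_ifp->boundary_acl, &src, &grp) == FILTER_DENY;
}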
diff --git a/pimd/pim_util.h b/pimd/pim_util.h
index c882fe4878..a3d944b82b 100644
--- a/pimd/pim_util.h
+++ b/pimd/pim_util.h
@@ -10,6 +10,7 @@
#include <stdint.h>
#include <zebra.h>
+#include "lib/filter.h"
#include "checksum.h"
#include "pimd.h"
@@ -22,7 +23,9 @@ void pim_pkt_dump(const char *label, const uint8_t *buf, int size);
int pim_is_group_224_0_0_0_24(struct in_addr group_addr);
int pim_is_group_224_4(struct in_addr group_addr);
-bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp);
-int pim_get_all_mcast_group(struct prefix *prefix);
+enum filter_type pim_access_list_apply(struct access_list *access, const struct in_addr *source,
+ const struct in_addr *group);
+bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp, pim_addr *src);
+void pim_get_all_mcast_group(struct prefix *prefix);
bool pim_addr_is_multicast(pim_addr addr);
#endif /* PIM_UTIL_H */
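Since pim_get_all_mcast_group() can no longer fail, callers dropped their error paths throughout this diff. A test-style sanity check that the precomputed prefixes still match the old str2prefix() strings might look like this (sketch; assumes <assert.h> and lib/prefix.h's prefix_same()):

static void example_check_all_mcast_group(void)
{
	struct prefix computed, parsed;

	pim_get_all_mcast_group(&computed);
#if PIM_IPV == 4
	str2prefix("224.0.0.0/4", &parsed);
#else
	str2prefix("FF00::0/8", &parsed);
#endif
	assert(prefix_same(&computed, &parsed));
}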
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
index ed91d2339b..974cf30cf1 100644
--- a/pimd/pim_vty.c
+++ b/pimd/pim_vty.c
@@ -12,6 +12,8 @@
#include "vty.h"
#include "vrf.h"
#include "plist.h"
+#include "plist_int.h"
+#include "filter.h"
#include "pimd.h"
#include "pim_vty.h"
@@ -178,8 +180,10 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)
int writes = 0;
struct pim_ssm *ssm = pim->ssm_info;
+#if PIM_IPV == 4
writes += pim_msdp_peer_config_write(vty, pim);
writes += pim_msdp_config_write(pim, vty);
+#endif /* PIM_IPV == 4 */
if (!pim->send_v6_secondary) {
vty_out(vty, " no send-v6-secondary\n");
@@ -271,15 +275,14 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)
}
}
- if (pim->msdp.hold_time != PIM_MSDP_PEER_HOLD_TIME
- || pim->msdp.keep_alive != PIM_MSDP_PEER_KA_TIME
- || pim->msdp.connection_retry != PIM_MSDP_PEER_CONNECT_RETRY_TIME) {
- vty_out(vty, " msdp timers %u %u", pim->msdp.hold_time,
- pim->msdp.keep_alive);
- if (pim->msdp.connection_retry
- != PIM_MSDP_PEER_CONNECT_RETRY_TIME)
- vty_out(vty, " %u", pim->msdp.connection_retry);
- vty_out(vty, "\n");
+ if (pim->rpf_mode != MCAST_NO_CONFIG) {
+ ++writes;
+ vty_out(vty, " rpf-lookup-mode %s\n",
+ pim->rpf_mode == MCAST_URIB_ONLY ? "urib-only"
+ : pim->rpf_mode == MCAST_MRIB_ONLY ? "mrib-only"
+ : pim->rpf_mode == MCAST_MIX_MRIB_FIRST ? "mrib-then-urib"
+ : pim->rpf_mode == MCAST_MIX_DISTANCE ? "lower-distance"
+ : "longer-prefix");
}
return writes;
@@ -492,7 +495,13 @@ int pim_config_write(struct vty *vty, int writes, struct interface *ifp,
/* boundary */
if (pim_ifp->boundary_oil_plist) {
vty_out(vty, " " PIM_AF_NAME " multicast boundary oil %s\n",
- pim_ifp->boundary_oil_plist);
+ pim_ifp->boundary_oil_plist->name);
+ ++writes;
+ }
+
+ if (pim_ifp->boundary_acl) {
+ vty_out(vty, " " PIM_AF_NAME " multicast boundary %s\n",
+ pim_ifp->boundary_acl->name);
++writes;
}
diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c
index f1f315cc98..511d35bf76 100644
--- a/pimd/pim_vxlan.c
+++ b/pimd/pim_vxlan.c
@@ -411,10 +411,9 @@ static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
* we must dereg the old nexthop and force to new "static"
* iif
*/
- if (!PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags)) {
- pim_delete_tracked_nexthop(vxlan_sg->pim,
- up->upstream_addr, up, NULL);
- }
+ if (!PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags))
+ pim_nht_delete_tracked(vxlan_sg->pim, up->upstream_addr, up, NULL);
+
/* We are acting FHR; clear out use_rpt setting if any */
pim_upstream_update_use_rpt(up, false /*update_mroute*/);
pim_upstream_ref(up, flags, __func__);
diff --git a/pimd/pim_zebra.c b/pimd/pim_zebra.c
index ce4d85a2c8..f0ec3c6b6e 100644
--- a/pimd/pim_zebra.c
+++ b/pimd/pim_zebra.c
@@ -426,7 +426,6 @@ static void pim_zebra_connected(struct zclient *zclient)
static void pim_zebra_capabilities(struct zclient_capabilities *cap)
{
- router->mlag_role = cap->role;
router->multipath = cap->ecmp;
}
diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c
index 5d344f1f66..febc595ad4 100644
--- a/pimd/pim_zlookup.c
+++ b/pimd/pim_zlookup.c
@@ -153,6 +153,7 @@ static int zclient_read_nexthop(struct pim_instance *pim,
struct ipaddr raddr;
uint8_t distance;
uint32_t metric;
+ uint16_t prefix_len;
int nexthop_num;
int i, err;
@@ -162,7 +163,7 @@ static int zclient_read_nexthop(struct pim_instance *pim,
s = zlookup->ibuf;
- while (command != ZEBRA_NEXTHOP_LOOKUP_MRIB) {
+ while (command != ZEBRA_NEXTHOP_LOOKUP) {
stream_reset(s);
err = zclient_read_header(s, zlookup->sock, &length, &marker,
&version, &vrf_id, &command);
@@ -193,8 +194,14 @@ static int zclient_read_nexthop(struct pim_instance *pim,
distance = stream_getc(s);
metric = stream_getl(s);
+ prefix_len = stream_getw(s);
nexthop_num = stream_getw(s);
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), distance=%d, metric=%d, prefix_len=%d, nexthop_num=%d",
+ __func__, &addr, pim->vrf->name, distance, metric, prefix_len,
+ nexthop_num);
+
if (nexthop_num < 1 || nexthop_num > router->multipath) {
if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug("%s: socket %d bad nexthop_num=%d", __func__,
@@ -220,6 +227,7 @@ static int zclient_read_nexthop(struct pim_instance *pim,
}
nexthop_tab[num_ifindex].protocol_distance = distance;
nexthop_tab[num_ifindex].route_metric = metric;
+ nexthop_tab[num_ifindex].prefix_len = prefix_len;
nexthop_tab[num_ifindex].vrf_id = nexthop_vrf_id;
switch (nexthop_type) {
case NEXTHOP_TYPE_IFINDEX:
@@ -301,20 +309,23 @@ static int zclient_read_nexthop(struct pim_instance *pim,
}
}
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), num_ifindex=%d", __func__, &addr, pim->vrf->name,
+ num_ifindex);
+
return num_ifindex;
}
-static int zclient_lookup_nexthop_once(struct pim_instance *pim,
- struct pim_zlookup_nexthop nexthop_tab[],
- const int tab_size, pim_addr addr)
+static int zclient_rib_lookup(struct pim_instance *pim, struct pim_zlookup_nexthop nexthop_tab[],
+ const int tab_size, pim_addr addr, safi_t safi)
{
struct stream *s;
int ret;
struct ipaddr ipaddr;
if (PIM_DEBUG_PIM_NHT_DETAIL)
- zlog_debug("%s: addr=%pPAs(%s)", __func__, &addr,
- pim->vrf->name);
+ zlog_debug("%s: addr=%pPAs(%s), %sRIB", __func__, &addr, pim->vrf->name,
+ (safi == SAFI_MULTICAST ? "M" : "U"));
/* Check socket. */
if (zlookup->sock < 0) {
@@ -337,8 +348,9 @@ static int zclient_lookup_nexthop_once(struct pim_instance *pim,
s = zlookup->obuf;
stream_reset(s);
- zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP_MRIB, pim->vrf->vrf_id);
+ zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP, pim->vrf->vrf_id);
stream_put_ipaddr(s, &ipaddr);
+ stream_putc(s, safi);
stream_putw_at(s, 0, stream_get_endp(s));
ret = writen(zlookup->sock, s->data, stream_get_endp(s));
@@ -361,6 +373,79 @@ static int zclient_lookup_nexthop_once(struct pim_instance *pim,
return zclient_read_nexthop(pim, zlookup, nexthop_tab, tab_size, addr);
}
+static int zclient_lookup_nexthop_once(struct pim_instance *pim,
+ struct pim_zlookup_nexthop nexthop_tab[], const int tab_size,
+ pim_addr addr)
+{
+ if (pim->rpf_mode == MCAST_MRIB_ONLY)
+ return zclient_rib_lookup(pim, nexthop_tab, tab_size, addr, SAFI_MULTICAST);
+
+ if (pim->rpf_mode == MCAST_URIB_ONLY)
+ return zclient_rib_lookup(pim, nexthop_tab, tab_size, addr, SAFI_UNICAST);
+
+ /* All other modes require looking up both tables and making a choice */
+ struct pim_zlookup_nexthop mrib_tab[tab_size];
+ struct pim_zlookup_nexthop urib_tab[tab_size];
+ int mrib_num;
+ int urib_num;
+
+ memset(mrib_tab, 0, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ memset(urib_tab, 0, sizeof(struct pim_zlookup_nexthop) * tab_size);
+
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), looking up both MRIB and URIB", __func__, &addr,
+ pim->vrf->name);
+
+ mrib_num = zclient_rib_lookup(pim, mrib_tab, tab_size, addr, SAFI_MULTICAST);
+ urib_num = zclient_rib_lookup(pim, urib_tab, tab_size, addr, SAFI_UNICAST);
+
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), MRIB nexthops=%d, URIB nexthops=%d", __func__,
+ &addr, pim->vrf->name, mrib_num, urib_num);
+
+ /* If only one table has results, use that always */
+ if (mrib_num < 1) {
+ if (urib_num > 0)
+ memcpy(nexthop_tab, urib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ return urib_num;
+ }
+
+ if (urib_num < 1) {
+ if (mrib_num > 0)
+ memcpy(nexthop_tab, mrib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ return mrib_num;
+ }
+
+ /* Both tables have results, so decide per the configured lookup mode.
+ * Distance and prefix length are the same for every nexthop within a
+ * table, so only the first entry is compared.
+ */
+ if (pim->rpf_mode == MCAST_MIX_DISTANCE &&
+ mrib_tab[0].protocol_distance > urib_tab[0].protocol_distance) {
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), URIB has shortest distance", __func__,
+ &addr, pim->vrf->name);
+ memcpy(nexthop_tab, urib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ return urib_num;
+ } else if (pim->rpf_mode == MCAST_MIX_PFXLEN &&
+ mrib_tab[0].prefix_len < urib_tab[0].prefix_len) {
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), URIB has lengthest prefix length", __func__,
+ &addr, pim->vrf->name);
+ memcpy(nexthop_tab, urib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ return urib_num;
+ }
+
+ /* All other modes use the MRIB: for MCAST_MIX_MRIB_FIRST (and, by
+ * extension, MCAST_NO_CONFIG) the MRIB result is returned whenever
+ * both tables have one.
+ */
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), MRIB has nexthops", __func__, &addr, pim->vrf->name);
+ memcpy(nexthop_tab, mrib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ return mrib_num;
+}
+
void zclient_lookup_read_pipe(struct event *thread)
{
struct zclient *zlookup = EVENT_ARG(thread);
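To make the mode comparison above concrete, a worked example with invented values, for an address present in both tables: say the MRIB holds 10.0.0.0/8 at distance 90 and the URIB holds 10.0.0.0/24 at distance 20.

/* MCAST_MIX_MRIB_FIRST (and MCAST_NO_CONFIG): MRIB wins; it has results.
 * MCAST_MIX_DISTANCE:                          URIB wins (20 < 90).
 * MCAST_MIX_PFXLEN:                            URIB wins (/24 longer than /8).
 * A tie on the compared attribute keeps the MRIB result in all mixed modes.
 */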
diff --git a/pimd/pim_zlookup.h b/pimd/pim_zlookup.h
index ee2dd20113..c9461eb7e3 100644
--- a/pimd/pim_zlookup.h
+++ b/pimd/pim_zlookup.h
@@ -21,6 +21,7 @@ struct pim_zlookup_nexthop {
ifindex_t ifindex;
uint32_t route_metric;
uint8_t protocol_distance;
+ uint16_t prefix_len;
};
void zclient_lookup_new(void);
diff --git a/staticd/static_nht.c b/staticd/static_nht.c
index 6be598434d..06d27c6f59 100644
--- a/staticd/static_nht.c
+++ b/staticd/static_nht.c
@@ -21,6 +21,7 @@ static void static_nht_update_path(struct static_path *pn, struct prefix *nhp,
uint32_t nh_num, vrf_id_t nh_vrf_id)
{
struct static_nexthop *nh;
+ bool route_changed = false;
frr_each(static_nexthop_list, &pn->nexthop_list, nh) {
if (nh->nh_vrf_id != nh_vrf_id)
@@ -42,8 +43,10 @@ static void static_nht_update_path(struct static_path *pn, struct prefix *nhp,
nh->nh_valid = !!nh_num;
if (nh->state == STATIC_START)
- static_zebra_route_add(pn, true);
+ route_changed = true;
}
+ if (route_changed)
+ static_zebra_route_add(pn, true);
}
static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp,
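The static_nht_update_path() change above batches route reinstalls: the loop now only records that something changed, and static_zebra_route_add() runs once per static_path instead of once per nexthop. Condensed restatement of the hunk (sketch):

bool route_changed = false;

frr_each(static_nexthop_list, &pn->nexthop_list, nh) {
	/* ... per-nexthop validity bookkeeping as in the hunk above ... */
	if (nh->state == STATIC_START)
		route_changed = true;
}

/* one zebra update per static_path, not one per nexthop */
if (route_changed)
	static_zebra_route_add(pn, true);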
diff --git a/tests/helpers/python/frrtest.py b/tests/helpers/python/frrtest.py
index 3faa2a6f13..4682bd8786 100644
--- a/tests/helpers/python/frrtest.py
+++ b/tests/helpers/python/frrtest.py
@@ -163,8 +163,8 @@ class TestRefMismatch(Exception):
difflib.unified_diff(
self.reftext.splitlines(),
self.outtext.splitlines(),
- "outtext",
"reftext",
+ "outtext",
lineterm="",
)
)
diff --git a/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref b/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref
index 33c44780b4..a188ad92fc 100644
--- a/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref
+++ b/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref
@@ -1,3 +1,4 @@
+IPv4 unicast VRF default:
C>* 192.168.0.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX
C>* 192.168.1.0/26 is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* 192.168.2.0/26 is directly connected, r1-eth2, weight 1, XX:XX:XX
diff --git a/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref b/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref
index f5c1d6d7d2..4cb8692f90 100644
--- a/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref
+++ b/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref
@@ -1,3 +1,4 @@
+IPv6 unicast VRF default:
C>* fc00:0:0:1::/64 is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* fc00:0:0:2::/64 is directly connected, r1-eth2, weight 1, XX:XX:XX
C>* fc00:0:0:3::/64 is directly connected, r1-eth3, weight 1, XX:XX:XX
diff --git a/tests/topotests/bfd_profiles_topo1/r2/bgpd.conf b/tests/topotests/bfd_profiles_topo1/r2/bgpd.conf
index 1aab1d1372..0fe6f1c1c4 100644
--- a/tests/topotests/bfd_profiles_topo1/r2/bgpd.conf
+++ b/tests/topotests/bfd_profiles_topo1/r2/bgpd.conf
@@ -5,9 +5,11 @@ router bgp 100
no bgp ebgp-requires-policy
neighbor 172.16.1.1 remote-as 100
neighbor 172.16.1.1 timers 3 10
+ neighbor 172.16.1.1 timers connect 1
neighbor 172.16.1.1 bfd profile fasttx
neighbor 2001:db8:2::2 remote-as 200
neighbor 2001:db8:2::2 timers 3 10
+ neighbor 2001:db8:2::2 timers connect 1
neighbor 2001:db8:2::2 ebgp-multihop 2
neighbor 2001:db8:2::2 bfd profile slowtx
address-family ipv4 unicast
diff --git a/tests/topotests/bfd_profiles_topo1/r3/bgpd.conf b/tests/topotests/bfd_profiles_topo1/r3/bgpd.conf
index 65647b39e5..d1168d93bc 100644
--- a/tests/topotests/bfd_profiles_topo1/r3/bgpd.conf
+++ b/tests/topotests/bfd_profiles_topo1/r3/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 100
bgp router-id 10.254.254.3
neighbor 172.16.1.2 remote-as 100
neighbor 172.16.1.2 timers 3 10
+ neighbor 172.16.1.2 timers connect 1
neighbor 172.16.1.2 bfd profile DOES_NOT_EXIST
address-family ipv4 unicast
redistribute connected
diff --git a/tests/topotests/bfd_profiles_topo1/r4/bgpd.conf b/tests/topotests/bfd_profiles_topo1/r4/bgpd.conf
index 12d68270f8..1a8e6bb94d 100644
--- a/tests/topotests/bfd_profiles_topo1/r4/bgpd.conf
+++ b/tests/topotests/bfd_profiles_topo1/r4/bgpd.conf
@@ -5,6 +5,7 @@ router bgp 200
no bgp ebgp-requires-policy
neighbor 2001:db8:1::2 remote-as 100
neighbor 2001:db8:1::2 timers 3 10
+ neighbor 2001:db8:1::2 timers connect 1
neighbor 2001:db8:1::2 ebgp-multihop 2
neighbor 2001:db8:1::2 bfd profile DOES_NOT_EXIST
address-family ipv4 unicast
diff --git a/tests/topotests/bgp_addpath_best_selected/test_bgp_addpath_best_selected.py b/tests/topotests/bgp_addpath_best_selected/test_bgp_addpath_best_selected.py
index 3d17a2b709..e58b53728b 100644
--- a/tests/topotests/bgp_addpath_best_selected/test_bgp_addpath_best_selected.py
+++ b/tests/topotests/bgp_addpath_best_selected/test_bgp_addpath_best_selected.py
@@ -73,7 +73,9 @@ def test_bgp_addpath_best_selected():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ r1 = tgen.gears["r1"]
r2 = tgen.gears["r2"]
+ r7 = tgen.gears["r7"]
def _bgp_converge():
output = json.loads(r2.vtysh_cmd("show bgp ipv4 unicast 172.16.16.254/32 json"))
@@ -111,78 +113,67 @@ def test_bgp_addpath_best_selected():
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert result is None, "Can't converge initially"
- def check_bgp_advertised_routes_to_r1():
+ def r1_check_bgp_received_routes_from_r2():
output = json.loads(
- r2.vtysh_cmd(
- "show bgp ipv4 neighbors 192.168.1.1 advertised-routes detail json"
- )
+ r1.vtysh_cmd("show bgp ipv4 neighbors 192.168.1.2 routes json")
)
expected = {
- "advertisedRoutes": {
- "172.16.16.254/32": {
- "paths": [
- {
- "aspath": {
- "string": "65005",
- }
- },
- {
- "aspath": {
- "string": "65006",
- }
- },
- ]
- }
+ "routes": {
+ "172.16.16.254/32": [
+ {
+ "valid": True,
+ "path": "65002 65005",
+ },
+ {
+ "valid": True,
+ "path": "65002 65006",
+ },
+ ]
},
- "totalPrefixCounter": 2,
+ "totalRoutes": 1,
+ "totalPaths": 2,
}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(check_bgp_advertised_routes_to_r1)
+ test_func = functools.partial(r1_check_bgp_received_routes_from_r2)
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert (
result is None
- ), "Received more/less Add-Path best paths, but should be only 1+1 (real best path)"
+ ), "Received more/less Add-Path best paths, but should be ONLY 1+1 (real best path)"
- def check_bgp_advertised_routes_to_r7():
+ def r7_check_bgp_received_routes_from_r2():
output = json.loads(
- r2.vtysh_cmd(
- "show bgp ipv4 neighbors 192.168.7.7 advertised-routes detail json"
- )
+ r7.vtysh_cmd("show bgp ipv4 neighbors 192.168.7.2 routes json")
)
expected = {
- "advertisedRoutes": {
- "172.16.16.254/32": {
- "paths": [
- {
- "aspath": {
- "string": "65004",
- }
- },
- {
- "aspath": {
- "string": "65005",
- }
- },
- {
- "aspath": {
- "string": "65006",
- }
- },
- ]
- }
+ "routes": {
+ "172.16.16.254/32": [
+ {
+ "valid": True,
+ "path": "65002 65004",
+ },
+ {
+ "valid": True,
+ "path": "65002 65005",
+ },
+ {
+ "valid": True,
+ "path": "65002 65006",
+ },
+ ]
},
- "totalPrefixCounter": 3,
+ "totalRoutes": 1,
+ "totalPaths": 3,
}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(check_bgp_advertised_routes_to_r7)
+ test_func = functools.partial(r7_check_bgp_received_routes_from_r2)
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert (
result is None
- ), "Received more/less Add-Path best paths, but should be only 2+1 (real best path)"
+ ), "Received more/less Add-Path best paths, but should be ONLY 2+1 (real best path)"
if __name__ == "__main__":
diff --git a/tests/topotests/bgp_aggregate_address_topo1/r1/bgpd.conf b/tests/topotests/bgp_aggregate_address_topo1/r1/bgpd.conf
index c7cf4a527f..69be4b541d 100644
--- a/tests/topotests/bgp_aggregate_address_topo1/r1/bgpd.conf
+++ b/tests/topotests/bgp_aggregate_address_topo1/r1/bgpd.conf
@@ -19,8 +19,10 @@ router bgp 65000
no bgp ebgp-requires-policy
neighbor 10.0.0.2 remote-as 65001
neighbor 10.0.0.2 timers 3 10
+ neighbor 10.0.0.2 timers connect 1
neighbor 10.0.1.2 remote-as internal
neighbor 10.0.1.2 timers 3 10
+ neighbor 10.0.1.2 timers connect 1
address-family ipv4 unicast
redistribute connected
aggregate-address 192.168.0.0/24 matching-MED-only
diff --git a/tests/topotests/bgp_aggregate_address_topo1/r2/bgpd.conf b/tests/topotests/bgp_aggregate_address_topo1/r2/bgpd.conf
index acacd86526..418624aed4 100644
--- a/tests/topotests/bgp_aggregate_address_topo1/r2/bgpd.conf
+++ b/tests/topotests/bgp_aggregate_address_topo1/r2/bgpd.conf
@@ -1,6 +1,7 @@
router bgp 65000
neighbor 10.0.1.1 remote-as internal
neighbor 10.0.1.1 timers 3 10
+ neighbor 10.0.1.1 timers connect 1
address-family ipv4 unicast
redistribute connected
exit-address-family
diff --git a/tests/topotests/bgp_aggregator_zero/r1/bgpd.conf b/tests/topotests/bgp_aggregator_zero/r1/bgpd.conf
index 002a5c78c0..a6e24b221b 100644
--- a/tests/topotests/bgp_aggregator_zero/r1/bgpd.conf
+++ b/tests/topotests/bgp_aggregator_zero/r1/bgpd.conf
@@ -3,4 +3,5 @@ router bgp 65534
no bgp ebgp-requires-policy
neighbor 10.0.0.2 remote-as external
neighbor 10.0.0.2 timers 3 10
+ neighbor 10.0.0.2 timers connect 1
!
diff --git a/tests/topotests/bgp_aspath_zero/r1/bgpd.conf b/tests/topotests/bgp_aspath_zero/r1/bgpd.conf
index 002a5c78c0..a6e24b221b 100644
--- a/tests/topotests/bgp_aspath_zero/r1/bgpd.conf
+++ b/tests/topotests/bgp_aspath_zero/r1/bgpd.conf
@@ -3,4 +3,5 @@ router bgp 65534
no bgp ebgp-requires-policy
neighbor 10.0.0.2 remote-as external
neighbor 10.0.0.2 timers 3 10
+ neighbor 10.0.0.2 timers connect 1
!
diff --git a/tests/topotests/bgp_bmp/bgpbmp.py b/tests/topotests/bgp_bmp/bgpbmp.py
index 41995e2b5e..eac78a63f7 100644
--- a/tests/topotests/bgp_bmp/bgpbmp.py
+++ b/tests/topotests/bgp_bmp/bgpbmp.py
@@ -164,18 +164,6 @@ def bmp_check_for_prefixes(
for k, v in sorted(m.items())
# filter out variable keys
if k not in ["timestamp", "seq", "nxhp_link-local"]
- and (
- # When policy is loc-rib, the peer-distinguisher is 0:0
- # for the default VRF or the RD if any or the 0:<vrf_id>.
- # 0:<vrf_id> is used to distinguished. RFC7854 says: "If the
- # peer is a "Local Instance Peer", it is set to a unique,
- # locally defined value." The value is not tested because it
- # is variable.
- k != "peer_distinguisher"
- or policy != loc_rib
- or v == "0:0"
- or not v.startswith("0:")
- )
}
# build expected JSON files
@@ -199,7 +187,7 @@ def bmp_check_for_prefixes(
def bmp_check_for_peer_message(
- expected_peers, bmp_log_type, bmp_collector, bmp_log_file
+ expected_peers, bmp_log_type, bmp_collector, bmp_log_file, is_rd_instance=False
):
"""
Check for the presence of a peer up message for the peer
@@ -216,11 +204,20 @@ def bmp_check_for_peer_message(
]
# get the list of pairs (prefix, policy, seq) for the given message type
- peers = [
- m["peer_ip"]
- for m in messages
- if "peer_ip" in m.keys() and m["bmp_log_type"] == bmp_log_type
- ]
+ peers = []
+ for m in messages:
+ if is_rd_instance and m["peer_distinguisher"] == "0:0":
+ continue
+ if (
+ "peer_ip" in m.keys()
+ and m["peer_ip"] != "0.0.0.0"
+ and m["bmp_log_type"] == bmp_log_type
+ ):
+ if is_rd_instance and m["peer_type"] != "route distinguisher instance":
+ continue
+ peers.append(m["peer_ip"])
+ elif m["policy"] == "loc-rib" and m["bmp_log_type"] == bmp_log_type:
+ peers.append("0.0.0.0")
# check for prefixes
for ep in expected_peers:
diff --git a/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-loc-rib-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-loc-rib-step1.json
index ba31bf1d5d..d6c87dd4fd 100644
--- a/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-loc-rib-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-loc-rib-step1.json
@@ -10,6 +10,7 @@
"origin": "IGP",
"peer_asn": 65501,
"peer_bgp_id": "192.168.0.1",
+ "peer_distinguisher": "444:1",
"peer_type": "loc-rib instance",
"policy": "loc-rib"
},
@@ -23,6 +24,7 @@
"origin": "IGP",
"peer_asn": 65501,
"peer_bgp_id": "192.168.0.1",
+ "peer_distinguisher": "555:1",
"peer_type": "loc-rib instance",
"policy": "loc-rib",
"safi": 1
diff --git a/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-post-policy-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-post-policy-step1.json
index d5d9d65182..04e01623df 100644
--- a/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-post-policy-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-post-policy-step1.json
@@ -10,9 +10,9 @@
"origin": "IGP",
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "444:1",
"peer_ip": "192.168.0.2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "post-policy"
},
"2111::1111/128": {
@@ -25,9 +25,9 @@
"origin": "IGP",
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "555:1",
"peer_ip": "192:168::2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "post-policy",
"safi": 1
}
diff --git a/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-pre-policy-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-pre-policy-step1.json
index e11badc040..760ee0409a 100644
--- a/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-pre-policy-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-pre-policy-step1.json
@@ -10,9 +10,9 @@
"origin": "IGP",
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "444:1",
"peer_ip": "192.168.0.2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "pre-policy"
},
"2111::1111/128": {
@@ -25,9 +25,9 @@
"origin": "IGP",
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "555:1",
"peer_ip": "192:168::2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "pre-policy",
"safi": 1
}
diff --git a/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-loc-rib-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-loc-rib-step1.json
index 37ddc09ff8..6a82f7af1a 100644
--- a/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-loc-rib-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-loc-rib-step1.json
@@ -7,6 +7,7 @@
"is_filtered": false,
"peer_asn": 65501,
"peer_bgp_id": "192.168.0.1",
+ "peer_distinguisher": "444:1",
"peer_type": "loc-rib instance",
"policy": "loc-rib"
},
@@ -17,6 +18,7 @@
"is_filtered": false,
"peer_asn": 65501,
"peer_bgp_id": "192.168.0.1",
+ "peer_distinguisher": "555:1",
"peer_type": "loc-rib instance",
"policy": "loc-rib",
"safi": 1
diff --git a/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-post-policy-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-post-policy-step1.json
index de84307a4e..f57b1a51ce 100644
--- a/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-post-policy-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-post-policy-step1.json
@@ -7,9 +7,9 @@
"ipv6": false,
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "444:1",
"peer_ip": "192.168.0.2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "post-policy"
},
"2111::1111/128": {
@@ -19,9 +19,9 @@
"ipv6": true,
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "555:1",
"peer_ip": "192:168::2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "post-policy",
"safi": 1
}
diff --git a/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-pre-policy-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-pre-policy-step1.json
index 1c34498b7a..a52308c789 100644
--- a/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-pre-policy-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-pre-policy-step1.json
@@ -7,9 +7,9 @@
"ipv6": false,
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "444:1",
"peer_ip": "192.168.0.2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "pre-policy"
},
"2111::1111/128": {
@@ -19,9 +19,9 @@
"ipv6": true,
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "555:1",
"peer_ip": "192:168::2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "pre-policy",
"safi": 1
}
diff --git a/tests/topotests/bgp_bmp/r1vrf/frr.conf b/tests/topotests/bgp_bmp/r1vrf/frr.conf
index cb8a7d2b14..8706693458 100644
--- a/tests/topotests/bgp_bmp/r1vrf/frr.conf
+++ b/tests/topotests/bgp_bmp/r1vrf/frr.conf
@@ -23,12 +23,14 @@ router bgp 65501 vrf vrf1
exit
!
address-family ipv4 unicast
+ rd vpn export 444:1
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 soft-reconfiguration inbound
no neighbor 192:168::2 activate
exit-address-family
!
address-family ipv6 unicast
+ rd vpn export 555:1
neighbor 192:168::2 activate
neighbor 192:168::2 soft-reconfiguration inbound
exit-address-family
diff --git a/tests/topotests/bgp_bmp/test_bgp_bmp_1.py b/tests/topotests/bgp_bmp/test_bgp_bmp_1.py
index 6142863441..be3e07929a 100644
--- a/tests/topotests/bgp_bmp/test_bgp_bmp_1.py
+++ b/tests/topotests/bgp_bmp/test_bgp_bmp_1.py
@@ -192,7 +192,7 @@ def test_peer_up():
"""
tgen = get_topogen()
- peers = ["192.168.0.2", "192:168::2"]
+ peers = ["192.168.0.2", "192:168::2", "0.0.0.0"]
logger.info("checking for BMP peers up messages")
diff --git a/tests/topotests/bgp_bmp/test_bgp_bmp_2.py b/tests/topotests/bgp_bmp/test_bgp_bmp_2.py
index b45452e7c4..f16ff2b445 100644
--- a/tests/topotests/bgp_bmp/test_bgp_bmp_2.py
+++ b/tests/topotests/bgp_bmp/test_bgp_bmp_2.py
@@ -200,7 +200,7 @@ def test_peer_up():
"""
tgen = get_topogen()
- peers = ["192.168.0.2", "192:168::2"]
+ peers = ["192.168.0.2", "192:168::2", "0.0.0.0"]
logger.info("checking for BMP peers up messages")
@@ -210,6 +210,7 @@ def test_peer_up():
"peer up",
tgen.gears["bmp1vrf"],
os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"),
+ is_rd_instance=True,
)
success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1)
assert success, "Checking the updated prefixes has been failed !."
@@ -245,6 +246,7 @@ def test_peer_down():
"peer down",
tgen.gears["bmp1vrf"],
os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"),
+ is_rd_instance=True,
)
success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1)
assert success, "Checking the updated prefixes has been failed !."
diff --git a/tests/topotests/bgp_ecmp_topo1/r1/bgpd.conf b/tests/topotests/bgp_ecmp_topo1/r1/bgpd.conf
index 49981ac589..09c65321c2 100644
--- a/tests/topotests/bgp_ecmp_topo1/r1/bgpd.conf
+++ b/tests/topotests/bgp_ecmp_topo1/r1/bgpd.conf
@@ -8,44 +8,64 @@ router bgp 100
no bgp ebgp-requires-policy
neighbor 10.0.1.101 remote-as 99
neighbor 10.0.1.101 timers 3 10
+ neighbor 10.0.1.101 timers connect 1
neighbor 10.0.1.102 remote-as 99
neighbor 10.0.1.102 timers 3 10
+ neighbor 10.0.1.102 timers connect 1
neighbor 10.0.1.103 remote-as 99
neighbor 10.0.1.103 timers 3 10
+ neighbor 10.0.1.103 timers connect 1
neighbor 10.0.1.104 remote-as 99
neighbor 10.0.1.104 timers 3 10
+ neighbor 10.0.1.104 timers connect 1
neighbor 10.0.1.105 remote-as 99
neighbor 10.0.1.105 timers 3 10
+ neighbor 10.0.1.105 timers connect 1
neighbor 10.0.2.106 remote-as 99
neighbor 10.0.2.106 timers 3 10
+ neighbor 10.0.2.106 timers connect 1
neighbor 10.0.2.107 remote-as 99
neighbor 10.0.2.107 timers 3 10
+ neighbor 10.0.2.107 timers connect 1
neighbor 10.0.2.108 remote-as 99
neighbor 10.0.2.108 timers 3 10
+ neighbor 10.0.2.108 timers connect 1
neighbor 10.0.2.109 remote-as 99
neighbor 10.0.2.109 timers 3 10
+ neighbor 10.0.2.109 timers connect 1
neighbor 10.0.2.110 remote-as 99
neighbor 10.0.2.110 timers 3 10
+ neighbor 10.0.2.110 timers connect 1
neighbor 10.0.3.111 remote-as 111
neighbor 10.0.3.111 timers 3 10
+ neighbor 10.0.3.111 timers connect 1
neighbor 10.0.3.112 remote-as 112
neighbor 10.0.3.112 timers 3 10
+ neighbor 10.0.3.112 timers connect 1
neighbor 10.0.3.113 remote-as 113
neighbor 10.0.3.113 timers 3 10
+ neighbor 10.0.3.113 timers connect 1
neighbor 10.0.3.114 remote-as 114
neighbor 10.0.3.114 timers 3 10
+ neighbor 10.0.3.114 timers connect 1
neighbor 10.0.3.115 remote-as 115
neighbor 10.0.3.115 timers 3 10
+ neighbor 10.0.3.115 timers connect 1
neighbor 10.0.4.116 remote-as 116
neighbor 10.0.4.116 timers 3 10
+ neighbor 10.0.4.116 timers connect 1
neighbor 10.0.4.117 remote-as 117
neighbor 10.0.4.117 timers 3 10
+ neighbor 10.0.4.117 timers connect 1
neighbor 10.0.4.118 remote-as 118
neighbor 10.0.4.118 timers 3 10
+ neighbor 10.0.4.118 timers connect 1
neighbor 10.0.4.119 remote-as 119
neighbor 10.0.4.119 timers 3 10
+ neighbor 10.0.4.119 timers connect 1
neighbor 10.0.4.120 remote-as 120
neighbor 10.0.4.120 timers 3 10
+ neighbor 10.0.4.120 timers connect 1
!
!
diff --git a/tests/topotests/bgp_flowspec/r1/bgpd.conf b/tests/topotests/bgp_flowspec/r1/bgpd.conf
index 4b7a20f958..288aeaf4dd 100644
--- a/tests/topotests/bgp_flowspec/r1/bgpd.conf
+++ b/tests/topotests/bgp_flowspec/r1/bgpd.conf
@@ -6,6 +6,7 @@ router bgp 100
bgp router-id 10.0.1.1
neighbor 10.0.1.101 remote-as 100
neighbor 10.0.1.101 timers 3 10
+ neighbor 10.0.1.101 timers connect 1
neighbor 10.0.1.101 update-source 10.0.1.1
address-family ipv6 flowspec
local-install r1-eth0
diff --git a/tests/topotests/bgp_invalid_nexthop/r1/frr.conf b/tests/topotests/bgp_invalid_nexthop/r1/frr.conf
index 05e1a6c825..f96aeb4366 100644
--- a/tests/topotests/bgp_invalid_nexthop/r1/frr.conf
+++ b/tests/topotests/bgp_invalid_nexthop/r1/frr.conf
@@ -8,6 +8,7 @@ router bgp 65001
no bgp ebgp-requires-policy
neighbor fc00::2 remote-as external
neighbor fc00::2 timers 3 10
+ neighbor fc00::2 timers connect 1
address-family ipv6
neighbor fc00::2 activate
exit-address-family
diff --git a/tests/topotests/bgp_minimum_holdtime/test_bgp_minimum_holdtime.py b/tests/topotests/bgp_minimum_holdtime/test_bgp_minimum_holdtime.py
index c9ff2ffc7e..d2d6a40ae8 100755
--- a/tests/topotests/bgp_minimum_holdtime/test_bgp_minimum_holdtime.py
+++ b/tests/topotests/bgp_minimum_holdtime/test_bgp_minimum_holdtime.py
@@ -76,7 +76,7 @@ def test_bgp_minimum_holdtime():
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_neighbor_check_if_notification_sent)
- _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert result is None, "Failed to send notification message\n"
diff --git a/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf b/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf
index cd7f44ac66..ced5cb5e4d 100644
--- a/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf
+++ b/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf
@@ -19,10 +19,13 @@ router bgp 100 view 1
timers bgp 60 180
neighbor 172.16.1.1 remote-as 65001
neighbor 172.16.1.1 timers 3 10
+ neighbor 172.16.1.1 timers connect 1
neighbor 172.16.1.2 remote-as 65002
neighbor 172.16.1.2 timers 3 10
+ neighbor 172.16.1.2 timers connect 1
neighbor 172.16.1.5 remote-as 65005
neighbor 172.16.1.5 timers 3 10
+ neighbor 172.16.1.5 timers connect 1
!
router bgp 100 view 2
bgp router-id 172.30.1.1
@@ -32,8 +35,10 @@ router bgp 100 view 2
timers bgp 60 180
neighbor 172.16.1.3 remote-as 65003
neighbor 172.16.1.3 timers 3 10
+ neighbor 172.16.1.3 timers connect 1
neighbor 172.16.1.4 remote-as 65004
neighbor 172.16.1.4 timers 3 10
+ neighbor 172.16.1.4 timers connect 1
!
router bgp 100 view 3
bgp router-id 172.30.1.1
@@ -43,10 +48,13 @@ router bgp 100 view 3
timers bgp 60 180
neighbor 172.16.1.6 remote-as 65006
neighbor 172.16.1.6 timers 3 10
+ neighbor 172.16.1.6 timers connect 1
neighbor 172.16.1.7 remote-as 65007
neighbor 172.16.1.7 timers 3 10
+ neighbor 172.16.1.7 timers connect 1
neighbor 172.16.1.8 remote-as 65008
neighbor 172.16.1.8 timers 3 10
+ neighbor 172.16.1.8 timers connect 1
!
route-map local1 permit 10
set community 100:9999 additive
diff --git a/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf
index 7efa1b79fa..06ac666ce6 100644
--- a/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 65000
no bgp ebgp-requires-policy
neighbor fd00:0:2::9 remote-as internal
neighbor fd00:0:2::9 timers 3 10
+ neighbor fd00:0:2::9 timers connect 1
address-family ipv4 unicast
redistribute connected route-map RMAP4
!
diff --git a/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf
index 4d4ae44e28..4b696b51b3 100644
--- a/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 65000
no bgp ebgp-requires-policy
neighbor fd00:0:2::9 remote-as internal
neighbor fd00:0:2::9 timers 3 10
+ neighbor fd00:0:2::9 timers connect 1
address-family ipv4 unicast
redistribute connected route-map RMAP4
!
diff --git a/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf
index b14c9bace4..081909bbb3 100644
--- a/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 65001
no bgp ebgp-requires-policy
neighbor fd00:0:2::9 remote-as external
neighbor fd00:0:2::9 timers 3 10
+ neighbor fd00:0:2::9 timers connect 1
address-family ipv4 unicast
redistribute connected route-map RMAP4
!
diff --git a/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf
index becea2bbe6..b8f9078f51 100644
--- a/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 65002
no bgp ebgp-requires-policy
neighbor fd00:0:3::9 remote-as external
neighbor fd00:0:3::9 timers 3 10
+ neighbor fd00:0:3::9 timers connect 1
address-family ipv4 unicast
redistribute connected route-map RMAP4
!
diff --git a/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf
index 801736ab98..19c6bbc819 100644
--- a/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 65000
no bgp ebgp-requires-policy
neighbor fd00:0:4::9 remote-as internal
neighbor fd00:0:4::9 timers 3 10
+ neighbor fd00:0:4::9 timers connect 1
address-family ipv4 unicast
redistribute connected route-map RMAP4
!
diff --git a/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf
index 705ae78b8e..1c8f2fa49e 100644
--- a/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf
@@ -2,16 +2,22 @@ router bgp 65000
no bgp ebgp-requires-policy
neighbor fd00:0:2::1 remote-as internal
neighbor fd00:0:2::1 timers 3 10
+ neighbor fd00:0:2::1 timers connect 1
neighbor fd00:0:2::2 remote-as internal
neighbor fd00:0:2::2 timers 3 10
+ neighbor fd00:0:2::2 timers connect 1
neighbor fd00:0:2::3 remote-as internal
neighbor fd00:0:2::3 timers 3 10
+ neighbor fd00:0:2::3 timers connect 1
neighbor fd00:0:2::4 remote-as external
neighbor fd00:0:2::4 timers 3 10
+ neighbor fd00:0:2::4 timers connect 1
neighbor fd00:0:3::5 remote-as external
neighbor fd00:0:3::5 timers 3 10
+ neighbor fd00:0:3::5 timers connect 1
neighbor fd00:0:4::6 remote-as internal
neighbor fd00:0:4::6 timers 3 10
+ neighbor fd00:0:4::6 timers connect 1
address-family ipv4 unicast
neighbor fd00:0:2::1 route-reflector-client
neighbor fd00:0:2::2 route-reflector-client
diff --git a/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py b/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py
index 7875f2e0f6..58daee32c3 100644
--- a/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py
+++ b/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py
@@ -36,7 +36,7 @@ pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
- """
+ r"""
 All peers are FRR BGP peers except r3, which is an exabgp peer.
rr is a route-reflector for AS 65000 iBGP peers.
Exabgp does not send any IPv6 Link-Local nexthop
diff --git a/tests/topotests/bgp_path_attribute_discard/r1/frr.conf b/tests/topotests/bgp_path_attribute_discard/r1/frr.conf
index ae7fbdd9a9..ae47862963 100644
--- a/tests/topotests/bgp_path_attribute_discard/r1/frr.conf
+++ b/tests/topotests/bgp_path_attribute_discard/r1/frr.conf
@@ -6,4 +6,5 @@ router bgp 65001
no bgp ebgp-requires-policy
neighbor 10.0.0.254 remote-as external
neighbor 10.0.0.254 timers 3 10
+ neighbor 10.0.0.254 timers connect 1
!
diff --git a/tests/topotests/bgp_path_attribute_discard/r2/frr.conf b/tests/topotests/bgp_path_attribute_discard/r2/frr.conf
index 1dafbdd8e1..30ffdefff3 100644
--- a/tests/topotests/bgp_path_attribute_discard/r2/frr.conf
+++ b/tests/topotests/bgp_path_attribute_discard/r2/frr.conf
@@ -6,5 +6,6 @@ router bgp 65254
no bgp ebgp-requires-policy
neighbor 10.0.0.254 remote-as internal
neighbor 10.0.0.254 timers 3 10
+ neighbor 10.0.0.254 timers connect 1
neighbor 10.0.0.254 path-attribute discard 26
!
diff --git a/tests/topotests/bgp_path_attribute_discard/test_bgp_path_attribute_discard.py b/tests/topotests/bgp_path_attribute_discard/test_bgp_path_attribute_discard.py
index adc92f59fe..c6f1b6193b 100644
--- a/tests/topotests/bgp_path_attribute_discard/test_bgp_path_attribute_discard.py
+++ b/tests/topotests/bgp_path_attribute_discard/test_bgp_path_attribute_discard.py
@@ -142,6 +142,27 @@ def test_bgp_path_attribute_discard():
result is None
), "Failed to discard path attributes (atomic-aggregate, community)"
+ def _bgp_check_attributes_discarded_stats():
+ output = json.loads(r1.vtysh_cmd("show bgp neighbor json"))
+ expected = {
+ "10.0.0.254": {
+ "prefixStats": {
+ "inboundFiltered": 0,
+ "aspathLoop": 0,
+ "originatorLoop": 0,
+ "clusterLoop": 0,
+ "invalidNextHop": 0,
+ "withdrawn": 0,
+ "attributesDiscarded": 3,
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_attributes_discarded_stats)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Discarded path attributes count is not as expected"
+
def _bgp_check_if_aigp_invalid_attribute_discarded():
output = json.loads(r2.vtysh_cmd("show bgp ipv4 unicast json detail"))
expected = {
diff --git a/tests/topotests/bgp_path_attribute_treat_as_withdraw/test_bgp_path_attribute_treat_as_withdraw.py b/tests/topotests/bgp_path_attribute_treat_as_withdraw/test_bgp_path_attribute_treat_as_withdraw.py
index a9d678a42d..4f6472f3c5 100644
--- a/tests/topotests/bgp_path_attribute_treat_as_withdraw/test_bgp_path_attribute_treat_as_withdraw.py
+++ b/tests/topotests/bgp_path_attribute_treat_as_withdraw/test_bgp_path_attribute_treat_as_withdraw.py
@@ -134,6 +134,27 @@ def test_bgp_path_attribute_treat_as_withdraw():
_, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, "Failed to withdraw prefixes with atomic-aggregate attribute"
+ def _bgp_check_attributes_withdrawn_stats():
+ output = json.loads(r2.vtysh_cmd("show bgp neighbor json"))
+ expected = {
+ "10.0.0.1": {
+ "prefixStats": {
+ "inboundFiltered": 0,
+ "aspathLoop": 0,
+ "originatorLoop": 0,
+ "clusterLoop": 0,
+ "invalidNextHop": 0,
+ "withdrawn": 1,
+ "attributesDiscarded": 0,
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_attributes_withdrawn_stats)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Withdrawn prefix count is not as expected"
+
def test_memory_leak():
"Run the memory leak test and report results."
diff --git a/tests/topotests/bgp_peer_type_multipath_relax/r1/bgpd.conf b/tests/topotests/bgp_peer_type_multipath_relax/r1/bgpd.conf
index 038f108aa8..e743010922 100644
--- a/tests/topotests/bgp_peer_type_multipath_relax/r1/bgpd.conf
+++ b/tests/topotests/bgp_peer_type_multipath_relax/r1/bgpd.conf
@@ -8,9 +8,17 @@ router bgp 64510
bgp bestpath compare-routerid
bgp bestpath peer-type multipath-relax
neighbor 10.0.1.2 remote-as 64510
+ neighbor 10.0.1.2 timers 3 10
+ neighbor 10.0.1.2 timers connect 1
neighbor 10.0.3.2 remote-as 64502
+ neighbor 10.0.3.2 timers 3 10
+ neighbor 10.0.3.2 timers connect 1
neighbor 10.0.4.2 remote-as 64503
+ neighbor 10.0.4.2 timers 3 10
+ neighbor 10.0.4.2 timers connect 1
neighbor 10.0.5.2 remote-as 64511
+ neighbor 10.0.5.2 timers 3 10
+ neighbor 10.0.5.2 timers connect 1
!
line vty
!
diff --git a/tests/topotests/bgp_peer_type_multipath_relax/r2/bgpd.conf b/tests/topotests/bgp_peer_type_multipath_relax/r2/bgpd.conf
index 2362a19f26..1da7173bba 100644
--- a/tests/topotests/bgp_peer_type_multipath_relax/r2/bgpd.conf
+++ b/tests/topotests/bgp_peer_type_multipath_relax/r2/bgpd.conf
@@ -7,7 +7,11 @@ router bgp 64511
bgp router-id 10.0.5.2
no bgp ebgp-requires-policy
neighbor 10.0.2.2 remote-as 64511
+ neighbor 10.0.2.2 timers 3 10
+ neighbor 10.0.2.2 timers connect 1
neighbor 10.0.5.1 remote-as 64510
+ neighbor 10.0.5.1 timers 3 10
+ neighbor 10.0.5.1 timers connect 1
!
address-family ipv4 unicast
neighbor 10.0.5.1 route-map dropall in
diff --git a/tests/topotests/bgp_prefix_sid/r1/bgpd.conf b/tests/topotests/bgp_prefix_sid/r1/bgpd.conf
index e02226f2fd..3fd5e5e9c3 100644
--- a/tests/topotests/bgp_prefix_sid/r1/bgpd.conf
+++ b/tests/topotests/bgp_prefix_sid/r1/bgpd.conf
@@ -7,8 +7,10 @@ router bgp 1
no bgp ebgp-requires-policy
neighbor 10.0.0.101 remote-as 2
neighbor 10.0.0.101 timers 3 10
+ neighbor 10.0.0.101 timers connect 1
neighbor 10.0.0.102 remote-as 3
neighbor 10.0.0.102 timers 3 10
+ neighbor 10.0.0.102 timers connect 1
!
address-family ipv4 labeled-unicast
neighbor 10.0.0.101 activate
diff --git a/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf b/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf
index b3ca0e114d..946103c30f 100644
--- a/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf
+++ b/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf
@@ -18,6 +18,7 @@ router bgp 1
no bgp ebgp-requires-policy
neighbor 10.0.0.101 remote-as 2
neighbor 10.0.0.101 timers 3 10
+ neighbor 10.0.0.101 timers connect 1
!
address-family ipv6 vpn
neighbor 10.0.0.101 activate
diff --git a/tests/topotests/bgp_route_server_client/r1/bgpd.conf b/tests/topotests/bgp_route_server_client/r1/bgpd.conf
index e464e6c50b..5cbb7956be 100644
--- a/tests/topotests/bgp_route_server_client/r1/bgpd.conf
+++ b/tests/topotests/bgp_route_server_client/r1/bgpd.conf
@@ -2,10 +2,12 @@
router bgp 65001
bgp router-id 10.10.10.1
no bgp ebgp-requires-policy
- no bgp enforce-first-as
- neighbor 2001:db8:1::1 remote-as external
- neighbor 2001:db8:1::1 timers 3 10
- neighbor 2001:db8:1::1 timers connect 5
+ neighbor pg peer-group
+ neighbor pg remote-as external
+ neighbor pg timers 1 3
+ neighbor pg timers connect 1
+ no neighbor pg enforce-first-as
+ neighbor 2001:db8:1::1 peer-group pg
address-family ipv6 unicast
redistribute connected
neighbor 2001:db8:1::1 activate
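
Moving the shared settings (remote-as, timers, and enforce-first-as) onto the `pg` peer-group lets each member neighbor inherit them instead of repeating them per neighbor. A hedged sketch of a membership check, assuming the neighbor JSON reports the group name in a `peerGroup` field:

    import json

    def in_peer_group(router, peer, group="pg"):
        # `peerGroup` is an assumed field name for the group a neighbor
        # belongs to in `show bgp neighbors json`.
        output = json.loads(router.vtysh_cmd("show bgp neighbors {} json".format(peer)))
        return output.get(peer, {}).get("peerGroup") == group
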
diff --git a/tests/topotests/bgp_route_server_client/r2/bgpd.conf b/tests/topotests/bgp_route_server_client/r2/bgpd.conf
index 19607660f9..7fda2b0a05 100644
--- a/tests/topotests/bgp_route_server_client/r2/bgpd.conf
+++ b/tests/topotests/bgp_route_server_client/r2/bgpd.conf
@@ -3,16 +3,16 @@ router bgp 65000 view RS
no bgp ebgp-requires-policy
neighbor 2001:db8:1::2 remote-as external
neighbor 2001:db8:1::2 timers 3 10
- neighbor 2001:db8:1::2 timers connect 5
+ neighbor 2001:db8:1::2 timers connect 1
neighbor 2001:db8:1::3 remote-as external
neighbor 2001:db8:1::3 timers 3 10
- neighbor 2001:db8:1::3 timers connect 5
+ neighbor 2001:db8:1::3 timers connect 1
neighbor 2001:db8:1::4 remote-as external
neighbor 2001:db8:1::4 timers 3 10
- neighbor 2001:db8:1::4 timers connect 5
+ neighbor 2001:db8:1::4 timers connect 1
neighbor 2001:db8:3::2 remote-as external
neighbor 2001:db8:3::2 timers 3 10
- neighbor 2001:db8:3::2 timers connect 5
+ neighbor 2001:db8:3::2 timers connect 1
address-family ipv6 unicast
redistribute connected
neighbor 2001:db8:1::2 activate
diff --git a/tests/topotests/bgp_route_server_client/r3/bgpd.conf b/tests/topotests/bgp_route_server_client/r3/bgpd.conf
index f7daba87fa..2f20b91334 100644
--- a/tests/topotests/bgp_route_server_client/r3/bgpd.conf
+++ b/tests/topotests/bgp_route_server_client/r3/bgpd.conf
@@ -5,7 +5,7 @@ router bgp 65003
no bgp enforce-first-as
neighbor 2001:db8:3::1 remote-as external
neighbor 2001:db8:3::1 timers 3 10
- neighbor 2001:db8:3::1 timers connect 5
+ neighbor 2001:db8:3::1 timers connect 1
address-family ipv6 unicast
redistribute connected
neighbor 2001:db8:3::1 activate
diff --git a/tests/topotests/bgp_route_server_client/r4/bgpd.conf b/tests/topotests/bgp_route_server_client/r4/bgpd.conf
index c907d7284e..66a1573018 100644
--- a/tests/topotests/bgp_route_server_client/r4/bgpd.conf
+++ b/tests/topotests/bgp_route_server_client/r4/bgpd.conf
@@ -5,7 +5,7 @@ router bgp 65004
no bgp enforce-first-as
neighbor 2001:db8:1::1 remote-as external
neighbor 2001:db8:1::1 timers 3 10
- neighbor 2001:db8:1::1 timers connect 5
+ neighbor 2001:db8:1::1 timers connect 1
address-family ipv6 unicast
redistribute connected
neighbor 2001:db8:1::1 activate
diff --git a/tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json b/tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json
new file mode 100644
index 0000000000..016c019d10
--- /dev/null
+++ b/tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json
@@ -0,0 +1,70 @@
+{
+ "vrfId": 0,
+ "vrfName": "default",
+ "tableVersion": 3,
+ "routerId": "192.0.2.2",
+ "defaultLocPrf": 100,
+ "localAS": 65002,
+ "routes": {
+ "198.51.100.0/24": [
+ {
+ "origin": "IGP",
+ "metric": 0,
+ "valid": true,
+ "version": 2,
+ "rpkiValidationState": "valid",
+ "bestpath": {
+ "overall": true,
+ "selectionReason": "First path received"
+ },
+ "nexthops": [
+ {
+ "ip": "192.0.2.1",
+ "hostname": "r1",
+ "afi": "ipv4",
+ "metric": 0,
+ "accessible": true,
+ "used": true
+ }
+ ],
+ "peer": {
+ "peerId": "192.0.2.1",
+ "routerId": "192.0.2.1",
+ "hostname": "r1",
+ "type": "external"
+ }
+ }
+ ],
+ "203.0.113.0/24": [
+ {
+ "origin": "IGP",
+ "metric": 0,
+ "valid": true,
+ "version": 3,
+ "rpkiValidationState": "valid",
+ "bestpath": {
+ "overall": true,
+ "selectionReason": "First path received"
+ },
+ "nexthops": [
+ {
+ "ip": "192.0.2.1",
+ "hostname": "r1",
+ "afi": "ipv4",
+ "metric": 0,
+ "accessible": true,
+ "used": true
+ }
+ ],
+ "peer": {
+ "peerId": "192.0.2.1",
+ "routerId": "192.0.2.1",
+ "hostname": "r1",
+ "type": "external"
+ }
+ }
+ ]
+ },
+ "totalRoutes": 3,
+ "totalPaths": 3
+} \ No newline at end of file
diff --git a/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py b/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py
index 7b40bbdae8..5b775aa6cb 100644
--- a/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py
+++ b/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py
@@ -101,6 +101,16 @@ def show_rpki_prefixes(rname, expected, vrf=None):
return topotest.json_cmp(output, expected)
+def show_rpki_valid(rname, expected, vrf=None):
+ tgen = get_topogen()
+
+ cmd = "show bgp ipv4 detail json"
+
+ output = json.loads(tgen.gears[rname].vtysh_cmd(cmd))
+
+ return topotest.json_cmp(output, expected)
+
+
def show_bgp_ipv4_table_rpki(rname, rpki_state, expected, vrf=None):
tgen = get_topogen()
@@ -123,6 +133,25 @@ def show_bgp_ipv4_table_rpki(rname, rpki_state, expected, vrf=None):
return topotest.json_cmp(output, expected)
+def test_show_bgp_rpki_prefixes_valid():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["r1", "r3"]:
+ logger.info("{}: checking if rtrd is running".format(rname))
+ if rtrd_process[rname].poll() is not None:
+ pytest.skip(tgen.errors)
+
+ rname = "r2"
+ with open(os.path.join(CWD, "{}/bgp_rpki_valid.json".format(rname))) as f:
+ expected_json = json.load(f)
+ test_func = functools.partial(show_rpki_valid, rname, expected_json)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see RPKI-valid routes on {}".format(rname)
+
+
def test_show_bgp_rpki_prefixes():
tgen = get_topogen()
diff --git a/tests/topotests/bgp_show_advertised_routes_detail/__init__.py b/tests/topotests/bgp_show_advertised_routes_detail/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_show_advertised_routes_detail/__init__.py
diff --git a/tests/topotests/bgp_show_advertised_routes_detail/r1/frr.conf b/tests/topotests/bgp_show_advertised_routes_detail/r1/frr.conf
new file mode 100644
index 0000000000..c9710eb5e8
--- /dev/null
+++ b/tests/topotests/bgp_show_advertised_routes_detail/r1/frr.conf
@@ -0,0 +1,13 @@
+!
+int r1-eth0
+ ip address 192.168.1.1/24
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.1.2 remote-as auto
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ network 10.10.10.1/32
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_show_advertised_routes_detail/r2/frr.conf b/tests/topotests/bgp_show_advertised_routes_detail/r2/frr.conf
new file mode 100644
index 0000000000..30b4ba539f
--- /dev/null
+++ b/tests/topotests/bgp_show_advertised_routes_detail/r2/frr.conf
@@ -0,0 +1,29 @@
+!
+int r2-eth0
+ ip address 192.168.1.2/24
+!
+int r2-eth1
+ ip address 192.168.2.2/24
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as auto
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+ neighbor 192.168.2.3 remote-as auto
+ neighbor 192.168.2.3 timers 1 3
+ neighbor 192.168.2.3 timers connect 1
+ address-family ipv4 unicast
+ neighbor 192.168.2.3 route-map r3 out
+ exit-address-family
+ !
+!
+ip prefix-list p1 permit 10.10.10.1/32
+!
+route-map r3 permit 10
+ match ip address prefix-list p1
+ set large-community 65001:65002:65003
+ set community 65001:65002
+ set extcommunity bandwidth 100
+exit
+!
diff --git a/tests/topotests/bgp_show_advertised_routes_detail/r3/frr.conf b/tests/topotests/bgp_show_advertised_routes_detail/r3/frr.conf
new file mode 100644
index 0000000000..11333d481f
--- /dev/null
+++ b/tests/topotests/bgp_show_advertised_routes_detail/r3/frr.conf
@@ -0,0 +1,11 @@
+!
+int r3-eth0
+ ip address 192.168.2.3/24
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.2 remote-as auto
+ neighbor 192.168.2.2 timers 1 3
+ neighbor 192.168.2.2 timers connect 1
+ !
+!
diff --git a/tests/topotests/bgp_show_advertised_routes_detail/test_bgp_show_advertised_routes_detail.py b/tests/topotests/bgp_show_advertised_routes_detail/test_bgp_show_advertised_routes_detail.py
new file mode 100644
index 0000000000..fda7ec601d
--- /dev/null
+++ b/tests/topotests/bgp_show_advertised_routes_detail/test_bgp_show_advertised_routes_detail.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+# Copyright (c) 2024 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+pytestmark = [pytest.mark.bgpd]
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, get_topogen
+
+
+def setup_module(mod):
+ topodef = {"s1": ("r1", "r2"), "s2": ("r2", "r3")}
+ tgen = Topogen(topodef, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for _, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_show_advertised_routes_detail():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r2 = tgen.gears["r2"]
+
+ def _bgp_converge():
+ output = json.loads(
+ r2.vtysh_cmd(
+ "show bgp ipv4 unicast neighbor 192.168.2.3 advertised-routes detail json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {
+ "10.10.10.1/32": {
+ "paths": [
+ {
+ "community": {
+ "string": "65001:65002",
+ },
+ "extendedCommunity": {
+ "string": "LB:65002:12500000 (100.000 Mbps)"
+ },
+ "largeCommunity": {
+ "string": "65001:65002:65003",
+ },
+ }
+ ],
+ }
+ },
+ "totalPrefixCounter": 1,
+ "filteredPrefixCounter": 0,
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(
+ _bgp_converge,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Can't converge"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
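
The expected extended-community string `LB:65002:12500000 (100.000 Mbps)` follows directly from `set extcommunity bandwidth 100` in r2's route-map: the link-bandwidth community carries bytes per second, so 100 Mbit/s encodes as 100 * 10^6 / 8 = 12,500,000:

    mbps = 100
    bytes_per_sec = mbps * 10**6 // 8
    assert bytes_per_sec == 12500000  # matches the LB:65002:12500000 string above
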
diff --git a/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/__init__.py b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/__init__.py
diff --git a/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r1/frr.conf b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r1/frr.conf
new file mode 100644
index 0000000000..7daf335aab
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r1/frr.conf
@@ -0,0 +1,117 @@
+interface r1-eth1 vrf vrf1
+ ip address 173.31.1.1/32
+!
+interface r1-eth2 vrf vrf2
+ ip address 173.31.1.2/32
+!
+interface r1-eth3 vrf vrf3
+ ip address 173.31.1.3/32
+!
+interface r1-eth4 vrf vrf4
+ ip address 173.31.1.4/32
+!
+interface r1-eth5 vrf vrf5
+ ip address 173.31.1.5/32
+!
+
+interface r1-eth0
+ ip address 192.168.0.1/24
+!
+
+interface r1-eth6
+ ip address 193.170.0.1/24
+
+interface lo
+ ip address 11.11.11.11/32
+!
+router ospf
+ ospf router-id 11.11.11.11
+ network 193.170.0.0/24 area 0.0.0.0
+ network 11.11.11.11/32 area 0.0.0.0
+ redistribute connected
+exit
+!
+mpls ldp
+ router-id 11.11.11.11
+ !
+ address-family ipv4
+ discovery transport-address 11.11.11.11
+ !
+ interface r1-eth6
+ exit
+ !
+ exit-address-family
+ !
+exit
+!
+bgp route-map delay-timer 1
+router bgp 65500
+ bgp router-id 192.0.2.1
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.2 remote-as 65501
+ address-family ipv4 unicast
+ no neighbor 192.168.0.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.168.0.2 activate
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:1
+ rt vpn both 53:1
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf2
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:2
+ rt vpn both 53:2
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf3
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:3
+ rt vpn both 53:3
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf4
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:4
+ rt vpn both 53:4
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf5
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:5
+ rt vpn both 53:5
+ export vpn
+ import vpn
+ exit-address-family
+!
+
+interface r1-eth0
+ mpls bgp forwarding
+! \ No newline at end of file
diff --git a/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r2/frr.conf b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r2/frr.conf
new file mode 100644
index 0000000000..6facebe40e
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r2/frr.conf
@@ -0,0 +1,88 @@
+interface r2-eth1 vrf vrf1
+ ip address 173.31.0.1/32
+!
+interface r2-eth2 vrf vrf2
+ ip address 173.31.0.2/32
+!
+interface r2-eth3 vrf vrf3
+ ip address 173.31.0.3/32
+!
+interface r2-eth4 vrf vrf4
+ ip address 173.31.0.4/32
+!
+interface r2-eth5 vrf vrf5
+ ip address 173.31.0.5/32
+!
+interface r2-eth0
+ ip address 192.168.0.2/24
+!
+router bgp 65501
+ bgp router-id 192.0.2.2
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192.168.0.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.168.0.1 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:1
+ rt vpn both 53:1
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65501 vrf vrf2
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:2
+ rt vpn both 53:2
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65501 vrf vrf3
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:3
+ rt vpn both 53:3
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65501 vrf vrf4
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:4
+ rt vpn both 53:4
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65501 vrf vrf5
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:5
+ rt vpn both 53:5
+ export vpn
+ import vpn
+ exit-address-family
+!
+
+interface r2-eth0
+ mpls bgp forwarding
+!
diff --git a/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r3/frr.conf b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r3/frr.conf
new file mode 100644
index 0000000000..8f49cdfe0c
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r3/frr.conf
@@ -0,0 +1,32 @@
+interface r3-eth0
+ ip address 193.170.0.2/24
+!
+interface lo
+ ip address 33.33.33.33/32
+!
+interface r3-eth1
+ ip address 180.170.0.2/32
+!
+interface r3-eth2
+ ip address 180.170.0.3/32
+!
+router ospf
+ ospf router-id 33.33.33.33
+ network 193.170.0.0/24 area 0.0.0.0
+ network 33.33.33.33/32 area 0.0.0.0
+ redistribute connected
+exit
+!
+mpls ldp
+ router-id 33.33.33.33
+ !
+ address-family ipv4
+ discovery transport-address 33.33.33.33
+ !
+ interface r3-eth0
+ exit
+ !
+ exit-address-family
+ !
+exit
+! \ No newline at end of file
diff --git a/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/test_bgp_vpnv4_vpn_auto.py b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/test_bgp_vpnv4_vpn_auto.py
new file mode 100644
index 0000000000..ed3cdca2f9
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/test_bgp_vpnv4_vpn_auto.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_bgp_vpnv4_vpn_auto.py
+#
+# Copyright (c) 2024 by Varun Hegde
+#
+
+"""
+ test_bgp_vpnv4_vpn_auto.py: Test the FRR BGP daemon with a BGP VPN session using label vpn export auto
+"""
+
+import os
+import sys
+import json
+import functools
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.bgpcheck import (
+ check_show_bgp_vpn_prefix_found,
+ check_show_bgp_vpn_prefix_not_found,
+)
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 3 routers.
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r3")
+
+
+ for i in range(6):
+ switch = tgen.add_switch("s{0}".format(i))
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ # create a single link between r1 -- r3
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
+ for i in range(7, 9):
+ switch = tgen.add_switch("s{0}".format(i))
+ switch.add_link(tgen.gears["r3"])
+
+
+
+def _populate_iface():
+ tgen = get_topogen()
+ cmds_list = [
+ "ip link add vrf{} type vrf table {}",
+ "ip link set dev vrf{} up",
+ "ip link set dev r1-eth{} master vrf{}",
+ "echo 1 > /proc/sys/net/mpls/conf/r1-eth{}/input",
+ ]
+ cmds_list2 = [
+ "ip link add vrf{} type vrf table {}",
+ "ip link set dev vrf{} up",
+ "ip link set dev r2-eth{} master vrf{}",
+ "echo 1 > /proc/sys/net/mpls/conf/r2-eth{}/input",
+ ]
+
+ for i in range(1, 6):
+ for cmd in cmds_list:
+ formatted = cmd.format(i, i)
+ logger.info("input: " + formatted)
+ output = tgen.net["r1"].cmd(formatted)
+ logger.info("output: " + output)
+
+ for cmd in cmds_list2:
+ formatted = cmd.format(i, i)
+ logger.info("input: " + formatted)
+ output = tgen.net["r2"].cmd(formatted)
+ logger.info("output: " + output)
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ _populate_iface()
+
+ for rname, router in router_list.items():
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ tgen.stop_topology()
+
+
+def test_labelpool_release():
+ """
+ Check that once we remove a BGP VPN session, the
+ label pool structure (allocated_map) is released properly
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Just waiting for BGP VPN session to converge
+ logger.info("Waiting for BGP VPN sessions to converge and label pools to get initialised")
+ router = tgen.gears["r1"]
+
+ def _bgp_converge():
+ output = json.loads(
+ router.vtysh_cmd("show bgp labelpool summary json")
+ )
+ expected = {"ledger":5,"inUse":5,"requests":0,"labelChunks":1,"pending":0,"reconnects":1}
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Failed to see BGP Labelpool initialised"
+
+
+ # checking the initial label pool chunk's free labels
+ logger.info("checking the initial label pool chunk's free labels")
+ expected = [{"first":80,"last":207,"size":128,"numberFree":123}]
+ test_func = functools.partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp label chunks json",
+ expected,
+ )
+
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+
+ # Test case: check whether the labels got released or not
+ logger.info(
+ "Remove multiple VPN sessions and check whether the labels got released"
+ )
+ router.vtysh_cmd(
+ """
+ configure terminal
+ no router bgp 65500 vrf vrf1
+ no router bgp 65500 vrf vrf2
+ """
+ )
+ expected = [{"first":80,"last":207,"size":128,"numberFree":125}]
+ test_func = functools.partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp label chunks json",
+ expected,
+ )
+
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
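
The expected counters in this test are plain label-pool arithmetic: the chunk 80..207 holds 128 labels, the five VRFs each hold one auto-allocated label (123 free), and deleting vrf1 and vrf2 returns two labels to the pool (125 free):

    first, last = 80, 207
    size = last - first + 1            # 128 labels in the chunk
    in_use = 5                         # one `label vpn export auto` per VRF
    assert size - in_use == 123        # numberFree before the VRFs are removed
    assert size - (in_use - 2) == 125  # numberFree after vrf1 and vrf2 are gone
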
diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in/__init__.py b/tests/topotests/bgp_vpnv4_import_allowas_in/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_import_allowas_in/__init__.py
diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in/r1/frr.conf b/tests/topotests/bgp_vpnv4_import_allowas_in/r1/frr.conf
new file mode 100644
index 0000000000..30d11627f5
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_import_allowas_in/r1/frr.conf
@@ -0,0 +1,30 @@
+!
+interface r1-eth0
+ ip address 192.168.179.4/24
+exit
+!
+router bgp 65001
+ bgp router-id 192.168.179.4
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.179.5 remote-as auto
+!
+ address-family ipv4 vpn
+ neighbor 192.168.179.5 activate
+ neighbor 192.168.179.5 next-hop-self
+ neighbor 192.168.179.5 allowas-in 1
+ exit-address-family
+!
+router bgp 65001 vrf CUSTOMER-A
+ bgp router-id 192.168.0.1
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+!
+ address-family ipv4 unicast
+ label vpn export auto
+ rd vpn export 100:1
+ rt vpn both 100:1
+ export vpn
+ import vpn
+ exit-address-family
+
diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in/r2/frr.conf b/tests/topotests/bgp_vpnv4_import_allowas_in/r2/frr.conf
new file mode 100644
index 0000000000..bbfd2c22f4
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_import_allowas_in/r2/frr.conf
@@ -0,0 +1,40 @@
+!
+interface lo
+ ip address 10.10.10.10/32
+!
+interface r2-eth0
+ ip address 192.168.179.5/24
+exit
+!
+interface r2-eth1
+ ip address 192.168.2.2/24
+exit
+!
+router bgp 65002
+ bgp router-id 192.168.179.5
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.179.4 remote-as auto
+!
+ address-family ipv4 vpn
+ neighbor 192.168.179.4 activate
+ neighbor 192.168.179.4 next-hop-self
+ exit-address-family
+!
+router bgp 65002 vrf CUSTOMER-A
+ bgp router-id 192.168.0.2
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+!
+ address-family ipv4 unicast
+ redistribute connected
+ network 10.10.10.10/32 route-map r1
+ label vpn export auto
+ rd vpn export 100:1
+ rt vpn both 100:1
+ export vpn
+ import vpn
+ exit-address-family
+!
+route-map r1 permit 10
+ set as-path prepend 65001
diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in/test_bgp_vpnv4_import_allowas_in.py b/tests/topotests/bgp_vpnv4_import_allowas_in/test_bgp_vpnv4_import_allowas_in.py
new file mode 100644
index 0000000000..f3d016cb17
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_import_allowas_in/test_bgp_vpnv4_import_allowas_in.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2024 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+
+ r1.run("ip link add CUSTOMER-A type vrf table 1001")
+ r1.run("ip link set up dev CUSTOMER-A")
+ r1.run("ip link set r1-eth1 master CUSTOMER-A")
+
+ r2.run("ip link add CUSTOMER-A type vrf table 1001")
+ r2.run("ip link set up dev CUSTOMER-A")
+ r2.run("ip link set r2-eth1 master CUSTOMER-A")
+
+ router_list = tgen.routers()
+
+ for _, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_issue_12502():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ def _bgp_converge():
+ output = json.loads(
+ r1.vtysh_cmd("show bgp vrf CUSTOMER-A ipv4 unicast 10.10.10.10/32 json")
+ )
+ expected = {
+ "paths": [
+ {
+ "importedFrom": "100:1",
+ "aspath": {
+ "string": "65002 65001",
+ },
+ "valid": True,
+ }
+ ]
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Failed to see 192.168.2.0/24 with a valid next-hop"
+
+ def _vrf_route_imported_to_zebra():
+ output = json.loads(
+ r1.vtysh_cmd("show ip route vrf CUSTOMER-A 10.10.10.10/32 json")
+ )
+ expected = {
+ "10.10.10.10/32": [
+ {
+ "protocol": "bgp",
+ "vrfName": "CUSTOMER-A",
+ "selected": True,
+ "installed": True,
+ "table": 1001,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "fib": True,
+ "ip": "192.168.179.5",
+ "afi": "ipv4",
+ "interfaceName": "r1-eth0",
+ "vrf": "default",
+ "active": True,
+ }
+ ],
+ }
+ ]
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_vrf_route_imported_to_zebra)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert (
+ result is None
+ ), "Failed to see 10.10.10.10/32 to be imported into default VRF (Zebra)"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
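
The imported path `65002 65001` contains the importing router's own AS 65001 (prepended by r2's route-map), so it would normally be rejected as an AS-path loop; `allowas-in 1` permits one such occurrence. A minimal sketch of that acceptance rule (illustrative only, not bgpd's implementation):

    def aspath_acceptable(aspath, local_as=65001, allowas_in=1):
        # Accept the path if our own AS appears at most `allowas_in` times.
        return aspath.count(local_as) <= allowas_in

    assert aspath_acceptable([65002, 65001])                    # allowed with allowas-in 1
    assert not aspath_acceptable([65002, 65001], allowas_in=0)  # without it: loop, rejected
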
diff --git a/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
index 6237decfc3..ee84e375fb 100644
--- a/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
+++ b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
@@ -140,6 +140,10 @@ def router_json_cmp_exact_filter(router, cmd, expected):
# filter out tableVersion, version and nhVrfID
json_output.pop("tableVersion")
+ if "totalRoutes" in json_output:
+ json_output.pop("totalRoutes")
+ if "totalPaths" in json_output:
+ json_output.pop("totalPaths")
for rd, data in json_output["routes"]["routeDistinguishers"].items():
for _, attrs in data.items():
for attr in attrs:
@@ -163,12 +167,18 @@ def router_vrf_json_cmp_exact_filter(router, cmd, expected):
json_output = json.loads(output)
+ print(json_output)
+
# filter out tableVersion, version, nhVrfId and vrfId
for vrf, data in json_output.items():
if "vrfId" in data:
data.pop("vrfId")
if "tableVersion" in data:
data.pop("tableVersion")
+ if "totalRoutes" in data:
+ data.pop("totalRoutes")
+ if "totalPaths" in data:
+ data.pop("totalPaths")
if "routes" not in data:
continue
for _, attrs in data["routes"].items():
@@ -203,7 +213,7 @@ def check_show_bgp_ipv4_vpn(rname, json_file):
"show bgp ipv4 vpn json",
expected,
)
- _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
@@ -224,7 +234,7 @@ def check_show_bgp_vrf_ipv4(rname, json_file):
"show bgp vrf all ipv4 unicast json",
expected,
)
- _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
@@ -248,7 +258,7 @@ def test_protocols_convergence_step0():
"show bgp ipv4 vpn summary json",
expected,
)
- _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
diff --git a/tests/topotests/bgp_vrf_netns/r1/bgpd.conf b/tests/topotests/bgp_vrf_netns/r1/bgpd.conf
index 572dce7455..2853a7a5ca 100644
--- a/tests/topotests/bgp_vrf_netns/r1/bgpd.conf
+++ b/tests/topotests/bgp_vrf_netns/r1/bgpd.conf
@@ -5,6 +5,7 @@ router bgp 100 vrf r1-bgp-cust1
no bgp ebgp-requires-policy
neighbor 10.0.1.101 remote-as 99
neighbor 10.0.1.101 timers 3 10
+ neighbor 10.0.1.101 timers connect 1
!
!
diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py
index 45868663a8..cb3104a522 100644
--- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py
+++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py
@@ -21,6 +21,8 @@ import sys
import time
import pytest
import platform
+import functools
+from lib import topotest
from copy import deepcopy
@@ -539,6 +541,16 @@ def test_RT_verification_auto_p0(request):
result = create_vrf_cfg(tgen, topo, input_dict=input_dict_vni)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ expected = {"numL3Vnis": 0}
+ test_func = functools.partial(
+ topotest.router_json_cmp,
+ tgen.gears["e1"],
+ "show bgp l2vpn evpn vni json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=5, wait=3)
+ assert result is None, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
input_dict_2 = {}
for dut in ["e1"]:
temp = {dut: {"bgp": []}}
diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
index beb4de432e..52181a75dc 100644
--- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
+++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
@@ -25,6 +25,8 @@ import sys
import time
import pytest
import platform
+import functools
+from lib import topotest
from copy import deepcopy
@@ -1124,7 +1126,6 @@ def test_active_standby_evpn_implementation_p1(request):
)
for addr_type in ADDR_TYPES:
-
logger.info("Verifying only ipv4 routes")
if addr_type != "ipv4":
continue
@@ -2050,6 +2051,18 @@ def test_bgp_attributes_for_evpn_address_family_p1(request, attribute):
tc_name, result
)
+ expected = {"numL3Vnis": 0}
+ test_func = functools.partial(
+ topotest.router_json_cmp,
+ tgen.gears["d1"],
+ "show bgp l2vpn evpn vni json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=5, wait=3)
+ assert result is None, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
input_dict_2 = {}
for dut in ["d1"]:
temp = {dut: {"bgp": []}}
diff --git a/tests/topotests/mgmt_config/r1/early-end-zebra.conf b/tests/topotests/mgmt_config/r1/early-end-zebra.conf
index 44a2f96825..926540f9bc 100644
--- a/tests/topotests/mgmt_config/r1/early-end-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/early-end-zebra.conf
@@ -1,6 +1,6 @@
allow-external-route-update
end
-ip multicast rpf-lookup-mode urib-only
+router-id 1.2.3.4
end
ip table range 2 3
end \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-end2-zebra.conf b/tests/topotests/mgmt_config/r1/early-end2-zebra.conf
index 37619d52ac..b8514f324f 100644
--- a/tests/topotests/mgmt_config/r1/early-end2-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/early-end2-zebra.conf
@@ -1,7 +1,7 @@
conf t
allow-external-route-update
end
-ip multicast rpf-lookup-mode urib-only
+router-id 1.2.3.4
end
ip table range 2 3
end \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-exit-zebra.conf b/tests/topotests/mgmt_config/r1/early-exit-zebra.conf
index 44f202dbcb..990351685b 100644
--- a/tests/topotests/mgmt_config/r1/early-exit-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/early-exit-zebra.conf
@@ -1,6 +1,6 @@
allow-external-route-update
exit
-ip multicast rpf-lookup-mode urib-only
+router-id 1.2.3.4
exit
ip table range 2 3
exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf b/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf
index c7109bfd39..5a783f4492 100644
--- a/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf
@@ -1,7 +1,7 @@
conf t
allow-external-route-update
exit
-ip multicast rpf-lookup-mode urib-only
+router-id 1.2.3.4
exit
ip table range 2 3
exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/one-exit-zebra.conf b/tests/topotests/mgmt_config/r1/one-exit-zebra.conf
index 0c38459702..c8396fec70 100644
--- a/tests/topotests/mgmt_config/r1/one-exit-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/one-exit-zebra.conf
@@ -1,3 +1,3 @@
allow-external-route-update
exit
-ip multicast rpf-lookup-mode urib-only
+router-id 1.2.3.4
diff --git a/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf b/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf
index 34acb76d92..3a50f6d136 100644
--- a/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf
@@ -1,4 +1,4 @@
conf t
allow-external-route-update
exit
-ip multicast rpf-lookup-mode urib-only \ No newline at end of file
+router-id 1.2.3.4 \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/test_config.py b/tests/topotests/mgmt_config/test_config.py
index 1d732223ff..627a564a66 100644
--- a/tests/topotests/mgmt_config/test_config.py
+++ b/tests/topotests/mgmt_config/test_config.py
@@ -153,7 +153,7 @@ def cleanup_config(r1, tempdir, logpath):
yield
r1.cmd_nostatus("vtysh -c 'conf t' -c 'no allow-external-route-update'")
- r1.cmd_nostatus("vtysh -c 'conf t' -c 'no ip multicast rpf-lookup-mode urib-only'")
+ r1.cmd_nostatus("vtysh -c 'conf t' -c 'no router-id 1.2.3.4'")
r1.cmd_nostatus("vtysh -c 'conf t' -c 'no ip table range 2 3'")
logbuf = save_log_snippet(logpath, logbuf, "/dev/null")
@@ -290,9 +290,7 @@ def test_zebra_one_exit_file(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" not in showrun
- ), "zebra second conf present, unexpected"
+ assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected"
def test_zebra_one_exit_redir(r1, confdir, tempdir, logpath):
@@ -307,9 +305,7 @@ def test_zebra_one_exit_redir(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" not in showrun
- ), "zebra second conf present, unexpected"
+ assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected"
def test_zebra_early_exit_file(r1, confdir, tempdir, logpath):
@@ -324,9 +320,7 @@ def test_zebra_early_exit_file(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" not in showrun
- ), "zebra second conf present, unexpected"
+ assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected"
assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected"
@@ -342,9 +336,7 @@ def test_zebra_early_exit_redir(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" not in showrun
- ), "zebra second conf present, unexpected"
+ assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected"
assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected"
@@ -360,9 +352,7 @@ def test_zebra_early_end_file(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" in showrun
- ), "zebra second conf missing"
+ assert "router-id 1.2.3.4" in showrun, "zebra second conf missing"
assert "ip table range 2 3" in showrun, "zebra third missing"
@@ -378,7 +368,5 @@ def test_zebra_early_end_redir(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" not in showrun
- ), "zebra second conf present, unexpected"
+ assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected"
assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected"
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json
index 948f4e6c23..da2d8e3625 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json
@@ -39,7 +39,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -68,7 +68,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -97,7 +97,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -126,7 +126,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -238,7 +238,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -250,7 +252,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -265,7 +267,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -277,7 +281,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -292,7 +296,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -304,7 +310,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -319,7 +325,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -331,7 +339,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -369,7 +377,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -398,7 +406,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -427,7 +435,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -456,7 +464,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -480,7 +488,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -492,7 +502,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -507,7 +517,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -519,7 +531,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -534,7 +546,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -546,7 +560,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -561,7 +575,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -573,7 +589,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -589,3 +605,4 @@
]
}
}
+
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json
index 30daecf16e..b4abdde465 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json
@@ -38,8 +38,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight":1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -67,8 +67,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight":1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -96,8 +96,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight":1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -125,8 +125,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -238,7 +238,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -249,8 +251,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight":1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -265,7 +267,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -276,8 +280,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight":1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -292,7 +296,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -303,8 +309,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -319,7 +325,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -330,8 +338,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -368,8 +376,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -397,8 +405,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -426,8 +434,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -455,8 +463,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -480,7 +488,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -491,8 +501,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -507,7 +517,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -518,8 +530,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -534,7 +546,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -545,8 +559,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -561,7 +575,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -572,8 +588,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -623,8 +639,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -652,8 +668,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -681,8 +697,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -710,8 +726,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -823,7 +839,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -834,8 +852,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -850,7 +868,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -861,8 +881,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -877,7 +897,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -888,8 +910,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -904,7 +926,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -915,8 +939,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -953,8 +977,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -982,8 +1006,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1011,8 +1035,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1040,8 +1064,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1065,7 +1089,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1076,8 +1102,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1092,7 +1118,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1103,8 +1131,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1119,7 +1147,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1130,8 +1160,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1146,7 +1176,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1157,8 +1189,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json
index cfabd49c45..5d61b9865f 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json
@@ -38,7 +38,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -66,7 +67,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -94,7 +96,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -122,7 +125,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -234,7 +238,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -245,7 +251,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -260,7 +267,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -271,7 +280,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -286,7 +296,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -297,7 +309,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -312,7 +325,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -323,7 +338,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -360,7 +376,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -388,7 +405,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -416,7 +434,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -444,7 +463,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -468,7 +488,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -479,7 +501,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -494,7 +517,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -505,7 +530,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -520,7 +546,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -531,7 +559,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -546,7 +575,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -557,7 +588,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json
index b1124bd7bb..86e67a9e23 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json
@@ -35,7 +35,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -64,7 +64,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -93,7 +93,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -122,7 +122,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -234,7 +234,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -246,7 +248,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -261,7 +263,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -273,7 +277,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -288,7 +292,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -300,7 +306,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -315,7 +321,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -327,7 +335,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -365,7 +373,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -394,7 +402,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -423,7 +431,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -452,7 +460,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -476,7 +484,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -488,7 +498,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -503,7 +513,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -515,7 +527,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -530,7 +542,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -542,7 +556,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -557,7 +571,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -569,7 +585,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json
index 70c8798b31..86e67a9e23 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json
@@ -234,7 +234,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -261,7 +263,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -288,7 +292,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -315,7 +321,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -476,7 +484,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -503,7 +513,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -530,7 +542,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -557,7 +571,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib.json b/tests/topotests/mgmt_oper/oper-results/result-lib.json
index 0b2a9fa427..b4abdde465 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib.json
@@ -39,7 +39,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -68,7 +68,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -97,7 +97,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -126,7 +126,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -238,7 +238,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -250,7 +252,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -265,7 +267,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -277,7 +281,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -292,7 +296,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -304,7 +310,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -319,7 +325,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -331,7 +339,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -369,7 +377,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -398,7 +406,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -427,7 +435,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -456,7 +464,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -480,7 +488,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -492,7 +502,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -507,7 +517,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -519,7 +531,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -534,7 +546,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -546,7 +560,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -561,7 +575,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -573,7 +589,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -624,7 +640,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -653,7 +669,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -682,7 +698,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -711,7 +727,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -823,7 +839,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -835,7 +853,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -850,7 +868,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -862,7 +882,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -877,7 +897,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -889,7 +911,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -904,7 +926,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -916,7 +940,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -954,7 +978,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -983,7 +1007,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1012,7 +1036,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1041,7 +1065,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1065,7 +1089,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1077,7 +1103,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1092,7 +1118,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1104,7 +1132,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1119,7 +1147,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1131,7 +1161,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1146,7 +1176,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1158,7 +1190,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json
index 769c1f73a5..e313a158a3 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json
@@ -35,7 +35,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -64,7 +64,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -93,7 +93,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -122,7 +122,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json
index c740f592f7..86e67a9e23 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json
@@ -35,7 +35,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -64,7 +64,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -93,7 +93,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -122,7 +122,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -234,7 +234,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -246,7 +248,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -261,7 +263,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -273,7 +277,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -288,7 +292,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -300,7 +306,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -315,7 +321,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -327,7 +335,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -365,7 +373,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -394,7 +402,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -423,7 +431,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -476,7 +484,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -488,7 +498,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -503,7 +513,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -515,7 +527,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight": 1
+ "weight": 1
}
]
}
@@ -530,7 +542,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -542,7 +556,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -557,7 +571,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -569,7 +585,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-route-nokey.json b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-route-nokey.json
new file mode 100644
index 0000000000..e313a158a3
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-route-nokey.json
@@ -0,0 +1,229 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "12.12.12.12/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "2.2.2.2",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/oper.py b/tests/topotests/mgmt_oper/oper.py
index f54e64ae18..bca452d011 100644
--- a/tests/topotests/mgmt_oper/oper.py
+++ b/tests/topotests/mgmt_oper/oper.py
@@ -77,7 +77,13 @@ def _do_oper_test(tgen, qr, seconds_left=None):
# Don't use this for now.
dd_json_cmp = None
- expected = open(qr[1], encoding="ascii").read()
+ if isinstance(qr[1], str):
+ expected = open(qr[1], encoding="ascii").read()
+ expected_alt = None
+ else:
+ expected = open(qr[1][0], encoding="ascii").read()
+ expected_alt = open(qr[1][1], encoding="ascii").read()
+
output = r1.cmd_nostatus(qcmd.format(qr[0], qr[2] if len(qr) > 2 else ""))
diag = logging.debug if seconds_left else logging.warning
@@ -90,6 +96,7 @@ def _do_oper_test(tgen, qr, seconds_left=None):
try:
ejson = json.loads(expected)
+ ejson_alt = json.loads(expected_alt) if expected_alt is not None else None
except json.decoder.JSONDecodeError as error:
logging.error(
"Error decoding json exp result: %s\noutput:\n%s", error, expected
@@ -99,6 +106,8 @@ def _do_oper_test(tgen, qr, seconds_left=None):
if dd_json_cmp:
cmpout = json_cmp(ojson, ejson, exact_match=True)
+ if cmpout and ejson_alt is not None:
+ cmpout = json_cmp(ojson, ejson_alt, exact_match=True)
if cmpout:
diag(
"-------DIFF---------\n%s\n---------DIFF----------",
@@ -106,6 +115,8 @@ def _do_oper_test(tgen, qr, seconds_left=None):
)
else:
cmpout = tt_json_cmp(ojson, ejson, exact=True)
+ if cmpout and ejson_alt is not None:
+ cmpout = tt_json_cmp(ojson, ejson_alt, exact=True)
if cmpout:
diag(
"-------EXPECT--------\n%s\n------END-EXPECT------",
@@ -118,6 +129,7 @@ def _do_oper_test(tgen, qr, seconds_left=None):
diag("----diff---\n{}".format(cmpout))
diag("Command: {}".format(qcmd.format(qr[0], qr[2] if len(qr) > 2 else "")))
diag("File: {}".format(qr[1]))
+ cmpout = str(cmpout)
return cmpout
@@ -127,7 +139,8 @@ def do_oper_test(tgen, query_results):
step(f"Perform query '{qr[0]}'", reset=reset)
if reset:
reset = False
- _do_oper_test(tgen, qr)
+ ret = _do_oper_test(tgen, qr)
+ assert ret is None, "Unexpected diff: " + str(ret)
def get_ip_networks(super_prefix, count):
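The oper.py change above lets a query's expected-results entry be either a single filename or a (primary, alternate) pair; the alternate is consulted only when the primary comparison fails, and do_oper_test() now asserts on the returned diff instead of silently discarding it. Stripped of the file handling, the comparison reduces to this fallback pattern, with compare() standing in for json_cmp/tt_json_cmp (both return None on a match):

    def compare_with_fallback(output, expected, expected_alt, compare):
        # Try the primary expectation first and fall back to the
        # alternate only on a mismatch; None means one of them matched.
        cmpout = compare(output, expected)
        if cmpout and expected_alt is not None:
            cmpout = compare(output, expected_alt)
        return cmpout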
diff --git a/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim-empty-label.json b/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim-empty-label.json
new file mode 100644
index 0000000000..efd7e8c684
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim-empty-label.json
@@ -0,0 +1,3 @@
+{
+ "frr-zebra:evpn-mh": {}
+}
diff --git a/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim.json b/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim.json
index efd7e8c684..2c63c08510 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim.json
@@ -1,3 +1,2 @@
{
- "frr-zebra:evpn-mh": {}
}
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json
index f85b163bd6..19295870d5 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json
@@ -121,7 +121,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -148,7 +150,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json
index e2cfec9724..f0bde048f2 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json
@@ -121,7 +121,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -148,7 +150,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -282,7 +286,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -309,7 +315,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json
index 3567f35a34..8b632bac66 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json
@@ -92,7 +92,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -119,7 +121,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json
index d9ca58d25d..678a80ab97 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json
@@ -117,7 +117,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -144,7 +146,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json
index d9ca58d25d..678a80ab97 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json
@@ -117,7 +117,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -144,7 +146,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib.json b/tests/topotests/mgmt_oper/simple-results/result-lib.json
index e2cfec9724..f0bde048f2 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib.json
@@ -121,7 +121,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -148,7 +150,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -282,7 +286,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -309,7 +315,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json
index d9ca58d25d..678a80ab97 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json
@@ -117,7 +117,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -144,7 +146,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/test_oper.py b/tests/topotests/mgmt_oper/test_oper.py
index e4ceabf352..23529bc75e 100644
--- a/tests/topotests/mgmt_oper/test_oper.py
+++ b/tests/topotests/mgmt_oper/test_oper.py
@@ -107,6 +107,7 @@ vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ri
for f in ${resdir}/result-*; do
sed -i -e 's/"uptime": ".*"/"uptime": "rubout"/;s/"id": [0-9][0-9]*/"id": "rubout"/' $f
+ sed -i -e 's/"phy-address": ".*"/"phy-address": "rubout"/' $f
sed -i -e 's/"if-index": [0-9][0-9]*/"if-index": "rubout"/' $f
sed -i -e 's,"vrf": "[0-9]*","vrf": "rubout",' $f
done
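The new sed line extends the "rubout" normalization to "phy-address". In Python terms the docstring recipe amounts to something like the sketch below; it mirrors the sed expressions only and is not code the test runs (the fixtures are pretty-printed one field per line, which is what keeps the greedy patterns safe):

    import re

    def rubout(text):
        """Blank out volatile fields so captured output can be diffed
        against the checked-in expected JSON."""
        text = re.sub(r'"uptime": ".*"', '"uptime": "rubout"', text)
        text = re.sub(r'"phy-address": ".*"', '"phy-address": "rubout"', text)
        text = re.sub(r'"id": [0-9]+', '"id": "rubout"', text)
        text = re.sub(r'"if-index": [0-9]+', '"if-index": "rubout"', text)
        text = re.sub(r'"vrf": "[0-9]*"', '"vrf": "rubout"', text)
        return text

    print(rubout('"uptime": "00:04:07",\n"id": 21,\n"vrf": "0"'))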
diff --git a/tests/topotests/mgmt_oper/test_simple.py b/tests/topotests/mgmt_oper/test_simple.py
index 2b3d6ff6a5..237f7d57d5 100644
--- a/tests/topotests/mgmt_oper/test_simple.py
+++ b/tests/topotests/mgmt_oper/test_simple.py
@@ -154,7 +154,11 @@ def test_oper_simple(tgen):
),
(
'/frr-interface:lib/interface[name="r1-eth0"]/frr-zebra:zebra/evpn-mh',
- "simple-results/result-intf-eth0-wd-trim.json",
+ (
+ # Output is different between libyang2 and libyang3+
+ "simple-results/result-intf-eth0-wd-trim.json",
+ "simple-results/result-intf-eth0-wd-trim-empty-label.json",
+ ),
"with-config exact with-defaults trim",
),
(
@@ -181,7 +185,7 @@ vtysh -c 'show mgmt get-data /frr-vrf:lib' > ${resdir}/result-lib.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf' > ${resdir}/result-lib-vrf-nokey.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]' > ${resdir}/result-lib-vrf-default.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="red"]' > ${resdir}/result-lib-vrf-red.json
-vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra' > ${resdir}/result-lib-vrf-ebra.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra' > ${resdir}/result-lib-vrf-zebra.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs' > ${resdir}/result-lib-vrf-zebra-ribs.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib' > ${resdir}/result-ribs-rib-nokeys.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]' > ${resdir}/result-ribs-rib-ipv4-unicast.json
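Two independent fixes ride along in test_simple.py: the evpn-mh query now lists a pair of expected files because libyang2 and libyang3+ render the empty container differently (hence the new result-intf-eth0-wd-trim-empty-label.json fixture above), and the regeneration recipe in the docstring now writes result-lib-vrf-zebra.json rather than the misspelled result-lib-vrf-ebra.json.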
diff --git a/tests/topotests/msdp_topo1/test_msdp_topo1.py b/tests/topotests/msdp_topo1/test_msdp_topo1.py
index 8c25eeca06..5143ef67a5 100755
--- a/tests/topotests/msdp_topo1/test_msdp_topo1.py
+++ b/tests/topotests/msdp_topo1/test_msdp_topo1.py
@@ -511,6 +511,44 @@ def test_msdp_sa_filter():
assert val is None, "multicast route convergence failure"
+def test_msdp_sa_limit():
+ "Test MSDP SA limiting."
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["r4"].vtysh_cmd(
+ """
+ configure terminal
+ router pim
+ msdp log sa-events
+ msdp peer 192.168.2.1 sa-limit 4
+ msdp peer 192.168.3.1 sa-limit 4
+ """
+ )
+
+ # Flow from r1 -> r4
+ for multicast_address in [
+ "229.1.2.10",
+ "229.1.2.11",
+ "229.1.2.12",
+ "229.1.2.13",
+ "229.1.2.14",
+ ]:
+ app_helper.run("h1", [multicast_address, "h1-eth0"])
+ app_helper.run("h2", ["--send=0.7", multicast_address, "h2-eth0"])
+
+    def test_sa_limit_log():
+        r4_log = tgen.gears["r4"].net.getLog("log", "pimd")
+        if re.search(r"MSDP peer .+ reject SA (.+, .+): SA limit \d+ of 4", r4_log) is None:
+            return "SA limit log not found"
+        return None
+
+ _, val = topotest.run_and_expect(test_sa_limit_log, None, count=30, wait=1)
+ assert val is None, "SA limit check failed"
+
+
def test_msdp_log_events():
"Test that the enabled logs are working as expected."
diff --git a/tests/topotests/msdp_topo3/__init__.py b/tests/topotests/msdp_topo3/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/msdp_topo3/__init__.py
diff --git a/tests/topotests/msdp_topo3/r1/frr.conf b/tests/topotests/msdp_topo3/r1/frr.conf
new file mode 100644
index 0000000000..d5b10bf8a1
--- /dev/null
+++ b/tests/topotests/msdp_topo3/r1/frr.conf
@@ -0,0 +1,31 @@
+log commands
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+ ip pim
+!
+interface r1-eth1
+ ip address 192.168.100.1/24
+ ip igmp
+ ip pim passive
+!
+interface lo
+ ip address 10.254.254.1/32
+ ip pim
+ ip pim use-source 10.254.254.1
+!
+router bgp 65100
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.1.2 remote-as 65200
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
+router pim
+ msdp originator-id 10.254.254.1
+ msdp log sa-events
+ msdp peer 192.168.1.2 source 192.168.1.1
+ rp 192.168.1.1
+!
\ No newline at end of file
diff --git a/tests/topotests/msdp_topo3/r2/frr.conf b/tests/topotests/msdp_topo3/r2/frr.conf
new file mode 100644
index 0000000000..245c061874
--- /dev/null
+++ b/tests/topotests/msdp_topo3/r2/frr.conf
@@ -0,0 +1,28 @@
+log commands
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+ ip pim
+!
+interface r2-eth1
+ ip address 192.168.101.1/24
+ ip igmp
+ ip pim passive
+!
+interface lo
+ ip address 10.254.254.2/32
+!
+router bgp 65200
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.1.1 remote-as 65100
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
+router pim
+ msdp log sa-events
+ msdp peer 192.168.1.1 source 192.168.1.2
+ rp 192.168.1.2
+!
\ No newline at end of file
diff --git a/tests/topotests/msdp_topo3/test_msdp_topo3.py b/tests/topotests/msdp_topo3/test_msdp_topo3.py
new file mode 100644
index 0000000000..9393ae7ffd
--- /dev/null
+++ b/tests/topotests/msdp_topo3/test_msdp_topo3.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_msdp_topo3.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2024 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+test_msdp_topo3.py: Test the FRR PIM MSDP peer.
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import re
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+
+# Required to instantiate the topology builder class.
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+from lib.pim import McastTesterHelper
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
+
+app_helper = McastTesterHelper()
+
+
+def build_topo(tgen):
+ """
+ +----+ +----+ +----+ +----+
+ | h1 | <-> | r1 | <-> | r2 | <-> | h2 |
+ +----+ +----+ +----+ +----+
+
+ -------------------------->
+
+ Multicast traffic SG(192.168.100.100, 229.1.1.1)
+ """
+
+ # Create 2 routers
+ for routern in range(1, 3):
+ tgen.add_router(f"r{routern}")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+    # Create a host connected directly to r1:
+ switch = tgen.add_switch("s2")
+ tgen.add_host("h1", "192.168.100.100/24", "via 192.168.100.1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["h1"])
+
+    # Create a host connected directly to r2:
+ switch = tgen.add_switch("s3")
+ tgen.add_host("h2", "192.168.101.100/24", "via 192.168.101.1")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["h2"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for _, router in router_list.items():
+ router.load_frr_config(os.path.join(CWD, f"{router.name}/frr.conf"))
+
+ # Initialize all routers.
+ tgen.start_router()
+
+ app_helper.init(tgen)
+
+
+def teardown_module():
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ app_helper.cleanup()
+ tgen.stop_topology()
+
+
+def test_bgp_convergence():
+ "Wait for BGP protocol convergence"
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("waiting for protocols to converge")
+
+ def expect_loopback_route(router, iptype, route, proto):
+ "Wait until route is present on RIB for protocol."
+ logger.info("waiting route {} in {}".format(route, router))
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[router],
+ "show {} route json".format(iptype),
+ {route: [{"protocol": proto}]},
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
+ assertmsg = '"{}" convergence failure'.format(router)
+ assert result is None, assertmsg
+
+ # Wait for R1
+ expect_loopback_route("r1", "ip", "10.254.254.2/32", "bgp")
+
+ # Wait for R2
+ expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp")
+
+
+def test_sa_learn():
+ """
+ Test that the learned SA uses the configured originator ID instead
+ of the configured RP.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ MCAST_ADDRESS = "229.1.1.1"
+ app_helper.run("h1", ["--send=0.7", MCAST_ADDRESS, "h1-eth0"])
+ app_helper.run("h2", [MCAST_ADDRESS, "h2-eth0"])
+
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears["r2"],
+ "show ip msdp sa json",
+ {
+ "229.1.1.1": {
+ "192.168.100.100": {
+ "rp": "10.254.254.1",
+ "local": "no",
+ }
+ }
+ }
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=100, wait=1)
+ assert result is None, 'r2 SA convergence failure'
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
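msdp_topo3 is deliberately small: two PIM/MSDP routers, one sender host and one receiver host. Its functional check, test_sa_learn(), verifies that the SA learned on r2 carries r1's configured originator-id (10.254.254.1) rather than r1's RP address (192.168.1.1). The expected dict handed to router_json_cmp pins only the fields under test; in spirit the comparison is a recursive subset match, sketched here (the idea only, not FRR's actual json_cmp):

    def subset_match(actual, expected):
        # Every key pinned in expected must be present in actual with a
        # matching value; extra keys in actual are ignored.
        if isinstance(expected, dict):
            return isinstance(actual, dict) and all(
                k in actual and subset_match(actual[k], v)
                for k, v in expected.items()
            )
        return actual == expected

    live = {"229.1.1.1": {"192.168.100.100": {
        "rp": "10.254.254.1", "local": "no", "uptime": "00:01:02"}}}
    want = {"229.1.1.1": {"192.168.100.100": {
        "rp": "10.254.254.1", "local": "no"}}}
    print(subset_match(live, want))  # True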
diff --git a/tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json b/tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json
new file mode 100644
index 0000000000..628a556c62
--- /dev/null
+++ b/tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json
@@ -0,0 +1,50 @@
+{
+ "10.48.48.0/24":[
+ {
+ "prefix":"10.48.48.0/24",
+ "prefixLen":24,
+ "protocol":"ospf",
+ "vrfId":0,
+ "vrfName":"default",
+ "distance":20,
+ "metric":134,
+ "table":254,
+ "nexthops":[
+ {
+ "flags":3,
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"r1-eth0",
+ "active":true,
+ "weight":1
+ }
+ ]
+ },
+ {
+ "prefix":"10.48.48.0/24",
+ "prefixLen":24,
+ "protocol":"bgp",
+ "vrfId":0,
+ "vrfName":"default",
+ "selected":true,
+ "destSelected":true,
+ "distance":20,
+ "metric":34,
+ "installed":true,
+ "table":254,
+ "nexthops":[
+ {
+ "flags":3,
+ "fib":true,
+ "ip":"10.0.10.5",
+ "afi":"ipv4",
+ "interfaceName":"r1-eth1",
+ "vrf":"blue",
+ "active":true,
+ "weight":1
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tests/topotests/ospf_metric_propagation/r4/frr.conf b/tests/topotests/ospf_metric_propagation/r4/frr.conf
index b02ae18fc1..d9832d80b8 100644
--- a/tests/topotests/ospf_metric_propagation/r4/frr.conf
+++ b/tests/topotests/ospf_metric_propagation/r4/frr.conf
@@ -1,6 +1,10 @@
!
hostname r4
!
+vrf green
+ ip route 10.48.48.0/24 10.0.94.2
+exit
+
interface r4-eth0
ip address 10.0.3.4/24
ip ospf cost 100
@@ -59,6 +63,7 @@ router bgp 99 vrf green
address-family ipv4 unicast
redistribute connected
redistribute ospf
+ redistribute static
import vrf route-map rmap
import vrf default
import vrf blue
diff --git a/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py b/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py
index b97b86bff9..4639a1e26b 100644
--- a/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py
+++ b/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py
@@ -190,6 +190,25 @@ def test_all_links_up():
assert result is None, assertmsg
+def test_static_remote():
+ "Test static route at R1 configured on R4"
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ r1 = tgen.gears["r1"]
+ json_file = "{}/r1/show_ip_route_static.json".format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route 10.48.48.2 json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+
+ assertmsg = "r1 JSON output mismatches"
+ assert result is None, assertmsg
+
+
def test_link_1_down():
"Test path R1 -> R2 -> Ra -> Rb -> R4"
tgen = get_topogen()
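test_static_remote() ties the pieces above together: the static route for 10.48.48.0/24 configured in vrf green on r4 is redistributed into BGP there and leaked toward r1, so r1 ends up with two route-entries for the prefix, one from OSPF and one from BGP, of which only the BGP entry is selected and installed. Loading the new fixture makes that expectation explicit (a sketch; the path is relative to the test directory):

    import json

    entries = json.load(open("r1/show_ip_route_static.json"))["10.48.48.0/24"]
    print([e["protocol"] for e in entries if e.get("selected")])  # ['bgp']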
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
index 131085a47a..e4787be3c9 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
@@ -1,3 +1,4 @@
+IPv4 unicast VRF default:
O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX
C>* 10.0.1.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX
L>* 10.0.1.1/32 is directly connected, r1-eth0, weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt
index 45ee1071d4..2f893c3d96 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt
@@ -1,4 +1,4 @@
-VRF neno:
+IPv4 unicast VRF neno:
O>* 10.0.3.0/24 [110/20] via 10.0.30.3, r1-eth2, weight 1, XX:XX:XX
B>* 10.0.4.0/24 [110/20] via 10.0.20.2, r1-eth1 (vrf default), weight 1, XX:XX:XX
O 10.0.30.0/24 [110/10] is directly connected, r1-eth2, weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
index f3724bbb9f..07ec7226fa 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
@@ -1,3 +1,4 @@
+IPv4 unicast VRF default:
S>* 0.0.0.0/0 [1/0] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX
O>* 10.0.1.0/24 [110/20] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX
O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
index 0f8b12bdfa..f409034b80 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
@@ -1,4 +1,4 @@
-VRF ray:
+IPv4 unicast VRF ray:
B 10.0.1.0/24 [110/20] via 10.0.20.1, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX
B 10.0.2.0/24 [20/0] is directly connected, r2-eth0 (vrf default) inactive, weight 1, XX:XX:XX
B>* 10.0.3.0/24 [110/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt
index db4e268cb0..2af9d2460d 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt
@@ -1,3 +1,4 @@
+IPv4 unicast VRF default:
O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r3-eth0, weight 1, XX:XX:XX
L>* 10.0.3.3/32 is directly connected, r3-eth0, weight 1, XX:XX:XX
@@ -6,5 +7,3 @@ O 10.0.30.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX
C>* 10.0.30.0/24 is directly connected, r3-eth1, weight 1, XX:XX:XX
L>* 10.0.30.3/32 is directly connected, r3-eth1, weight 1, XX:XX:XX
O>* 10.0.40.0/24 [110/20] via 10.0.30.1, r3-eth1, weight 1, XX:XX:XX
-
-
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt
index 4865708578..013073795b 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt
@@ -1,3 +1,4 @@
+IPv4 unicast VRF default:
O>* 10.0.3.0/24 [110/20] via 10.0.40.2, r4-eth1, weight 1, XX:XX:XX
O 10.0.4.0/24 [110/10] is directly connected, r4-eth0, weight 1, XX:XX:XX
C>* 10.0.4.0/24 is directly connected, r4-eth0, weight 1, XX:XX:XX
@@ -6,4 +7,3 @@ O>* 10.0.30.0/24 [110/20] via 10.0.40.2, r4-eth1, weight 1, XX:XX:XX
O 10.0.40.0/24 [110/10] is directly connected, r4-eth1, weight 1, XX:XX:XX
C>* 10.0.40.0/24 is directly connected, r4-eth1, weight 1, XX:XX:XX
L>* 10.0.40.4/32 is directly connected, r4-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt b/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt
index 68fd30d4cc..82cc2d9136 100644
--- a/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt
+++ b/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt
@@ -1,4 +1,4 @@
-VRF r1-ospf-cust1:
+IPv4 unicast VRF r1-ospf-cust1:
O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX
C>* 10.0.1.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX
L>* 10.0.1.1/32 is directly connected, r1-eth0, weight 1, XX:XX:XX
@@ -7,4 +7,3 @@ O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r1-eth1, weight 1, XX:XX:XX
L>* 10.0.3.2/32 is directly connected, r1-eth1, weight 1, XX:XX:XX
O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r1-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt b/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt
index f0bce905b1..d6ad2a2500 100644
--- a/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt
+++ b/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt
@@ -1,4 +1,4 @@
-VRF r1-ospf-cust1:
+IPv4 unicast VRF r1-ospf-cust1:
O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX
C>* 10.0.1.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX
L>* 10.0.1.1/32 is directly connected, r1-eth0, weight 1, XX:XX:XX
@@ -6,4 +6,3 @@ O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, weight 1, XX:XX:XX
O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r1-eth1, weight 1, XX:XX:XX
L>* 10.0.3.2/32 is directly connected, r1-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt b/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt
index 098eceb28b..effcbc4634 100644
--- a/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt
+++ b/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt
@@ -1,4 +1,4 @@
-VRF r2-ospf-cust1:
+IPv4 unicast VRF r2-ospf-cust1:
O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX
O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX
C>* 10.0.2.0/24 is directly connected, r2-eth0, weight 1, XX:XX:XX
@@ -7,4 +7,3 @@ O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r2-eth1, weight 1, XX:XX:XX
L>* 10.0.3.3/32 is directly connected, r2-eth1, weight 1, XX:XX:XX
O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r2-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt b/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt
index a9300f8dfa..7321b184a3 100644
--- a/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt
+++ b/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt
@@ -1,4 +1,4 @@
-VRF r2-ospf-cust1:
+IPv4 unicast VRF r2-ospf-cust1:
O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX
O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX
C>* 10.0.2.0/24 is directly connected, r2-eth0, weight 1, XX:XX:XX
@@ -6,4 +6,3 @@ L>* 10.0.2.1/32 is directly connected, r2-eth0, weight 1, XX:XX:XX
O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r2-eth1, weight 1, XX:XX:XX
L>* 10.0.3.3/32 is directly connected, r2-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt b/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt
index f58beb81a7..3fea04bd19 100644
--- a/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt
+++ b/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt
@@ -1,4 +1,4 @@
-VRF r3-ospf-cust1:
+IPv4 unicast VRF r3-ospf-cust1:
O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r3-eth0, weight 1, XX:XX:XX
O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r3-eth0, weight 1, XX:XX:XX
O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, weight 1, XX:XX:XX
@@ -7,4 +7,3 @@ L>* 10.0.3.1/32 is directly connected, r3-eth0, weight 1, XX:XX:XX
O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX
C>* 10.0.10.0/24 is directly connected, r3-eth1, weight 1, XX:XX:XX
L>* 10.0.10.1/32 is directly connected, r3-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt b/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt
index cfedf8fcb4..3287355ce0 100644
--- a/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt
+++ b/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt
@@ -1,5 +1,4 @@
-VRF r3-ospf-cust1:
+IPv4 unicast VRF r3-ospf-cust1:
O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX
C>* 10.0.10.0/24 is directly connected, r3-eth1, weight 1, XX:XX:XX
L>* 10.0.10.1/32 is directly connected, r3-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/pim_boundary_acl/r1/frr.conf b/tests/topotests/pim_boundary_acl/r1/frr.conf
new file mode 100644
index 0000000000..cc639b304b
--- /dev/null
+++ b/tests/topotests/pim_boundary_acl/r1/frr.conf
@@ -0,0 +1,39 @@
+hostname r1
+!
+!debug pim events
+!debug igmp events
+!debug igmp packets
+!
+ip prefix-list pim-oil-plist seq 10 deny 229.1.1.0/24
+ip prefix-list pim-oil-plist seq 20 permit any
+!
+access-list pim-acl seq 10 deny ip host 10.0.20.2 232.1.1.0 0.0.0.255
+access-list pim-acl seq 20 permit ip any any
+!
+interface r1-eth0
+ ip address 10.0.20.1/24
+ ip igmp
+ ip pim
+!
+interface r1-eth1
+ ip address 10.0.30.1/24
+ ip pim
+!
+interface r1-eth2
+ ip address 10.0.40.1/24
+ ip igmp
+ ip pim
+!
+interface lo
+ ip address 10.254.0.1/32
+ ip pim
+!
+router pim
+ rp 10.254.0.3
+ join-prune-interval 5
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 10.0.30.3 remote-as external
+ neighbor 10.0.30.3 timers 3 10
+ redistribute connected
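The two filters defined above give the test both boundary flavors: the prefix-list filters on group only (denying 229.1.1.0/24), while the extended access-list filters on source and group (denying source 10.0.20.2 toward 232.1.1.0/24 via the 0.0.0.255 wildcard). A minimal sketch of how the test body later attaches each filter to an interface; applying both on one interface here is purely illustrative:

    interface r1-eth0
     ! group-based boundary, driven by the prefix-list
     ip multicast boundary oil pim-oil-plist
     ! (S,G)-based boundary, driven by the extended access-list
     ip multicast boundary pim-acl
    !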
diff --git a/tests/topotests/pim_boundary_acl/r2/frr.conf b/tests/topotests/pim_boundary_acl/r2/frr.conf
new file mode 100644
index 0000000000..10ace947b2
--- /dev/null
+++ b/tests/topotests/pim_boundary_acl/r2/frr.conf
@@ -0,0 +1,19 @@
+hostname r2
+!
+!debug pim events
+!debug igmp events
+!debug igmp packets
+!
+ip prefix-list pim-oil-plist seq 10 deny 229.1.1.0/24
+ip prefix-list pim-oil-plist seq 20 permit any
+!
+access-list pim-acl seq 10 deny ip host 10.0.20.2 232.1.1.0 0.0.0.255
+access-list pim-acl seq 20 permit ip any any
+!
+interface r2-eth0
+ ip address 10.0.20.2/24
+ ip pim
+!
+interface lo
+ ip address 10.254.0.2/32
+!
diff --git a/tests/topotests/pim_boundary_acl/r3/frr.conf b/tests/topotests/pim_boundary_acl/r3/frr.conf
new file mode 100644
index 0000000000..9720774266
--- /dev/null
+++ b/tests/topotests/pim_boundary_acl/r3/frr.conf
@@ -0,0 +1,13 @@
+hostname r3
+!
+!debug pim events
+!debug igmp events
+!debug igmp packets
+!
+interface r3-eth0
+ ip address 10.0.40.4/24
+ ip pim
+!
+interface lo
+ ip address 10.254.0.4/32
+!
diff --git a/tests/topotests/pim_boundary_acl/rp/frr.conf b/tests/topotests/pim_boundary_acl/rp/frr.conf
new file mode 100644
index 0000000000..f6eed23917
--- /dev/null
+++ b/tests/topotests/pim_boundary_acl/rp/frr.conf
@@ -0,0 +1,22 @@
+hostname rp
+!
+interface rp-eth0
+ ip address 10.0.30.3/24
+ ip pim
+!
+interface lo
+ ip address 10.254.0.3/32
+ ip pim
+!
+router pim
+ rp 10.254.0.3
+ join-prune-interval 5
+ register-accept-list ACCEPT
+!
+ip prefix-list ACCEPT seq 5 permit 10.0.20.0/24 le 32
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 10.0.30.1 remote-as external
+ neighbor 10.0.30.1 timers 3 10
+ redistribute connected \ No newline at end of file
diff --git a/tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py b/tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py
new file mode 100644
index 0000000000..1488e610c8
--- /dev/null
+++ b/tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py
@@ -0,0 +1,523 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_pim_boundary_acl.py
+#
+# Copyright (c) 2024 Architecture Technology Corporation
+# Corey Siltala
+#
+
+"""
+test_pim_boundary_acl.py: Test multicast boundary commands (access-lists and prefix-lists)
+"""
+
+import os
+import sys
+import pytest
+import json
+from functools import partial
+
+pytestmark = [pytest.mark.pimd]
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+ASM_GROUP="229.1.1.1"
+SSM_GROUP="232.1.1.1"
+
+def build_topo(tgen):
+ "Build function"
+
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
+
+ tgen.add_router("rp")
+
+ # rp ------ r1 -------- r2
+ # \
+ # --------- r3
+ # r1 -> .1
+ # r2 -> .2
+ # rp -> .3
+ # r3 -> .4
+ # loopback network is 10.254.0.X/32
+ #
+ # r1 <- sw1 -> r2
+ # r1-eth0 <-> r2-eth0
+ # 10.0.20.0/24
+ sw = tgen.add_switch("sw1")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r2"])
+
+ # r1 <- sw2 -> rp
+ # r1-eth1 <-> rp-eth0
+ # 10.0.30.0/24
+ sw = tgen.add_switch("sw2")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["rp"])
+
+ # r1 <- sw3 -> r3
+ # r1-eth2 <-> r3-eth0
+ # 10.0.40.0/24
+ sw = tgen.add_switch("sw3")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ # For all registered routers, load the zebra configuration file
+ for rname, router in tgen.routers().items():
+ logger.info("Loading router %s" % rname)
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ # After loading the configurations, this function loads configured daemons.
+ tgen.start_router()
+ # tgen.mininet_cli()
+
+
+def teardown_module():
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+def test_pim_rp_setup():
+ "Ensure basic routing has come up and the rp has an outgoing interface"
+ # Ensure rp and r1 establish pim neighborship and bgp has come up
+ # Finally ensure that the rp has an outgoing interface on r1
+ tgen = get_topogen()
+
+ r1 = tgen.gears["r1"]
+ expected = {
+ "10.254.0.3":[
+ {
+ "outboundInterface":"r1-eth1",
+ "group":"224.0.0.0/4",
+ "source":"Static"
+ }
+ ]
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip pim rp-info json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assertmsg = '"{}" JSON output mismatches'.format(r1.name)
+ assert result is None, assertmsg
+ # tgen.mininet_cli()
+
+
+def test_pim_asm_igmp_join_acl():
+ "Test ASM IGMP joins with prefix-list ACLs"
+ logger.info("Send IGMP joins from r2 to r1 with ACL enabled and disabled")
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r2 = tgen.gears["r2"]
+ r1 = tgen.gears["r1"]
+
+ # Initially there should be no IGMP sources other than our own for the AutoRP Discovery group
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "224.0.1.40":"*",
+ "229.1.1.1":None
+ },
+ "r1-eth2":{
+ "name":"r1-eth2",
+ "224.0.1.40":"*",
+ "229.1.1.1":None
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected no IGMP sources other than for AutoRP Discovery"
+
+ # Send IGMP join from r2, check if r1 has IGMP source
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface {}
+ ip igmp join {}
+ """
+ ).format("r2-eth0", ASM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "229.1.1.1":{
+ "group":"229.1.1.1",
+ "sources":[
+ {
+ "source":"*",
+ "timer":"--:--",
+ "forwarded":False,
+ "uptime":"*"
+ }
+ ]
+ }
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be present but is absent"
+
+ # Test inbound boundary on r1
+ # Enable multicast boundary on r1, toggle IGMP join on r2
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {}
+ """
+ ).format(ASM_GROUP))
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ interface r1-eth0
+ ip multicast boundary oil pim-oil-plist
+ """
+ )
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ ip igmp join {}
+ """
+ ).format(ASM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "229.1.1.1":None
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be absent but is present"
+
+ # Test outbound boundary on r2
+ # Enable multicast boundary on r2, toggle IGMP join (test outbound)
+ # Note: json_cmp treats "*" as wildcard but in this case that's actually what the source is
+ expected = {
+ "vrf":"default",
+ "r2-eth0":{
+ "name":"r2-eth0",
+ "groups":[
+ {
+ "source":"*",
+ "group":"229.1.1.1",
+ "primaryAddr":"10.0.20.2",
+ "sockFd":"*",
+ "upTime":"*"
+ }
+ ]
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r2, "show ip igmp join json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP join to be present but is absent"
+
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {}
+ ip multicast boundary oil pim-oil-plist
+ ip igmp join {}
+ """
+ ).format(ASM_GROUP, ASM_GROUP))
+ expected = {
+ "vrf":"default",
+ "r2-eth0":None
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r2, "show ip igmp join json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP join to be absent but is present"
+
+ # Cleanup
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {}
+ no ip multicast boundary oil pim-oil-plist
+ """
+ ).format(ASM_GROUP))
+
+
+def test_pim_ssm_igmp_join_acl():
+ "Test SSM IGMP joins with extended ACLs"
+ logger.info("Send IGMP joins from r2 to r1 with ACL enabled and disabled")
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r3 = tgen.gears["r3"]
+ r2 = tgen.gears["r2"]
+ r1 = tgen.gears["r1"]
+
+ # Initially there should be no IGMP sources other than our own for the AutoRP Discovery group
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "224.0.1.40":"*",
+ "229.1.1.1":None,
+ "232.1.1.1":None
+ },
+ "r1-eth2":{
+ "name":"r1-eth2",
+ "224.0.1.40":"*",
+ "229.1.1.1":None,
+ "232.1.1.1":None
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected no IGMP sources other than from AutoRP Discovery"
+
+ # Send IGMP join from r2, check if r1 has IGMP source
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "232.1.1.1":{
+ "group":"232.1.1.1",
+ "sources":[
+ {
+ "source":"10.0.20.2",
+ "timer":"*",
+ "forwarded":False,
+ "uptime":"*"
+ }
+ ]
+ }
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be present but is absent"
+
+ # Test inbound boundary on r1
+ # Enable multicast boundary on r1, toggle IGMP join on r2
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP))
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ interface r1-eth0
+ ip multicast boundary pim-acl
+ """
+ )
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "232.1.1.1":None
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be absent but is present"
+
+ # Add a lower-sequence, more-specific permit rule to the access-list
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP))
+ r1.vtysh_cmd((
+ """
+ configure terminal
+ access-list pim-acl seq 5 permit ip host 10.0.20.2 {} 0.0.0.128
+ """
+ ).format(SSM_GROUP))
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "232.1.1.1":{
+ "group":"232.1.1.1",
+ "sources":[
+ {
+ "source":"10.0.20.2",
+ "timer":"*",
+ "forwarded":False,
+ "uptime":"*"
+ }
+ ]
+ }
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be present but is absent"
+
+ # Test outbound boundary on r2
+ # Enable multicast boundary on r2, toggle IGMP join (test outbound)
+ expected = {
+ "vrf":"default",
+ "r2-eth0":{
+ "name":"r2-eth0",
+ "groups":[
+ {
+ "source":"10.0.20.2",
+ "group":"232.1.1.1",
+ "primaryAddr":"10.0.20.2",
+ "sockFd":"*",
+ "upTime":"*"
+ }
+ ]
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r2, "show ip igmp join json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP join to be present but is absent"
+
+ # Enable boundary ACL, check join is absent
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {} 10.0.20.2
+ ip multicast boundary pim-acl
+ ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP, SSM_GROUP))
+ expected = {
+ "vrf":"default",
+ "r2-eth0":None
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r2, "show ip igmp join json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP join to be absent but is present"
+ # Check sources on r1 again; they should be absent even though r1 permits them, because r2 blocks the join outbound
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "232.1.1.1":None
+ },
+ "r1-eth2":{
+ "name":"r1-eth2",
+ "232.1.1.1":None
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be absent but is present"
+
+ # Send IGMP join from r3 with a different source; it should show up on r1
+ r3.vtysh_cmd((
+ """
+ configure terminal
+ interface r3-eth0
+ ip igmp join {} 10.0.40.4
+ """
+ ).format(SSM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "232.1.1.1":None
+ },
+ "r1-eth2":{
+ "name":"r1-eth2",
+ "232.1.1.1":{
+ "group":"232.1.1.1",
+ "sources":[
+ {
+ "source":"10.0.40.4",
+ "timer":"*",
+ "forwarded":False,
+ "uptime":"*"
+ }
+ ]
+ }
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be present but is absent"
+
+ # PIM join
+ # PIM-DM forwarding
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/pim_mrib/__init__.py b/tests/topotests/pim_mrib/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/pim_mrib/__init__.py
diff --git a/tests/topotests/pim_mrib/r1/frr.conf b/tests/topotests/pim_mrib/r1/frr.conf
new file mode 100644
index 0000000000..28cf2b2c46
--- /dev/null
+++ b/tests/topotests/pim_mrib/r1/frr.conf
@@ -0,0 +1,28 @@
+!
+hostname r1
+password zebra
+log file /tmp/r1-frr.log
+!
+!debug pim nht
+!debug pim nht detail
+!debug pim nht rp
+!
+interface r1-eth0
+ ip address 10.0.0.1/24
+ ip igmp
+ ip pim
+!
+interface r1-eth1
+ ip address 10.0.1.1/24
+ ip igmp
+ ip pim
+!
+ip forwarding
+!
+ip route 10.0.2.0/24 10.0.0.2 50
+ip route 10.0.3.0/24 10.0.1.3 50
+!
+router pim
+ rpf-lookup-mode mrib-then-urib
+ rp 10.0.0.1 224.0.0.0/4
+! \ No newline at end of file
diff --git a/tests/topotests/pim_mrib/r2/frr.conf b/tests/topotests/pim_mrib/r2/frr.conf
new file mode 100644
index 0000000000..3e647f6795
--- /dev/null
+++ b/tests/topotests/pim_mrib/r2/frr.conf
@@ -0,0 +1,28 @@
+!
+hostname r2
+password zebra
+log file /tmp/r2-frr.log
+!
+!debug pim nht
+!debug pim nht detail
+!debug pim nht rp
+!
+interface r2-eth0
+ ip address 10.0.0.2/24
+ ip igmp
+ ip pim
+!
+interface r2-eth1
+ ip address 10.0.2.2/24
+ ip igmp
+ ip pim
+!
+ip forwarding
+!
+ip route 10.0.1.0/24 10.0.0.1 50
+ip route 10.0.3.0/24 10.0.2.4 50
+!
+router pim
+ rpf-lookup-mode mrib-then-urib
+ rp 10.0.0.1 224.0.0.0/4
+! \ No newline at end of file
diff --git a/tests/topotests/pim_mrib/r3/frr.conf b/tests/topotests/pim_mrib/r3/frr.conf
new file mode 100644
index 0000000000..9815484d02
--- /dev/null
+++ b/tests/topotests/pim_mrib/r3/frr.conf
@@ -0,0 +1,28 @@
+!
+hostname r3
+password zebra
+log file /tmp/r3-frr.log
+!
+!debug pim nht
+!debug pim nht detail
+!debug pim nht rp
+!
+interface r3-eth0
+ ip address 10.0.1.3/24
+ ip igmp
+ ip pim
+!
+interface r3-eth1
+ ip address 10.0.3.3/24
+ ip igmp
+ ip pim
+!
+ip forwarding
+!
+ip route 10.0.0.0/24 10.0.1.1 50
+ip route 10.0.2.0/24 10.0.3.4 50
+!
+router pim
+ rpf-lookup-mode mrib-then-urib
+ rp 10.0.0.1 224.0.0.0/4
+! \ No newline at end of file
diff --git a/tests/topotests/pim_mrib/r4/frr.conf b/tests/topotests/pim_mrib/r4/frr.conf
new file mode 100644
index 0000000000..8432a7a350
--- /dev/null
+++ b/tests/topotests/pim_mrib/r4/frr.conf
@@ -0,0 +1,29 @@
+!
+hostname r4
+password zebra
+log file /tmp/r4-frr.log
+!
+debug pim nht
+debug pim nht detail
+debug pim nht rp
+debug zebra rib detail
+!
+interface r4-eth0
+ ip address 10.0.2.4/24
+ ip igmp
+ ip pim
+!
+interface r4-eth1
+ ip address 10.0.3.4/24
+ ip igmp
+ ip pim
+!
+ip forwarding
+!
+ip route 10.0.0.0/24 10.0.2.2 50
+ip route 10.0.1.0/24 10.0.3.3 50
+!
+router pim
+ rpf-lookup-mode mrib-then-urib
+ rp 10.0.0.1 224.0.0.0/4
+! \ No newline at end of file
diff --git a/tests/topotests/pim_mrib/test_pim_mrib.py b/tests/topotests/pim_mrib/test_pim_mrib.py
new file mode 100644
index 0000000000..355c503e3b
--- /dev/null
+++ b/tests/topotests/pim_mrib/test_pim_mrib.py
@@ -0,0 +1,328 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_pim_mrib.py
+#
+# Copyright (c) 2024 ATCorp
+# Nathan Bahr
+#
+
+import os
+import sys
+import pytest
+from functools import partial
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, get_topogen
+from lib.topolog import logger
+from lib.pim import (
+ verify_pim_rp_info,
+)
+from lib.common_config import step, write_test_header
+
+"""
+test_pim_mrib.py: Test PIM MRIB overrides and RPF modes
+"""
+
+TOPOLOGY = """
+ Test PIM MRIB overrides and RPF modes
+
+ +---+---+ +---+---+
+ | | 10.0.0.0/24 | |
+ + R1 +----------------------+ R2 |
+ | | .1 .2 | |
+ +---+---+ r1-eth0 r2-eth0 +---+---+
+ .1 | r1-eth1 r2-eth1 | .2
+ | |
+ 10.0.1.0/24 | | 10.0.2.0/24
+ | |
+ .3 | r3-eth0 r4-eth0 | .4
+ +---+---+ r3-eth1 r4-eth1 +---+---+
+ | | .3 .4 | |
+ + R3 +----------------------+ R4 |
+ | | 10.0.3.0/24 | |
+ +---+---+ +---+---+
+"""
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# Required to instantiate the topology builder class.
+pytestmark = [pytest.mark.pimd]
+
+
+def build_topo(tgen):
+ '''Build function'''
+
+ # Create routers
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r3")
+ tgen.add_router("r4")
+
+ # Create topology links
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "r1-eth0", "r2-eth0")
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r3"], "r1-eth1", "r3-eth0")
+ tgen.add_link(tgen.gears["r2"], tgen.gears["r4"], "r2-eth1", "r4-eth0")
+ tgen.add_link(tgen.gears["r3"], tgen.gears["r4"], "r3-eth1", "r4-eth1")
+
+
+def setup_module(mod):
+ logger.info("PIM MRIB/RPF functionality:\n {}".format(TOPOLOGY))
+
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ logger.info("Loading router %s" % rname)
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ '''Teardown the pytest environment'''
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_pim_mrib_init(request):
+ '''Test boot in mrib-then-urib mode with the default MRIB'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Verify rp-info using default URIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth0",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_override(request):
+ '''Test MRIB override nexthop'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Install an MRIB route that has a shorter prefix length and lower cost.
+ # In mrib-then-urib mode, it should use this route
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ ip mroute 10.0.0.0/16 10.0.3.3 25
+ '''
+ )
+
+ step("Verify rp-info using MRIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth1",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_prefix_mode(request):
+ '''Test longer prefix lookup mode'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Switch to longer-prefix match; it should switch back to the URIB route.
+ # Even with the lower cost, the longer prefix match wins because of the mode
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ router pim
+ rpf-lookup-mode longer-prefix
+ '''
+ )
+
+ step("Verify rp-info using URIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth0",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_dist_mode(request):
+ '''Test lower distance lookup mode'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Switch to lower-distance match; it should switch back to the MRIB route
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ router pim
+ rpf-lookup-mode lower-distance
+ '''
+ )
+
+ step("Verify rp-info using MRIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth1",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_urib_mode(request):
+ '''Test URIB only lookup mode'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Switch to urib-only mode; it should switch back to the URIB route
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ router pim
+ rpf-lookup-mode urib-only
+ '''
+ )
+
+ step("Verify rp-info using URIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth0",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_mrib_mode(request):
+ '''Test MRIB only lookup mode'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Switch to mrib-only mode; it should switch back to the MRIB route
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ router pim
+ rpf-lookup-mode mrib-only
+ '''
+ )
+
+ step("Verify rp-info using MRIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth1",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_mrib_mode_no_route(request):
+ '''Test MRIB only with no route'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Remove the MRIB route; in mrib-only mode, the RP should be left with no path
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ no ip mroute 10.0.0.0/16 10.0.3.3 25
+ '''
+ )
+
+ step("Verify rp-info with Unknown next hop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "Unknown",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+def test_memory_leak():
+ '''Run the memory leak test and report results.'''
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/srv6_static_route/test_srv6_route.py b/tests/topotests/srv6_static_route/test_srv6_route.py
index f23e199d4a..e26775daf7 100755
--- a/tests/topotests/srv6_static_route/test_srv6_route.py
+++ b/tests/topotests/srv6_static_route/test_srv6_route.py
@@ -27,7 +27,7 @@ from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-pytestmark = [pytest.mark.bgpd, pytest.mark.sharpd]
+pytestmark = [pytest.mark.staticd]
def open_json_file(filename):
diff --git a/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_down.json b/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_down.json
new file mode 100644
index 0000000000..50871ae038
--- /dev/null
+++ b/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_down.json
@@ -0,0 +1,20 @@
+{
+ "5.5.6.7/32":[
+ {
+ "prefix":"5.5.6.7/32",
+ "prefixLen":32,
+ "protocol":"kernel",
+ "vrfName":"default",
+ "internalFlags":0,
+ "internalNextHopNum":1,
+ "internalNextHopActiveNum":0,
+ "nexthops":[
+ {
+ "flags":0,
+ "interfaceName":"r1-eth2"
+ }
+ ]
+
+ }
+ ]
+}
diff --git a/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_up.json b/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_up.json
new file mode 100644
index 0000000000..d0ab2fa187
--- /dev/null
+++ b/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_up.json
@@ -0,0 +1,21 @@
+{
+ "5.5.6.7/32":[
+ {
+ "prefix":"5.5.6.7/32",
+ "prefixLen":32,
+ "protocol":"kernel",
+ "vrfName":"default",
+ "internalFlags":8,
+ "internalNextHopNum":1,
+ "internalNextHopActiveNum":1,
+ "nexthops":[
+ {
+ "flags":3,
+ "fib":true,
+ "interfaceName":"r1-eth2",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py b/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py
index eda8c88706..89bc6cf8e0 100644
--- a/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py
+++ b/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py
@@ -65,6 +65,9 @@ def build_topo(tgen):
switch.add_link(tgen.gears["r2"])
switch.add_link(tgen.gears["r3"])
+ # Create a p2p connection between r1 and r2
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r2"])
+
#####################################################
##
@@ -222,6 +225,50 @@ def test_zebra_kernel_route_blackhole_add():
result, _ = topotest.run_and_expect(test_func, None, count=20, wait=1)
assert result, "Blackhole Route should have not been removed\n{}".format(_)
+def test_zebra_kernel_route_interface_linkdown():
+ "Test that a kernel routes should be affected by interface change"
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r1"]
+ router.run("ip route add 5.5.6.7/32 via 10.0.1.66 dev r1-eth2")
+
+ kernel = "{}/{}/ip_route_kernel_interface_up.json".format(CWD, router.name)
+ expected = json.loads(open(kernel).read())
+
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip route 5.5.6.7/32 json", expected
+ )
+ result, _ = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result, "Kernel Route should be selected:\n{}".format(_)
+
+ # link down
+ router2 = tgen.gears["r2"]
+ router2.run("ip link set dev r2-eth2 down")
+
+ kernel = "{}/{}/ip_route_kernel_interface_down.json".format(CWD, router.name)
+ expected = json.loads(open(kernel).read())
+
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip route 5.5.6.7/32 json", expected
+ )
+ result, _ = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result, "Kernel Route should not be selected:\n{}".format(_)
+
+ # link up
+ router2 = tgen.gears["r2"]
+ router2.run("ip link set dev r2-eth2 up")
+
+ kernel = "{}/{}/ip_route_kernel_interface_up.json".format(CWD, router.name)
+ expected = json.loads(open(kernel).read())
+
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip route 5.5.6.7/32 json", expected
+ )
+ result, _ = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result, "Kernel Route should be selected:\n{}".format(_)
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf
index a248a1a304..be831a1d34 100644
--- a/tools/etc/frr/support_bundle_commands.conf
+++ b/tools/etc/frr/support_bundle_commands.conf
@@ -134,9 +134,11 @@ show ip ospf router-info pce
CMD_LIST_END
# RIP Support Bundle Command List
-# PROC_NAME:rip
-# CMD_LIST_START
-# CMD_LIST_END
+PROC_NAME:rip
+CMD_LIST_START
+show ip rip
+show ip rip status
+CMD_LIST_END
# ISIS Support Bundle Command List
PROC_NAME:isis
diff --git a/tools/frr-reload.py b/tools/frr-reload.py
index 2bb364f32b..2ea63a290e 100755
--- a/tools/frr-reload.py
+++ b/tools/frr-reload.py
@@ -279,7 +279,11 @@ ctx_keywords = {
"policy ": {"candidate-path ": {}},
"pcep": {"pcc": {}, "pce ": {}, "pce-config ": {}},
},
- "srv6": {"locators": {"locator ": {}}, "encapsulation": {}},
+ "srv6": {
+ "locators": {"locator ": {}},
+ "encapsulation": {},
+ "formats": {"format": {}},
+ },
},
"nexthop-group ": {},
"route-map ": {},
diff --git a/tools/gen_northbound_callbacks.c b/tools/gen_northbound_callbacks.c
index 046dc9e99e..516743acab 100644
--- a/tools/gen_northbound_callbacks.c
+++ b/tools/gen_northbound_callbacks.c
@@ -11,6 +11,7 @@
#include <unistd.h>
+#include "darr.h"
#include "yang.h"
#include "northbound.h"
@@ -19,7 +20,7 @@ static bool static_cbs;
static void __attribute__((noreturn)) usage(int status)
{
extern const char *__progname;
- fprintf(stderr, "usage: %s [-h] [-s] [-p path] MODULE\n", __progname);
+ fprintf(stderr, "usage: %s [-h] [-s] [-p path]* MODULE\n", __progname);
exit(status);
}
@@ -408,7 +409,8 @@ static int generate_nb_nodes(const struct lysc_node *snode, void *arg)
int main(int argc, char *argv[])
{
- const char *search_path = NULL;
+ char **search_paths = NULL;
+ char **iter = NULL;
struct yang_module *module;
char module_name_underscores[64];
struct stat st;
@@ -433,7 +435,7 @@ int main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
- search_path = optarg;
+ *darr_append(search_paths) = darr_strdup(optarg);
break;
case 's':
static_cbs = true;
@@ -450,8 +452,11 @@ int main(int argc, char *argv[])
yang_init(false, true, false);
- if (search_path)
- ly_ctx_set_searchdir(ly_native_ctx, search_path);
+ darr_foreach_p (search_paths, iter) {
+ ly_ctx_set_searchdir(ly_native_ctx, *iter);
+ darr_free(*iter);
+ }
+ darr_free(search_paths);
/* Load all FRR native models to ensure all augmentations are loaded. */
yang_module_load_all();
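The darr ("dynamic array") API used above keeps the array pointer itself as the handle: it starts as NULL, darr_append() grows it and returns a pointer to the new slot, and darr_free() releases it. A minimal self-contained sketch of the same append/iterate/free pattern, using only the calls visible in this diff:

    #include "darr.h"

    static void collect_paths(int argc, char *argv[])
    {
        char **paths = NULL; /* darr arrays start out as NULL */
        char **p;
        int i;

        /* append a duplicated string per argument */
        for (i = 0; i < argc; i++)
            *darr_append(paths) = darr_strdup(argv[i]);

        /* iterate by pointer-to-element, freeing each string */
        darr_foreach_p (paths, p)
            darr_free(*p);

        /* finally free the array itself */
        darr_free(paths);
    }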
diff --git a/watchfrr/watchfrr.c b/watchfrr/watchfrr.c
index acc612c0a8..611a7872d0 100644
--- a/watchfrr/watchfrr.c
+++ b/watchfrr/watchfrr.c
@@ -44,7 +44,7 @@
#define DEFAULT_PERIOD 5
#define DEFAULT_TIMEOUT 90
-#define DEFAULT_RESTART_TIMEOUT 20
+#define DEFAULT_RESTART_TIMEOUT 90
#define DEFAULT_LOGLEVEL LOG_INFO
#define DEFAULT_MIN_RESTART 60
#define DEFAULT_MAX_RESTART 600
diff --git a/yang/frr-pim.yang b/yang/frr-pim.yang
index 33602fd29e..8dadf4fd7c 100644
--- a/yang/frr-pim.yang
+++ b/yang/frr-pim.yang
@@ -78,6 +78,51 @@ module frr-pim {
type string;
}
+ typedef access-list-ref {
+ type string;
+ }
+
+ /*
+ * Multicast RPF mode configurable type
+ */
+
+ typedef mcast-rpf-lookup-mode {
+ type enumeration {
+ enum "none" {
+ value 0;
+ description
+ "No mode set.";
+ }
+ enum "mrib-only" {
+ value 1;
+ description
+ "Lookup in unicast RIB only.";
+ }
+ enum "urib-only" {
+ value 2;
+ description
+ "Lookup in multicast RIB only.";
+ }
+ enum "mrib-then-urib" {
+ value 3;
+ description
+ "Try multicast RIB first, fall back to unicast RIB.";
+ }
+ enum "lower-distance" {
+ value 4;
+ description
+ "Lookup both unicast and mcast, use entry with lower distance.";
+ }
+ enum "longer-prefix" {
+ value 5;
+ description
+ "Lookup both unicast and mcast, use entry with longer prefix.";
+ }
+ }
+ description
+ "Multicast RPF lookup behavior";
+ }
+
/*
* Groupings
*/
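These enum values back the rpf-lookup-mode command under router pim, which the pim_mrib topotest earlier in this diff drives through vtysh. A minimal configuration sketch (any of the non-"none" values above can be substituted):

    router pim
     rpf-lookup-mode mrib-then-urib
    !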
@@ -157,20 +202,27 @@ module frr-pim {
description
"A grouping defining per address family pim global attributes";
+ leaf mcast-rpf-lookup {
+ type mcast-rpf-lookup-mode;
+ default "none";
+ description
+ "Multicast RPF lookup behavior.";
+ }
+
leaf ecmp {
type boolean;
default "false";
description
"Enable PIM ECMP.";
}
-
+
leaf ecmp-rebalance {
type boolean;
default "false";
description
"Enable PIM ECMP Rebalance.";
}
-
+
leaf keep-alive-timer {
type uint16 {
range "1..max";
@@ -179,7 +231,7 @@ module frr-pim {
description
"Keep alive Timer in seconds.";
}
-
+
leaf rp-keep-alive-timer {
type uint16 {
range "1..max";
@@ -265,6 +317,14 @@ module frr-pim {
"Log all MSDP SA related events.";
}
+ leaf originator-id {
+ type inet:ip-address;
+ description
+ "Configure the RP address for the SAs.
+
+ By default the local system RP address will be used.";
+ }
+
leaf shutdown {
type boolean;
default false;
@@ -337,6 +397,12 @@ module frr-pim {
}
uses msdp-authentication;
+
+ leaf sa-limit {
+ type uint32;
+ description
+ "Peer SA maximum limit.";
+ }
}
container mlag {
@@ -507,7 +573,13 @@ module frr-pim {
leaf multicast-boundary-oil {
type plist-ref;
description
- "Prefix-List to define multicast boundary";
+ "Prefix-List to define multicast boundary by group";
+ }
+
+ leaf multicast-boundary-acl {
+ type access-list-ref;
+ description
+ "Access-list to define multicast boundary by source and group";
}
list mroute {
diff --git a/yang/frr-zebra.yang b/yang/frr-zebra.yang
index f97a4cc129..a3c066c56c 100644
--- a/yang/frr-zebra.yang
+++ b/yang/frr-zebra.yang
@@ -157,47 +157,6 @@ module frr-zebra {
"Zebra interface type gre.";
}
- /*
- * Multicast RPF mode configurable type
- */
-
- typedef mcast-rpf-lookup-mode {
- type enumeration {
- enum "none" {
- value 0;
- description
- "No mode set.";
- }
- enum "mrib-only" {
- value 1;
- description
- "Lookup in unicast RIB only.";
- }
- enum "urib-only" {
- value 2;
- description
- "Lookup in multicast RIB only.";
- }
- enum "mrib-then-urib" {
- value 3;
- description
- "Try multicast RIB first, fall back to unicast RIB.";
- }
- enum "lower-distance" {
- value 4;
- description
- "Lookup both unicast and mcast, use entry with lower distance.";
- }
- enum "longer-prefix" {
- value 5;
- description
- "Lookup both unicast and mcast, use entry with longer prefix.";
- }
- }
- description
- "Multicast RPF lookup behavior";
- }
-
// End of ip6-route
/*
* VxLAN Network Identifier type
@@ -2883,12 +2842,6 @@ module frr-zebra {
container zebra {
description
"Data model for the Zebra daemon.";
- leaf mcast-rpf-lookup {
- type frr-zebra:mcast-rpf-lookup-mode;
- default "mrib-then-urib";
- description
- "Multicast RPF lookup behavior.";
- }
leaf ip-forwarding {
type boolean;
description
diff --git a/zebra/interface.c b/zebra/interface.c
index f7fd112cd4..1c86a6a5c7 100644
--- a/zebra/interface.c
+++ b/zebra/interface.c
@@ -972,6 +972,8 @@ void if_up(struct interface *ifp, bool install_connected)
event_ignore_late_timer(zif->speed_update);
if_addr_wakeup(ifp);
+
+ rib_update_handle_vrf_all(RIB_UPDATE_KERNEL, ZEBRA_ROUTE_KERNEL);
}
/* Interface goes down. We have to manage different behavior of based
diff --git a/zebra/rib.h b/zebra/rib.h
index 5fedb07335..8484fe1291 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -402,11 +402,7 @@ extern void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
bool fromkernel);
extern struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id,
- const union g_addr *addr,
- struct route_node **rn_out);
-extern struct route_entry *rib_match_multicast(afi_t afi, vrf_id_t vrf_id,
- union g_addr *gaddr,
- struct route_node **rn_out);
+ const union g_addr *addr, struct route_node **rn_out);
extern void rib_update(enum rib_update_event event);
extern void rib_update_table(struct route_table *table,
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 10acee9be4..ab55998af0 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -282,7 +282,7 @@ int zsend_interface_address(int cmd, struct zserv *client,
{
int blen;
struct prefix *p;
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, cmd, ifp->vrf->vrf_id);
stream_putl(s, ifp->ifindex);
@@ -323,7 +323,7 @@ static int zsend_interface_nbr_address(int cmd, struct zserv *client,
struct nbr_connected *ifc)
{
int blen;
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
struct prefix *p;
zclient_create_header(s, cmd, ifp->vrf->vrf_id);
@@ -640,10 +640,15 @@ int zsend_redistribute_route(int cmd, struct zserv *client,
* (Otherwise we would need to implement sending NHT updates for the result of
* this "URIB-MRIB-combined" table, but we only decide that here on the fly,
* so it'd be rather complex to do NHT for.)
+ *
+ * 9/19/24 NEB I've updated this API to include the SAFI in the lookup
+ * request and response. This allows PIM to do a synchronous lookup for the
+ * correct table alongside NHT.
+ * This also makes this a more generic synchronous lookup not specifically
+ * tied to the mrib.
*/
-static int zsend_nexthop_lookup_mrib(struct zserv *client, struct ipaddr *addr,
- struct route_entry *re,
- struct zebra_vrf *zvrf)
+static int zsend_nexthop_lookup(struct zserv *client, struct ipaddr *addr, struct route_entry *re,
+ struct route_node *rn, struct zebra_vrf *zvrf, safi_t safi)
{
struct stream *s;
unsigned long nump;
@@ -651,18 +656,20 @@ static int zsend_nexthop_lookup_mrib(struct zserv *client, struct ipaddr *addr,
struct nexthop *nexthop;
/* Get output stream. */
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
stream_reset(s);
/* Fill in result. */
- zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP_MRIB, zvrf_id(zvrf));
+ zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP, zvrf_id(zvrf));
stream_put_ipaddr(s, addr);
- if (re) {
+ if (re && rn) {
struct nexthop_group *nhg;
stream_putc(s, re->distance);
stream_putl(s, re->metric);
+ stream_putw(s, rn->p.prefixlen);
+
num = 0;
/* remember position for nexthop_num */
nump = stream_get_endp(s);
@@ -679,6 +686,7 @@ static int zsend_nexthop_lookup_mrib(struct zserv *client, struct ipaddr *addr,
} else {
stream_putc(s, 0); /* distance */
stream_putl(s, 0); /* metric */
+ stream_putw(s, 0); /* prefix len */
stream_putw(s, 0); /* nexthop_num */
}
@@ -706,7 +714,7 @@ int zsend_nhg_notify(uint16_t type, uint16_t instance, uint32_t session_id,
zlog_debug("%s: type %d, id %d, note %s",
__func__, type, id, zapi_nhg_notify_owner2str(note));
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
stream_reset(s);
zclient_create_header(s, ZEBRA_NHG_NOTIFY_OWNER, VRF_DEFAULT);
@@ -835,7 +843,7 @@ void zsend_rule_notify_owner(const struct zebra_dplane_ctx *ctx,
if (!client)
return;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_RULE_NOTIFY_OWNER,
dplane_ctx_rule_get_vrfid(ctx));
@@ -889,7 +897,7 @@ void zsend_iptable_notify_owner(const struct zebra_dplane_ctx *ctx,
if (!client)
return;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, cmd, VRF_DEFAULT);
stream_putw(s, note);
@@ -923,7 +931,7 @@ void zsend_ipset_notify_owner(const struct zebra_dplane_ctx *ctx,
if (!client)
return;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, cmd, VRF_DEFAULT);
stream_putw(s, note);
@@ -959,7 +967,7 @@ void zsend_ipset_entry_notify_owner(const struct zebra_dplane_ctx *ctx,
if (!client)
return;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, cmd, VRF_DEFAULT);
stream_putw(s, note);
@@ -1049,13 +1057,12 @@ int zsend_router_id_update(struct zserv *client, afi_t afi, struct prefix *p,
vrf_id_t vrf_id)
{
int blen;
- struct stream *s;
/* Check this client need interface information. */
if (!vrf_bitmap_check(&client->ridinfo[afi], vrf_id))
return 0;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
/* Message type. */
zclient_create_header(s, ZEBRA_ROUTER_ID_UPDATE, vrf_id);
@@ -1077,7 +1084,7 @@ int zsend_router_id_update(struct zserv *client, afi_t afi, struct prefix *p,
*/
int zsend_pw_update(struct zserv *client, struct zebra_pw *pw)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_PW_STATUS_UPDATE, pw->vrf_id);
stream_write(s, pw->ifname, IFNAMSIZ);
@@ -1094,7 +1101,7 @@ int zsend_pw_update(struct zserv *client, struct zebra_pw *pw)
int zsend_assign_label_chunk_response(struct zserv *client, vrf_id_t vrf_id,
struct label_manager_chunk *lmc)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_GET_LABEL_CHUNK, vrf_id);
/* proto */
@@ -1120,7 +1127,7 @@ int zsend_assign_label_chunk_response(struct zserv *client, vrf_id_t vrf_id,
int zsend_label_manager_connect_response(struct zserv *client, vrf_id_t vrf_id,
unsigned short result)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_LABEL_MANAGER_CONNECT, vrf_id);
@@ -1144,7 +1151,7 @@ static int zsend_assign_table_chunk_response(struct zserv *client,
vrf_id_t vrf_id,
struct table_manager_chunk *tmc)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_GET_TABLE_CHUNK, vrf_id);
@@ -1164,7 +1171,7 @@ static int zsend_table_manager_connect_response(struct zserv *client,
vrf_id_t vrf_id,
uint16_t result)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_TABLE_MANAGER_CONNECT, vrf_id);
@@ -2316,33 +2323,37 @@ static void zread_route_del(ZAPI_HANDLER_ARGS)
}
}
-/* MRIB Nexthop lookup for IPv4. */
-static void zread_nexthop_lookup_mrib(ZAPI_HANDLER_ARGS)
+/* Synchronous nexthop lookup. */
+static void zread_nexthop_lookup(ZAPI_HANDLER_ARGS)
{
struct ipaddr addr;
struct route_entry *re = NULL;
+ struct route_node *rn = NULL;
union g_addr gaddr;
+ afi_t afi = AFI_IP;
+ safi_t safi = SAFI_UNICAST;
STREAM_GET_IPADDR(msg, &addr);
+ STREAM_GETC(msg, safi);
switch (addr.ipa_type) {
case IPADDR_V4:
gaddr.ipv4 = addr.ipaddr_v4;
- re = rib_match_multicast(AFI_IP, zvrf_id(zvrf), &gaddr, NULL);
+ afi = AFI_IP;
break;
case IPADDR_V6:
gaddr.ipv6 = addr.ipaddr_v6;
- re = rib_match_multicast(AFI_IP6, zvrf_id(zvrf), &gaddr, NULL);
+ afi = AFI_IP6;
break;
case IPADDR_NONE:
/* ??? */
goto stream_failure;
}
- zsend_nexthop_lookup_mrib(client, &addr, re, zvrf);
+ re = rib_match(afi, safi, zvrf_id(zvrf), &gaddr, &rn);
stream_failure:
- return;
+ zsend_nexthop_lookup(client, &addr, re, rn, zvrf, safi);
}
/* Register zebra server router-id information. Send current router-id */
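For reference, the reply built by zsend_nexthop_lookup() above is laid out as distance (1 byte), metric (4 bytes), the newly added prefix length (2 bytes), a nexthop count (2 bytes), and the encoded nexthops; all fields are zeroed when nothing matched. A minimal sketch (not the actual PIM client code) of composing the request this handler parses, using the usual libfrr stream helpers:

    /* Sketch: request body is an address followed by a one-byte SAFI. */
    static void compose_nexthop_lookup(struct stream *s, vrf_id_t vrf_id,
                                       struct ipaddr *addr, safi_t safi)
    {
            zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP, vrf_id);
            stream_put_ipaddr(s, addr); /* matches STREAM_GET_IPADDR() above */
            stream_putc(s, safi);       /* matches STREAM_GETC(msg, safi) above */
            stream_putw_at(s, 0, stream_get_endp(s)); /* patch total length */
    }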
@@ -2406,7 +2417,7 @@ stream_failure:
static void zsend_capabilities(struct zserv *client, struct zebra_vrf *zvrf)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_CAPABILITIES, zvrf->vrf->vrf_id);
stream_putl(s, vrf_get_backend());
@@ -3990,8 +4001,7 @@ static inline void zebra_gre_source_set(ZAPI_HANDLER_ARGS)
static void zsend_error_msg(struct zserv *client, enum zebra_error_types error,
struct zmsghdr *bad_hdr)
{
-
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_ERROR, bad_hdr->vrf_id);
@@ -4029,7 +4039,7 @@ void (*const zserv_handlers[])(ZAPI_HANDLER_ARGS) = {
[ZEBRA_REDISTRIBUTE_DELETE] = zebra_redistribute_delete,
[ZEBRA_REDISTRIBUTE_DEFAULT_ADD] = zebra_redistribute_default_add,
[ZEBRA_REDISTRIBUTE_DEFAULT_DELETE] = zebra_redistribute_default_delete,
- [ZEBRA_NEXTHOP_LOOKUP_MRIB] = zread_nexthop_lookup_mrib,
+ [ZEBRA_NEXTHOP_LOOKUP] = zread_nexthop_lookup,
[ZEBRA_HELLO] = zread_hello,
[ZEBRA_NEXTHOP_REGISTER] = zread_rnh_register,
[ZEBRA_NEXTHOP_UNREGISTER] = zread_rnh_unregister,
diff --git a/zebra/zebra_cli.c b/zebra/zebra_cli.c
index 6ee0fdbb8d..ca53eb2eb3 100644
--- a/zebra/zebra_cli.c
+++ b/zebra/zebra_cli.c
@@ -2252,6 +2252,9 @@ static void lib_vrf_mpls_fec_nexthop_resolution_cli_write(
}
}
+#if CONFDATE > 20251207
+CPP_NOTICE("Remove no-op netns command")
+#endif
DEFPY_YANG (vrf_netns,
vrf_netns_cmd,
"[no] netns ![NAME$netns_name]",
diff --git a/zebra/zebra_evpn_mac.c b/zebra/zebra_evpn_mac.c
index 0d53591336..3fd84b5257 100644
--- a/zebra/zebra_evpn_mac.c
+++ b/zebra/zebra_evpn_mac.c
@@ -1152,6 +1152,7 @@ int zebra_evpn_mac_del(struct zebra_evpn *zevpn, struct zebra_mac *mac)
listcount(mac->neigh_list));
SET_FLAG(mac->flags, ZEBRA_MAC_AUTO);
+ mac->rem_seq = 0;
return 0;
}
@@ -1322,6 +1323,7 @@ int zebra_evpn_mac_send_del_to_client(vni_t vni, const struct ethaddr *macaddr,
uint32_t flags, bool force)
{
int state = ZEBRA_NEIGH_ACTIVE;
+ struct zebra_vrf *zvrf;
if (!force) {
if (CHECK_FLAG(flags, ZEBRA_MAC_LOCAL_INACTIVE) &&
@@ -1329,12 +1331,14 @@ int zebra_evpn_mac_send_del_to_client(vni_t vni, const struct ethaddr *macaddr,
/* the host was not advertised - nothing to delete */
return 0;
- /* MAC is LOCAL and DUP_DETECTED, this local mobility event
- * is not known to bgpd. Upon receiving local delete
- * ask bgp to reinstall the best route (remote entry).
+ /* The duplicate-detect action is freeze and the
+ * local MAC has been detected as a duplicate; this
+ * local mobility event is not known to bgpd.
+ * Upon receiving the local delete, ask bgp to
+ * reinstall the best route (remote entry).
*/
- if (CHECK_FLAG(flags, ZEBRA_MAC_LOCAL) &&
- CHECK_FLAG(flags, ZEBRA_MAC_DUPLICATE))
+ zvrf = zebra_vrf_get_evpn();
+ if (zvrf && zvrf->dad_freeze && CHECK_FLAG(flags, ZEBRA_MAC_DUPLICATE))
state = ZEBRA_NEIGH_INACTIVE;
}
@@ -2411,6 +2415,7 @@ int zebra_evpn_del_local_mac(struct zebra_evpn *zevpn, struct zebra_mac *mac,
UNSET_FLAG(mac->flags, ZEBRA_MAC_ALL_LOCAL_FLAGS);
UNSET_FLAG(mac->flags, ZEBRA_MAC_STICKY);
SET_FLAG(mac->flags, ZEBRA_MAC_AUTO);
+ mac->rem_seq = 0;
}
return 0;
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index 0d3fd2a726..3325532ca9 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -466,7 +466,7 @@ static int fec_send(struct zebra_fec *fec, struct zserv *client)
rn = fec->rn;
/* Get output stream. */
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_FEC_UPDATE, VRF_DEFAULT);
diff --git a/zebra/zebra_mroute.c b/zebra/zebra_mroute.c
index 881b681c2f..86e25469ba 100644
--- a/zebra/zebra_mroute.c
+++ b/zebra/zebra_mroute.c
@@ -61,7 +61,7 @@ void zebra_ipmr_route_stats(ZAPI_HANDLER_ARGS)
suc = kernel_get_ipmr_sg_stats(zvrf, &mroute);
stream_failure:
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
stream_reset(s);
diff --git a/zebra/zebra_nb.c b/zebra/zebra_nb.c
index 0a7ed5db41..6b41993a95 100644
--- a/zebra/zebra_nb.c
+++ b/zebra/zebra_nb.c
@@ -26,12 +26,6 @@ const struct frr_yang_module_info frr_zebra_info = {
.features = features,
.nodes = {
{
- .xpath = "/frr-zebra:zebra/mcast-rpf-lookup",
- .cbs = {
- .modify = zebra_mcast_rpf_lookup_modify,
- }
- },
- {
.xpath = "/frr-zebra:zebra/ip-forwarding",
.cbs = {
.modify = zebra_ip_forwarding_modify,
diff --git a/zebra/zebra_nb_config.c b/zebra/zebra_nb_config.c
index 09c0091ec6..ec151360bd 100644
--- a/zebra/zebra_nb_config.c
+++ b/zebra/zebra_nb_config.c
@@ -31,23 +31,6 @@
#include "zebra/table_manager.h"
/*
- * XPath: /frr-zebra:zebra/mcast-rpf-lookup
- */
-int zebra_mcast_rpf_lookup_modify(struct nb_cb_modify_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
- break;
- }
-
- return NB_OK;
-}
-
-/*
* XPath: /frr-zebra:zebra/ip-forwarding
*/
int zebra_ip_forwarding_modify(struct nb_cb_modify_args *args)
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index 1519246c17..a32fc2bb14 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -1056,6 +1056,7 @@ static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh, struct nh_g
static void zebra_nhg_set_valid(struct nhg_hash_entry *nhe, bool valid)
{
struct nhg_connected *rb_node_dep;
+ bool dependent_valid = valid;
if (valid)
SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
@@ -1071,6 +1072,7 @@ static void zebra_nhg_set_valid(struct nhg_hash_entry *nhe, bool valid)
/* Update validity of nexthops depending on it */
frr_each (nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep) {
+ dependent_valid = valid;
if (!valid) {
/*
* Grab the first nexthop from the depending nexthop group
@@ -1080,16 +1082,22 @@ static void zebra_nhg_set_valid(struct nhg_hash_entry *nhe, bool valid)
struct nexthop *nexthop = rb_node_dep->nhe->nhg.nexthop;
while (nexthop) {
- if (nexthop_same(nexthop, nhe->nhg.nexthop))
- break;
-
+ if (nexthop_same(nexthop, nhe->nhg.nexthop)) {
+ /* Invalid Nexthop */
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ } else {
+ /*
+ * If other nexthops in the nexthop
+ * group are valid then we can continue
+ * to use this nexthop group as valid
+ */
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+ dependent_valid = true;
+ }
nexthop = nexthop->next;
}
-
- if (nexthop)
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
}
- zebra_nhg_set_valid(rb_node_dep->nhe, valid);
+ zebra_nhg_set_valid(rb_node_dep->nhe, dependent_valid);
}
}
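The reworked loop changes the invalidation rule: a dependent nexthop group is only pulled down when none of its other members remain active. A standalone illustrative sketch of that rule (types and names here are invented for the example, not FRR's):

    #include <stdbool.h>

    struct member { bool active; struct member *next; };

    /* Returns whether a dependent group should stay valid after the
     * member `gone` becomes invalid: it stays valid as long as any
     * other member is still active. */
    static bool group_still_valid(struct member *members, struct member *gone)
    {
            struct member *m;
            bool valid = false;

            for (m = members; m; m = m->next) {
                    if (m == gone)
                            m->active = false; /* mark the failed member */
                    else if (m->active)
                            valid = true;      /* another member still works */
            }
            return valid;
    }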
@@ -2648,7 +2656,7 @@ static unsigned nexthop_active_check(struct route_node *rn,
ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
- if (ifp && ifp->vrf->vrf_id == vrf_id && if_is_up(ifp)) {
+ if (ifp && ifp->vrf->vrf_id == vrf_id && if_is_up(ifp) && if_is_operative(ifp)) {
SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
goto skip_check;
}
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 574083ae02..0226c355c8 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -503,7 +503,7 @@ struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id,
/* Lookup table. */
table = zebra_vrf_table(afi, safi, vrf_id);
if (!table)
- return 0;
+ return NULL;
memset(&p, 0, sizeof(p));
p.family = afi;
@@ -552,65 +552,6 @@ struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id,
return NULL;
}
-struct route_entry *rib_match_multicast(afi_t afi, vrf_id_t vrf_id,
- union g_addr *gaddr,
- struct route_node **rn_out)
-{
- struct route_entry *re = NULL, *mre = NULL, *ure = NULL;
- struct route_node *m_rn = NULL, *u_rn = NULL;
-
- switch (zrouter.ipv4_multicast_mode) {
- case MCAST_MRIB_ONLY:
- return rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, rn_out);
- case MCAST_URIB_ONLY:
- return rib_match(afi, SAFI_UNICAST, vrf_id, gaddr, rn_out);
- case MCAST_NO_CONFIG:
- case MCAST_MIX_MRIB_FIRST:
- re = mre = rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, &m_rn);
- if (!mre)
- re = ure = rib_match(afi, SAFI_UNICAST, vrf_id, gaddr,
- &u_rn);
- break;
- case MCAST_MIX_DISTANCE:
- mre = rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, &m_rn);
- ure = rib_match(afi, SAFI_UNICAST, vrf_id, gaddr, &u_rn);
- if (mre && ure)
- re = ure->distance < mre->distance ? ure : mre;
- else if (mre)
- re = mre;
- else if (ure)
- re = ure;
- break;
- case MCAST_MIX_PFXLEN:
- mre = rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, &m_rn);
- ure = rib_match(afi, SAFI_UNICAST, vrf_id, gaddr, &u_rn);
- if (mre && ure)
- re = u_rn->p.prefixlen > m_rn->p.prefixlen ? ure : mre;
- else if (mre)
- re = mre;
- else if (ure)
- re = ure;
- break;
- }
-
- if (rn_out)
- *rn_out = (re == mre) ? m_rn : u_rn;
-
- if (IS_ZEBRA_DEBUG_RIB) {
- char buf[BUFSIZ];
- inet_ntop(afi == AFI_IP ? AF_INET : AF_INET6, gaddr, buf,
- BUFSIZ);
-
- zlog_debug("%s: %s: %pRN vrf: %s(%u) found %s, using %s",
- __func__, buf, (re == mre) ? m_rn : u_rn,
- vrf_id_to_name(vrf_id), vrf_id,
- mre ? (ure ? "MRIB+URIB" : "MRIB")
- : ure ? "URIB" : "nothing",
- re == ure ? "URIB" : re == mre ? "MRIB" : "none");
- }
- return re;
-}
-
/*
* Is this RIB labeled-unicast? It must be of type BGP and all paths
* (nexthops) must have a label.
@@ -1480,7 +1421,7 @@ static void rib_process(struct route_node *rn)
rib_process_update_fib(zvrf, rn, old_fib, new_fib);
else if (new_fib)
rib_process_add_fib(zvrf, rn, new_fib);
- else if (old_fib)
+ else if (old_fib && !RIB_SYSTEM_ROUTE(old_fib))
rib_process_del_fib(zvrf, rn, old_fib);
/* Remove all RE entries queued for removal */
@@ -2838,6 +2779,8 @@ static void process_subq_early_route_add(struct zebra_early_route *ere)
if (!ere->startup && (re->flags & ZEBRA_FLAG_SELFROUTE) &&
zrouter.asic_offloaded) {
+ struct route_entry *entry;
+
if (!same) {
if (IS_ZEBRA_DEBUG_RIB)
zlog_debug(
@@ -2854,6 +2797,25 @@ static void process_subq_early_route_add(struct zebra_early_route *ere)
early_route_memory_free(ere);
return;
}
+
+ RNODE_FOREACH_RE (rn, entry) {
+ if (CHECK_FLAG(entry->status, ROUTE_ENTRY_REMOVED))
+ continue;
+
+ if (entry->type != ere->re->type)
+ continue;
+
+ /*
+ * If an existing entry is changed but not yet
+ * processed and is not a self route, then
+ * drop this new self route.
+ */
+ if (CHECK_FLAG(entry->status, ROUTE_ENTRY_CHANGED) &&
+ !(entry->flags & ZEBRA_FLAG_SELFROUTE)) {
+ early_route_memory_free(ere);
+ return;
+ }
+ }
}
/* Set default distance by route type. */
@@ -3746,10 +3708,8 @@ static struct meta_queue *meta_queue_new(void)
new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct meta_queue));
- for (i = 0; i < MQ_SIZE; i++) {
+ for (i = 0; i < MQ_SIZE; i++)
new->subq[i] = list_new();
- assert(new->subq[i]);
- }
return new;
}
@@ -3935,12 +3895,7 @@ void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf)
/* initialise zebra rib work queue */
static void rib_queue_init(void)
{
- if (!(zrouter.ribq = work_queue_new(zrouter.master,
- "route_node processing"))) {
- flog_err(EC_ZEBRA_WQ_NONEXISTENT,
- "%s: could not initialise work queue!", __func__);
- return;
- }
+ zrouter.ribq = work_queue_new(zrouter.master, "route_node processing");
/* fill in the work queue spec */
zrouter.ribq->spec.workfunc = &meta_queue_process;
@@ -3950,11 +3905,8 @@ static void rib_queue_init(void)
zrouter.ribq->spec.hold = ZEBRA_RIB_PROCESS_HOLD_TIME;
zrouter.ribq->spec.retry = ZEBRA_RIB_PROCESS_RETRY_TIME;
- if (!(zrouter.mq = meta_queue_new())) {
- flog_err(EC_ZEBRA_WQ_NONEXISTENT,
- "%s: could not initialise meta queue!", __func__);
- return;
- }
+ zrouter.mq = meta_queue_new();
+
return;
}
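
The removed NULL checks in rib_queue_init() were dead code: meta_queue_new() allocates with XCALLOC (see the hunk above), and FRR's allocation wrappers are assumed to abort the process on failure rather than return NULL, so neither work_queue_new() nor meta_queue_new() can hand back a NULL pointer here.
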
diff --git a/zebra/zebra_router.c b/zebra/zebra_router.c
index 4022c1a26f..ae2910af41 100644
--- a/zebra/zebra_router.c
+++ b/zebra/zebra_router.c
@@ -23,7 +23,6 @@ DEFINE_MTYPE_STATIC(ZEBRA, ZEBRA_RT_TABLE, "Zebra VRF table");
struct zebra_router zrouter = {
.multipath_num = MULTIPATH_NUM,
- .ipv4_multicast_mode = MCAST_NO_CONFIG,
};
static inline int
@@ -221,19 +220,6 @@ uint32_t zebra_router_get_next_sequence(void)
memory_order_relaxed);
}
-void multicast_mode_ipv4_set(enum multicast_mode mode)
-{
- if (IS_ZEBRA_DEBUG_RIB)
- zlog_debug("%s: multicast lookup mode set (%d)", __func__,
- mode);
- zrouter.ipv4_multicast_mode = mode;
-}
-
-enum multicast_mode multicast_mode_ipv4_get(void)
-{
- return zrouter.ipv4_multicast_mode;
-}
-
void zebra_router_terminate(void)
{
struct zebra_router_table *zrt, *tmp;
diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h
index a637c3214e..28c4cf0790 100644
--- a/zebra/zebra_router.h
+++ b/zebra/zebra_router.h
@@ -34,17 +34,6 @@ RB_HEAD(zebra_router_table_head, zebra_router_table);
RB_PROTOTYPE(zebra_router_table_head, zebra_router_table,
zebra_router_table_entry, zebra_router_table_entry_compare)
-/* RPF lookup behaviour */
-enum multicast_mode {
- MCAST_NO_CONFIG = 0, /* MIX_MRIB_FIRST, but no show in config write */
- MCAST_MRIB_ONLY, /* MRIB only */
- MCAST_URIB_ONLY, /* URIB only */
- MCAST_MIX_MRIB_FIRST, /* MRIB, if nothing at all then URIB */
- MCAST_MIX_DISTANCE, /* MRIB & URIB, lower distance wins */
- MCAST_MIX_PFXLEN, /* MRIB & URIB, longer prefix wins */
- /* on equal value, MRIB wins for last 2 */
-};
-
/* An interface can be error-disabled if a protocol (such as EVPN or
* VRRP) detects a problem with keeping it operationally-up.
* If any of the protodown bits are set protodown-on is programmed
@@ -187,9 +176,6 @@ struct zebra_router {
uint32_t multipath_num;
- /* RPF Lookup behavior */
- enum multicast_mode ipv4_multicast_mode;
-
/*
* zebra start time and time of sweeping RIB of old routes
*/
@@ -287,10 +273,6 @@ static inline struct zebra_vrf *zebra_vrf_get_evpn(void)
: zebra_vrf_lookup_by_id(VRF_DEFAULT);
}
-extern void multicast_mode_ipv4_set(enum multicast_mode mode);
-
-extern enum multicast_mode multicast_mode_ipv4_get(void);
-
extern bool zebra_router_notify_on_ack(void);
static inline void zebra_router_set_supports_nhgs(bool support)
diff --git a/zebra/zebra_srv6_vty.c b/zebra/zebra_srv6_vty.c
index 5a80524149..6867b1bbb6 100644
--- a/zebra/zebra_srv6_vty.c
+++ b/zebra/zebra_srv6_vty.c
@@ -338,10 +338,6 @@ DEFUN_NOSH (srv6_locator,
}
locator = srv6_locator_alloc(argv[1]->arg);
- if (!locator) {
- vty_out(vty, "%% Alloc failed\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
locator->status_up = true;
VTY_PUSH_CONTEXT(SRV6_LOC_NODE, locator);
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index b65097e725..582d15627c 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -81,126 +81,14 @@ static void show_nexthop_detail_helper(struct vty *vty,
const struct nexthop *nexthop,
bool is_backup);
-static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table);
+static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table, afi_t afi,
+ safi_t safi);
static void show_ip_route_nht_dump(struct vty *vty,
const struct nexthop *nexthop,
const struct route_node *rn,
const struct route_entry *re,
unsigned int num);
-DEFUN (ip_multicast_mode,
- ip_multicast_mode_cmd,
- "ip multicast rpf-lookup-mode <urib-only|mrib-only|mrib-then-urib|lower-distance|longer-prefix>",
- IP_STR
- "Multicast options\n"
- "RPF lookup behavior\n"
- "Lookup in unicast RIB only\n"
- "Lookup in multicast RIB only\n"
- "Try multicast RIB first, fall back to unicast RIB\n"
- "Lookup both, use entry with lower distance\n"
- "Lookup both, use entry with longer prefix\n")
-{
- char *mode = argv[3]->text;
-
- if (strmatch(mode, "urib-only"))
- multicast_mode_ipv4_set(MCAST_URIB_ONLY);
- else if (strmatch(mode, "mrib-only"))
- multicast_mode_ipv4_set(MCAST_MRIB_ONLY);
- else if (strmatch(mode, "mrib-then-urib"))
- multicast_mode_ipv4_set(MCAST_MIX_MRIB_FIRST);
- else if (strmatch(mode, "lower-distance"))
- multicast_mode_ipv4_set(MCAST_MIX_DISTANCE);
- else if (strmatch(mode, "longer-prefix"))
- multicast_mode_ipv4_set(MCAST_MIX_PFXLEN);
- else {
- vty_out(vty, "Invalid mode specified\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- return CMD_SUCCESS;
-}
-
-DEFUN (no_ip_multicast_mode,
- no_ip_multicast_mode_cmd,
- "no ip multicast rpf-lookup-mode [<urib-only|mrib-only|mrib-then-urib|lower-distance|longer-prefix>]",
- NO_STR
- IP_STR
- "Multicast options\n"
- "RPF lookup behavior\n"
- "Lookup in unicast RIB only\n"
- "Lookup in multicast RIB only\n"
- "Try multicast RIB first, fall back to unicast RIB\n"
- "Lookup both, use entry with lower distance\n"
- "Lookup both, use entry with longer prefix\n")
-{
- multicast_mode_ipv4_set(MCAST_NO_CONFIG);
- return CMD_SUCCESS;
-}
-
-
-DEFPY (show_ip_rpf,
- show_ip_rpf_cmd,
- "show [ip$ip|ipv6$ipv6] rpf [json]",
- SHOW_STR
- IP_STR
- IPV6_STR
- "Display RPF information for multicast source\n"
- JSON_STR)
-{
- bool uj = use_json(argc, argv);
- struct route_show_ctx ctx = {
- .multi = false,
- };
-
- return do_show_ip_route(vty, VRF_DEFAULT_NAME, ip ? AFI_IP : AFI_IP6,
- SAFI_MULTICAST, false, uj, 0, NULL, false, 0, 0,
- 0, false, &ctx);
-}
-
-DEFPY (show_ip_rpf_addr,
- show_ip_rpf_addr_cmd,
- "show ip rpf A.B.C.D$address",
- SHOW_STR
- IP_STR
- "Display RPF information for multicast source\n"
- "IP multicast source address (e.g. 10.0.0.0)\n")
-{
- struct route_node *rn;
- struct route_entry *re;
-
- re = rib_match_multicast(AFI_IP, VRF_DEFAULT, (union g_addr *)&address,
- &rn);
-
- if (re)
- vty_show_ip_route_detail(vty, rn, 1, false, false);
- else
- vty_out(vty, "%% No match for RPF lookup\n");
-
- return CMD_SUCCESS;
-}
-
-DEFPY (show_ipv6_rpf_addr,
- show_ipv6_rpf_addr_cmd,
- "show ipv6 rpf X:X::X:X$address",
- SHOW_STR
- IPV6_STR
- "Display RPF information for multicast source\n"
- "IPv6 multicast source address\n")
-{
- struct route_node *rn;
- struct route_entry *re;
-
- re = rib_match_multicast(AFI_IP6, VRF_DEFAULT, (union g_addr *)&address,
- &rn);
-
- if (re)
- vty_show_ip_route_detail(vty, rn, 1, false, false);
- else
- vty_out(vty, "%% No match for RPF lookup\n");
-
- return CMD_SUCCESS;
-}
-
static char re_status_output_char(const struct route_entry *re,
const struct nexthop *nhop,
bool is_fib)
@@ -858,35 +746,36 @@ static void vty_show_ip_route_detail_json(struct vty *vty,
vty_json(vty, json);
}
-static void zebra_vty_display_vrf_header(struct vty *vty, struct zebra_vrf *zvrf, uint32_t tableid)
+static void zebra_vty_display_vrf_header(struct vty *vty, struct zebra_vrf *zvrf, uint32_t tableid,
+ afi_t afi, safi_t safi)
{
if (!tableid)
- vty_out(vty, "VRF %s:\n", zvrf_name(zvrf));
+ vty_out(vty, "%s %s VRF %s:\n", afi2str(afi), safi2str(safi), zvrf_name(zvrf));
else {
if (vrf_is_backend_netns())
- vty_out(vty, "VRF %s table %u:\n", zvrf_name(zvrf), tableid);
+ vty_out(vty, "%s %s VRF %s table %u:\n", afi2str(afi), safi2str(safi),
+ zvrf_name(zvrf), tableid);
else {
vrf_id_t vrf = zebra_vrf_lookup_by_table(tableid, zvrf->zns->ns_id);
if (vrf == VRF_DEFAULT && tableid != RT_TABLE_ID_MAIN)
- vty_out(vty, "table %u:\n", tableid);
+ vty_out(vty, "%s %s table %u:\n", afi2str(afi), safi2str(safi),
+ tableid);
else {
struct zebra_vrf *zvrf2 = zebra_vrf_lookup_by_id(vrf);
- vty_out(vty, "VRF %s table %u:\n", zvrf_name(zvrf2), tableid);
+ vty_out(vty, "%s %s VRF %s table %u:\n", afi2str(afi),
+ safi2str(safi), zvrf_name(zvrf2), tableid);
}
}
}
}
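
Every per-VRF header is now prefixed with the AFI/SAFI pair, which keeps unicast and the newly reachable multicast tables distinguishable in mixed output. Assuming afi2str()/safi2str() render as "IPv4"/"unicast" and so on, the headers come out roughly like:

    IPv4 unicast VRF default:
    IPv4 multicast VRF red table 254:
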
-static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf,
- struct route_table *table, afi_t afi,
- bool use_fib, route_tag_t tag,
- const struct prefix *longer_prefix_p,
- bool supernets_only, int type,
- unsigned short ospf_instance_id, bool use_json,
- uint32_t tableid, bool show_ng,
- struct route_show_ctx *ctx)
+static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf, struct route_table *table,
+ afi_t afi, safi_t safi, bool use_fib, route_tag_t tag,
+ const struct prefix *longer_prefix_p, bool supernets_only,
+ int type, unsigned short ospf_instance_id, bool use_json,
+ uint32_t tableid, bool show_ng, struct route_show_ctx *ctx)
{
struct route_node *rn;
struct route_entry *re;
@@ -958,9 +847,7 @@ static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf,
}
if (ctx->multi && ctx->header_done)
vty_out(vty, "\n");
- if (ctx->multi || zvrf_id(zvrf) != VRF_DEFAULT || tableid)
- zebra_vty_display_vrf_header(vty, zvrf, tableid);
-
+ zebra_vty_display_vrf_header(vty, zvrf, tableid, afi, safi);
ctx->header_done = true;
first = 0;
}
@@ -982,12 +869,10 @@ static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf,
vty_json_close(vty, first_json);
}
-static void do_show_ip_route_all(struct vty *vty, struct zebra_vrf *zvrf,
- afi_t afi, bool use_fib, bool use_json,
- route_tag_t tag,
- const struct prefix *longer_prefix_p,
- bool supernets_only, int type,
- unsigned short ospf_instance_id, bool show_ng,
+static void do_show_ip_route_all(struct vty *vty, struct zebra_vrf *zvrf, afi_t afi, safi_t safi,
+ bool use_fib, bool use_json, route_tag_t tag,
+ const struct prefix *longer_prefix_p, bool supernets_only,
+ int type, unsigned short ospf_instance_id, bool show_ng,
struct route_show_ctx *ctx)
{
struct zebra_router_table *zrt;
@@ -999,13 +884,11 @@ static void do_show_ip_route_all(struct vty *vty, struct zebra_vrf *zvrf,
if (zvrf != info->zvrf)
continue;
- if (zrt->afi != afi ||
- zrt->safi != SAFI_UNICAST)
+ if (zrt->afi != afi || zrt->safi != safi)
continue;
- do_show_ip_route(vty, zvrf_name(zvrf), afi, SAFI_UNICAST,
- use_fib, use_json, tag, longer_prefix_p,
- supernets_only, type, ospf_instance_id,
+ do_show_ip_route(vty, zvrf_name(zvrf), afi, safi, use_fib, use_json, tag,
+ longer_prefix_p, supernets_only, type, ospf_instance_id,
zrt->tableid, show_ng, ctx);
}
}
@@ -1038,7 +921,7 @@ static int do_show_ip_route(struct vty *vty, const char *vrf_name, afi_t afi,
}
if (tableid)
- table = zebra_router_find_table(zvrf, tableid, afi, SAFI_UNICAST);
+ table = zebra_router_find_table(zvrf, tableid, afi, safi);
else
table = zebra_vrf_table(afi, safi, zvrf_id(zvrf));
if (!table) {
@@ -1047,9 +930,9 @@ static int do_show_ip_route(struct vty *vty, const char *vrf_name, afi_t afi,
return CMD_SUCCESS;
}
- do_show_route_helper(vty, zvrf, table, afi, use_fib, tag,
- longer_prefix_p, supernets_only, type,
- ospf_instance_id, use_json, tableid, show_ng, ctx);
+ do_show_route_helper(vty, zvrf, table, afi, safi, use_fib, tag, longer_prefix_p,
+ supernets_only, type, ospf_instance_id, use_json, tableid, show_ng,
+ ctx);
return CMD_SUCCESS;
}
@@ -1702,27 +1585,35 @@ DEFPY_HIDDEN(rnh_hide_backups, rnh_hide_backups_cmd,
DEFPY (show_route,
show_route_cmd,
"show\
- <\
- ip$ipv4 <fib$fib|route> [table <(1-4294967295)$table|all$table_all>]\
- [vrf <NAME$vrf_name|all$vrf_all>]\
- [{\
- tag (1-4294967295)\
- |A.B.C.D/M$prefix longer-prefixes\
- |supernets-only$supernets_only\
- }]\
- [<\
- " FRR_IP_REDIST_STR_ZEBRA "$type_str\
- |ospf$type_str (1-65535)$ospf_instance_id\
- >]\
- |ipv6$ipv6 <fib$fib|route> [table <(1-4294967295)$table|all$table_all>]\
- [vrf <NAME$vrf_name|all$vrf_all>]\
- [{\
- tag (1-4294967295)\
- |X:X::X:X/M$prefix longer-prefixes\
- }]\
- [" FRR_IP6_REDIST_STR_ZEBRA "$type_str]\
- >\
- [<json$json|nexthop-group$ng>]",
+ <\
+ ip$ipv4 <fib$fib|route>\
+ [{\
+ table <(1-4294967295)$table|all$table_all>\
+ |mrib$mrib\
+ |vrf <NAME$vrf_name|all$vrf_all>\
+ }]\
+ [{\
+ tag (1-4294967295)\
+ |A.B.C.D/M$prefix longer-prefixes\
+ |supernets-only$supernets_only\
+ }]\
+ [<\
+ " FRR_IP_REDIST_STR_ZEBRA "$type_str\
+ |ospf$type_str (1-65535)$ospf_instance_id\
+ >]\
+ |ipv6$ipv6 <fib$fib|route>\
+ [{\
+ table <(1-4294967295)$table|all$table_all>\
+ |mrib$mrib\
+ |vrf <NAME$vrf_name|all$vrf_all>\
+ }]\
+ [{\
+ tag (1-4294967295)\
+ |X:X::X:X/M$prefix longer-prefixes\
+ }]\
+ [" FRR_IP6_REDIST_STR_ZEBRA "$type_str]\
+ >\
+ [<json$json|nexthop-group$ng>]",
SHOW_STR
IP_STR
"IP forwarding table\n"
@@ -1730,6 +1621,7 @@ DEFPY (show_route,
"Table to display\n"
"The table number to display\n"
"All tables\n"
+ "Multicast SAFI table\n"
VRF_FULL_CMD_HELP_STR
"Show only routes with tag\n"
"Tag value\n"
@@ -1745,6 +1637,7 @@ DEFPY (show_route,
"Table to display\n"
"The table number to display\n"
"All tables\n"
+ "Multicast SAFI table\n"
VRF_FULL_CMD_HELP_STR
"Show only routes with tag\n"
"Tag value\n"
@@ -1755,6 +1648,7 @@ DEFPY (show_route,
"Nexthop Group Information\n")
{
afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
+ safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST;
bool first_vrf_json = true;
struct vrf *vrf;
int type = 0;
@@ -1784,26 +1678,19 @@ DEFPY (show_route,
if (vrf_all) {
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if ((zvrf = vrf->info) == NULL
- || (zvrf->table[afi][SAFI_UNICAST] == NULL))
+ if ((zvrf = vrf->info) == NULL || (zvrf->table[afi][safi] == NULL))
continue;
if (json)
vty_json_key(vty, zvrf_name(zvrf),
&first_vrf_json);
if (table_all)
- do_show_ip_route_all(vty, zvrf, afi, !!fib,
- !!json, tag,
- prefix_str ? prefix : NULL,
- !!supernets_only, type,
- ospf_instance_id, !!ng,
- &ctx);
+ do_show_ip_route_all(vty, zvrf, afi, safi, !!fib, !!json, tag,
+ prefix_str ? prefix : NULL, !!supernets_only,
+ type, ospf_instance_id, !!ng, &ctx);
else
- do_show_ip_route(vty, zvrf_name(zvrf), afi,
- SAFI_UNICAST, !!fib, !!json,
- tag, prefix_str ? prefix : NULL,
- !!supernets_only, type,
- ospf_instance_id, table, !!ng,
- &ctx);
+ do_show_ip_route(vty, zvrf_name(zvrf), afi, safi, !!fib, !!json,
+ tag, prefix_str ? prefix : NULL, !!supernets_only,
+ type, ospf_instance_id, table, !!ng, &ctx);
}
if (json)
vty_json_close(vty, first_vrf_json);
@@ -1821,21 +1708,27 @@ DEFPY (show_route,
return CMD_SUCCESS;
if (table_all)
- do_show_ip_route_all(vty, zvrf, afi, !!fib, !!json, tag,
- prefix_str ? prefix : NULL,
- !!supernets_only, type,
+ do_show_ip_route_all(vty, zvrf, afi, safi, !!fib, !!json, tag,
+ prefix_str ? prefix : NULL, !!supernets_only, type,
ospf_instance_id, !!ng, &ctx);
else
- do_show_ip_route(vty, vrf->name, afi, SAFI_UNICAST,
- !!fib, !!json, tag,
- prefix_str ? prefix : NULL,
- !!supernets_only, type,
+ do_show_ip_route(vty, vrf->name, afi, safi, !!fib, !!json, tag,
+ prefix_str ? prefix : NULL, !!supernets_only, type,
ospf_instance_id, table, !!ng, &ctx);
}
return CMD_SUCCESS;
}
+ALIAS_DEPRECATED (show_route,
+ show_ip_rpf_cmd,
+ "show <ip$ipv4|ipv6$ipv6> rpf$mrib [json$json]",
+ SHOW_STR
+ IP_STR
+ IPV6_STR
+ "Display RPF information for multicast source\n"
+ JSON_STR);
+
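
The deprecated alias keeps the old spelling working while steering users toward the MRIB form of the unified show command. Illustrative vtysh usage, based on the grammar added in this patch (per-source RPF lookups now go through the detail command, changed below):

    show ip rpf                    # deprecated, equivalent to the next line
    show ip route mrib             # dump the multicast SAFI table
    show ip route mrib 10.0.0.1    # RPF detail for one source
    show ip route mrib summary     # summary of the MRIB
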
ALIAS_HIDDEN (show_route,
show_ro_cmd,
"show <ip$ipv4|ipv6$ipv6> ro",
@@ -1849,28 +1742,38 @@ DEFPY (show_route_detail,
show_route_detail_cmd,
"show\
<\
- ip$ipv4 <fib$fib|route> [vrf <NAME$vrf_name|all$vrf_all>]\
- <\
- A.B.C.D$address\
- |A.B.C.D/M$prefix\
- >\
- |ipv6$ipv6 <fib$fib|route> [vrf <NAME$vrf_name|all$vrf_all>]\
- <\
- X:X::X:X$address\
- |X:X::X:X/M$prefix\
- >\
- >\
- [json$json] [nexthop-group$ng]",
+ ip$ipv4 <fib$fib|route>\
+ [{\
+ mrib$mrib\
+ |vrf <NAME$vrf_name|all$vrf_all>\
+ }]\
+ <\
+ A.B.C.D$address\
+ |A.B.C.D/M$prefix\
+ >\
+ |ipv6$ipv6 <fib$fib|route>\
+ [{\
+ mrib$mrib\
+ |vrf <NAME$vrf_name|all$vrf_all>\
+ }]\
+ <\
+ X:X::X:X$address\
+ |X:X::X:X/M$prefix\
+ >\
+ >\
+ [json$json] [nexthop-group$ng]",
SHOW_STR
IP_STR
"IP forwarding table\n"
"IP routing table\n"
+ "Multicast SAFI table\n"
VRF_FULL_CMD_HELP_STR
"Network in the IP routing table to display\n"
"IP prefix <network>/<length>, e.g., 35.0.0.0/8\n"
IP6_STR
"IPv6 forwarding table\n"
"IPv6 routing table\n"
+ "Multicast SAFI table\n"
VRF_FULL_CMD_HELP_STR
"IPv6 Address\n"
"IPv6 prefix\n"
@@ -1878,6 +1781,7 @@ DEFPY (show_route_detail,
"Nexthop Group Information\n")
{
afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
+ safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST;
struct route_table *table;
struct prefix p;
struct route_node *rn;
@@ -1898,8 +1802,7 @@ DEFPY (show_route_detail,
struct zebra_vrf *zvrf;
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if ((zvrf = vrf->info) == NULL
- || (table = zvrf->table[afi][SAFI_UNICAST]) == NULL)
+ if ((zvrf = vrf->info) == NULL || (table = zvrf->table[afi][safi]) == NULL)
continue;
rn = route_node_match(table, &p);
@@ -1920,7 +1823,7 @@ DEFPY (show_route_detail,
if (json)
vty_show_ip_route_detail_json(vty, rn, use_fib);
else
- vty_show_ip_route_detail(vty, rn, 0, use_fib,
+ vty_show_ip_route_detail(vty, rn, (safi == SAFI_MULTICAST), use_fib,
show_ng);
route_unlock_node(rn);
@@ -1945,7 +1848,7 @@ DEFPY (show_route_detail,
if (vrf_name)
VRF_GET_ID(vrf_id, vrf_name, false);
- table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id);
+ table = zebra_vrf_table(afi, safi, vrf_id);
if (!table)
return CMD_SUCCESS;
@@ -1973,7 +1876,8 @@ DEFPY (show_route_detail,
if (json)
vty_show_ip_route_detail_json(vty, rn, use_fib);
else
- vty_show_ip_route_detail(vty, rn, 0, use_fib, show_ng);
+ vty_show_ip_route_detail(vty, rn, (safi == SAFI_MULTICAST), use_fib,
+ show_ng);
route_unlock_node(rn);
}
@@ -1983,12 +1887,13 @@ DEFPY (show_route_detail,
DEFPY (show_route_summary,
show_route_summary_cmd,
- "show <ip$ipv4|ipv6$ipv6> route [vrf <NAME$vrf_name|all$vrf_all>] \
+ "show <ip$ipv4|ipv6$ipv6> route [{mrib$mrib|vrf <NAME$vrf_name|all$vrf_all>}] \
summary [table (1-4294967295)$table_id] [prefix$prefix] [json]",
SHOW_STR
IP_STR
IP6_STR
"IP routing table\n"
+ "Multicast SAFI table\n"
VRF_FULL_CMD_HELP_STR
"Summary of all routes\n"
"Table to display summary for\n"
@@ -1997,6 +1902,7 @@ DEFPY (show_route_summary,
JSON_STR)
{
afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
+ safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST;
struct route_table *table;
bool uj = use_json(argc, argv);
json_object *vrf_json = NULL;
@@ -2013,12 +1919,11 @@ DEFPY (show_route_summary,
continue;
if (table_id == 0)
- table = zebra_vrf_table(afi, SAFI_UNICAST,
- zvrf->vrf->vrf_id);
+ table = zebra_vrf_table(afi, safi, zvrf->vrf->vrf_id);
else
- table = zebra_vrf_lookup_table_with_table_id(
- afi, SAFI_UNICAST, zvrf->vrf->vrf_id,
- table_id);
+ table = zebra_vrf_lookup_table_with_table_id(afi, safi,
+ zvrf->vrf->vrf_id,
+ table_id);
if (!table)
continue;
@@ -2040,10 +1945,9 @@ DEFPY (show_route_summary,
VRF_GET_ID(vrf_id, vrf_name, false);
if (table_id == 0)
- table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id);
+ table = zebra_vrf_table(afi, safi, vrf_id);
else
- table = zebra_vrf_lookup_table_with_table_id(
- afi, SAFI_UNICAST, vrf_id, table_id);
+ table = zebra_vrf_lookup_table_with_table_id(afi, safi, vrf_id, table_id);
if (!table)
return CMD_SUCCESS;
@@ -2056,50 +1960,49 @@ DEFPY (show_route_summary,
return CMD_SUCCESS;
}
-DEFUN_HIDDEN (show_route_zebra_dump,
+DEFPY_HIDDEN (show_route_zebra_dump,
show_route_zebra_dump_cmd,
- "show <ip|ipv6> zebra route dump [vrf VRFNAME]",
+ "show <ip$ipv4|ipv6$ipv6> zebra route dump [{mrib$mrib|vrf <NAME$vrf_name|all$vrf_all>}]",
SHOW_STR
IP_STR
IP6_STR
"Zebra daemon\n"
"Routing table\n"
"All information\n"
- VRF_CMD_HELP_STR)
+ "Multicast SAFI table\n"
+ VRF_FULL_CMD_HELP_STR)
{
- afi_t afi = AFI_IP;
+ afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
+ safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST;
struct route_table *table;
- const char *vrf_name = NULL;
- int idx = 0;
- afi = strmatch(argv[1]->text, "ipv6") ? AFI_IP6 : AFI_IP;
-
- if (argv_find(argv, argc, "vrf", &idx))
- vrf_name = argv[++idx]->arg;
-
- if (!vrf_name) {
+ if (vrf_all) {
struct vrf *vrf;
struct zebra_vrf *zvrf;
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
zvrf = vrf->info;
- if ((zvrf == NULL)
- || (zvrf->table[afi][SAFI_UNICAST] == NULL))
+ if (zvrf == NULL)
continue;
- table = zvrf->table[afi][SAFI_UNICAST];
- show_ip_route_dump_vty(vty, table);
+ table = zebra_vrf_table(afi, safi, zvrf->vrf->vrf_id);
+ if (!table)
+ continue;
+
+ show_ip_route_dump_vty(vty, table, afi, safi);
}
} else {
vrf_id_t vrf_id = VRF_DEFAULT;
- VRF_GET_ID(vrf_id, vrf_name, true);
+ if (vrf_name)
+ VRF_GET_ID(vrf_id, vrf_name, false);
+
+ table = zebra_vrf_table(afi, safi, vrf_id);
- table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id);
if (!table)
return CMD_SUCCESS;
- show_ip_route_dump_vty(vty, table);
+ show_ip_route_dump_vty(vty, table, afi, safi);
}
return CMD_SUCCESS;
@@ -2193,7 +2096,8 @@ static void show_ip_route_nht_dump(struct vty *vty,
}
}
-static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table)
+static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table, afi_t afi,
+ safi_t safi)
{
struct route_node *rn;
struct route_entry *re;
@@ -2205,7 +2109,7 @@ static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table)
struct nexthop *nexthop = NULL;
int nexthop_num = 0;
- vty_out(vty, "\nIPv4/IPv6 Routing table dump\n");
+ vty_out(vty, "\n%s %s Routing table dump\n", afi2str(afi), safi2str(safi));
vty_out(vty, "----------------------------\n");
for (rn = route_top(table); rn; rn = route_next(rn)) {
@@ -3757,22 +3661,6 @@ static int config_write_protocol(struct vty *vty)
vty_out(vty, "zebra zapi-packets %u\n",
zrouter.packets_to_process);
- enum multicast_mode ipv4_multicast_mode = multicast_mode_ipv4_get();
-
- if (ipv4_multicast_mode != MCAST_NO_CONFIG)
- vty_out(vty, "ip multicast rpf-lookup-mode %s\n",
- ipv4_multicast_mode == MCAST_URIB_ONLY
- ? "urib-only"
- : ipv4_multicast_mode == MCAST_MRIB_ONLY
- ? "mrib-only"
- : ipv4_multicast_mode
- == MCAST_MIX_MRIB_FIRST
- ? "mrib-then-urib"
- : ipv4_multicast_mode
- == MCAST_MIX_DISTANCE
- ? "lower-distance"
- : "longer-prefix");
-
/* Include dataplane info */
dplane_config_write_helper(vty);
@@ -4356,9 +4244,6 @@ void zebra_vty_init(void)
install_element(CONFIG_NODE, &allow_external_route_update_cmd);
install_element(CONFIG_NODE, &no_allow_external_route_update_cmd);
- install_element(CONFIG_NODE, &ip_multicast_mode_cmd);
- install_element(CONFIG_NODE, &no_ip_multicast_mode_cmd);
-
install_element(CONFIG_NODE, &zebra_nexthop_group_keep_cmd);
install_element(CONFIG_NODE, &ip_zebra_import_table_distance_cmd);
install_element(CONFIG_NODE, &no_ip_zebra_import_table_cmd);
@@ -4376,15 +4261,12 @@ void zebra_vty_init(void)
install_element(VIEW_NODE, &show_vrf_cmd);
install_element(VIEW_NODE, &show_vrf_vni_cmd);
install_element(VIEW_NODE, &show_route_cmd);
+ install_element(VIEW_NODE, &show_ip_rpf_cmd);
install_element(VIEW_NODE, &show_ro_cmd);
install_element(VIEW_NODE, &show_route_detail_cmd);
install_element(VIEW_NODE, &show_route_summary_cmd);
install_element(VIEW_NODE, &show_ip_nht_cmd);
- install_element(VIEW_NODE, &show_ip_rpf_cmd);
- install_element(VIEW_NODE, &show_ip_rpf_addr_cmd);
- install_element(VIEW_NODE, &show_ipv6_rpf_addr_cmd);
-
install_element(CONFIG_NODE, &rnh_hide_backups_cmd);
install_element(VIEW_NODE, &show_frr_cmd);
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index ad112a4ab1..c60eeab946 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -2203,7 +2203,7 @@ static int zl3vni_send_add_to_client(struct zebra_l3vni *zl3vni)
is_anycast_mac = false;
}
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
/* The message is used for both vni add and/or update like
* vrr mac is added for l3vni SVI.
@@ -2246,7 +2246,7 @@ static int zl3vni_send_del_to_client(struct zebra_l3vni *zl3vni)
if (!client)
return 0;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_L3VNI_DEL, zl3vni_vrf_id(zl3vni));
stream_putl(s, zl3vni->vni);
@@ -4403,6 +4403,7 @@ static int zebra_vxlan_check_del_local_mac(struct interface *ifp,
UNSET_FLAG(mac->flags, ZEBRA_MAC_ALL_LOCAL_FLAGS);
UNSET_FLAG(mac->flags, ZEBRA_MAC_STICKY);
SET_FLAG(mac->flags, ZEBRA_MAC_AUTO);
+ mac->rem_seq = 0;
}
return 0;
@@ -5860,7 +5861,7 @@ static int zebra_vxlan_sg_send(struct zebra_vrf *zvrf,
if (!CHECK_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG))
return 0;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, cmd, VRF_DEFAULT);
stream_putl(s, IPV4_MAX_BYTELEN);
diff --git a/zebra/zserv.c b/zebra/zserv.c
index d6c017d259..7ef3582329 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -184,10 +184,9 @@ void zserv_log_message(const char *errmsg, struct stream *msg,
*/
static void zserv_client_fail(struct zserv *client)
{
- flog_warn(
- EC_ZEBRA_CLIENT_IO_ERROR,
- "Client '%s' (session id %d) encountered an error and is shutting down.",
- zebra_route_string(client->proto), client->session_id);
+ flog_warn(EC_ZEBRA_CLIENT_IO_ERROR,
+ "Client %d '%s' (session id %d) encountered an error and is shutting down.",
+ client->sock, zebra_route_string(client->proto), client->session_id);
atomic_store_explicit(&client->pthread->running, false,
memory_order_relaxed);
@@ -468,8 +467,8 @@ static void zserv_read(struct event *thread)
}
if (IS_ZEBRA_DEBUG_PACKET)
- zlog_debug("Read %d packets from client: %s. Current ibuf fifo count: %zu. Conf P2p %d",
- p2p_avail - p2p, zebra_route_string(client->proto),
+ zlog_debug("Read %d packets from client: %s(%d). Current ibuf fifo count: %zu. Conf P2p %d",
+ p2p_avail - p2p, zebra_route_string(client->proto), client->sock,
client_ibuf_fifo_cnt, p2p_orig);
/* Reschedule ourselves since we have space in ibuf_fifo */