-rw-r--r--  babeld/babel_interface.c | 7
-rw-r--r--  babeld/babeld.c | 6
-rw-r--r--  bfdd/bfd.c | 151
-rw-r--r--  bfdd/bfd.h | 2
-rw-r--r--  bfdd/dplane.c | 3
-rw-r--r--  bgpd/bgp_attr.c | 9
-rw-r--r--  bgpd/bgp_bfd.c | 6
-rw-r--r--  bgpd/bgp_bmp.c | 169
-rw-r--r--  bgpd/bgp_clist.c | 1
-rw-r--r--  bgpd/bgp_evpn.c | 529
-rw-r--r--  bgpd/bgp_evpn.h | 2
-rw-r--r--  bgpd/bgp_evpn_private.h | 16
-rw-r--r--  bgpd/bgp_evpn_vty.c | 121
-rw-r--r--  bgpd/bgp_flowspec_vty.c | 2
-rw-r--r--  bgpd/bgp_fsm.c | 136
-rw-r--r--  bgpd/bgp_labelpool.c | 4
-rw-r--r--  bgpd/bgp_main.c | 2
-rw-r--r--  bgpd/bgp_mplsvpn.c | 42
-rw-r--r--  bgpd/bgp_mplsvpn.h | 1
-rw-r--r--  bgpd/bgp_network.c | 28
-rw-r--r--  bgpd/bgp_network.h | 6
-rw-r--r--  bgpd/bgp_nexthop.c | 181
-rw-r--r--  bgpd/bgp_nexthop.h | 2
-rw-r--r--  bgpd/bgp_nht.c | 11
-rw-r--r--  bgpd/bgp_packet.c | 4
-rw-r--r--  bgpd/bgp_route.c | 688
-rw-r--r--  bgpd/bgp_route.h | 50
-rw-r--r--  bgpd/bgp_rpki.c | 170
-rw-r--r--  bgpd/bgp_table.h | 11
-rw-r--r--  bgpd/bgp_updgrp.c | 10
-rw-r--r--  bgpd/bgp_updgrp_adv.c | 10
-rw-r--r--  bgpd/bgp_vty.c | 222
-rw-r--r--  bgpd/bgp_zebra.c | 72
-rw-r--r--  bgpd/bgp_zebra.h | 2
-rw-r--r--  bgpd/bgpd.c | 339
-rw-r--r--  bgpd/bgpd.h | 41
-rw-r--r--  doc/developer/grpc.rst | 1
-rw-r--r--  doc/developer/topotests.rst | 2
-rw-r--r--  doc/developer/workflow.rst | 10
-rw-r--r--  doc/figures/releases.dot | 44
-rw-r--r--  doc/user/bgp.rst | 7
-rw-r--r--  doc/user/filter.rst | 27
-rw-r--r--  doc/user/pim.rst | 196
-rw-r--r--  doc/user/zebra.rst | 94
-rw-r--r--  docker/ubuntu-ci/Dockerfile | 3
-rw-r--r--  include/linux/if_packet.h | 317
-rw-r--r--  isisd/isis_circuit.c | 37
-rw-r--r--  isisd/isis_cli.c | 16
-rw-r--r--  isisd/isis_nb.c | 8
-rw-r--r--  isisd/isis_nb.h | 4
-rw-r--r--  isisd/isis_pdu.c | 3
-rw-r--r--  isisd/isis_pfpacket.c | 56
-rw-r--r--  lib/bfd.c | 8
-rw-r--r--  lib/darr.c | 3
-rw-r--r--  lib/darr.h | 24
-rw-r--r--  lib/log.c | 2
-rw-r--r--  lib/mgmt_msg_native.h | 16
-rw-r--r--  lib/monotime.h | 16
-rw-r--r--  lib/nexthop.c | 26
-rw-r--r--  lib/nexthop.h | 2
-rw-r--r--  lib/northbound_cli.c | 13
-rw-r--r--  lib/northbound_cli.h | 3
-rw-r--r--  lib/plist.c | 4
-rw-r--r--  lib/routemap.c | 38
-rw-r--r--  lib/routemap.h | 11
-rw-r--r--  lib/sockopt.c | 8
-rw-r--r--  lib/sockopt.h | 4
-rw-r--r--  lib/table.c | 26
-rw-r--r--  lib/table.h | 4
-rw-r--r--  lib/vty.c | 16
-rw-r--r--  lib/vty.h | 4
-rw-r--r--  lib/zclient.c | 13
-rw-r--r--  lib/zclient.h | 2
-rw-r--r--  mgmtd/mgmt_be_adapter.c | 6
-rw-r--r--  mgmtd/mgmt_fe_adapter.c | 2
-rw-r--r--  nhrpd/nhrp_vty.c | 7
-rw-r--r--  ospfd/ospf_abr.c | 2
-rw-r--r--  ospfd/ospf_asbr.c | 86
-rw-r--r--  ospfd/ospf_asbr.h | 6
-rw-r--r--  ospfd/ospf_flood.c | 2
-rw-r--r--  ospfd/ospf_lsa.c | 9
-rw-r--r--  ospfd/ospf_sr.c | 6
-rw-r--r--  ospfd/ospf_vty.c | 3
-rw-r--r--  ospfd/ospf_zebra.c | 30
-rw-r--r--  ospfd/ospf_zebra.h | 3
-rw-r--r--  ospfd/ospfd.c | 2
-rw-r--r--  pathd/path_pcep_debug.c | 3
-rw-r--r--  pbrd/pbr_map.c | 8
-rw-r--r--  pimd/pim6_mld.c | 2
-rw-r--r--  pimd/pim_autorp.c | 1563
-rw-r--r--  pimd/pim_autorp.h | 75
-rw-r--r--  pimd/pim_bsm.c | 14
-rw-r--r--  pimd/pim_bsm.h | 5
-rw-r--r--  pimd/pim_bsr_rpdb.c | 4
-rw-r--r--  pimd/pim_cmd.c | 258
-rw-r--r--  pimd/pim_cmd_common.c | 337
-rw-r--r--  pimd/pim_cmd_common.h | 16
-rw-r--r--  pimd/pim_iface.c | 42
-rw-r--r--  pimd/pim_iface.h | 6
-rw-r--r--  pimd/pim_igmp.c | 2
-rw-r--r--  pimd/pim_igmp_mtrace.c | 9
-rw-r--r--  pimd/pim_igmpv2.c | 3
-rw-r--r--  pimd/pim_igmpv3.c | 26
-rw-r--r--  pimd/pim_instance.c | 31
-rw-r--r--  pimd/pim_instance.h | 12
-rw-r--r--  pimd/pim_join.c | 14
-rw-r--r--  pimd/pim_mroute.c | 16
-rw-r--r--  pimd/pim_msdp.c | 332
-rw-r--r--  pimd/pim_msdp.h | 81
-rw-r--r--  pimd/pim_msdp_packet.c | 90
-rw-r--r--  pimd/pim_msdp_socket.c | 29
-rw-r--r--  pimd/pim_nb.c | 97
-rw-r--r--  pimd/pim_nb.h | 25
-rw-r--r--  pimd/pim_nb_config.c | 709
-rw-r--r--  pimd/pim_nht.c | 1314
-rw-r--r--  pimd/pim_nht.h | 104
-rw-r--r--  pimd/pim_register.c | 3
-rw-r--r--  pimd/pim_rp.c | 134
-rw-r--r--  pimd/pim_rp.h | 2
-rw-r--r--  pimd/pim_rpf.c | 142
-rw-r--r--  pimd/pim_rpf.h | 17
-rw-r--r--  pimd/pim_tib.c | 20
-rw-r--r--  pimd/pim_upstream.c | 37
-rw-r--r--  pimd/pim_upstream.h | 1
-rw-r--r--  pimd/pim_util.c | 102
-rw-r--r--  pimd/pim_util.h | 7
-rw-r--r--  pimd/pim_vty.c | 35
-rw-r--r--  pimd/pim_vxlan.c | 7
-rw-r--r--  pimd/pim_zebra.c | 1
-rw-r--r--  pimd/pim_zlookup.c | 99
-rw-r--r--  pimd/pim_zlookup.h | 1
-rw-r--r--  pimd/pimd.c | 6
-rw-r--r--  ripd/ripd.c | 8
-rw-r--r--  staticd/static_nht.c | 5
-rw-r--r--  tests/helpers/python/frrtest.py | 2
-rw-r--r--  tests/topotests/Dockerfile | 138
-rw-r--r--  tests/topotests/all_protocol_startup/r1/ipv4_routes.ref | 1
-rw-r--r--  tests/topotests/all_protocol_startup/r1/ipv6_routes.ref | 1
-rw-r--r--  tests/topotests/all_protocol_startup/test_all_protocol_startup.py | 72
-rw-r--r--  tests/topotests/bfd_profiles_topo1/r2/bgpd.conf | 2
-rw-r--r--  tests/topotests/bfd_profiles_topo1/r3/bgpd.conf | 1
-rw-r--r--  tests/topotests/bfd_profiles_topo1/r4/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_addpath_best_selected/test_bgp_addpath_best_selected.py | 89
-rw-r--r--  tests/topotests/bgp_aggregate_address_topo1/r1/bgpd.conf | 2
-rw-r--r--  tests/topotests/bgp_aggregate_address_topo1/r2/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_aggregator_zero/r1/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_aspath_zero/r1/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_bmp/bgpbmp.py | 230
-rw-r--r--  tests/topotests/bgp_bmp/bmp1vrf/bmp-update-loc-rib-step1.json (renamed from tests/topotests/bgp_bmp_vrf/bmp1/bmp-update-loc-rib-step1.json) | 2
-rw-r--r--  tests/topotests/bgp_bmp/bmp1vrf/bmp-update-post-policy-step1.json (renamed from tests/topotests/bgp_bmp_vrf/bmp1/bmp-update-post-policy-step1.json) | 8
-rw-r--r--  tests/topotests/bgp_bmp/bmp1vrf/bmp-update-pre-policy-step1.json (renamed from tests/topotests/bgp_bmp_vrf/bmp1/bmp-update-pre-policy-step1.json) | 8
-rw-r--r--  tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-loc-rib-step1.json (renamed from tests/topotests/bgp_bmp_vrf/bmp1/bmp-withdraw-loc-rib-step1.json) | 2
-rw-r--r--  tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-post-policy-step1.json (renamed from tests/topotests/bgp_bmp_vrf/bmp1/bmp-withdraw-post-policy-step1.json) | 8
-rw-r--r--  tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-pre-policy-step1.json (renamed from tests/topotests/bgp_bmp_vrf/bmp1/bmp-withdraw-pre-policy-step1.json) | 8
-rw-r--r--  tests/topotests/bgp_bmp/r1/frr.conf (renamed from tests/topotests/bgp_bmp/r1/bgpd.conf) | 9
-rw-r--r--  tests/topotests/bgp_bmp/r1/zebra.conf | 7
-rw-r--r--  tests/topotests/bgp_bmp/r1vrf/frr.conf (renamed from tests/topotests/bgp_bmp_vrf/r1/bgpd.conf) | 10
-rw-r--r--  tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv4-update-step1.json (renamed from tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv4-update-step1.json) | 1
-rw-r--r--  tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv4-withdraw-step1.json (renamed from tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv4-withdraw-step1.json) | 0
-rw-r--r--  tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv6-update-step1.json (renamed from tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv6-update-step1.json) | 2
-rw-r--r--  tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv6-withdraw-step1.json (renamed from tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv6-withdraw-step1.json) | 0
-rw-r--r--  tests/topotests/bgp_bmp/r2/frr.conf (renamed from tests/topotests/bgp_bmp/r2/bgpd.conf) | 8
-rw-r--r--  tests/topotests/bgp_bmp/r2/zebra.conf | 8
-rw-r--r--  tests/topotests/bgp_bmp/r2vrf/frr.conf (renamed from tests/topotests/bgp_bmp_vrf/r2/bgpd.conf) | 8
-rw-r--r--  tests/topotests/bgp_bmp/test_bgp_bmp.py | 476
-rw-r--r--  tests/topotests/bgp_bmp/test_bgp_bmp_1.py | 257
-rw-r--r--  tests/topotests/bgp_bmp/test_bgp_bmp_2.py | 257
-rw-r--r--  tests/topotests/bgp_bmp_vrf/r1/zebra.conf | 7
-rw-r--r--  tests/topotests/bgp_bmp_vrf/r2/zebra.conf | 8
-rw-r--r--  tests/topotests/bgp_bmp_vrf/test_bgp_bmp_vrf.py | 418
-rw-r--r--  tests/topotests/bgp_ecmp_topo1/r1/bgpd.conf | 20
-rw-r--r--  tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json | 131
-rw-r--r--  tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json | 191
-rw-r--r--  tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json | 131
-rw-r--r--  tests/topotests/bgp_evpn_rt5/r2/bgpd.conf | 22
-rw-r--r--  tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py | 243
-rw-r--r--  tests/topotests/bgp_flowspec/r1/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_gr_notification/test_bgp_gr_notification.py | 15
-rw-r--r--  tests/topotests/bgp_invalid_nexthop/r1/frr.conf | 1
-rw-r--r--  tests/topotests/bgp_lu_topo1/R3/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_lu_topo1/R3/zebra.conf | 1
-rw-r--r--  tests/topotests/bgp_lu_topo2/R3/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_lu_topo2/R3/staticd.conf | 1
-rw-r--r--  tests/topotests/bgp_lu_topo2/R3/zebra.conf | 1
-rw-r--r--  tests/topotests/bgp_max_med_on_startup/r1/bgpd.conf | 2
-rw-r--r--  tests/topotests/bgp_max_med_on_startup/test_bgp_max_med_on_startup.py | 4
-rwxr-xr-x  tests/topotests/bgp_minimum_holdtime/test_bgp_minimum_holdtime.py | 2
-rw-r--r--  tests/topotests/bgp_multiview_topo1/r1/bgpd.conf | 8
-rw-r--r--  tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf | 6
-rw-r--r--  tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py | 5
-rw-r--r--  tests/topotests/bgp_path_attribute_discard/r1/frr.conf | 1
-rw-r--r--  tests/topotests/bgp_path_attribute_discard/r2/frr.conf | 1
-rw-r--r--  tests/topotests/bgp_path_attribute_discard/test_bgp_path_attribute_discard.py | 21
-rw-r--r--  tests/topotests/bgp_path_attribute_treat_as_withdraw/test_bgp_path_attribute_treat_as_withdraw.py | 21
-rw-r--r--  tests/topotests/bgp_peer_group/r1/bgpd.conf | 12
-rw-r--r--  tests/topotests/bgp_peer_group/r1/frr.conf | 28
-rw-r--r--  tests/topotests/bgp_peer_group/r1/zebra.conf | 9
-rw-r--r--  tests/topotests/bgp_peer_group/r2/bgpd.conf | 11
-rw-r--r--  tests/topotests/bgp_peer_group/r2/frr.conf | 19
-rw-r--r--  tests/topotests/bgp_peer_group/r2/zebra.conf | 9
-rw-r--r--  tests/topotests/bgp_peer_group/r3/frr.conf (renamed from tests/topotests/bgp_peer_group/r3/bgpd.conf) | 5
-rw-r--r--  tests/topotests/bgp_peer_group/r3/zebra.conf | 6
-rw-r--r--  tests/topotests/bgp_peer_group/r4/frr.conf | 7
-rw-r--r--  tests/topotests/bgp_peer_group/test_bgp_peer-group.py | 70
-rw-r--r--  tests/topotests/bgp_peer_type_multipath_relax/r1/bgpd.conf | 8
-rw-r--r--  tests/topotests/bgp_peer_type_multipath_relax/r2/bgpd.conf | 4
-rw-r--r--  tests/topotests/bgp_prefix_sid/r1/bgpd.conf | 2
-rw-r--r--  tests/topotests/bgp_prefix_sid2/r1/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_route_server_client/r1/bgpd.conf | 10
-rw-r--r--  tests/topotests/bgp_route_server_client/r2/bgpd.conf | 8
-rw-r--r--  tests/topotests/bgp_route_server_client/r3/bgpd.conf | 2
-rw-r--r--  tests/topotests/bgp_route_server_client/r4/bgpd.conf | 2
-rw-r--r--  tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py | 3
-rw-r--r--  tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json | 70
-rw-r--r--  tests/topotests/bgp_rpki_topo1/r2/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py | 29
-rw-r--r--  tests/topotests/bgp_show_advertised_routes_detail/__init__.py (renamed from tests/topotests/bgp_bmp_vrf/__init__.py) | 0
-rw-r--r--  tests/topotests/bgp_show_advertised_routes_detail/r1/frr.conf | 13
-rw-r--r--  tests/topotests/bgp_show_advertised_routes_detail/r2/frr.conf | 29
-rw-r--r--  tests/topotests/bgp_show_advertised_routes_detail/r3/frr.conf | 11
-rw-r--r--  tests/topotests/bgp_show_advertised_routes_detail/test_bgp_show_advertised_routes_detail.py | 88
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/ce1/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/ce1/zebra.conf | 2
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/ce2/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/ce2/zebra.conf | 2
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/ce3/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/ce3/zebra.conf | 2
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/ce4/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/ce4/zebra.conf | 2
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/r1/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/r2/zebra.conf | 2
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/r3/zebra.conf | 2
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/r4/bgpd.conf | 1
-rw-r--r--  tests/topotests/bgp_snmp_mplsl3vpn/r4/zebra.conf | 2
-rw-r--r--  tests/topotests/bgp_vpnv4_ebgp_vpn_auto/__init__.py | 0
-rw-r--r--  tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r1/frr.conf | 117
-rw-r--r--  tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r2/frr.conf | 88
-rw-r--r--  tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r3/frr.conf | 32
-rw-r--r--  tests/topotests/bgp_vpnv4_ebgp_vpn_auto/test_bgp_vpnv4_vpn_auto.py | 191
-rw-r--r--  tests/topotests/bgp_vpnv4_import_allowas_in/__init__.py | 0
-rw-r--r--  tests/topotests/bgp_vpnv4_import_allowas_in/r1/frr.conf | 30
-rw-r--r--  tests/topotests/bgp_vpnv4_import_allowas_in/r2/frr.conf | 40
-rw-r--r--  tests/topotests/bgp_vpnv4_import_allowas_in/test_bgp_vpnv4_import_allowas_in.py | 135
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py | 16
-rw-r--r--  tests/topotests/bgp_vpnv4_route_leak_basic/r1/frr.conf | 75
-rw-r--r--  tests/topotests/bgp_vpnv4_route_leak_basic/setup_vrfs | 17
-rw-r--r--  tests/topotests/bgp_vpnv4_route_leak_basic/test_bgp_vpnv4_route_leak_basic.py | 517
-rw-r--r--  tests/topotests/bgp_vrf_netns/r1/bgpd.conf | 1
-rw-r--r--  tests/topotests/docker/README.md | 2
-rwxr-xr-x  tests/topotests/docker/build.sh | 4
-rwxr-xr-x  tests/topotests/docker/frr-topotests.sh | 17
-rwxr-xr-x  tests/topotests/docker/inner/compile_frr.sh | 5
-rwxr-xr-x  tests/topotests/docker/inner/entrypoint.sh | 5
-rw-r--r--  tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py | 12
-rw-r--r--  tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py | 15
-rw-r--r--  tests/topotests/isis_topo1/test_isis_topo1.py | 161
-rw-r--r--  tests/topotests/lib/bgp.py | 19
-rw-r--r--  tests/topotests/lib/bmp_collector/bgp/update/path_attributes.py | 6
-rwxr-xr-x  tests/topotests/lib/bmp_collector/bmpserver.py (renamed from tests/topotests/lib/bmp_collector/bmpserver) | 84
-rw-r--r--  tests/topotests/lib/pim.py | 40
-rw-r--r--  tests/topotests/lib/snmptest.py | 6
-rw-r--r--  tests/topotests/lib/topogen.py | 7
-rw-r--r--  tests/topotests/mgmt_config/r1/early-end-zebra.conf | 2
-rw-r--r--  tests/topotests/mgmt_config/r1/early-end2-zebra.conf | 2
-rw-r--r--  tests/topotests/mgmt_config/r1/early-exit-zebra.conf | 2
-rw-r--r--  tests/topotests/mgmt_config/r1/early-exit2-zebra.conf | 2
-rw-r--r--  tests/topotests/mgmt_config/r1/one-exit-zebra.conf | 2
-rw-r--r--  tests/topotests/mgmt_config/r1/one-exit2-zebra.conf | 2
-rw-r--r--  tests/topotests/mgmt_config/test_config.py | 26
-rw-r--r--  tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json | 49
-rw-r--r--  tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json | 160
-rw-r--r--  tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json | 64
-rw-r--r--  tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json | 48
-rw-r--r--  tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json | 16
-rw-r--r--  tests/topotests/mgmt_oper/oper-results/result-lib.json | 96
-rw-r--r--  tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json | 8
-rw-r--r--  tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json | 46
-rw-r--r--  tests/topotests/mgmt_oper/oper-results/result-ribs-rib-route-nokey.json | 229
-rw-r--r--  tests/topotests/mgmt_oper/oper.py | 17
-rw-r--r--  tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim-empty-label.json | 3
-rw-r--r--  tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim.json | 1
-rw-r--r--  tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json | 4
-rw-r--r--  tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json | 8
-rw-r--r--  tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json | 4
-rw-r--r--  tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json | 4
-rw-r--r--  tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json | 4
-rw-r--r--  tests/topotests/mgmt_oper/simple-results/result-lib.json | 8
-rw-r--r--  tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json | 4
-rw-r--r--  tests/topotests/mgmt_oper/test_oper.py | 1
-rw-r--r--  tests/topotests/mgmt_oper/test_simple.py | 8
-rw-r--r--  tests/topotests/msdp_topo1/r1/pimd.conf | 2
-rwxr-xr-x  tests/topotests/msdp_topo1/test_msdp_topo1.py | 99
-rw-r--r--  tests/topotests/msdp_topo3/__init__.py | 0
-rw-r--r--  tests/topotests/msdp_topo3/r1/frr.conf | 31
-rw-r--r--  tests/topotests/msdp_topo3/r2/frr.conf | 28
-rw-r--r--  tests/topotests/msdp_topo3/test_msdp_topo3.py | 165
-rw-r--r--  tests/topotests/ospf_instance_redistribute/test_ospf_instance_redistribute.py | 31
-rw-r--r--  tests/topotests/ospf_metric_propagation/h1/frr.conf | 4
-rw-r--r--  tests/topotests/ospf_metric_propagation/h2/frr.conf | 4
-rw-r--r--  tests/topotests/ospf_metric_propagation/r1/frr.conf | 5
-rw-r--r--  tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json | 50
-rw-r--r--  tests/topotests/ospf_metric_propagation/r2/frr.conf | 3
-rw-r--r--  tests/topotests/ospf_metric_propagation/r3/frr.conf | 3
-rw-r--r--  tests/topotests/ospf_metric_propagation/r4/frr.conf | 8
-rw-r--r--  tests/topotests/ospf_metric_propagation/ra/frr.conf | 3
-rw-r--r--  tests/topotests/ospf_metric_propagation/rb/frr.conf | 3
-rw-r--r--  tests/topotests/ospf_metric_propagation/rc/frr.conf | 3
-rw-r--r--  tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py | 19
-rw-r--r--  tests/topotests/ospf_multi_instance/r1/frr.conf | 16
-rw-r--r--  tests/topotests/ospf_multi_instance/r2/frr.conf | 37
-rw-r--r--  tests/topotests/ospf_multi_instance/r3/frr.conf | 16
-rw-r--r--  tests/topotests/ospf_multi_instance/test_ospf_multi_instance.py | 403
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/frr.conf | 2
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt | 1
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt | 2
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/frr.conf | 2
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt | 1
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt | 2
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/frr.conf | 2
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt | 3
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/frr.conf | 2
-rw-r--r--  tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt | 2
-rw-r--r--  tests/topotests/ospf_netns_vrf/r1/ospfd.conf | 2
-rw-r--r--  tests/topotests/ospf_netns_vrf/r1/zebra.conf | 2
-rw-r--r--  tests/topotests/ospf_netns_vrf/r1/zebraroute.txt | 3
-rw-r--r--  tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt | 3
-rw-r--r--  tests/topotests/ospf_netns_vrf/r2/ospfd.conf | 2
-rw-r--r--  tests/topotests/ospf_netns_vrf/r2/zebra.conf | 2
-rw-r--r--  tests/topotests/ospf_netns_vrf/r2/zebraroute.txt | 3
-rw-r--r--  tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt | 3
-rw-r--r--  tests/topotests/ospf_netns_vrf/r3/ospfd.conf | 2
-rw-r--r--  tests/topotests/ospf_netns_vrf/r3/zebra.conf | 2
-rw-r--r--  tests/topotests/ospf_netns_vrf/r3/zebraroute.txt | 3
-rw-r--r--  tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt | 3
-rw-r--r--  tests/topotests/ospf_p2mp/r1/frr-p2mp-non-broadcast.conf | 3
-rw-r--r--  tests/topotests/ospf_p2mp/r1/frr-p2mp.conf | 3
-rw-r--r--  tests/topotests/ospf_p2mp/r2/frr-p2mp-non-broadcast.conf | 3
-rw-r--r--  tests/topotests/ospf_p2mp/r2/frr-p2mp.conf | 4
-rw-r--r--  tests/topotests/ospf_p2mp/r3/frr-p2mp-non-broadcast.conf | 3
-rw-r--r--  tests/topotests/ospf_p2mp/r3/frr-p2mp.conf | 4
-rw-r--r--  tests/topotests/ospf_p2mp/r4/frr-p2mp-non-broadcast.conf | 3
-rw-r--r--  tests/topotests/ospf_p2mp/r4/frr-p2mp.conf | 4
-rw-r--r--  tests/topotests/ospf_prefix_suppression/r1/frr.conf | 2
-rw-r--r--  tests/topotests/ospf_prefix_suppression/r2/frr.conf | 2
-rw-r--r--  tests/topotests/ospf_prefix_suppression/r3/frr.conf | 2
-rw-r--r--  tests/topotests/ospfapi/test_ospf_clientapi.py | 2
-rw-r--r--  tests/topotests/pim_autorp/r1/frr.conf | 18
-rw-r--r--  tests/topotests/pim_autorp/r2/frr.conf | 18
-rw-r--r--  tests/topotests/pim_autorp/r3/frr.conf | 24
-rw-r--r--  tests/topotests/pim_autorp/r4/frr.conf | 24
-rw-r--r--  tests/topotests/pim_autorp/test_pim_autorp.py | 800
-rw-r--r--  tests/topotests/pim_basic/test_pim.py | 8
-rw-r--r--  tests/topotests/pim_boundary_acl/r1/frr.conf | 39
-rw-r--r--  tests/topotests/pim_boundary_acl/r2/frr.conf | 19
-rw-r--r--  tests/topotests/pim_boundary_acl/r3/frr.conf | 13
-rw-r--r--  tests/topotests/pim_boundary_acl/rp/frr.conf | 22
-rw-r--r--  tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py | 523
-rw-r--r--  tests/topotests/pim_cand_rp_bsr/r1/frr.conf | 2
-rw-r--r--  tests/topotests/pim_cand_rp_bsr/r2/frr.conf | 2
-rw-r--r--  tests/topotests/pim_cand_rp_bsr/r3/frr.conf | 2
-rw-r--r--  tests/topotests/pim_cand_rp_bsr/r4/frr.conf | 2
-rw-r--r--  tests/topotests/pim_cand_rp_bsr/r5/frr.conf | 2
-rw-r--r--  tests/topotests/pim_cand_rp_bsr/r6/frr.conf | 2
-rwxr-xr-x  tests/topotests/pim_mrib/__init__.py | 0
-rw-r--r--  tests/topotests/pim_mrib/r1/frr.conf | 28
-rw-r--r--  tests/topotests/pim_mrib/r2/frr.conf | 28
-rw-r--r--  tests/topotests/pim_mrib/r3/frr.conf | 28
-rw-r--r--  tests/topotests/pim_mrib/r4/frr.conf | 29
-rw-r--r--  tests/topotests/pim_mrib/test_pim_mrib.py | 328
-rw-r--r--  tests/topotests/simple_snmp_test/r1/bgpd.conf | 1
-rw-r--r--  tests/topotests/simple_snmp_test/r1/isisd.conf | 2
-rw-r--r--  tests/topotests/simple_snmp_test/r1/ospf6d.conf | 12
-rw-r--r--  tests/topotests/simple_snmp_test/r1/ospfd.conf | 11
-rw-r--r--  tests/topotests/simple_snmp_test/r1/ripd.conf | 8
-rw-r--r--  tests/topotests/simple_snmp_test/r1/zebra.conf | 3
-rwxr-xr-x  tests/topotests/simple_snmp_test/test_simple_snmp.py | 52
-rwxr-xr-x  tests/topotests/srv6_static_route/test_srv6_route.py | 2
-rw-r--r--  tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf | 2
-rw-r--r--  tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf | 1
-rw-r--r--  tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf | 1
-rw-r--r--  tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf | 1
-rw-r--r--  tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf | 1
-rw-r--r--  tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf | 2
-rw-r--r--  tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py | 9
-rw-r--r--  tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_down.json | 20
-rw-r--r--  tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_up.json | 21
-rw-r--r--  tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py | 47
-rw-r--r--  tests/topotests/zebra_rib/r1/frr-import.conf | 3
-rw-r--r--  tools/etc/frr/support_bundle_commands.conf | 130
-rwxr-xr-x  tools/frr-reload.py | 14
-rwxr-xr-x  tools/frr_babeltrace.py | 76
-rw-r--r--  tools/gen_northbound_callbacks.c | 15
-rw-r--r--  vtysh/vtysh.c | 6
-rw-r--r--  watchfrr/watchfrr.c | 2
-rw-r--r--  yang/frr-pim-rp.yang | 57
-rw-r--r--  yang/frr-pim.yang | 101
-rw-r--r--  yang/frr-zebra.yang | 47
-rw-r--r--  zebra/dpdk/zebra_dplane_dpdk.c | 120
-rw-r--r--  zebra/dplane_fpm_nl.c | 34
-rw-r--r--  zebra/interface.c | 38
-rw-r--r--  zebra/ioctl.c | 2
-rw-r--r--  zebra/main.c | 3
-rw-r--r--  zebra/rib.h | 6
-rw-r--r--  zebra/rt_netlink.c | 7
-rw-r--r--  zebra/zapi_msg.c | 70
-rw-r--r--  zebra/zebra_cli.c | 3
-rw-r--r--  zebra/zebra_evpn_mac.c | 15
-rw-r--r--  zebra/zebra_mpls.c | 29
-rw-r--r--  zebra/zebra_mroute.c | 2
-rw-r--r--  zebra/zebra_nb.c | 6
-rw-r--r--  zebra/zebra_nb_config.c | 17
-rw-r--r--  zebra/zebra_nhg.c | 24
-rw-r--r--  zebra/zebra_rib.c | 106
-rw-r--r--  zebra/zebra_router.c | 14
-rw-r--r--  zebra/zebra_router.h | 18
-rw-r--r--  zebra/zebra_srv6_vty.c | 4
-rw-r--r--  zebra/zebra_vty.c | 396
-rw-r--r--  zebra/zebra_vxlan.c | 14
-rw-r--r--  zebra/zebra_vxlan_if.c | 8
-rw-r--r--  zebra/zserv.c | 43
-rw-r--r--  zebra/zserv.h | 15
426 files changed, 14709 insertions(+), 5881 deletions(-)
diff --git a/babeld/babel_interface.c b/babeld/babel_interface.c
index b83c7b1908..f17842366a 100644
--- a/babeld/babel_interface.c
+++ b/babeld/babel_interface.c
@@ -719,6 +719,7 @@ babel_interface_close_all(void)
{
struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
struct interface *ifp = NULL;
+ int type;
FOR_ALL_INTERFACES(vrf, ifp) {
if(!if_up(ifp))
@@ -740,8 +741,14 @@ babel_interface_close_all(void)
flushbuf(ifp);
usleep(roughly(10000));
gettime(&babel_now);
+ babel_enable_if_delete(ifp->name);
interface_reset(ifp);
}
+ /* Disable babel redistribution */
+ for (type = 0; type < ZEBRA_ROUTE_MAX; type++) {
+ zclient_redistribute (ZEBRA_REDISTRIBUTE_DELETE, zclient, AFI_IP, type, 0, VRF_DEFAULT);
+ zclient_redistribute (ZEBRA_REDISTRIBUTE_DELETE, zclient, AFI_IP6, type, 0, VRF_DEFAULT);
+ }
}
/* return "true" if address is one of our ipv6 addresses */
diff --git a/babeld/babeld.c b/babeld/babeld.c
index b562f0b70c..1d2f60e3ad 100644
--- a/babeld/babeld.c
+++ b/babeld/babeld.c
@@ -304,6 +304,12 @@ void babel_clean_routing_process(void)
flush_all_routes();
babel_interface_close_all();
+ /* Clean babel config */
+ diversity_kind = DIVERSITY_NONE;
+ diversity_factor = BABEL_DEFAULT_DIVERSITY_FACTOR;
+ resend_delay = BABEL_DEFAULT_RESEND_DELAY;
+ change_smoothing_half_life(BABEL_DEFAULT_SMOOTHING_HALF_LIFE);
+
/* cancel events */
event_cancel(&babel_routing_process->t_read);
event_cancel(&babel_routing_process->t_update);
diff --git a/bfdd/bfd.c b/bfdd/bfd.c
index eb9c300313..f32bc2598b 100644
--- a/bfdd/bfd.c
+++ b/bfdd/bfd.c
@@ -1172,6 +1172,9 @@ void bfd_set_echo(struct bfd_session *bs, bool echo)
if (bs->bdc == NULL)
ptm_bfd_echo_stop(bs);
}
+
+ if (bs->vrf && bs->vrf->info)
+ bfd_vrf_toggle_echo(bs->vrf->info);
}
void bfd_set_shutdown(struct bfd_session *bs, bool shutdown)
@@ -1800,6 +1803,69 @@ void bfd_profiles_remove(void)
bfd_profile_free(bp);
}
+struct __bfd_session_echo {
+ /* VRF peers must match */
+ struct vrf *vrf;
+ /* Echo enabled or not */
+ bool enabled;
+};
+
+static int __bfd_session_has_echo(struct hash_bucket *hb, void *arg)
+{
+ const struct bfd_session *session = hb->data;
+ struct __bfd_session_echo *has_echo = arg;
+
+ if (session->vrf != has_echo->vrf)
+ return HASHWALK_CONTINUE;
+ if (!CHECK_FLAG(session->flags, BFD_SESS_FLAG_ECHO))
+ return HASHWALK_CONTINUE;
+
+ has_echo->enabled = true;
+ return HASHWALK_ABORT;
+}
+
+void bfd_vrf_toggle_echo(struct bfd_vrf_global *bfd_vrf)
+{
+ struct __bfd_session_echo has_echo = {
+ .enabled = false,
+ .vrf = bfd_vrf->vrf,
+ };
+
+ /* Check for peers using echo */
+ hash_walk(bfd_id_hash, __bfd_session_has_echo, &has_echo);
+
+ /*
+ * No peers using echo, close all echo sockets.
+ */
+ if (!has_echo.enabled) {
+ if (bfd_vrf->bg_echo != -1) {
+ event_cancel(&bfd_vrf->bg_ev[4]);
+ close(bfd_vrf->bg_echo);
+ bfd_vrf->bg_echo = -1;
+ }
+
+ if (bfd_vrf->bg_echov6 != -1) {
+ event_cancel(&bfd_vrf->bg_ev[5]);
+ close(bfd_vrf->bg_echov6);
+ bfd_vrf->bg_echov6 = -1;
+ }
+ return;
+ }
+
+ /*
+ * At least one peer using echo, open echo sockets.
+ */
+ if (bfd_vrf->bg_echo == -1)
+ bfd_vrf->bg_echo = bp_echo_socket(bfd_vrf->vrf);
+ if (bfd_vrf->bg_echov6 == -1)
+ bfd_vrf->bg_echov6 = bp_echov6_socket(bfd_vrf->vrf);
+
+ if (bfd_vrf->bg_ev[4] == NULL && bfd_vrf->bg_echo != -1)
+ event_add_read(master, bfd_recv_cb, bfd_vrf, bfd_vrf->bg_echo, &bfd_vrf->bg_ev[4]);
+ if (bfd_vrf->bg_ev[5] == NULL && bfd_vrf->bg_echov6 != -1)
+ event_add_read(master, bfd_recv_cb, bfd_vrf, bfd_vrf->bg_echov6, &bfd_vrf->bg_ev[5]);
+}
+
/*
* Profile related hash functions.
*/
@@ -1842,9 +1908,23 @@ static void bfd_profile_detach(struct bfd_profile *bp)
*/
static int bfd_vrf_new(struct vrf *vrf)
{
+ struct bfd_vrf_global *bvrf;
+
if (bglobal.debug_zebra)
zlog_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id);
+ bvrf = XCALLOC(MTYPE_BFDD_VRF, sizeof(struct bfd_vrf_global));
+ bvrf->vrf = vrf;
+ vrf->info = bvrf;
+
+ /* Invalidate all sockets */
+ bvrf->bg_shop = -1;
+ bvrf->bg_mhop = -1;
+ bvrf->bg_shop6 = -1;
+ bvrf->bg_mhop6 = -1;
+ bvrf->bg_echo = -1;
+ bvrf->bg_echov6 = -1;
+
return 0;
}
@@ -1853,70 +1933,53 @@ static int bfd_vrf_delete(struct vrf *vrf)
if (bglobal.debug_zebra)
zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id);
+ XFREE(MTYPE_BFDD_VRF, vrf->info);
+
return 0;
}
static int bfd_vrf_enable(struct vrf *vrf)
{
- struct bfd_vrf_global *bvrf;
-
- /* a different name */
- if (!vrf->info) {
- bvrf = XCALLOC(MTYPE_BFDD_VRF, sizeof(struct bfd_vrf_global));
- bvrf->vrf = vrf;
- vrf->info = (void *)bvrf;
-
- /* Disable sockets if using data plane. */
- if (bglobal.bg_use_dplane) {
- bvrf->bg_shop = -1;
- bvrf->bg_mhop = -1;
- bvrf->bg_shop6 = -1;
- bvrf->bg_mhop6 = -1;
- bvrf->bg_echo = -1;
- bvrf->bg_echov6 = -1;
- }
- } else
- bvrf = vrf->info;
+ struct bfd_vrf_global *bvrf = vrf->info;
if (bglobal.debug_zebra)
zlog_debug("VRF enable add %s id %u", vrf->name, vrf->vrf_id);
- if (!bvrf->bg_shop)
+ /* Don't open sockets when using data plane */
+ if (bglobal.bg_use_dplane)
+ goto skip_sockets;
+
+ if (bvrf->bg_shop == -1)
bvrf->bg_shop = bp_udp_shop(vrf);
- if (!bvrf->bg_mhop)
+ if (bvrf->bg_mhop == -1)
bvrf->bg_mhop = bp_udp_mhop(vrf);
- if (!bvrf->bg_shop6)
+ if (bvrf->bg_shop6 == -1)
bvrf->bg_shop6 = bp_udp6_shop(vrf);
- if (!bvrf->bg_mhop6)
+ if (bvrf->bg_mhop6 == -1)
bvrf->bg_mhop6 = bp_udp6_mhop(vrf);
- if (!bvrf->bg_echo)
- bvrf->bg_echo = bp_echo_socket(vrf);
- if (!bvrf->bg_echov6)
- bvrf->bg_echov6 = bp_echov6_socket(vrf);
- if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1)
+ if (bvrf->bg_ev[0] == NULL && bvrf->bg_shop != -1)
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
&bvrf->bg_ev[0]);
- if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1)
+ if (bvrf->bg_ev[1] == NULL && bvrf->bg_mhop != -1)
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
&bvrf->bg_ev[1]);
- if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1)
+ if (bvrf->bg_ev[2] == NULL && bvrf->bg_shop6 != -1)
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
&bvrf->bg_ev[2]);
- if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1)
+ if (bvrf->bg_ev[3] == NULL && bvrf->bg_mhop6 != -1)
event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
&bvrf->bg_ev[3]);
- if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1)
- event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
- &bvrf->bg_ev[4]);
- if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1)
- event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
- &bvrf->bg_ev[5]);
+ /* Toggle echo if VRF was disabled. */
+ bfd_vrf_toggle_echo(bvrf);
+
+skip_sockets:
if (vrf->vrf_id != VRF_DEFAULT) {
bfdd_zclient_register(vrf->vrf_id);
bfdd_sessions_enable_vrf(vrf);
}
+
return 0;
}
@@ -1948,17 +2011,9 @@ static int bfd_vrf_disable(struct vrf *vrf)
socket_close(&bvrf->bg_echo);
socket_close(&bvrf->bg_shop);
socket_close(&bvrf->bg_mhop);
- if (bvrf->bg_shop6 != -1)
- socket_close(&bvrf->bg_shop6);
- if (bvrf->bg_mhop6 != -1)
- socket_close(&bvrf->bg_mhop6);
- socket_close(&bvrf->bg_echo);
- if (bvrf->bg_echov6 != -1)
- socket_close(&bvrf->bg_echov6);
-
- /* free context */
- XFREE(MTYPE_BFDD_VRF, bvrf);
- vrf->info = NULL;
+ socket_close(&bvrf->bg_shop6);
+ socket_close(&bvrf->bg_mhop6);
+ socket_close(&bvrf->bg_echov6);
return 0;
}
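
Note: the new echo handling is built on lib/hash.h's walk API; __bfd_session_has_echo() is a predicate callback that aborts the walk on the first match. A condensed sketch of that idiom (hash_walk(), HASHWALK_CONTINUE and HASHWALK_ABORT are the real lib/hash.h interface; struct session, its echo field, and the hash handle are hypothetical):

    #include "hash.h"

    struct session {
            bool echo;
    };

    /* Callback: keep scanning until a session with echo enabled is found. */
    static int session_has_echo_cb(struct hash_bucket *hb, void *arg)
    {
            const struct session *s = hb->data;
            bool *found = arg;

            if (!s->echo)
                    return HASHWALK_CONTINUE;

            *found = true;
            return HASHWALK_ABORT; /* first match is enough */
    }

    static bool any_session_has_echo(struct hash *sessions)
    {
            bool found = false;

            hash_walk(sessions, session_has_echo_cb, &found);
            return found;
    }
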
diff --git a/bfdd/bfd.h b/bfdd/bfd.h
index 2f83b245eb..d4d14ffce6 100644
--- a/bfdd/bfd.h
+++ b/bfdd/bfd.h
@@ -610,6 +610,8 @@ void bfd_sessions_remove_manual(void);
void bfd_profiles_remove(void);
void bfd_rtt_init(struct bfd_session *bfd);
+extern void bfd_vrf_toggle_echo(struct bfd_vrf_global *bfd_vrf);
+
/**
* Set the BFD session echo state.
*
diff --git a/bfdd/dplane.c b/bfdd/dplane.c
index 7f55f34073..b1a32fb150 100644
--- a/bfdd/dplane.c
+++ b/bfdd/dplane.c
@@ -948,6 +948,9 @@ static void bfd_dplane_client_connect(struct event *t)
_bfd_dplane_client_bootstrap(bdc);
}
+ /* Continue with the connection */
+ return;
+
reschedule_connect:
EVENT_OFF(bdc->inbufev);
EVENT_OFF(bdc->outbufev);
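
Note: this one-line fix addresses a classic fall-through bug — the success path used to run straight into the reschedule_connect error label. In generic form (all names hypothetical):

    static void do_connect(void)
    {
            if (!connect_ok())
                    goto reschedule_connect;

            /* ... success-path setup ... */

            return; /* without this, success falls into the retry path */

    reschedule_connect:
            retry();
    }
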
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index 2280aa9097..d349922c52 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -5406,7 +5406,14 @@ enum bgp_attr_parse_ret bgp_attr_ignore(struct peer *peer, uint8_t type)
lookup_msg(attr_str, type, NULL),
withdraw ? "treat-as-withdraw" : "discard");
- return withdraw ? BGP_ATTR_PARSE_WITHDRAW : BGP_ATTR_PARSE_PROCEED;
+ /* We don't increment stat_pfx_withdraw here, because it's done in
+ * bgp_update_receive().
+ */
+ if (withdraw)
+ return BGP_ATTR_PARSE_WITHDRAW;
+
+ peer->stat_pfx_discard++;
+ return BGP_ATTR_PARSE_PROCEED;
}
bool route_matches_soo(struct bgp_path_info *pi, struct ecommunity *soo)
diff --git a/bgpd/bgp_bfd.c b/bgpd/bgp_bfd.c
index a331585d32..50b00d21b1 100644
--- a/bgpd/bgp_bfd.c
+++ b/bgpd/bgp_bfd.c
@@ -151,7 +151,7 @@ void bgp_peer_config_apply(struct peer *p, struct peer_group *pg)
void bgp_peer_bfd_update_source(struct peer *p)
{
- struct bfd_session_params *session = p->bfd_config->session;
+ struct bfd_session_params *session;
const union sockunion *source = NULL;
bool changed = false;
int family;
@@ -162,6 +162,10 @@ void bgp_peer_bfd_update_source(struct peer *p)
struct interface *ifp;
union sockunion addr;
+ if (!p->bfd_config)
+ return;
+
+ session = p->bfd_config->session;
/* Nothing to do for groups. */
if (CHECK_FLAG(p->sflags, PEER_STATUS_GROUP))
return;
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
index 2e3a0388d0..acc49cac94 100644
--- a/bgpd/bgp_bmp.c
+++ b/bgpd/bgp_bmp.c
@@ -275,37 +275,33 @@ static inline int bmp_get_peer_type(struct peer *peer)
return bmp_get_peer_type_vrf(peer->bgp->vrf_id);
}
-static inline int bmp_get_peer_distinguisher(struct bmp *bmp, afi_t afi,
- uint8_t peer_type,
+static inline int bmp_get_peer_distinguisher(struct bgp *bgp, afi_t afi, uint8_t peer_type,
uint64_t *result_ref)
{
-
- /* remove this check when the other peer types get correct peer dist.
- *(RFC7854) impl.
- * for now, always return no error and 0 peer distinguisher as before
- */
- if (peer_type != BMP_PEER_TYPE_LOC_RIB_INSTANCE)
- return (*result_ref = 0);
+ /* use RD if set in VRF config */
+ struct prefix_rd *prd;
/* sending vrf_id or rd could be turned into an option at some point */
- struct bgp *bgp = bmp->targets->bgp;
+ if (peer_type == BMP_PEER_TYPE_LOCAL_INSTANCE || bgp->vrf_id == VRF_UNKNOWN)
+ return 1;
/* vrf default => ok, distinguisher 0 */
if (bgp->inst_type == VRF_DEFAULT)
return (*result_ref = 0);
- /* use RD if set in VRF config for this AFI */
- struct prefix_rd *prd = &bgp->vpn_policy[afi].tovpn_rd;
-
- if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_RD_SET)) {
+ prd = &bgp->vpn_policy[AFI_IP].tovpn_rd;
+ if ((afi == AFI_IP || afi == AFI_UNSPEC) &&
+ CHECK_FLAG(bgp->vpn_policy[AFI_IP].flags, BGP_VPN_POLICY_TOVPN_RD_SET)) {
memcpy(result_ref, prd->val, sizeof(prd->val));
return 0;
}
- /* VRF has no id => error => message should be skipped */
- if (bgp->vrf_id == VRF_UNKNOWN)
- return 1;
+ prd = &bgp->vpn_policy[AFI_IP6].tovpn_rd;
+ if ((afi == AFI_IP6 || afi == AFI_UNSPEC) &&
+ CHECK_FLAG(bgp->vpn_policy[AFI_IP6].flags, BGP_VPN_POLICY_TOVPN_RD_SET)) {
+ memcpy(result_ref, prd->val, sizeof(prd->val));
+ return 0;
+ }
/* use VRF id converted to ::vrf_id 64bits format */
*result_ref = ((uint64_t)htonl(bgp->vrf_id)) << 32;
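
Note: the reworked function now resolves a peer distinguisher for every peer type: a configured per-AFI RD wins, otherwise the VRF id is packed into the high-order half of the 8-byte field (the ::vrf_id form), and an unknown VRF id means the BMP message is skipped. Condensed decision ladder (rd_is_set() is a hypothetical stand-in for the per-AFI tovpn_rd flag checks above):

    static int peer_distinguisher(const struct bgp *bgp, uint8_t peer_type,
                                  uint64_t *pd)
    {
            /* No usable distinguisher: caller skips the BMP message. */
            if (peer_type == BMP_PEER_TYPE_LOCAL_INSTANCE ||
                bgp->vrf_id == VRF_UNKNOWN)
                    return 1;

            /* Default VRF always gets distinguisher 0. */
            if (bgp->inst_type == VRF_DEFAULT) {
                    *pd = 0;
                    return 0;
            }

            /* A configured RD wins (hypothetical helper copying tovpn_rd). */
            if (rd_is_set(bgp, pd))
                    return 0;

            /* Fall back to the ::vrf_id form: the VRF id occupies the
             * upper 32 bits of the 8-byte field. */
            *pd = ((uint64_t)htonl(bgp->vrf_id)) << 32;
            return 0;
    }
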
@@ -467,13 +463,23 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down)
struct stream *s;
size_t len;
struct timeval uptime, uptime_real;
+ uint8_t peer_type;
+ bool is_locrib = false;
+ uint64_t peer_distinguisher = 0;
uptime.tv_sec = peer->uptime;
uptime.tv_usec = 0;
monotime_to_realtime(&uptime, &uptime_real);
- uint8_t peer_type = bmp_get_peer_type(peer);
- bool is_locrib = peer_type == BMP_PEER_TYPE_LOC_RIB_INSTANCE;
+ peer_type = bmp_get_peer_type(peer);
+ if (peer_type == BMP_PEER_TYPE_LOC_RIB_INSTANCE)
+ is_locrib = true;
+
+ if (bmp_get_peer_distinguisher(peer->bgp, AFI_UNSPEC, peer_type, &peer_distinguisher)) {
+ zlog_warn("skipping bmp message for peer %s: can't get peer distinguisher",
+ peer->host);
+ return NULL;
+ }
#define BGP_BMP_MAX_PACKET_SIZE 1024
#define BMP_PEERUP_INFO_TYPE_STRING 0
@@ -484,9 +490,7 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down)
bmp_common_hdr(s, BMP_VERSION_3,
BMP_TYPE_PEER_UP_NOTIFICATION);
- bmp_per_peer_hdr(s, peer->bgp, peer, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, 0,
- &uptime_real);
+ bmp_per_peer_hdr(s, peer->bgp, peer, 0, peer_type, peer_distinguisher, &uptime_real);
/* Local Address (16 bytes) */
if (is_locrib)
@@ -548,9 +552,7 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down)
bmp_common_hdr(s, BMP_VERSION_3,
BMP_TYPE_PEER_DOWN_NOTIFICATION);
- bmp_per_peer_hdr(s, peer->bgp, peer, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, 0,
- &uptime_real);
+ bmp_per_peer_hdr(s, peer->bgp, peer, 0, peer_type, peer_distinguisher, &uptime_real);
type_pos = stream_get_endp(s);
stream_putc(s, 0); /* placeholder for down reason */
@@ -604,8 +606,10 @@ static int bmp_send_peerup(struct bmp *bmp)
/* Walk down all peers */
for (ALL_LIST_ELEMENTS_RO(bmp->targets->bgp->peer, node, peer)) {
s = bmp_peerstate(peer, false);
- pullwr_write_stream(bmp->pullwr, s);
- stream_free(s);
+ if (s) {
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+ }
}
return 0;
@@ -622,10 +626,10 @@ static int bmp_send_peerup_vrf(struct bmp *bmp)
bmp_bgp_update_vrf_status(bmpbgp, vrf_state_unknown);
s = bmp_peerstate(bmpbgp->bgp->peer_self, bmpbgp->vrf_state == vrf_state_down);
-
- pullwr_write_stream(bmp->pullwr, s);
- stream_free(s);
-
+ if (s) {
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+ }
return 0;
}
@@ -636,6 +640,9 @@ static void bmp_send_all(struct bmp_bgp *bmpbgp, struct stream *s)
struct bmp_targets *bt;
struct bmp *bmp;
+ if (!s)
+ return;
+
frr_each(bmp_targets, &bmpbgp->targets, bt)
frr_each(bmp_session, &bt->sessions, bmp)
pullwr_write_stream(bmp->pullwr, s);
@@ -644,6 +651,9 @@ static void bmp_send_all(struct bmp_bgp *bmpbgp, struct stream *s)
static void bmp_send_all_safe(struct bmp_bgp *bmpbgp, struct stream *s)
{
+ if (!s)
+ return;
+
if (!bmpbgp) {
stream_free(s);
return;
@@ -771,14 +781,24 @@ static void bmp_wrmirror_lost(struct bmp *bmp, struct pullwr *pullwr)
{
struct stream *s;
struct timeval tv;
+ uint8_t peer_type_flag;
+ uint64_t peer_distinguisher = 0;
gettimeofday(&tv, NULL);
+ peer_type_flag = bmp_get_peer_type_vrf(bmp->targets->bgp->vrf_id);
+
+ if (bmp_get_peer_distinguisher(bmp->targets->bgp, AFI_UNSPEC, peer_type_flag,
+ &peer_distinguisher)) {
+ zlog_warn("skipping bmp message for reason: can't get peer distinguisher");
+ return;
+ }
+
s = stream_new(BGP_MAX_PACKET_SIZE);
bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING);
- bmp_per_peer_hdr(s, bmp->targets->bgp, bmp->targets->bgp->peer_self, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, 0, &tv);
+ bmp_per_peer_hdr(s, bmp->targets->bgp, bmp->targets->bgp->peer_self, 0, peer_type_flag,
+ peer_distinguisher, &tv);
stream_putw(s, BMP_MIRROR_TLV_TYPE_INFO);
stream_putw(s, 2);
@@ -795,6 +815,8 @@ static bool bmp_wrmirror(struct bmp *bmp, struct pullwr *pullwr)
struct bmp_mirrorq *bmq;
struct peer *peer;
bool written = false;
+ uint8_t peer_type_flag;
+ uint64_t peer_distinguisher = 0;
if (bmp->mirror_lost) {
bmp_wrmirror_lost(bmp, pullwr);
@@ -812,12 +834,20 @@ static bool bmp_wrmirror(struct bmp *bmp, struct pullwr *pullwr)
goto out;
}
+ peer_type_flag = bmp_get_peer_type_vrf(bmp->targets->bgp->vrf_id);
+
+ if (bmp_get_peer_distinguisher(peer->bgp, AFI_UNSPEC, peer_type_flag, &peer_distinguisher)) {
+ zlog_warn("skipping bmp message for peer %s: can't get peer distinguisher",
+ peer->host);
+ goto out;
+ }
+
struct stream *s;
s = stream_new(BGP_MAX_PACKET_SIZE);
bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING);
- bmp_per_peer_hdr(s, bmp->targets->bgp, peer, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, 0, &bmq->tv);
+ bmp_per_peer_hdr(s, bmp->targets->bgp, peer, 0, peer_type_flag, peer_distinguisher,
+ &bmq->tv);
/* BMP Mirror TLV. */
stream_putw(s, BMP_MIRROR_TLV_TYPE_BGP_MESSAGE);
@@ -967,8 +997,7 @@ static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags,
uint64_t peer_distinguisher = 0;
/* skip this message if peer distinguisher is not available */
- if (bmp_get_peer_distinguisher(bmp, afi, peer_type_flag,
- &peer_distinguisher)) {
+ if (bmp_get_peer_distinguisher(peer->bgp, afi, peer_type_flag, &peer_distinguisher)) {
zlog_warn(
"skipping bmp message for reason: can't get peer distinguisher");
continue;
@@ -1096,8 +1125,7 @@ static void bmp_monitor(struct bmp *bmp, struct peer *peer, uint8_t flags,
uint64_t peer_distinguisher = 0;
/* skip this message if peer distinguisher is not available */
- if (bmp_get_peer_distinguisher(bmp, afi, peer_type_flag,
- &peer_distinguisher)) {
+ if (bmp_get_peer_distinguisher(peer->bgp, afi, peer_type_flag, &peer_distinguisher)) {
zlog_warn(
"skipping bmp message for reason: can't get peer distinguisher");
return;
@@ -1131,6 +1159,7 @@ static bool bmp_wrsync(struct bmp *bmp, struct pullwr *pullwr)
uint8_t bpi_num_labels, adjin_num_labels;
afi_t afi;
safi_t safi;
+ uint8_t peer_type_flag;
if (bmp->syncafi == AFI_MAX) {
FOREACH_AFI_SAFI (afi, safi) {
@@ -1173,6 +1202,8 @@ afibreak:
struct bgp_path_info *bpi = NULL, *bpiter;
struct bgp_adj_in *adjin = NULL, *adjiter;
+ peer_type_flag = bmp_get_peer_type_vrf(bmp->targets->bgp->vrf_id);
+
if ((afi == AFI_L2VPN && safi == SAFI_EVPN) ||
(safi == SAFI_MPLS_VPN)) {
/* initialize syncrdpos to the first
@@ -1227,10 +1258,8 @@ afibreak:
bmp->remote, afi2str(afi),
safi2str(safi));
- bmp_eor(bmp, afi, safi, BMP_PEER_FLAG_L,
- BMP_PEER_TYPE_GLOBAL_INSTANCE);
- bmp_eor(bmp, afi, safi, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE);
+ bmp_eor(bmp, afi, safi, BMP_PEER_FLAG_L, peer_type_flag);
+ bmp_eor(bmp, afi, safi, 0, peer_type_flag);
bmp_eor(bmp, afi, safi, 0,
BMP_PEER_TYPE_LOC_RIB_INSTANCE);
@@ -1314,19 +1343,20 @@ afibreak:
bpi_num_labels);
}
+ if (bpi)
+ peer_type_flag = bmp_get_peer_type(bpi->peer);
+
if (bpi && CHECK_FLAG(bpi->flags, BGP_PATH_VALID) &&
CHECK_FLAG(bmp->targets->afimon[afi][safi], BMP_MON_POSTPOLICY))
- bmp_monitor(bmp, bpi->peer, BMP_PEER_FLAG_L,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, bn_p, prd, bpi->attr,
+ bmp_monitor(bmp, bpi->peer, BMP_PEER_FLAG_L, peer_type_flag, bn_p, prd, bpi->attr,
afi, safi, bpi->uptime,
- bpi_num_labels ? bpi->extra->labels->label : NULL,
- bpi_num_labels);
+ bpi_num_labels ? bpi->extra->labels->label : NULL, bpi_num_labels);
if (adjin) {
adjin_num_labels = adjin->labels ? adjin->labels->num_labels : 0;
- bmp_monitor(bmp, adjin->peer, 0, BMP_PEER_TYPE_GLOBAL_INSTANCE, bn_p, prd,
- adjin->attr, afi, safi, adjin->uptime,
- adjin_num_labels ? &adjin->labels->label[0] : NULL, adjin_num_labels);
+ bmp_monitor(bmp, adjin->peer, 0, peer_type_flag, bn_p, prd, adjin->attr, afi, safi,
+ adjin->uptime, adjin_num_labels ? &adjin->labels->label[0] : NULL,
+ adjin_num_labels);
}
if (bn)
@@ -1465,6 +1495,7 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
struct bgp_dest *bn = NULL;
bool written = false;
uint8_t bpi_num_labels, adjin_num_labels;
+ uint8_t peer_type_flag;
bqe = bmp_pull(bmp);
if (!bqe)
@@ -1505,6 +1536,8 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
bn = bgp_safi_node_lookup(bmp->targets->bgp->rib[afi][safi], safi,
&bqe->p, prd);
+ peer_type_flag = bmp_get_peer_type(peer);
+
if (CHECK_FLAG(bmp->targets->afimon[afi][safi], BMP_MON_POSTPOLICY)) {
struct bgp_path_info *bpi;
@@ -1518,12 +1551,9 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
bpi_num_labels = BGP_PATH_INFO_NUM_LABELS(bpi);
- bmp_monitor(bmp, peer, BMP_PEER_FLAG_L,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, &bqe->p, prd,
- bpi ? bpi->attr : NULL, afi, safi,
- bpi ? bpi->uptime : monotime(NULL),
- bpi_num_labels ? bpi->extra->labels->label : NULL,
- bpi_num_labels);
+ bmp_monitor(bmp, peer, BMP_PEER_FLAG_L, peer_type_flag, &bqe->p, prd,
+ bpi ? bpi->attr : NULL, afi, safi, bpi ? bpi->uptime : monotime(NULL),
+ bpi_num_labels ? bpi->extra->labels->label : NULL, bpi_num_labels);
written = true;
}
@@ -1536,9 +1566,8 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
break;
}
adjin_num_labels = adjin && adjin->labels ? adjin->labels->num_labels : 0;
- bmp_monitor(bmp, peer, 0, BMP_PEER_TYPE_GLOBAL_INSTANCE, &bqe->p, prd,
- adjin ? adjin->attr : NULL, afi, safi,
- adjin ? adjin->uptime : monotime(NULL),
+ bmp_monitor(bmp, peer, 0, peer_type_flag, &bqe->p, prd, adjin ? adjin->attr : NULL,
+ afi, safi, adjin ? adjin->uptime : monotime(NULL),
adjin_num_labels ? &adjin->labels->label[0] : NULL, adjin_num_labels);
written = true;
}
@@ -1704,6 +1733,8 @@ static void bmp_stats(struct event *thread)
struct peer *peer;
struct listnode *node;
struct timeval tv;
+ uint8_t peer_type_flag;
+ uint64_t peer_distinguisher = 0;
if (bt->stat_msec)
event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
@@ -1720,8 +1751,14 @@ static void bmp_stats(struct event *thread)
s = stream_new(BGP_MAX_PACKET_SIZE);
bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_STATISTICS_REPORT);
- bmp_per_peer_hdr(s, bt->bgp, peer, 0,
- BMP_PEER_TYPE_GLOBAL_INSTANCE, 0, &tv);
+ peer_type_flag = bmp_get_peer_type(peer);
+ if (bmp_get_peer_distinguisher(peer->bgp, AFI_UNSPEC, peer_type_flag,
+ &peer_distinguisher)) {
+ zlog_warn("skipping bmp message for peer %s: can't get peer distinguisher",
+ peer->host);
+ continue;
+ }
+ bmp_per_peer_hdr(s, bt->bgp, peer, 0, peer_type_flag, peer_distinguisher, &tv);
count_pos = stream_get_endp(s);
stream_putl(s, 0);
@@ -1736,8 +1773,7 @@ static void bmp_stats(struct event *thread)
peer->stat_pfx_cluster_loop);
bmp_stat_put_u32(s, &count, BMP_STATS_PFX_DUP_WITHDRAW,
peer->stat_pfx_dup_withdraw);
- bmp_stat_put_u32(s, &count, BMP_STATS_UPD_7606_WITHDRAW,
- peer->stat_upd_7606);
+ bmp_stat_put_u32(s, &count, BMP_STATS_UPD_7606_WITHDRAW, peer->stat_pfx_withdraw);
if (bt->stats_send_experimental)
bmp_stat_put_u32(s, &count, BMP_STATS_FRR_NH_INVALID,
peer->stat_pfx_nh_invalid);
@@ -2593,8 +2629,11 @@ DEFPY(bmp_connect,
}
ba = bmp_active_get(bt, hostname, port);
- if (srcif)
+ if (srcif) {
+ if (ba->ifsrc)
+ XFREE(MTYPE_TMP, ba->ifsrc);
ba->ifsrc = XSTRDUP(MTYPE_TMP, srcif);
+ }
if (min_retry_str)
ba->minretry = min_retry;
if (max_retry_str)
diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c
index 6479126d06..ca9c428b47 100644
--- a/bgpd/bgp_clist.c
+++ b/bgpd/bgp_clist.c
@@ -11,7 +11,6 @@
#include "queue.h"
#include "filter.h"
#include "stream.h"
-#include "jhash.h"
#include "frrstr.h"
#include "bgpd/bgpd.h"
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index f173bd01f2..488f635b81 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -79,6 +79,8 @@ static void bgp_evpn_remote_ip_hash_unlink_nexthop(struct hash_bucket *bucket,
void *args);
static struct in_addr zero_vtep_ip;
+static void bgp_evpn_local_l3vni_del_post_processing(struct bgp *bgp_vrf);
+
/*
* Private functions.
*/
@@ -1669,9 +1671,18 @@ static int update_evpn_type5_route_entry(struct bgp *bgp_evpn,
/* attribute changed */
*route_changed = 1;
+ /* If the ASN values differ, add the source VRF's ASN to
+ * the AS path of the target (EVPN) entry.
+ */
+ if (bgp_vrf->as != bgp_evpn->as) {
+ new_aspath = aspath_dup(static_attr.aspath);
+ new_aspath = aspath_add_seq(new_aspath, bgp_vrf->as);
+ static_attr.aspath = new_aspath;
+ }
/* The attribute has changed. */
/* Add (or update) attribute to hash. */
- attr_new = bgp_attr_intern(attr);
+ attr_new = bgp_attr_intern(&static_attr);
+ bgp_attr_flush(&static_attr);
bgp_path_info_set_flag(dest, tmp_pi,
BGP_PATH_ATTR_CHANGED);
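
Note: taken together, this hunk follows the usual FRR attribute edit cycle — mutate a scratch struct attr, graft a duplicated-and-extended AS path onto it, intern the result, then flush the scratch copy so no references leak. Condensed sketch (the static_attr initialization shown is illustrative; bgp_vrf/bgp_evpn are the function's source and target instances):

    struct attr static_attr = *attr;          /* scratch copy (illustrative) */
    struct aspath *new_aspath;
    struct attr *attr_new;

    if (bgp_vrf->as != bgp_evpn->as) {
            /* The source VRF's ASN goes onto the advertised AS path. */
            new_aspath = aspath_dup(static_attr.aspath);
            new_aspath = aspath_add_seq(new_aspath, bgp_vrf->as);
            static_attr.aspath = new_aspath;
    }

    attr_new = bgp_attr_intern(&static_attr); /* refcounted, hashed copy */
    bgp_attr_flush(&static_attr);             /* release scratch references */
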
@@ -3882,14 +3893,6 @@ int bgp_evpn_route_entry_install_if_vrf_match(struct bgp *bgp_vrf,
const struct prefix_evpn *evp =
(const struct prefix_evpn *)bgp_dest_get_prefix(pi->net);
- /* Consider "valid" remote routes applicable for
- * this VRF.
- */
- if (!(CHECK_FLAG(pi->flags, BGP_PATH_VALID)
- && pi->type == ZEBRA_ROUTE_BGP
- && pi->sub_type == BGP_ROUTE_NORMAL))
- return 0;
-
if (is_route_matching_for_vrf(bgp_vrf, pi)) {
if (bgp_evpn_route_rmac_self_check(bgp_vrf, evp, pi))
return 0;
@@ -3916,26 +3919,66 @@ int bgp_evpn_route_entry_install_if_vrf_match(struct bgp *bgp_vrf,
return ret;
}
+#define BGP_PROC_L3VNI_LIMIT 10
+static int install_uninstall_evpn_remote_route_per_l3vni(struct bgp_path_info *pi,
+ const struct prefix_evpn *evp)
+{
+ int ret = 0;
+ uint8_t vni_iter = 0;
+ bool is_install = false;
+ struct bgp *bgp_to_proc = NULL;
+ struct bgp *bgp_to_proc_next = NULL;
+
+ for (bgp_to_proc = zebra_l3_vni_first(&bm->zebra_l3_vni_head);
+ bgp_to_proc && vni_iter < BGP_PROC_L3VNI_LIMIT; bgp_to_proc = bgp_to_proc_next) {
+ bgp_to_proc_next = zebra_l3_vni_next(&bm->zebra_l3_vni_head, bgp_to_proc);
+ vni_iter++;
+ is_install = !!CHECK_FLAG(bgp_to_proc->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL);
+
+ ret = bgp_evpn_route_entry_install_if_vrf_match(bgp_to_proc, pi, is_install);
+ if (ret) {
+ flog_err(EC_BGP_EVPN_FAIL,
+ "%u: Failed to %s EVPN %s route in L3VNI %u during BP",
+ bgp_to_proc->vrf_id, is_install ? "install" : "uninstall",
+ bgp_evpn_route_type_str[evp->prefix.route_type].str,
+ bgp_to_proc->l3vni);
+ zebra_l3_vni_del(&bm->zebra_l3_vni_head, bgp_to_proc);
+ if (!is_install)
+ bgp_evpn_local_l3vni_del_post_processing(bgp_to_proc);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
/*
* Install or uninstall mac-ip routes are appropriate for this
* particular VRF.
*/
-static int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, bool install)
+int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, bool install)
{
afi_t afi;
safi_t safi;
struct bgp_dest *rd_dest, *dest;
struct bgp_table *table;
struct bgp_path_info *pi;
- int ret;
+ int ret = 0;
struct bgp *bgp_evpn = NULL;
+ uint8_t count = 0;
afi = AFI_L2VPN;
safi = SAFI_EVPN;
bgp_evpn = bgp_get_evpn();
- if (!bgp_evpn)
+ if (!bgp_evpn) {
+ zlog_warn("%s: No BGP EVPN instance found...", __func__);
+
return -1;
+ }
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Total %u L3VNI BGP-VRFs pending to be processed for remote route installation",
+ __func__, (uint32_t)zebra_l3_vni_count(&bm->zebra_l3_vni_head));
/* Walk entire global routing table and evaluate routes which could be
* imported into this VRF. Note that we need to loop through all global
* routes to determine which route matches the import rt on vrf
@@ -3952,30 +3995,109 @@ static int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, bool install)
(const struct prefix_evpn *)bgp_dest_get_prefix(
dest);
- /* if not mac-ip route skip this route */
- if (!(evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE
- || evp->prefix.route_type
- == BGP_EVPN_IP_PREFIX_ROUTE))
- continue;
-
- /* if not a mac+ip route skip this route */
- if (!(is_evpn_prefix_ipaddr_v4(evp)
- || is_evpn_prefix_ipaddr_v6(evp)))
+ /* Proceed only for MAC-IP and IP-Prefix routes */
+ switch (evp->prefix.route_type) {
+ case BGP_EVPN_MAC_IP_ROUTE:
+ case BGP_EVPN_IP_PREFIX_ROUTE:
+ if (!(is_evpn_prefix_ipaddr_v4(evp) ||
+ is_evpn_prefix_ipaddr_v6(evp)))
+ continue;
+ break;
+ case BGP_EVPN_AD_ROUTE:
+ case BGP_EVPN_IMET_ROUTE:
+ case BGP_EVPN_ES_ROUTE:
continue;
+ }
for (pi = bgp_dest_get_bgp_path_info(dest); pi;
pi = pi->next) {
- ret = bgp_evpn_route_entry_install_if_vrf_match(
- bgp_vrf, pi, install);
- if (ret) {
- bgp_dest_unlock_node(rd_dest);
- bgp_dest_unlock_node(dest);
- return ret;
+ /* Consider "valid" remote routes applicable for
+ * this VRF */
+ if (!(CHECK_FLAG(pi->flags, BGP_PATH_VALID) &&
+ pi->type == ZEBRA_ROUTE_BGP &&
+ pi->sub_type == BGP_ROUTE_NORMAL))
+ continue;
+
+ if (!bgp_vrf) {
+ ret = install_uninstall_evpn_remote_route_per_l3vni(pi, evp);
+ if (ret) {
+ bgp_dest_unlock_node(rd_dest);
+ bgp_dest_unlock_node(dest);
+
+ return ret;
+ }
+ } else {
+ ret = bgp_evpn_route_entry_install_if_vrf_match(bgp_vrf, pi,
+ install);
+ if (ret) {
+ flog_err(EC_BGP_EVPN_FAIL,
+ "%u: Failed to %s EVPN %s route in L3VNI %u",
+ bgp_vrf->vrf_id,
+ install ? "install" : "uninstall",
+ bgp_evpn_route_type_str[evp->prefix.route_type]
+ .str,
+ bgp_vrf->l3vni);
+ bgp_dest_unlock_node(rd_dest);
+ bgp_dest_unlock_node(dest);
+
+ return ret;
+ }
}
}
}
}
+ if (!bgp_vrf) {
+ while (count < BGP_PROC_L3VNI_LIMIT) {
+ struct bgp *bgp_to_proc = zebra_l3_vni_pop(&bm->zebra_l3_vni_head);
+
+ if (!bgp_to_proc)
+ return 0;
+
+ if (CHECK_FLAG(bgp_to_proc->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE))
+ bgp_evpn_local_l3vni_del_post_processing(bgp_to_proc);
+
+ UNSET_FLAG(bgp_to_proc->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL);
+ count++;
+ }
+ }
+
+ return 0;
+}
+
+#define BGP_PROC_L2VNI_LIMIT 10
+static int install_evpn_remote_route_per_l2vni(struct bgp *bgp, struct bgp_path_info *pi,
+ const struct prefix_evpn *evp)
+{
+ int ret = 0;
+ uint8_t vni_iter = 0;
+ struct bgpevpn *t_vpn = NULL;
+ struct bgpevpn *t_vpn_next = NULL;
+
+ for (t_vpn = zebra_l2_vni_first(&bm->zebra_l2_vni_head);
+ t_vpn && vni_iter < BGP_PROC_L2VNI_LIMIT; t_vpn = t_vpn_next) {
+ t_vpn_next = zebra_l2_vni_next(&bm->zebra_l2_vni_head, t_vpn);
+ vni_iter++;
+ /*
+ * Skip install/uninstall if the route entry does not need to
+ * be imported into the VNI, i.e. the RTs don't match
+ */
+ if (!is_route_matching_for_vni(bgp, t_vpn, pi))
+ continue;
+
+ ret = install_evpn_route_entry(bgp, t_vpn, evp, pi);
+
+ if (ret) {
+ flog_err(EC_BGP_EVPN_FAIL,
+ "%u: Failed to install EVPN %s route in VNI %u during BP",
+ bgp->vrf_id, bgp_evpn_route_type_str[evp->prefix.route_type].str,
+ t_vpn->vni);
+ zebra_l2_vni_del(&bm->zebra_l2_vni_head, t_vpn);
+
+ return ret;
+ }
+ }
+
return 0;
}
@@ -3983,26 +4105,40 @@ static int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, bool install)
* Install or uninstall routes of specified type that are appropriate for this
* particular VNI.
*/
-static int install_uninstall_routes_for_vni(struct bgp *bgp,
- struct bgpevpn *vpn, bool install)
+int install_uninstall_routes_for_vni(struct bgp *bgp, struct bgpevpn *vpn, bool install)
{
afi_t afi;
safi_t safi;
struct bgp_dest *rd_dest, *dest;
struct bgp_table *table;
struct bgp_path_info *pi;
- int ret;
+ int ret = 0;
+ uint8_t count = 0;
+ bool walk_fifo = false;
afi = AFI_L2VPN;
safi = SAFI_EVPN;
- /* Walk entire global routing table and evaluate routes which could be
+ if (!bgp) {
+ walk_fifo = true;
+ bgp = bgp_get_evpn();
+ if (!bgp) {
+ zlog_warn("%s: No BGP EVPN instance found...", __func__);
+
+ return -1;
+ }
+ }
+
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Total %u L2VNI VPNs pending to be processed for remote route installation",
+ __func__, (uint32_t)zebra_l2_vni_count(&bm->zebra_l2_vni_head));
+ /*
+ * Walk entire global routing table and evaluate routes which could be
* imported into this VPN. Note that we cannot just look at the routes
- * for
- * the VNI's RD - remote routes applicable for this VNI could have any
- * RD.
+ * for the VNI's RD - remote routes applicable for this VNI could have
+ * any RD.
+ * Note: EVPN routes are a 2-level table.
*/
- /* EVPN routes are a 2-level table. */
for (rd_dest = bgp_table_top(bgp->rib[afi][safi]); rd_dest;
rd_dest = bgp_route_next(rd_dest)) {
table = bgp_dest_get_bgp_table_info(rd_dest);
@@ -4015,54 +4151,80 @@ static int install_uninstall_routes_for_vni(struct bgp *bgp,
(const struct prefix_evpn *)bgp_dest_get_prefix(
dest);
- if (evp->prefix.route_type != BGP_EVPN_IMET_ROUTE &&
- evp->prefix.route_type != BGP_EVPN_AD_ROUTE &&
- evp->prefix.route_type != BGP_EVPN_MAC_IP_ROUTE)
+ /* Proceed only for AD, MAC_IP and IMET routes */
+ switch (evp->prefix.route_type) {
+ case BGP_EVPN_AD_ROUTE:
+ case BGP_EVPN_MAC_IP_ROUTE:
+ case BGP_EVPN_IMET_ROUTE:
+ break;
+ case BGP_EVPN_ES_ROUTE:
+ case BGP_EVPN_IP_PREFIX_ROUTE:
continue;
+ }
for (pi = bgp_dest_get_bgp_path_info(dest); pi;
pi = pi->next) {
- /* Consider "valid" remote routes applicable for
- * this VNI. */
- if (!(CHECK_FLAG(pi->flags, BGP_PATH_VALID)
- && pi->type == ZEBRA_ROUTE_BGP
- && pi->sub_type == BGP_ROUTE_NORMAL))
- continue;
-
- if (!is_route_matching_for_vni(bgp, vpn, pi))
+ /*
+ * Skip install/uninstall if
+ * - Not a valid remote route
+ * - Installing and the EVPN route matches the macvrf SOO
+ */
+ if (!(CHECK_FLAG(pi->flags, BGP_PATH_VALID) &&
+ pi->type == ZEBRA_ROUTE_BGP &&
+ pi->sub_type == BGP_ROUTE_NORMAL) ||
+ (install && bgp_evpn_route_matches_macvrf_soo(pi, evp)))
continue;
- if (install) {
- if (bgp_evpn_route_matches_macvrf_soo(
- pi, evp))
+ if (walk_fifo) {
+ ret = install_evpn_remote_route_per_l2vni(bgp, pi, evp);
+ if (ret) {
+ bgp_dest_unlock_node(rd_dest);
+ bgp_dest_unlock_node(dest);
+ return ret;
+ }
+ } else {
+ /*
+ * Skip install/uninstall if the route
+ * entry does not need to be imported
+ * into the VNI, i.e. the RTs don't match
+ */
+ if (!is_route_matching_for_vni(bgp, vpn, pi))
continue;
- ret = install_evpn_route_entry(bgp, vpn,
- evp, pi);
- } else
- ret = uninstall_evpn_route_entry(
- bgp, vpn, evp, pi);
-
- if (ret) {
- flog_err(EC_BGP_EVPN_FAIL,
- "%u: Failed to %s EVPN %s route in VNI %u",
- bgp->vrf_id,
- install ? "install"
- : "uninstall",
- evp->prefix.route_type ==
- BGP_EVPN_MAC_IP_ROUTE
- ? "MACIP"
- : "IMET",
- vpn->vni);
-
- bgp_dest_unlock_node(rd_dest);
- bgp_dest_unlock_node(dest);
- return ret;
+ if (install)
+ ret = install_evpn_route_entry(bgp, vpn, evp, pi);
+ else
+ ret = uninstall_evpn_route_entry(bgp, vpn, evp, pi);
+
+ if (ret) {
+ flog_err(EC_BGP_EVPN_FAIL,
+ "%u: Failed to %s EVPN %s route in VNI %u",
+ bgp->vrf_id,
+ install ? "install" : "uninstall",
+ bgp_evpn_route_type_str[evp->prefix.route_type]
+ .str,
+ vpn->vni);
+
+ bgp_dest_unlock_node(rd_dest);
+ bgp_dest_unlock_node(dest);
+ return ret;
+ }
}
}
}
}
+ if (walk_fifo) {
+ while (count < BGP_PROC_L2VNI_LIMIT) {
+ vpn = zebra_l2_vni_pop(&bm->zebra_l2_vni_head);
+ if (!vpn)
+ return 0;
+
+ UNSET_FLAG(vpn->flags, VNI_FLAG_ADD);
+ count++;
+ }
+ }
+
return 0;
}
@@ -4212,9 +4374,7 @@ static int bgp_evpn_install_uninstall_table(struct bgp *bgp, afi_t afi,
assert(attr);
- /* Only type-1, type-2, type-3, type-4 and type-5
- * are supported currently
- */
+ /* Only EVPN route-types 1-5 are supported currently */
if (!(evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE
|| evp->prefix.route_type == BGP_EVPN_IMET_ROUTE
|| evp->prefix.route_type == BGP_EVPN_ES_ROUTE
@@ -4271,26 +4431,28 @@ static int bgp_evpn_install_uninstall_table(struct bgp *bgp, afi_t afi,
bgp_evpn_attr_get_esi(pi->attr));
/*
- * macip routes (type-2) are imported into VNI and VRF tables.
- * IMET route is imported into VNI table.
- * prefix routes are imported into VRF table.
+ * AD/IMET routes (type-1/3) are imported into VNI table.
+ * MACIP routes (type-2) are imported into VNI and VRF tables.
+ * Prefix routes (type 5) are imported into VRF table.
*/
if (evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE ||
evp->prefix.route_type == BGP_EVPN_IMET_ROUTE ||
evp->prefix.route_type == BGP_EVPN_AD_ROUTE ||
evp->prefix.route_type == BGP_EVPN_IP_PREFIX_ROUTE) {
+ if (evp->prefix.route_type != BGP_EVPN_IP_PREFIX_ROUTE) {
+ irt = in_vni_rt ? lookup_import_rt(bgp, eval) : NULL;
+ if (irt)
+ install_uninstall_route_in_vnis(bgp, afi, safi, evp, pi,
+ irt->vnis, import);
+ }
- irt = in_vni_rt ? lookup_import_rt(bgp, eval) : NULL;
- if (irt)
- install_uninstall_route_in_vnis(
- bgp, afi, safi, evp, pi, irt->vnis,
- import);
-
- vrf_irt = in_vrf_rt ? lookup_vrf_import_rt(eval) : NULL;
- if (vrf_irt)
- install_uninstall_route_in_vrfs(
- bgp, afi, safi, evp, pi, vrf_irt->vrfs,
- import);
+ if (evp->prefix.route_type != BGP_EVPN_AD_ROUTE &&
+ evp->prefix.route_type != BGP_EVPN_IMET_ROUTE) {
+ vrf_irt = in_vrf_rt ? lookup_vrf_import_rt(eval) : NULL;
+ if (vrf_irt)
+ install_uninstall_route_in_vrfs(bgp, afi, safi, evp, pi,
+ vrf_irt->vrfs, import);
+ }
/* Also check for non-exact match.
* In this, we mask out the AS and
@@ -6780,6 +6942,53 @@ static void link_l2vni_hash_to_l3vni(struct hash_bucket *bucket,
bgpevpn_link_to_l3vni(vpn);
}
+static void bgp_evpn_l3vni_remote_route_processing(struct bgp *bgp, bool install)
+{
+ /*
+ * Anytime BGP gets a bulk of L3VNI ADD/DEL events from zebra,
+ * - walking the entire global routing table per VNI is very
+ *   expensive, and
+ * - the next read (say of another VNI ADD/DEL) from the socket
+ *   does not proceed unless this walk is complete.
+ * This results in huge output-buffer FIFO growth, spiking up the
+ * memory in zebra.
+ *
+ * To avoid this, the idea is to hook the BGP-VRF off struct
+ * bgp_master and maintain a struct bgp FIFO list which is processed
+ * later on, where we walk a chunk of BGP-VRFs and do the remote
+ * route install/uninstall.
+ */
+ if (!CHECK_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL) &&
+ !CHECK_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE))
+ zebra_l3_vni_add_tail(&bm->zebra_l3_vni_head, bgp);
+
+ if (install) {
+ SET_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL);
+ UNSET_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE);
+ } else {
+ SET_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE);
+ UNSET_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL);
+ }
+
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("Scheduling L3VNI %s to be processed later for %s VNI %u",
+ install ? "ADD" : "DEL", bgp->name_pretty, bgp->l3vni);
+ /*
+ * If there are no BGP-VRFs in the bm L3VNI FIFO list, i.e. an update
+ * for an already processed L3VNI comes in, schedule the remote route
+ * install immediately.
+ *
+ * In all other cases, it is ok to schedule the remote route
+ * un/install after a small delay, to give the benefit of the doubt
+ * in case more L3VNI events arrive.
+ */
+ if (zebra_l3_vni_count(&bm->zebra_l3_vni_head))
+ event_add_timer_msec(bm->master, bgp_zebra_process_remote_routes_for_l3vrf, NULL,
+ 20, &bm->t_bgp_zebra_l3_vni);
+ else
+ event_add_event(bm->master, bgp_zebra_process_remote_routes_for_l3vrf, NULL, 0,
+ &bm->t_bgp_zebra_l3_vni);
+}
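The tail of this function is a small debounce: the first event for a quiet FIFO is handled immediately, while follow-on events are coalesced behind a 20 ms timer. A condensed sketch of just that decision, assuming FRR's lib/event.h API exactly as used above:

/* Coalesce bursts: if work is already pending, batch behind a short
 * timer so more VNI events can arrive; otherwise run as soon as the
 * event loop is idle.
 */
static void schedule_vni_processing(struct event_loop *loop, bool pending,
				    void (*handler)(struct event *),
				    struct event **ref)
{
	if (pending)
		event_add_timer_msec(loop, handler, NULL, 20, ref);
	else
		event_add_event(loop, handler, NULL, 0, ref);
}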
+
int bgp_evpn_local_l3vni_add(vni_t l3vni, vrf_id_t vrf_id,
struct ethaddr *svi_rmac,
struct ethaddr *vrr_rmac,
@@ -6925,52 +7134,36 @@ int bgp_evpn_local_l3vni_add(vni_t l3vni, vrf_id_t vrf_id,
/* advertise type-5 routes if needed */
update_advertise_vrf_routes(bgp_vrf);
- /* install all remote routes belonging to this l3vni into correspondng
- * vrf */
- install_routes_for_vrf(bgp_vrf);
+ bgp_evpn_l3vni_remote_route_processing(bgp_vrf, true);
return 0;
}
-int bgp_evpn_local_l3vni_del(vni_t l3vni, vrf_id_t vrf_id)
+static void bgp_evpn_local_l3vni_del_post_processing(struct bgp *bgp_vrf)
{
- struct bgp *bgp_vrf = NULL; /* bgp vrf instance */
struct bgp *bgp_evpn = NULL; /* EVPN bgp instance */
struct listnode *node = NULL;
struct listnode *next = NULL;
struct bgpevpn *vpn = NULL;
- bgp_vrf = bgp_lookup_by_vrf_id(vrf_id);
- if (!bgp_vrf) {
- flog_err(
- EC_BGP_NO_DFLT,
- "Cannot process L3VNI %u Del - Could not find BGP instance",
- l3vni);
- return -1;
- }
-
bgp_evpn = bgp_get_evpn();
if (!bgp_evpn) {
- flog_err(
- EC_BGP_NO_DFLT,
- "Cannot process L3VNI %u Del - Could not find EVPN BGP instance",
- l3vni);
- return -1;
+ flog_err(EC_BGP_NO_DFLT,
+ "Cannot process L3VNI %u Del - Could not find EVPN BGP instance",
+ bgp_vrf->l3vni);
+ return;
}
if (CHECK_FLAG(bgp_evpn->flags, BGP_FLAG_DELETE_IN_PROGRESS)) {
flog_err(EC_BGP_NO_DFLT,
- "Cannot process L3VNI %u ADD - EVPN BGP instance is shutting down",
- l3vni);
- return -1;
+ "Cannot process L3VNI %u ADD - EVPN BGP instance is shutting down",
+ bgp_vrf->l3vni);
+ return;
}
- /* Remove remote routes from BGT VRF even if BGP_VRF_AUTO is configured,
- * bgp_delete would not remove/decrement bgp_path_info of the ip_prefix
- * routes. This will uninstalling the routes from zebra and decremnt the
- * bgp info count.
- */
- uninstall_routes_for_vrf(bgp_vrf);
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("In %s for L3VNI %u after remote route installation", __func__,
+ bgp_vrf->l3vni);
/* delete/withdraw all type-5 routes */
delete_withdraw_vrf_routes(bgp_vrf);
@@ -7016,14 +7209,95 @@ int bgp_evpn_local_l3vni_del(vni_t l3vni, vrf_id_t vrf_id)
bgpevpn_unlink_from_l3vni(vpn);
UNSET_FLAG(bgp_vrf->vrf_flags, BGP_VRF_L3VNI_PREFIX_ROUTES_ONLY);
+ UNSET_FLAG(bgp_vrf->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE);
/* Delete the instance if it was autocreated */
if (CHECK_FLAG(bgp_vrf->vrf_flags, BGP_VRF_AUTO))
bgp_delete(bgp_vrf);
+}
+
+int bgp_evpn_local_l3vni_del(vni_t l3vni, vrf_id_t vrf_id)
+{
+ struct bgp *bgp_evpn = NULL; /* EVPN bgp instance */
+ struct bgp *bgp_vrf = NULL; /* bgp vrf instance */
+
+ bgp_vrf = bgp_lookup_by_vrf_id(vrf_id);
+ if (!bgp_vrf) {
+ flog_err(EC_BGP_NO_DFLT,
+ "Cannot process L3VNI %u Del - Could not find BGP instance", l3vni);
+ return -1;
+ }
+
+ bgp_evpn = bgp_get_evpn();
+ if (!bgp_evpn) {
+ flog_err(EC_BGP_NO_DFLT,
+ "Cannot process L3VNI %u Del - Could not find EVPN BGP instance", l3vni);
+ return -1;
+ }
+
+ if (CHECK_FLAG(bgp_evpn->flags, BGP_FLAG_DELETE_IN_PROGRESS)) {
+ flog_err(EC_BGP_NO_DFLT,
+ "Cannot process L3VNI %u ADD - EVPN BGP instance is shutting down", l3vni);
+ return -1;
+ }
+
+ if (!bgp_vrf->l3vni) {
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("Returning from %s since VNI %u is already deleted", __func__,
+ l3vni);
+
+ return -1;
+ }
+
+ /*
+ * Defer the entire l3vni_delete operation until after the remote
+ * route installation processing, i.e. add the L3VNI DELETE item to
+ * the BGP-VRFs FIFO and move on.
+ */
+ bgp_evpn_l3vni_remote_route_processing(bgp_vrf, false);
return 0;
}
+static void bgp_evpn_l2vni_remote_route_processing(struct bgpevpn *vpn)
+{
+ /*
+ * Anytime BGP gets a bulk of L2VNI ADD/UPD events from zebra,
+ * - walking the entire global routing table per VNI is very
+ *   expensive, and
+ * - the next read (say of another VNI ADD/UPD) from the socket
+ *   does not proceed unless this walk is complete.
+ * This results in huge output-buffer FIFO growth, spiking up the
+ * memory in zebra.
+ *
+ * To avoid this, the idea is to hook the VPN off struct bgp_master
+ * and maintain a VPN FIFO list which is processed later on, where we
+ * walk a chunk of VPNs and do the remote route install.
+ */
+ if (!CHECK_FLAG(vpn->flags, VNI_FLAG_ADD)) {
+ zebra_l2_vni_add_tail(&bm->zebra_l2_vni_head, vpn);
+ SET_FLAG(vpn->flags, VNI_FLAG_ADD);
+ }
+
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("Scheduling L2VNI ADD to be processed later for VNI %u", vpn->vni);
+
+ /*
+ * If there are no VNIs in the bgp VPN FIFO list, i.e. an update
+ * for an already processed VNI comes in, schedule the remote
+ * route install immediately.
+ *
+ * In all other cases, it is ok to schedule the remote route install
+ * after a small delay, to give the benefit of the doubt in case more
+ * L2VNI ADD events arrive.
+ */
+ if (zebra_l2_vni_count(&bm->zebra_l2_vni_head))
+ event_add_timer_msec(bm->master, bgp_zebra_process_remote_routes_for_l2vni, NULL,
+ 20, &bm->t_bgp_zebra_l2_vni);
+ else
+ event_add_event(bm->master, bgp_zebra_process_remote_routes_for_l2vni, NULL, 0,
+ &bm->t_bgp_zebra_l2_vni);
+}
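The VNI_FLAG_ADD guard above makes the enqueue idempotent: re-announcing a VNI that is already on the FIFO coalesces with the queued entry instead of enqueueing it twice. The guard in isolation, with generic names rather than the FRR typesafe list API:

struct vni_entry {
	struct vni_entry *next;
	unsigned int flags;
#define ENTRY_QUEUED 0x1 /* mirrors VNI_FLAG_ADD */
};

static struct vni_entry *fifo_head, **fifo_tail = &fifo_head;

/* Enqueue only if not already queued; the flag is cleared when the
 * entry is popped (or the VNI is deleted), keeping flag state and
 * list membership in sync.
 */
static void enqueue_once(struct vni_entry *e)
{
	if (e->flags & ENTRY_QUEUED)
		return; /* already pending: nothing to do */

	e->flags |= ENTRY_QUEUED;
	e->next = NULL;
	*fifo_tail = e;
	fifo_tail = &e->next;
}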
+
/*
* When bgp instance goes down also clean up what might have been left over
* from evpn.
@@ -7047,6 +7321,10 @@ int bgp_evpn_local_vni_del(struct bgp *bgp, vni_t vni)
if (!vpn)
return 0;
+ /* Remove the VPN from the bgp VPN FIFO (if exists) */
+ UNSET_FLAG(vpn->flags, VNI_FLAG_ADD);
+ zebra_l2_vni_del(&bm->zebra_l2_vni_head, vpn);
+
/* Remove all local EVPN routes and schedule for processing (to
* withdraw from peers).
*/
@@ -7203,12 +7481,6 @@ int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
}
}
- /* If we have learnt and retained remote routes (VTEPs, MACs) for this
- * VNI,
- * install them.
- */
- install_routes_for_vni(bgp, vpn);
-
/* If we are advertising gateway mac-ip
It needs to be conveyed again to zebra */
bgp_zebra_advertise_gw_macip(bgp, vpn->advertise_gw_macip, vpn->vni);
@@ -7216,6 +7488,8 @@ int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
/* advertise svi mac-ip knob to zebra */
bgp_zebra_advertise_svi_macip(bgp, vpn->advertise_svi_macip, vpn->vni);
+ bgp_evpn_l2vni_remote_route_processing(vpn);
+
return 0;
}
@@ -7245,8 +7519,17 @@ void bgp_evpn_flood_control_change(struct bgp *bgp)
*/
void bgp_evpn_cleanup_on_disable(struct bgp *bgp)
{
- hash_iterate(bgp->vnihash, (void (*)(struct hash_bucket *,
- void *))cleanup_vni_on_disable,
+ struct bgpevpn *vpn = NULL;
+ uint32_t vni_count = zebra_l2_vni_count(&bm->zebra_l2_vni_head);
+
+ /* Cleanup VNI FIFO list from this bgp instance */
+ while (vni_count) {
+ vpn = zebra_l2_vni_pop(&bm->zebra_l2_vni_head);
+ UNSET_FLAG(vpn->flags, VNI_FLAG_ADD);
+ vni_count--;
+ }
+
+ hash_iterate(bgp->vnihash, (void (*)(struct hash_bucket *, void *))cleanup_vni_on_disable,
bgp);
}
diff --git a/bgpd/bgp_evpn.h b/bgpd/bgp_evpn.h
index 1a333a5a09..8bbc5d3c37 100644
--- a/bgpd/bgp_evpn.h
+++ b/bgpd/bgp_evpn.h
@@ -200,4 +200,6 @@ bool bgp_evpn_skip_vrf_import_of_local_es(struct bgp *bgp_vrf, const struct pref
int uninstall_evpn_route_entry_in_vrf(struct bgp *bgp_vrf, const struct prefix_evpn *evp,
struct bgp_path_info *parent_pi);
extern void bgp_zebra_evpn_pop_items_from_announce_fifo(struct bgpevpn *vpn);
+extern int install_uninstall_routes_for_vni(struct bgp *bgp, struct bgpevpn *vpn, bool install);
+extern int install_uninstall_routes_for_vrf(struct bgp *bgp_vrf, bool install);
#endif /* _QUAGGA_BGP_EVPN_H */
diff --git a/bgpd/bgp_evpn_private.h b/bgpd/bgp_evpn_private.h
index b05df3d82a..568d3d45ee 100644
--- a/bgpd/bgp_evpn_private.h
+++ b/bgpd/bgp_evpn_private.h
@@ -32,6 +32,13 @@
#define BGP_EVPN_TYPE4_V4_PSIZE 23
#define BGP_EVPN_TYPE4_V6_PSIZE 34
+static const struct message bgp_evpn_route_type_str[] = { { BGP_EVPN_AD_ROUTE, "AD" },
+ { BGP_EVPN_MAC_IP_ROUTE, "MACIP" },
+ { BGP_EVPN_IMET_ROUTE, "IMET" },
+ { BGP_EVPN_ES_ROUTE, "ES" },
+ { BGP_EVPN_IP_PREFIX_ROUTE, "IP-PREFIX" },
+ { 0 } };
+
RB_HEAD(bgp_es_evi_rb_head, bgp_evpn_es_evi);
RB_PROTOTYPE(bgp_es_evi_rb_head, bgp_evpn_es_evi, rb_node,
bgp_es_evi_rb_cmp);
@@ -53,8 +60,9 @@ struct bgpevpn {
#define VNI_FLAG_RD_CFGD 0x4 /* RD is user configured. */
#define VNI_FLAG_IMPRT_CFGD 0x8 /* Import RT is user configured */
#define VNI_FLAG_EXPRT_CFGD 0x10 /* Export RT is user configured */
-#define VNI_FLAG_USE_TWO_LABELS 0x20 /* Attach both L2-VNI and L3-VNI if
- needed for this VPN */
+/* Attach both L2-VNI and L3-VNI if needed for this VPN */
+#define VNI_FLAG_USE_TWO_LABELS 0x20
+#define VNI_FLAG_ADD 0x40 /* L2VNI Add */
struct bgp *bgp_vrf; /* back pointer to the vrf instance */
@@ -108,11 +116,15 @@ struct bgpevpn {
/* List of local ESs */
struct list *local_es_evi_list;
+ struct zebra_l2_vni_item zl2vni;
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(bgpevpn);
+DECLARE_LIST(zebra_l2_vni, struct bgpevpn, zl2vni);
+
/* Mapping of Import RT to VNIs.
* The Import RTs of all VNIs are maintained in a hash table with each
* RT linking to all VNIs that will import routes matching this RT.
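The struct message table added in this header is the usual FRR idiom for code-to-string maps; entries are matched by key (the route-type value), not by array position. A small usage sketch with lookup_msg() from lib/log.h:

#include "lib/log.h" /* struct message, lookup_msg() */

static const char *evpn_rt_str(int route_type)
{
	/* lookup_msg() scans to the { 0 } sentinel and returns the
	 * fallback when the key is absent, so unknown or future route
	 * types cannot index past the table.
	 */
	return lookup_msg(bgp_evpn_route_type_str, route_type, "Unknown");
}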
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index 958a9c6492..dc6e0d33c2 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -764,10 +764,9 @@ static void bgp_evpn_show_routes_mac_ip_es(struct vty *vty, esi_t *esi,
json_path = json_object_new_array();
if (detail)
- route_vty_out_detail(
- vty, bgp, bd, bgp_dest_get_prefix(bd),
- pi, AFI_L2VPN, SAFI_EVPN,
- RPKI_NOT_BEING_USED, json_path);
+ route_vty_out_detail(vty, bgp, bd, bgp_dest_get_prefix(bd), pi,
+ AFI_L2VPN, SAFI_EVPN, RPKI_NOT_BEING_USED,
+ json_path, NULL);
else
route_vty_out(vty, &bd->rn->p, pi, 0, SAFI_EVPN,
json_path, false);
@@ -892,10 +891,9 @@ static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn,
json_path = json_object_new_array();
if (detail)
- route_vty_out_detail(vty, bgp, dest, &tmp_p, pi,
- AFI_L2VPN, SAFI_EVPN,
- RPKI_NOT_BEING_USED,
- json_path);
+ route_vty_out_detail(vty, bgp, dest, &tmp_p, pi, AFI_L2VPN,
+ SAFI_EVPN, RPKI_NOT_BEING_USED, json_path,
+ NULL);
else
route_vty_out(vty, &tmp_p, pi, 0, SAFI_EVPN,
@@ -2570,9 +2568,8 @@ static void evpn_show_route_vni_multicast(struct vty *vty, struct bgp *bgp,
if (json)
json_path = json_object_new_array();
- route_vty_out_detail(vty, bgp, dest, bgp_dest_get_prefix(dest),
- pi, afi, safi, RPKI_NOT_BEING_USED,
- json_path);
+ route_vty_out_detail(vty, bgp, dest, bgp_dest_get_prefix(dest), pi, afi, safi,
+ RPKI_NOT_BEING_USED, json_path, NULL);
if (json)
json_object_array_add(json_paths, json_path);
@@ -2699,9 +2696,8 @@ static void evpn_show_route_vni_macip(struct vty *vty, struct bgp *bgp,
NULL /* ip */);
}
- route_vty_out_detail(vty, bgp, dest, (struct prefix *)&tmp_p,
- pi, afi, safi, RPKI_NOT_BEING_USED,
- json_path);
+ route_vty_out_detail(vty, bgp, dest, (struct prefix *)&tmp_p, pi, afi, safi,
+ RPKI_NOT_BEING_USED, json_path, NULL);
if (json)
json_object_array_add(json_paths, json_path);
@@ -2810,9 +2806,8 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp,
if (json)
json_path = json_object_new_array();
- route_vty_out_detail(vty, bgp, dest, bgp_dest_get_prefix(dest),
- pi, afi, safi, RPKI_NOT_BEING_USED,
- json_path);
+ route_vty_out_detail(vty, bgp, dest, bgp_dest_get_prefix(dest), pi, afi, safi,
+ RPKI_NOT_BEING_USED, json_path, NULL);
if (json)
json_object_array_add(json_paths, json_path);
@@ -2923,9 +2918,8 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp,
if (json)
json_path = json_object_new_array();
- route_vty_out_detail(
- vty, bgp, dest, bgp_dest_get_prefix(dest), pi,
- afi, safi, RPKI_NOT_BEING_USED, json_path);
+ route_vty_out_detail(vty, bgp, dest, bgp_dest_get_prefix(dest), pi, afi,
+ safi, RPKI_NOT_BEING_USED, json_path, NULL);
if (json)
json_object_array_add(json_paths, json_path);
@@ -3060,9 +3054,8 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp,
if (json)
json_path = json_object_new_array();
- route_vty_out_detail(vty, bgp, dest, p, pi, AFI_L2VPN,
- SAFI_EVPN, RPKI_NOT_BEING_USED,
- json_path);
+ route_vty_out_detail(vty, bgp, dest, p, pi, AFI_L2VPN, SAFI_EVPN,
+ RPKI_NOT_BEING_USED, json_path, NULL);
if (json)
json_object_array_add(json_paths, json_path);
@@ -3115,6 +3108,7 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
afi_t afi;
safi_t safi;
uint32_t prefix_cnt, path_cnt;
+ bool first = true;
afi = AFI_L2VPN;
safi = SAFI_EVPN;
@@ -3139,8 +3133,15 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
prefix_rd2str((struct prefix_rd *)rd_destp, rd_str,
sizeof(rd_str), bgp->asnotation);
- if (json)
+ if (json) {
+ if (first) {
+ vty_out(vty, "\"%s\":", rd_str);
+ first = false;
+ } else {
+ vty_out(vty, ",\"%s\":", rd_str);
+ }
json_rd = json_object_new_object();
+ }
rd_header = 1;
@@ -3223,11 +3224,10 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
json_path = json_object_new_array();
if (detail) {
- route_vty_out_detail(
- vty, bgp, dest,
- bgp_dest_get_prefix(dest), pi,
- AFI_L2VPN, SAFI_EVPN,
- RPKI_NOT_BEING_USED, json_path);
+ route_vty_out_detail(vty, bgp, dest,
+ bgp_dest_get_prefix(dest), pi,
+ AFI_L2VPN, SAFI_EVPN,
+ RPKI_NOT_BEING_USED, json_path, NULL);
} else
route_vty_out(vty, p, pi, 0, SAFI_EVPN,
json_path, false);
@@ -3255,18 +3255,18 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
}
if (json) {
- if (add_rd_to_json)
- json_object_object_add(json, rd_str, json_rd);
- else {
+ if (add_rd_to_json) {
+ vty_json_no_pretty(vty, json_rd);
+ } else {
+ vty_out(vty, "{}");
json_object_free(json_rd);
- json_rd = NULL;
}
}
}
if (json) {
- json_object_int_add(json, "numPrefix", prefix_cnt);
- json_object_int_add(json, "numPaths", path_cnt);
+ vty_out(vty, ",\"numPrefix\":%u", prefix_cnt);
+ vty_out(vty, ",\"numPaths\":%u", path_cnt);
} else {
if (prefix_cnt == 0) {
vty_out(vty, "No EVPN prefixes %sexist\n",
@@ -3284,20 +3284,18 @@ int bgp_evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
{
json_object *json = NULL;
- if (use_json)
+ if (use_json) {
json = json_object_new_object();
+ vty_out(vty, "{\n");
+ }
evpn_show_all_routes(vty, bgp, type, json, detail, false);
- if (use_json)
- /*
- * We are using no_pretty here because under extremely high
- * settings (lots of routes with many different paths) this can
- * save several minutes of output when FRR is run on older cpu's
- * or more underperforming routers out there. So for route
- * scale, we need to use no_pretty json.
- */
- vty_json_no_pretty(vty, json);
+ if (use_json) {
+ vty_out(vty, "}\n");
+ json_object_free(json);
+ }
+
return CMD_SUCCESS;
}
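The changes above (and in evpn_show_all_routes) replace one giant in-memory json_object with streamed output: braces and per-RD bodies are written to the vty as they are produced, which bounds memory at route scale. The shape of the pattern as standalone C, assuming plain stdio in place of vty_out():

#include <stdbool.h>
#include <stdio.h>

/* Stream a JSON object one member at a time instead of materializing
 * the whole document first; "first" suppresses the leading comma.
 */
static void stream_json(FILE *out, const char *const *keys,
			const char *const *vals, size_t n)
{
	bool first = true;

	fprintf(out, "{\n");
	for (size_t i = 0; i < n; i++) {
		fprintf(out, "%s\"%s\":\"%s\"", first ? "" : ",", keys[i],
			vals[i]);
		first = false;
	}
	fprintf(out, "}\n");
}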
@@ -3469,7 +3467,9 @@ static void evpn_process_default_originate_cmd(struct bgp *bgp_vrf,
BGP_L2VPN_EVPN_DEFAULT_ORIGINATE_IPV6);
}
- bgp_evpn_install_uninstall_default_route(bgp_vrf, afi, safi, add);
+ if (is_l3vni_live(bgp_vrf))
+ bgp_evpn_install_uninstall_default_route(bgp_vrf,
+ afi, safi, add);
}
/*
@@ -4946,8 +4946,10 @@ DEFUN(show_bgp_l2vpn_evpn_route,
if (!bgp)
return CMD_WARNING;
- if (uj)
+ if (uj) {
json = json_object_new_object();
+ vty_out(vty, "{\n");
+ }
if (bgp_evpn_cli_parse_type(&type, argv, argc) < 0)
return CMD_WARNING;
@@ -4960,13 +4962,10 @@ DEFUN(show_bgp_l2vpn_evpn_route,
evpn_show_all_routes(vty, bgp, type, json, detail, self_orig);
- /*
- * This is an extremely expensive operation at scale
- * and as such we need to save as much time as is
- * possible.
- */
- if (uj)
- vty_json_no_pretty(vty, json);
+ if (uj) {
+ vty_out(vty, "}\n");
+ json_object_free(json);
+ }
return CMD_SUCCESS;
}
@@ -5023,10 +5022,20 @@ DEFUN(show_bgp_l2vpn_evpn_route_rd,
if (bgp_evpn_cli_parse_type(&type, argv, argc) < 0)
return CMD_WARNING;
- if (rd_all)
+ if (rd_all) {
+ if (uj)
+ vty_out(vty, "{\n");
+
evpn_show_all_routes(vty, bgp, type, json, 1, false);
- else
+
+ if (uj) {
+ vty_out(vty, "}\n");
+ json_object_free(json);
+ return CMD_SUCCESS;
+ }
+ } else {
evpn_show_route_rd(vty, bgp, &prd, type, json);
+ }
if (uj)
vty_json(vty, json);
diff --git a/bgpd/bgp_flowspec_vty.c b/bgpd/bgp_flowspec_vty.c
index d4ccca84bb..3d2dda4ee4 100644
--- a/bgpd/bgp_flowspec_vty.c
+++ b/bgpd/bgp_flowspec_vty.c
@@ -441,7 +441,7 @@ int bgp_show_table_flowspec(struct vty *vty, struct bgp *bgp, afi_t afi,
}
if (total_count && !use_json)
vty_out(vty,
- "\nDisplayed %ld flowspec entries\n",
+ "\nDisplayed %ld flowspec entries\n",
total_count);
return CMD_SUCCESS;
}
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 8c9050185b..3d02214ca9 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -265,7 +265,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
from_peer->addpath_paths_limit[afi][safi];
}
- if (bgp_getsockname(peer) < 0) {
+ if (bgp_getsockname(keeper) < 0) {
flog_err(EC_LIB_SOCKET,
"%%bgp_getsockname() failed for %s peer %s fd %d (from_peer fd %d)",
(CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER)
@@ -277,7 +277,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
return NULL;
}
if (going_away->status > Active) {
- if (bgp_getsockname(from_peer) < 0) {
+ if (bgp_getsockname(going_away) < 0) {
flog_err(EC_LIB_SOCKET,
"%%bgp_getsockname() failed for %s from_peer %s fd %d (peer fd %d)",
@@ -325,8 +325,8 @@ void bgp_timer_set(struct peer_connection *connection)
/* First entry point of peer's finite state machine. In Idle
status start timer is on unless peer is shutdown or peer is
inactive. All other timer must be turned off */
- if (BGP_PEER_START_SUPPRESSED(peer) || !peer_active(peer)
- || peer->bgp->vrf_id == VRF_UNKNOWN) {
+ if (BGP_PEER_START_SUPPRESSED(peer) || !peer_active(connection) ||
+ peer->bgp->vrf_id == VRF_UNKNOWN) {
EVENT_OFF(connection->t_start);
} else {
BGP_TIMER_ON(connection->t_start, bgp_start_timer,
@@ -491,11 +491,14 @@ static void bgp_connect_timer(struct event *thread)
assert(!connection->t_read);
if (bgp_debug_neighbor_events(peer))
- zlog_debug("%s [FSM] Timer (connect timer expire)", peer->host);
+ zlog_debug("%s [FSM] Timer (connect timer (%us) expire)", peer->host,
+ peer->v_connect);
if (CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER))
bgp_stop(connection);
else {
+ if (!peer->connect)
+ peer->v_connect = MIN(BGP_MAX_CONNECT_RETRY, peer->v_connect * 2);
EVENT_VAL(thread) = ConnectRetry_timer_expired;
bgp_event(thread); /* bgp_event unlocks peer */
}
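The hunk above introduces exponential backoff for the connect retry timer: each expiry doubles v_connect, capped at BGP_MAX_CONNECT_RETRY, unless the operator configured an explicit timer; a later hunk resets it once the session reaches Established. The two steps reduced to helpers (names illustrative):

/* Double the retry interval on failure, capped at a ceiling. */
static unsigned int backoff_next(unsigned int cur, unsigned int max)
{
	return (cur * 2 > max) ? max : cur * 2;
}

/* On Established: return to the configured value, or the default when
 * the operator did not set one.
 */
static unsigned int backoff_reset(unsigned int configured, unsigned int dflt)
{
	return configured ? configured : dflt;
}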
@@ -662,7 +665,7 @@ static void bgp_llgr_stale_timer_expire(struct event *thread)
static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi)
{
struct bgp_dest *dest;
- struct bgp_path_info *pi;
+ struct bgp_path_info *pi, *next;
struct bgp_table *table;
struct attr attr;
@@ -677,8 +680,8 @@ static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi)
for (rm = bgp_table_top(table); rm;
rm = bgp_route_next(rm))
- for (pi = bgp_dest_get_bgp_path_info(rm); pi;
- pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(rm);
+ (pi != NULL) && (next = pi->next, 1); pi = next) {
if (pi->peer != peer)
continue;
@@ -709,8 +712,8 @@ static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi)
} else {
for (dest = bgp_table_top(peer->bgp->rib[afi][safi]); dest;
dest = bgp_route_next(dest))
- for (pi = bgp_dest_get_bgp_path_info(dest); pi;
- pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(dest);
+ (pi != NULL) && (next = pi->next, 1); pi = next) {
if (pi->peer != peer)
continue;
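The `(pi != NULL) && (next = pi->next, 1)` loop header above is a deletion-safe traversal: the successor is captured before the body can unlink or free the current node. Written out plainly:

struct node {
	struct node *next;
};

/* Deletion-safe singly linked list walk: grab the successor before
 * the callback may free the current element.
 */
static void walk_safe(struct node *head, void (*visit)(struct node *))
{
	struct node *n, *next;

	for (n = head; n; n = next) {
		next = n->next;
		visit(n); /* may unlink/free n */
	}
}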
@@ -1224,9 +1227,14 @@ void bgp_fsm_change_status(struct peer_connection *connection,
peer_count = bgp->established_peers;
- if (status == Established)
+ if (status == Established) {
bgp->established_peers++;
- else if ((peer_established(connection)) && (status != Established))
+ /* Reset the retry timer if we already established */
+ if (peer->connect)
+ peer->v_connect = peer->connect;
+ else
+ peer->v_connect = peer->bgp->default_connect_retry;
+ } else if ((peer_established(connection)) && (status != Established))
bgp->established_peers--;
if (bgp_debug_neighbor_events(peer)) {
@@ -1694,11 +1702,11 @@ bgp_connect_success(struct peer_connection *connection)
return bgp_stop(connection);
}
- if (bgp_getsockname(peer) < 0) {
+ if (bgp_getsockname(connection) < 0) {
flog_err_sys(EC_LIB_SOCKET,
"%s: bgp_getsockname(): failed for peer %s, fd %d",
__func__, peer->host, connection->fd);
- bgp_notify_send(peer->connection, BGP_NOTIFY_FSM_ERR,
+ bgp_notify_send(connection, BGP_NOTIFY_FSM_ERR,
bgp_fsm_error_subcode(connection->status));
bgp_writes_on(connection);
return BGP_FSM_FAILURE;
@@ -1740,11 +1748,11 @@ bgp_connect_success_w_delayopen(struct peer_connection *connection)
return bgp_stop(connection);
}
- if (bgp_getsockname(peer) < 0) {
+ if (bgp_getsockname(connection) < 0) {
flog_err_sys(EC_LIB_SOCKET,
"%s: bgp_getsockname(): failed for peer %s, fd %d",
__func__, peer->host, connection->fd);
- bgp_notify_send(peer->connection, BGP_NOTIFY_FSM_ERR,
+ bgp_notify_send(connection, BGP_NOTIFY_FSM_ERR,
bgp_fsm_error_subcode(connection->status));
bgp_writes_on(connection);
return BGP_FSM_FAILURE;
@@ -1807,12 +1815,14 @@ bgp_connect_fail(struct peer_connection *connection)
/* after connect is called(), getpeername is able to return
* port and address on non established streams
*/
-static void bgp_connect_in_progress_update_connection(struct peer *peer)
+static void bgp_connect_in_progress_update_connection(struct peer_connection *connection)
{
- bgp_updatesockname(peer);
+ struct peer *peer = connection->peer;
+
+ bgp_updatesockname(peer, connection);
if (!peer->su_remote && !BGP_CONNECTION_SU_UNSPEC(peer->connection)) {
/* if connect initiated, then dest port and dest addresses are well known */
- peer->su_remote = sockunion_dup(&peer->connection->su);
+ peer->su_remote = sockunion_dup(&connection->su);
if (sockunion_family(peer->su_remote) == AF_INET)
peer->su_remote->sin.sin_port = htons(peer->port);
else if (sockunion_family(peer->su_remote) == AF_INET6)
@@ -1826,7 +1836,7 @@ static void bgp_connect_in_progress_update_connection(struct peer *peer)
static enum bgp_fsm_state_progress bgp_start(struct peer_connection *connection)
{
struct peer *peer = connection->peer;
- int status;
+ enum connect_result status;
bgp_peer_conf_if_to_su_update(connection);
@@ -1916,7 +1926,7 @@ static enum bgp_fsm_state_progress bgp_start(struct peer_connection *connection)
__func__, peer->connection->fd);
return BGP_FSM_FAILURE;
}
- bgp_connect_in_progress_update_connection(peer);
+ bgp_connect_in_progress_update_connection(connection);
/*
* - when the socket becomes ready, poll() will signify POLLOUT
@@ -2726,33 +2736,49 @@ static void bgp_gr_update_mode_of_all_peers(struct bgp *bgp,
struct listnode *node = {0};
struct listnode *nnode = {0};
enum peer_mode peer_old_state = PEER_INVALID;
-
- /* TODO: Need to handle peer-groups. */
+ struct peer_group *group;
+ struct peer *member;
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
- peer_old_state = bgp_peer_gr_mode_get(peer);
- if (peer_old_state != PEER_GLOBAL_INHERIT)
- continue;
+ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ peer_old_state = bgp_peer_gr_mode_get(peer);
+ if (peer_old_state != PEER_GLOBAL_INHERIT)
+ continue;
- bgp_peer_inherit_global_gr_mode(peer, global_new_state);
- bgp_peer_gr_flags_update(peer);
+ bgp_peer_inherit_global_gr_mode(peer, global_new_state);
+ bgp_peer_gr_flags_update(peer);
- if (BGP_DEBUG(graceful_restart, GRACEFUL_RESTART))
- zlog_debug("%pBP: Inherited Global GR mode, GR flags 0x%x peer flags 0x%" PRIx64
- "...resetting session",
- peer, peer->peer_gr_new_status_flag,
- peer->flags);
+ if (BGP_DEBUG(graceful_restart, GRACEFUL_RESTART))
+ zlog_debug("%pBP: Inherited Global GR mode, GR flags 0x%x peer flags 0x%" PRIx64
+ "...resetting session",
+ peer, peer->peer_gr_new_status_flag, peer->flags);
- peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE;
+ peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE;
- /* Reset session to match with behavior for other peer
- * configs that require the session to be re-setup.
- */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
- bgp_session_reset_safe(peer, &nnode);
+ if (!peer_notify_config_change(peer->connection))
+ bgp_session_reset_safe(peer, &nnode);
+ } else {
+ group = peer->group;
+ for (ALL_LIST_ELEMENTS(group->peer, node, nnode, member)) {
+ peer_old_state = bgp_peer_gr_mode_get(member);
+ if (peer_old_state != PEER_GLOBAL_INHERIT)
+ continue;
+
+ bgp_peer_inherit_global_gr_mode(member, global_new_state);
+ bgp_peer_gr_flags_update(member);
+
+ if (BGP_DEBUG(graceful_restart, GRACEFUL_RESTART))
+ zlog_debug("%pBP: Inherited Global GR mode, GR flags 0x%x peer flags 0x%" PRIx64
+ "...resetting session",
+ member, member->peer_gr_new_status_flag,
+ member->flags);
+
+ member->last_reset = PEER_DOWN_CAPABILITY_CHANGE;
+
+ if (!peer_notify_config_change(member->connection))
+ bgp_session_reset(member);
+ }
+ }
}
}
@@ -2911,6 +2937,9 @@ unsigned int bgp_peer_gr_action(struct peer *peer, enum peer_mode old_state,
{
enum global_mode global_gr_mode;
bool session_reset = true;
+ struct peer_group *group;
+ struct peer *member;
+ struct listnode *node, *nnode;
if (old_state == new_state)
return BGP_GR_NO_OPERATION;
@@ -2945,16 +2974,21 @@ unsigned int bgp_peer_gr_action(struct peer *peer, enum peer_mode old_state,
bgp_peer_move_to_gr_mode(peer, new_state);
if (session_reset) {
- peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE;
+ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE;
- /* Reset session to match with behavior for other peer
- * configs that require the session to be re-setup.
- */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
- bgp_session_reset(peer);
+ if (!peer_notify_config_change(peer->connection))
+ bgp_session_reset(peer);
+ } else {
+ group = peer->group;
+ for (ALL_LIST_ELEMENTS(group->peer, node, nnode, member)) {
+ member->last_reset = PEER_DOWN_CAPABILITY_CHANGE;
+ bgp_peer_move_to_gr_mode(member, new_state);
+
+ if (!peer_notify_config_change(member->connection))
+ bgp_session_reset(member);
+ }
+ }
}
return BGP_GR_SUCCESS;
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index 23e0c191dc..54a966e191 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -1125,7 +1125,6 @@ static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
struct bgp_path_info *path;
struct bgp *bgp_path;
struct bgp_table *table;
- time_t tbuf;
vty_out(vty, "Current BGP label nexthop cache for %s, VRF %s\n",
afi2str(afi), bgp->name_pretty);
@@ -1146,8 +1145,7 @@ static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
vty_out(vty, " if %s\n",
ifindex2ifname(iter->nh->ifindex,
iter->nh->vrf_id));
- tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
- vty_out(vty, " Last update: %s", ctime_r(&tbuf, buf));
+ vty_out(vty, " Last update: %s", time_to_string(iter->last_update, buf));
if (!detail)
continue;
vty_out(vty, " Paths:\n");
diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c
index 535d2fc5f4..9ca20c949a 100644
--- a/bgpd/bgp_main.c
+++ b/bgpd/bgp_main.c
@@ -207,6 +207,8 @@ static __attribute__((__noreturn__)) void bgp_exit(int status)
bgp_nhg_finish();
zebra_announce_fini(&bm->zebra_announce_head);
+ zebra_l2_vni_fini(&bm->zebra_l2_vni_head);
+ zebra_l3_vni_fini(&bm->zebra_l3_vni_head);
/* reverse bgp_dump_init */
bgp_dump_finish();
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index ca7f73dde9..b96c287f86 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -2167,6 +2167,8 @@ static void vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
struct interface *ifp = NULL;
char rd_buf[RD_ADDRSTRLEN];
struct aspath *new_aspath;
+ int32_t aspath_loop_count = 0;
+ struct peer *peer = path_vpn->peer;
int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
@@ -2227,7 +2229,9 @@ static void vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
bn = bgp_afi_node_get(to_bgp->rib[afi][safi], afi, safi, p, NULL);
/* Check if leaked route has our asn. If so, don't import it. */
- if (aspath_loop_check(path_vpn->attr->aspath, to_bgp->as)) {
+ if (CHECK_FLAG(peer->af_flags[afi][SAFI_MPLS_VPN], PEER_FLAG_ALLOWAS_IN))
+ aspath_loop_count = peer->allowas_in[afi][SAFI_MPLS_VPN];
+ if (aspath_loop_check(path_vpn->attr->aspath, to_bgp->as) > aspath_loop_count) {
for (bpi = bgp_dest_get_bgp_path_info(bn); bpi;
bpi = bpi->next) {
if (bpi->extra && bpi->extra->vrfleak &&
@@ -2513,11 +2517,12 @@ void vpn_leak_to_vrf_update(struct bgp *from_bgp,
{
struct listnode *mnode, *mnnode;
struct bgp *bgp;
+ const struct prefix *p = bgp_dest_get_prefix(path_vpn->net);
int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
if (debug)
- zlog_debug("%s: start (path_vpn=%p)", __func__, path_vpn);
+ zlog_debug("%s: start (path_vpn=%p, prefix=%pFX)", __func__, path_vpn, p);
/* Loop over VRFs */
for (ALL_LIST_ELEMENTS(bm->bgp, mnode, mnnode, bgp)) {
@@ -4074,6 +4079,35 @@ void bgp_vpn_leak_export(struct bgp *from_bgp)
}
}
+/* Release the label previously assigned from the labelpool and, based
+ * on the reset arg, unset the auto-allocation flag.
+ * This is also used from the vty to release the label and to change
+ * the allocation mode.
+ */
+void bgp_vpn_release_label(struct bgp *bgp, afi_t afi, bool reset)
+{
+ if (!CHECK_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_LABEL_AUTO))
+ return;
+ /*
+ * label has previously been automatically
+ * assigned by labelpool: release it
+ *
+ * NB if tovpn_label == MPLS_LABEL_NONE it
+ * means the automatic assignment is in flight
+ * and therefore the labelpool callback must
+ * detect that the auto label is not needed.
+ */
+ if (bgp->vpn_policy[afi].tovpn_label == MPLS_LABEL_NONE)
+ return;
+ if (CHECK_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP))
+ return;
+
+ bgp_lp_release(LP_TYPE_VRF, &bgp->vpn_policy[afi], bgp->vpn_policy[afi].tovpn_label);
+ bgp->vpn_policy[afi].tovpn_label = MPLS_LABEL_NONE;
+
+ if (reset)
+ UNSET_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_LABEL_AUTO);
+}
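A hypothetical caller, e.g. a vty handler switching from automatic to static label allocation — the helper call is real per this diff; the surrounding function and field assignment are illustrative only:

/* Illustrative only: release the old auto label (and clear the AUTO
 * flag via reset=true) before installing a static value.
 */
static void set_static_label(struct bgp *bgp, afi_t afi, mpls_label_t label)
{
	bgp_vpn_release_label(bgp, afi, true);
	bgp->vpn_policy[afi].tovpn_label = label;
}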
+
/* The nexthops values are compared to
* find in the tree the appropriate cache entry
*/
@@ -4390,7 +4424,6 @@ static void show_bgp_mplsvpn_nh_label_bind_internal(struct vty *vty,
struct bgp_path_info *path;
struct bgp *bgp_path;
struct bgp_table *table;
- time_t tbuf;
char buf[32];
vty_out(vty, "Current BGP mpls-vpn nexthop label bind cache, %s\n",
@@ -4408,8 +4441,7 @@ static void show_bgp_mplsvpn_nh_label_bind_internal(struct vty *vty,
vty_out(vty, " interface %s\n",
ifindex2ifname(iter->nh->ifindex,
iter->nh->vrf_id));
- tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
- vty_out(vty, " Last update: %s", ctime_r(&tbuf, buf));
+ vty_out(vty, " Last update: %s", time_to_string(iter->last_update, buf));
if (!detail)
continue;
vty_out(vty, " Paths:\n");
diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index 39fed66781..18639fc69b 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -352,6 +352,7 @@ extern void vpn_handle_router_id_update(struct bgp *bgp, bool withdraw,
bool is_config);
extern void bgp_vpn_leak_unimport(struct bgp *from_bgp);
extern void bgp_vpn_leak_export(struct bgp *from_bgp);
+extern void bgp_vpn_release_label(struct bgp *bgp, afi_t afi, bool reset);
extern bool bgp_mplsvpn_path_uses_valid_mpls_label(struct bgp_path_info *pi);
extern int
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index de57d91806..f1bea1c189 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -504,7 +504,7 @@ static void bgp_accept(struct event *thread)
bgp_fsm_change_status(connection1, Active);
EVENT_OFF(connection1->t_start);
- if (peer_active(peer1)) {
+ if (peer_active(peer1->connection)) {
if (CHECK_FLAG(peer1->flags,
PEER_FLAG_TIMER_DELAYOPEN))
BGP_EVENT_ADD(connection1,
@@ -557,7 +557,7 @@ static void bgp_accept(struct event *thread)
}
/* Check that at least one AF is activated for the peer. */
- if (!peer_active(peer1)) {
+ if (!peer_active(connection1)) {
if (bgp_debug_neighbor_events(peer1))
zlog_debug(
"%s - incoming conn rejected - no AF activated for peer",
@@ -658,7 +658,7 @@ static void bgp_accept(struct event *thread)
bgp_event_update(connection1, TCP_connection_closed);
}
- if (peer_active(peer)) {
+ if (peer_active(peer->connection)) {
if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN))
BGP_EVENT_ADD(connection, TCP_connection_open_w_delay);
else
@@ -762,7 +762,7 @@ static int bgp_update_source(struct peer_connection *connection)
}
/* BGP try to connect to the peer. */
-int bgp_connect(struct peer_connection *connection)
+enum connect_result bgp_connect(struct peer_connection *connection)
{
struct peer *peer = connection->peer;
@@ -773,7 +773,7 @@ int bgp_connect(struct peer_connection *connection)
if (peer->conf_if && BGP_CONNECTION_SU_UNSPEC(connection)) {
if (bgp_debug_neighbor_events(peer))
zlog_debug("Peer address not learnt: Returning from connect");
- return 0;
+ return connect_error;
}
frr_with_privs(&bgpd_privs) {
/* Make socket for the peer. */
@@ -787,7 +787,7 @@ int bgp_connect(struct peer_connection *connection)
zlog_debug("%s: Failure to create socket for connection to %s, error received: %s(%d)",
__func__, peer->host, safe_strerror(errno),
errno);
- return -1;
+ return connect_error;
}
set_nonblocking(connection->fd);
@@ -808,7 +808,7 @@ int bgp_connect(struct peer_connection *connection)
__func__, peer->host, safe_strerror(errno),
errno);
- return -1;
+ return connect_error;
}
sockopt_reuseaddr(connection->fd);
@@ -844,7 +844,7 @@ int bgp_connect(struct peer_connection *connection)
/* If the peer is passive mode, force to move to Active mode. */
if (CHECK_FLAG(peer->flags, PEER_FLAG_PASSIVE)) {
BGP_EVENT_ADD(connection, TCP_connection_open_failed);
- return BGP_FSM_SUCCESS;
+ return connect_error;
}
if (peer->conf_if || peer->ifname)
@@ -861,7 +861,7 @@ int bgp_connect(struct peer_connection *connection)
htons(peer->port), ifindex);
}
-void bgp_updatesockname(struct peer *peer)
+void bgp_updatesockname(struct peer *peer, struct peer_connection *connection)
{
if (peer->su_local) {
sockunion_free(peer->su_local);
@@ -873,14 +873,16 @@ void bgp_updatesockname(struct peer *peer)
peer->su_remote = NULL;
}
- peer->su_local = sockunion_getsockname(peer->connection->fd);
- peer->su_remote = sockunion_getpeername(peer->connection->fd);
+ peer->su_local = sockunion_getsockname(connection->fd);
+ peer->su_remote = sockunion_getpeername(connection->fd);
}
/* After TCP connection is established. Get local address and port. */
-int bgp_getsockname(struct peer *peer)
+int bgp_getsockname(struct peer_connection *connection)
{
- bgp_updatesockname(peer);
+ struct peer *peer = connection->peer;
+
+ bgp_updatesockname(peer, connection);
if (!bgp_zebra_nexthop_set(peer->su_local, peer->su_remote,
&peer->nexthop, peer)) {
diff --git a/bgpd/bgp_network.h b/bgpd/bgp_network.h
index ceb6b6f002..ed1a72ec89 100644
--- a/bgpd/bgp_network.h
+++ b/bgpd/bgp_network.h
@@ -21,9 +21,9 @@ extern int bgp_socket(struct bgp *bgp, unsigned short port,
const char *address);
extern void bgp_close_vrf_socket(struct bgp *bgp);
extern void bgp_close(void);
-extern int bgp_connect(struct peer_connection *connection);
-extern int bgp_getsockname(struct peer *peer);
-extern void bgp_updatesockname(struct peer *peer);
+extern enum connect_result bgp_connect(struct peer_connection *connection);
+extern int bgp_getsockname(struct peer_connection *connection);
+extern void bgp_updatesockname(struct peer *peer, struct peer_connection *connection);
extern int bgp_md5_set_prefix(struct bgp *bgp, struct prefix *p,
const char *password);
diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c
index 357d5292da..a1ab9d2d61 100644
--- a/bgpd/bgp_nexthop.c
+++ b/bgpd/bgp_nexthop.c
@@ -32,6 +32,7 @@
#include "bgpd/bgp_vty.h"
#include "bgpd/bgp_rd.h"
#include "bgpd/bgp_mplsvpn.h"
+#include "bgpd/bgp_bfd.h"
DEFINE_MTYPE_STATIC(BGPD, MARTIAN_STRING, "BGP Martian Addr Intf String");
@@ -409,17 +410,6 @@ void bgp_connected_add(struct bgp *bgp, struct connected *ifc)
bgp_dest_set_bgp_connected_ref_info(dest, bc);
}
- for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
- if (peer->conf_if &&
- (strcmp(peer->conf_if, ifc->ifp->name) == 0) &&
- !peer_established(peer->connection) &&
- !CHECK_FLAG(peer->flags, PEER_FLAG_IFPEER_V6ONLY)) {
- connection = peer->connection;
- if (peer_active(peer))
- BGP_EVENT_ADD(connection, BGP_Stop);
- BGP_EVENT_ADD(connection, BGP_Start);
- }
- }
} else if (addr->family == AF_INET6) {
apply_mask_ipv6((struct prefix_ipv6 *)&p);
@@ -443,6 +433,22 @@ void bgp_connected_add(struct bgp *bgp, struct connected *ifc)
bgp_dest_set_bgp_connected_ref_info(dest, bc);
}
}
+
+ /*
+ * Iterate over all the peers and attempt to set the bfd session
+ * data; if it's a bgp unnumbered peer, kick the session into
+ * motion if necessary.
+ */
+ for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
+ bgp_peer_bfd_update_source(peer);
+ if (peer->conf_if && (strcmp(peer->conf_if, ifc->ifp->name) == 0) &&
+ !peer_established(peer->connection) &&
+ !CHECK_FLAG(peer->flags, PEER_FLAG_IFPEER_V6ONLY)) {
+ connection = peer->connection;
+ if (peer_active(connection))
+ BGP_EVENT_ADD(connection, BGP_Stop);
+ BGP_EVENT_ADD(connection, BGP_Start);
+ }
+ }
}
void bgp_connected_delete(struct bgp *bgp, struct connected *ifc)
@@ -964,9 +970,8 @@ static void bgp_show_nexthops_detail(struct vty *vty, struct bgp *bgp,
json_object_object_add(json, "nexthops", json_gates);
}
-static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
- struct bgp_nexthop_cache *bnc, bool specific,
- json_object *json)
+static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp, struct bgp_nexthop_cache *bnc,
+ bool detail, bool uj)
{
char buf[PREFIX2STR_BUFFER];
time_t tbuf;
@@ -977,10 +982,10 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
peer = (struct peer *)bnc->nht_info;
- if (json)
+ if (uj)
json_nexthop = json_object_new_object();
if (bnc->srte_color) {
- if (json)
+ if (uj)
json_object_int_add(json_nexthop, "srteColor",
bnc->srte_color);
else
@@ -988,7 +993,7 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
}
inet_ntop(bnc->prefix.family, &bnc->prefix.u.prefix, buf, sizeof(buf));
if (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_VALID)) {
- if (json) {
+ if (uj) {
json_object_boolean_true_add(json_nexthop, "valid");
json_object_boolean_true_add(json_nexthop, "complete");
json_object_int_add(json_nexthop, "igpMetric",
@@ -1016,7 +1021,7 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
}
bgp_show_nexthops_detail(vty, bgp, bnc, json_nexthop);
} else if (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_EVPN_INCOMPLETE)) {
- if (json) {
+ if (uj) {
json_object_boolean_true_add(json_nexthop, "valid");
json_object_boolean_false_add(json_nexthop, "complete");
json_object_int_add(json_nexthop, "igpMetric",
@@ -1036,7 +1041,7 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
}
bgp_show_nexthops_detail(vty, bgp, bnc, json_nexthop);
} else {
- if (json) {
+ if (uj) {
json_object_boolean_false_add(json_nexthop, "valid");
json_object_boolean_false_add(json_nexthop, "complete");
json_object_int_add(json_nexthop, "pathCount",
@@ -1068,38 +1073,41 @@ static void bgp_show_nexthop(struct vty *vty, struct bgp *bgp,
}
}
tbuf = time(NULL) - (monotime(NULL) - bnc->last_update);
- if (json) {
- if (!specific) {
+ if (uj) {
+ if (detail) {
json_last_update = json_object_new_object();
json_object_int_add(json_last_update, "epoch", tbuf);
json_object_string_add(json_last_update, "string",
- ctime_r(&tbuf, timebuf));
+ time_to_string_json(bnc->last_update, timebuf));
json_object_object_add(json_nexthop, "lastUpdate",
json_last_update);
} else {
json_object_int_add(json_nexthop, "lastUpdate", tbuf);
}
} else {
- vty_out(vty, " Last update: %s", ctime_r(&tbuf, timebuf));
+ vty_out(vty, " Last update: %s", time_to_string(bnc->last_update, timebuf));
}
/* show paths dependent on nexthop, if needed. */
- if (specific)
+ if (detail)
bgp_show_nexthop_paths(vty, bgp, bnc, json_nexthop);
- if (json)
- json_object_object_add(json, buf, json_nexthop);
+
+ if (uj) {
+ vty_out(vty, "\"%s\":", buf);
+ vty_json_no_pretty(vty, json_nexthop);
+ }
}
-static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp,
- bool import_table, json_object *json, afi_t afi,
- bool detail)
+static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp, bool import_table, bool uj,
+ afi_t afi, bool detail)
{
struct bgp_nexthop_cache *bnc;
struct bgp_nexthop_cache_head(*tree)[AFI_MAX];
- json_object *json_afi = NULL;
bool found = false;
+ bool firstafi = true;
+ bool firstnh = true;
- if (!json) {
+ if (!uj) {
if (import_table)
vty_out(vty, "Current BGP import check cache:\n");
else
@@ -1111,34 +1119,42 @@ static void bgp_show_nexthops(struct vty *vty, struct bgp *bgp,
tree = &bgp->nexthop_cache_table;
if (afi == AFI_IP || afi == AFI_IP6) {
- if (json)
- json_afi = json_object_new_object();
+ if (uj)
+ vty_out(vty, "%s:{", (afi == AFI_IP) ? "\"ipv4\"" : "\"ipv6\"");
frr_each (bgp_nexthop_cache, &(*tree)[afi], bnc) {
- bgp_show_nexthop(vty, bgp, bnc, detail, json_afi);
+ if (uj)
+ vty_out(vty, "%s", firstnh ? "" : ",");
+ bgp_show_nexthop(vty, bgp, bnc, detail, uj);
found = true;
+ firstnh = false;
}
- if (found && json)
- json_object_object_add(
- json, (afi == AFI_IP) ? "ipv4" : "ipv6",
- json_afi);
+ if (found && uj)
+ vty_out(vty, "}");
return;
}
for (afi = AFI_IP; afi < AFI_MAX; afi++) {
- if (json && (afi == AFI_IP || afi == AFI_IP6))
- json_afi = json_object_new_object();
- frr_each (bgp_nexthop_cache, &(*tree)[afi], bnc)
- bgp_show_nexthop(vty, bgp, bnc, detail, json_afi);
- if (json && (afi == AFI_IP || afi == AFI_IP6))
- json_object_object_add(
- json, (afi == AFI_IP) ? "ipv4" : "ipv6",
- json_afi);
+ if (afi != AFI_IP && afi != AFI_IP6)
+ continue;
+ if (uj)
+ vty_out(vty, "%s%s:{", firstafi ? "" : ",",
+ (afi == AFI_IP) ? "\"ipv4\"" : "\"ipv6\"");
+ firstafi = false;
+ firstnh = true;
+ frr_each (bgp_nexthop_cache, &(*tree)[afi], bnc) {
+ if (uj)
+ vty_out(vty, "%s", firstnh ? "" : ",");
+ bgp_show_nexthop(vty, bgp, bnc, detail, uj);
+ firstnh = false;
+ }
+
+ if (uj)
+ vty_out(vty, "}");
}
}
-static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name,
- const char *nhopip_str, bool import_table,
- json_object *json, afi_t afi, bool detail)
+static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name, const char *nhopip_str,
+ bool import_table, bool uj, afi_t afi, bool detail)
{
struct bgp *bgp;
@@ -1147,7 +1163,7 @@ static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name,
else
bgp = bgp_get_default();
if (!bgp) {
- if (!json)
+ if (!uj)
vty_out(vty, "%% No such BGP instance exist\n");
return CMD_WARNING;
}
@@ -1157,61 +1173,57 @@ static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name,
struct bgp_nexthop_cache_head (*tree)[AFI_MAX];
struct bgp_nexthop_cache *bnc;
bool found = false;
- json_object *json_afi = NULL;
if (!str2prefix(nhopip_str, &nhop)) {
- if (!json)
+ if (!uj)
vty_out(vty, "nexthop address is malformed\n");
return CMD_WARNING;
}
tree = import_table ? &bgp->import_check_table
: &bgp->nexthop_cache_table;
- if (json)
- json_afi = json_object_new_object();
+ if (uj)
+ vty_out(vty, "%s:{",
+ (family2afi(nhop.family) == AFI_IP) ? "\"ipv4\"" : "\"ipv6\"");
frr_each (bgp_nexthop_cache, &(*tree)[family2afi(nhop.family)],
bnc) {
if (prefix_cmp(&bnc->prefix, &nhop))
continue;
- bgp_show_nexthop(vty, bgp, bnc, true, json_afi);
+ bgp_show_nexthop(vty, bgp, bnc, true, uj);
found = true;
}
- if (json)
- json_object_object_add(
- json,
- (family2afi(nhop.family) == AFI_IP) ? "ipv4"
- : "ipv6",
- json_afi);
- if (!found && !json)
+ if (!found && !uj)
vty_out(vty, "nexthop %s does not have entry\n",
nhopip_str);
+
+ if (uj)
+ vty_out(vty, "}");
} else
- bgp_show_nexthops(vty, bgp, import_table, json, afi, detail);
+ bgp_show_nexthops(vty, bgp, import_table, uj, afi, detail);
return CMD_SUCCESS;
}
-static void bgp_show_all_instances_nexthops_vty(struct vty *vty,
- json_object *json, afi_t afi,
- bool detail)
+static void bgp_show_all_instances_nexthops_vty(struct vty *vty, bool uj, afi_t afi, bool detail)
{
struct listnode *node, *nnode;
struct bgp *bgp;
const char *inst_name;
- json_object *json_instance = NULL;
+ bool firstinst = true;
for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) {
inst_name = (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)
? VRF_DEFAULT_NAME
: bgp->name;
- if (json)
- json_instance = json_object_new_object();
+ if (uj)
+ vty_out(vty, "%s\"%s\":{", firstinst ? "" : ",", inst_name);
+
else
vty_out(vty, "\nInstance %s:\n", inst_name);
- bgp_show_nexthops(vty, bgp, false, json_instance, afi, detail);
-
- if (json)
- json_object_object_add(json, inst_name, json_instance);
+ bgp_show_nexthops(vty, bgp, false, uj, afi, detail);
+ firstinst = false;
+ if (uj)
+ vty_out(vty, "}");
}
}
@@ -1235,20 +1247,18 @@ DEFPY (show_ip_bgp_nexthop,
JSON_STR)
{
int rc = 0;
- json_object *json = NULL;
afi_t afiz = AFI_UNSPEC;
if (uj)
- json = json_object_new_object();
+ vty_out(vty, "{\n");
if (afi)
afiz = bgp_vty_afi_from_str(afi);
- rc = show_ip_bgp_nexthop_table(vty, vrf, nhop_str, false, json, afiz,
- detail);
+ rc = show_ip_bgp_nexthop_table(vty, vrf, nhop_str, false, uj, afiz, detail);
if (uj)
- vty_json(vty, json);
+ vty_out(vty, "}\n");
return rc;
}
@@ -1265,16 +1275,14 @@ DEFPY (show_ip_bgp_import_check,
JSON_STR)
{
int rc = 0;
- json_object *json = NULL;
if (uj)
- json = json_object_new_object();
+ vty_out(vty, "{\n");
- rc = show_ip_bgp_nexthop_table(vty, vrf, NULL, true, json, AFI_UNSPEC,
- detail);
+ rc = show_ip_bgp_nexthop_table(vty, vrf, NULL, true, uj, AFI_UNSPEC, detail);
if (uj)
- vty_json(vty, json);
+ vty_out(vty, "}\n");
return rc;
}
@@ -1292,19 +1300,18 @@ DEFPY (show_ip_bgp_instance_all_nexthop,
"Show detailed information\n"
JSON_STR)
{
- json_object *json = NULL;
afi_t afiz = AFI_UNSPEC;
if (uj)
- json = json_object_new_object();
+ vty_out(vty, "{");
if (afi)
afiz = bgp_vty_afi_from_str(afi);
- bgp_show_all_instances_nexthops_vty(vty, json, afiz, detail);
+ bgp_show_all_instances_nexthops_vty(vty, uj, afiz, detail);
if (uj)
- vty_json(vty, json);
+ vty_out(vty, "}");
return CMD_SUCCESS;
}
diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h
index 6a4a02dcc8..5679c215b1 100644
--- a/bgpd/bgp_nexthop.h
+++ b/bgpd/bgp_nexthop.h
@@ -82,7 +82,7 @@ struct bgp_nexthop_cache {
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
*/
-#define BGP_NEXTHOP_EVPN_INCOMPLETE (1 << 7)
+#define BGP_NEXTHOP_EVPN_INCOMPLETE (1 << 8)
uint32_t srte_color;
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 9b633b7139..ed83757ea3 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -1066,9 +1066,16 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
case AFI_IP6:
p->family = AF_INET6;
if (pi->attr->srv6_l3vpn) {
- IPV6_ADDR_COPY(&(p->u.prefix6),
- &(pi->attr->srv6_l3vpn->sid));
p->prefixlen = IPV6_MAX_BITLEN;
+ if (pi->attr->srv6_l3vpn->transposition_len != 0 &&
+ BGP_PATH_INFO_NUM_LABELS(pi)) {
+ IPV6_ADDR_COPY(&p->u.prefix6, &pi->attr->srv6_l3vpn->sid);
+ transpose_sid(&p->u.prefix6,
+ decode_label(&pi->extra->labels->label[0]),
+ pi->attr->srv6_l3vpn->transposition_offset,
+ pi->attr->srv6_l3vpn->transposition_len);
+ } else
+ IPV6_ADDR_COPY(&(p->u.prefix6), &(pi->attr->srv6_l3vpn->sid));
} else if (is_bgp_static) {
p->u.prefix6 = p_orig->u.prefix6;
p->prefixlen = p_orig->prefixlen;
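transpose_sid() splices bits of the MPLS-encoded identifier into the SID at a given bit offset/length (the SRv6 transposition scheme). A simplified standalone version of that bit splice — it takes the low `len` bits of `val` MSB-first and assumes offset + len stays within the 128-bit SID, which differs slightly from the FRR helper's exact bit convention:

#include <netinet/in.h>
#include <stdint.h>

static void sid_transpose(struct in6_addr *sid, uint32_t val, uint8_t offset,
			  uint8_t len)
{
	for (uint8_t i = 0; i < len; i++) {
		uint8_t dst = offset + i; /* absolute bit position in SID */
		uint8_t mask = (uint8_t)(0x80 >> (dst % 8));

		if ((val >> (len - 1 - i)) & 1)
			sid->s6_addr[dst / 8] |= mask;
		else
			sid->s6_addr[dst / 8] &= (uint8_t)~mask;
	}
}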
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index a76a300c11..c5e390b045 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -2054,7 +2054,7 @@ static int bgp_open_receive(struct peer_connection *connection,
return BGP_Stop;
/* Get sockname. */
- if (bgp_getsockname(peer) < 0) {
+ if (bgp_getsockname(connection) < 0) {
flog_err_sys(EC_LIB_SOCKET,
"%s: bgp_getsockname() failed for peer: %s",
__func__, peer->host);
@@ -2411,7 +2411,7 @@ static int bgp_update_receive(struct peer_connection *connection,
sizeof(peer->rcvd_attr_str));
if (attr_parse_ret == BGP_ATTR_PARSE_WITHDRAW) {
- peer->stat_upd_7606++;
+ peer->stat_pfx_withdraw++;
flog_err(
EC_BGP_UPDATE_RCV,
"%pBP rcvd UPDATE with errors in attr(s)!! Withdrawing route.",
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 72e798a7e2..5ac1d26603 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -78,6 +78,9 @@
#include "bgpd/bgp_route_clippy.c"
+DEFINE_MTYPE_STATIC(BGPD, BGP_EOIU_MARKER_INFO, "BGP EOIU Marker info");
+DEFINE_MTYPE_STATIC(BGPD, BGP_METAQ, "BGP MetaQ");
+
DEFINE_HOOK(bgp_snmp_update_stats,
(struct bgp_dest *rn, struct bgp_path_info *pi, bool added),
(rn, pi, added));
@@ -2587,12 +2590,11 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
if (ret == RMAP_DENYMATCH) {
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
- zlog_debug(
- "%pBP [Update:SEND] %pFX is filtered by route-map '%s'",
- peer, p,
- bgp_path_suppressed(pi)
- ? UNSUPPRESS_MAP_NAME(filter)
- : ROUTE_MAP_OUT_NAME(filter));
+ zlog_debug("%pBP [Update:SEND] %pFX is filtered by route-map (%s) '%s'",
+ peer, p,
+ bgp_path_suppressed(pi) ? "unsuppress-map" : "out",
+ bgp_path_suppressed(pi) ? UNSUPPRESS_MAP_NAME(filter)
+ : ROUTE_MAP_OUT_NAME(filter));
bgp_attr_flush(rmap_path.attr);
return false;
}
@@ -3007,7 +3009,10 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
old_select = NULL;
pi = bgp_dest_get_bgp_path_info(dest);
- while (pi && CHECK_FLAG(pi->flags, BGP_PATH_UNSORTED)) {
+ while (pi && (CHECK_FLAG(pi->flags, BGP_PATH_UNSORTED) ||
+ (pi->peer != bgp->peer_self &&
+ !CHECK_FLAG(pi->peer->sflags, PEER_STATUS_NSF_WAIT) &&
+ !peer_established(pi->peer->connection)))) {
struct bgp_path_info *next = pi->next;
if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED))
@@ -3101,6 +3106,30 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
continue;
}
+ if (first->peer && first->peer != bgp->peer_self &&
+ !CHECK_FLAG(first->peer->sflags, PEER_STATUS_NSF_WAIT) &&
+ !peer_established(first->peer->connection)) {
+ if (debug)
+ zlog_debug("%s: %pBD(%s) pi %p from %s is not in established state",
+ __func__, dest, bgp->name_pretty, first,
+ first->peer->host);
+
+ /*
+ * Peer is not in established state, so we cannot sort
+ * this item yet; hold this one to the side and wait.
+ */
+ if (unsorted_holddown) {
+ first->next = unsorted_holddown;
+ unsorted_holddown->prev = first;
+ unsorted_holddown = first;
+ } else
+ unsorted_holddown = first;
+
+ UNSET_FLAG(first->flags, BGP_PATH_UNSORTED);
+
+ continue;
+ }
+
bgp_path_info_unset_flag(dest, first, BGP_PATH_DMED_CHECK);
worse = NULL;
@@ -3462,14 +3491,6 @@ bool bgp_zebra_has_route_changed(struct bgp_path_info *selected)
return false;
}
-struct bgp_process_queue {
- struct bgp *bgp;
- STAILQ_HEAD(, bgp_dest) pqueue;
-#define BGP_PROCESS_QUEUE_EOIU_MARKER (1 << 0)
- unsigned int flags;
- unsigned int queued;
-};
-
static void bgp_process_evpn_route_injection(struct bgp *bgp, afi_t afi,
safi_t safi, struct bgp_dest *dest,
struct bgp_path_info *new_select,
@@ -4017,43 +4038,286 @@ void bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
&bgp->gr_info[afi][safi].t_route_select);
}
-static wq_item_status bgp_process_wq(struct work_queue *wq, void *data)
+static const char *subqueue2str(enum meta_queue_indexes index)
{
- struct bgp_process_queue *pqnode = data;
- struct bgp *bgp = pqnode->bgp;
- struct bgp_table *table;
- struct bgp_dest *dest;
+ switch (index) {
+ case META_QUEUE_EARLY_ROUTE:
+ return "Early Route";
+ case META_QUEUE_OTHER_ROUTE:
+ return "Other Route";
+ case META_QUEUE_EOIU_MARKER:
+ return "EOIU Marker";
+ }
+
+ return "Unknown";
+}
+
+/*
+ * Process a node from the Early route subqueue.
+ */
+static void process_subq_early_route(struct bgp_dest *dest)
+{
+ struct bgp_table *table = bgp_dest_table(dest);
+
+ if (bgp_debug_bestpath(dest))
+ zlog_debug("%s dequeued from sub-queue %s", bgp_dest_get_prefix_str(dest),
+ subqueue2str(META_QUEUE_EARLY_ROUTE));
+
+ /* note, new DESTs may be added as part of processing */
+ bgp_process_main_one(table->bgp, dest, table->afi, table->safi);
+ bgp_dest_unlock_node(dest);
+ bgp_table_unlock(table);
+}
+
+/*
+ * Process a node from the other subqueue.
+ */
+static void process_subq_other_route(struct bgp_dest *dest)
+{
+ struct bgp_table *table = bgp_dest_table(dest);
+
+ if (bgp_debug_bestpath(dest))
+ zlog_debug("%s dequeued from sub-queue %s", bgp_dest_get_prefix_str(dest),
+ subqueue2str(META_QUEUE_OTHER_ROUTE));
+
+ /* note, new DESTs may be added as part of processing */
+ bgp_process_main_one(table->bgp, dest, table->afi, table->safi);
+ bgp_dest_unlock_node(dest);
+ bgp_table_unlock(table);
+}
+
+/*
+ * Process a node from the eoiu marker subqueue.
+ */
+static void process_eoiu_marker(struct bgp_dest *dest)
+{
+ struct bgp_eoiu_info *info = bgp_dest_get_bgp_eoiu_info(dest);
+
+ if (!info || !info->bgp) {
+ zlog_err("Unable to retrieve BGP instance, can't process EOIU marker");
+ return;
+ }
+
+ if (BGP_DEBUG(update, UPDATE_IN))
+ zlog_debug("EOIU Marker dequeued from sub-queue %s",
+ subqueue2str(META_QUEUE_EOIU_MARKER));
+
+ bgp_process_main_one(info->bgp, NULL, 0, 0);
+}
+
+/*
+ * Examine the specified subqueue; process one entry and return 1 if
+ * there is a node, return 0 otherwise.
+ */
+static unsigned int process_subq(struct bgp_dest_queue *subq, enum meta_queue_indexes qindex)
+{
+ struct bgp_dest *dest = STAILQ_FIRST(subq);
+
+ if (!dest)
+ return 0;
+
+ STAILQ_REMOVE_HEAD(subq, pq);
+ STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
+
+ switch (qindex) {
+ case META_QUEUE_EARLY_ROUTE:
+ process_subq_early_route(dest);
+ break;
+ case META_QUEUE_OTHER_ROUTE:
+ process_subq_other_route(dest);
+ break;
+ case META_QUEUE_EOIU_MARKER:
+ process_eoiu_marker(dest);
+ }
+
+ return 1;
+}
+
+/* Dispatch the meta queue by picking and processing the next node from
+ * the non-empty sub-queue with the lowest priority. wq is equal to
+ * bgp->process_queue and data points to the meta queue structure.
+ */
+static wq_item_status meta_queue_process(struct work_queue *dummy, void *data)
+{
+ struct meta_queue *mq = data;
+ uint32_t i;
+
+ for (i = 0; i < MQ_SIZE; i++)
+ if (process_subq(mq->subq[i], i)) {
+ mq->size--;
+ break;
+ }
+ return mq->size ? WQ_REQUEUE : WQ_SUCCESS;
+}
+
+static int early_route_meta_queue_add(struct meta_queue *mq, void *data)
+{
+ uint8_t qindex = META_QUEUE_EARLY_ROUTE;
+ struct bgp_dest *dest = data;
+
+ if (bgp_debug_bestpath(dest))
+ zlog_debug("%s queued into sub-queue %s", bgp_dest_get_prefix_str(dest),
+ subqueue2str(qindex));
+
+ assert(STAILQ_NEXT(dest, pq) == NULL);
+ STAILQ_INSERT_TAIL(mq->subq[qindex], dest, pq);
+ mq->size++;
+ return 0;
+}
+
+static int other_route_meta_queue_add(struct meta_queue *mq, void *data)
+{
+ uint8_t qindex = META_QUEUE_OTHER_ROUTE;
+ struct bgp_dest *dest = data;
+
+ if (bgp_debug_bestpath(dest))
+ zlog_debug("%s queued into sub-queue %s", bgp_dest_get_prefix_str(dest),
+ subqueue2str(qindex));
+
+ assert(STAILQ_NEXT(dest, pq) == NULL);
+ STAILQ_INSERT_TAIL(mq->subq[qindex], dest, pq);
+ mq->size++;
+ return 0;
+}
+
+static int eoiu_marker_meta_queue_add(struct meta_queue *mq, void *data)
+{
+ uint8_t qindex = META_QUEUE_EOIU_MARKER;
+ struct bgp_dest *dest = data;
+
+ if (BGP_DEBUG(update, UPDATE_IN))
+ zlog_debug("EOIU Marker queued into sub-queue %s", subqueue2str(qindex));
+
+ assert(STAILQ_NEXT(dest, pq) == NULL);
+ STAILQ_INSERT_TAIL(mq->subq[qindex], dest, pq);
+ mq->size++;
+ return 0;
+}
+
+static int mq_add_handler(struct bgp *bgp, void *data,
+ int (*mq_add_func)(struct meta_queue *mq, void *data))
+{
+ if (bgp->process_queue == NULL) {
+ zlog_err("%s: work_queue does not exist!", __func__);
+ return -1;
+ }
+
+ if (work_queue_empty(bgp->process_queue))
+ work_queue_add(bgp->process_queue, bgp->mq);
+
+ return mq_add_func(bgp->mq, data);
+}
+
+int early_route_process(struct bgp *bgp, struct bgp_dest *dest)
+{
+ if (!dest) {
+ zlog_err("%s: early route dest is NULL!", __func__);
+ return -1;
+ }
+
+ return mq_add_handler(bgp, dest, early_route_meta_queue_add);
+}
+
+int other_route_process(struct bgp *bgp, struct bgp_dest *dest)
+{
+ if (!dest) {
+ zlog_err("%s: other route dest is NULL!", __func__);
+ return -1;
+ }
+
+ return mq_add_handler(bgp, dest, other_route_meta_queue_add);
+}
+
+int eoiu_marker_process(struct bgp *bgp, struct bgp_dest *dest)
+{
+ if (!dest) {
+ zlog_err("%s: eoiu marker dest is NULL!", __func__);
+ return -1;
+ }
+
+ return mq_add_handler(bgp, dest, eoiu_marker_meta_queue_add);
+}
+
+/* Create a new meta queue.
+ * A destructor function doesn't seem to be necessary here.
+ */
+static struct meta_queue *meta_queue_new(void)
+{
+ struct meta_queue *new;
+ uint32_t i;
+
+ new = XCALLOC(MTYPE_BGP_METAQ, sizeof(struct meta_queue));
- /* eoiu marker */
- if (CHECK_FLAG(pqnode->flags, BGP_PROCESS_QUEUE_EOIU_MARKER)) {
- bgp_process_main_one(bgp, NULL, 0, 0);
- /* should always have dedicated wq call */
- assert(STAILQ_FIRST(&pqnode->pqueue) == NULL);
- return WQ_SUCCESS;
+ for (i = 0; i < MQ_SIZE; i++) {
+ new->subq[i] = XCALLOC(MTYPE_BGP_METAQ, sizeof(*(new->subq[i])));
+ assert(new->subq[i]);
+ STAILQ_INIT(new->subq[i]);
}
- while (!STAILQ_EMPTY(&pqnode->pqueue)) {
- dest = STAILQ_FIRST(&pqnode->pqueue);
- STAILQ_REMOVE_HEAD(&pqnode->pqueue, pq);
+ return new;
+}
+
+/* Clean up the early meta-queue list */
+static void early_meta_queue_free(struct meta_queue *mq, struct bgp_dest_queue *l)
+{
+ struct bgp_dest *dest;
+
+ while (!STAILQ_EMPTY(l)) {
+ dest = STAILQ_FIRST(l);
+ STAILQ_REMOVE_HEAD(l, pq);
STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
- table = bgp_dest_table(dest);
- /* note, new DESTs may be added as part of processing */
- bgp_process_main_one(bgp, dest, table->afi, table->safi);
+ mq->size--;
+ }
+}
- bgp_dest_unlock_node(dest);
- bgp_table_unlock(table);
+/* Clean up the other meta-queue list */
+static void other_meta_queue_free(struct meta_queue *mq, struct bgp_dest_queue *l)
+{
+ struct bgp_dest *dest;
+
+ while (!STAILQ_EMPTY(l)) {
+ dest = STAILQ_FIRST(l);
+ STAILQ_REMOVE_HEAD(l, pq);
+ STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
+ mq->size--;
}
+}
- return WQ_SUCCESS;
+/* Clean up the eoiu marker meta-queue list */
+static void eoiu_marker_queue_free(struct meta_queue *mq, struct bgp_dest_queue *l)
+{
+ struct bgp_dest *dest;
+
+ while (!STAILQ_EMPTY(l)) {
+ dest = STAILQ_FIRST(l);
+ XFREE(MTYPE_BGP_EOIU_MARKER_INFO, dest->info);
+ STAILQ_REMOVE_HEAD(l, pq);
+ STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
+ mq->size--;
+ }
}
-static void bgp_processq_del(struct work_queue *wq, void *data)
+void bgp_meta_queue_free(struct meta_queue *mq)
{
- struct bgp_process_queue *pqnode = data;
+ enum meta_queue_indexes i;
- bgp_unlock(pqnode->bgp);
+ for (i = 0; i < MQ_SIZE; i++) {
+ switch (i) {
+ case META_QUEUE_EARLY_ROUTE:
+ early_meta_queue_free(mq, mq->subq[i]);
+ break;
+ case META_QUEUE_OTHER_ROUTE:
+ other_meta_queue_free(mq, mq->subq[i]);
+ break;
+ case META_QUEUE_EOIU_MARKER:
+ eoiu_marker_queue_free(mq, mq->subq[i]);
+ break;
+ }
+
+ XFREE(MTYPE_BGP_METAQ, mq->subq[i]);
+ }
- XFREE(MTYPE_BGP_PROCESS_QUEUE, pqnode);
+ XFREE(MTYPE_BGP_METAQ, mq);
}
void bgp_process_queue_init(struct bgp *bgp)
@@ -4065,37 +4329,19 @@ void bgp_process_queue_init(struct bgp *bgp)
bgp->process_queue = work_queue_new(bm->master, name);
}
- bgp->process_queue->spec.workfunc = &bgp_process_wq;
- bgp->process_queue->spec.del_item_data = &bgp_processq_del;
+ bgp->process_queue->spec.workfunc = &meta_queue_process;
bgp->process_queue->spec.max_retries = 0;
bgp->process_queue->spec.hold = 50;
/* Use a higher yield value of 50ms for main queue processing */
bgp->process_queue->spec.yield = 50 * 1000L;
-}
-
-static struct bgp_process_queue *bgp_processq_alloc(struct bgp *bgp)
-{
- struct bgp_process_queue *pqnode;
- pqnode = XCALLOC(MTYPE_BGP_PROCESS_QUEUE,
- sizeof(struct bgp_process_queue));
-
- /* unlocked in bgp_processq_del */
- pqnode->bgp = bgp_lock(bgp);
- STAILQ_INIT(&pqnode->pqueue);
-
- return pqnode;
+ bgp->mq = meta_queue_new();
}
static void bgp_process_internal(struct bgp *bgp, struct bgp_dest *dest,
struct bgp_path_info *pi, afi_t afi,
safi_t safi, bool early_process)
{
-#define ARBITRARY_PROCESS_QLEN 10000
- struct work_queue *wq = bgp->process_queue;
- struct bgp_process_queue *pqnode;
- int pqnode_reuse = 0;
-
/*
* Indicate that *this* pi is in an unsorted
* situation, even if the node is already
@@ -4145,39 +4391,16 @@ static void bgp_process_internal(struct bgp *bgp, struct bgp_dest *dest,
return;
}
- if (wq == NULL)
- return;
-
- /* Add route nodes to an existing work queue item until reaching the
- limit only if is from the same BGP view and it's not an EOIU marker
- */
- if (work_queue_item_count(wq)) {
- struct work_queue_item *item = work_queue_last_item(wq);
- pqnode = item->data;
-
- if (CHECK_FLAG(pqnode->flags, BGP_PROCESS_QUEUE_EOIU_MARKER) ||
- (pqnode->queued >= ARBITRARY_PROCESS_QLEN && !early_process))
- pqnode = bgp_processq_alloc(bgp);
- else
- pqnode_reuse = 1;
- } else
- pqnode = bgp_processq_alloc(bgp);
- /* all unlocked in bgp_process_wq */
+ /* all unlocked in process_subq_xxx functions */
bgp_table_lock(bgp_dest_table(dest));
SET_FLAG(dest->flags, BGP_NODE_PROCESS_SCHEDULED);
bgp_dest_lock_node(dest);
- /* can't be enqueued twice */
- assert(STAILQ_NEXT(dest, pq) == NULL);
if (early_process)
- STAILQ_INSERT_HEAD(&pqnode->pqueue, dest, pq);
+ early_route_process(bgp, dest);
else
- STAILQ_INSERT_TAIL(&pqnode->pqueue, dest, pq);
- pqnode->queued++;
-
- if (!pqnode_reuse)
- work_queue_add(wq, pqnode);
+ other_route_process(bgp, dest);
return;
}
@@ -4196,15 +4419,18 @@ void bgp_process_early(struct bgp *bgp, struct bgp_dest *dest,
void bgp_add_eoiu_mark(struct bgp *bgp)
{
- struct bgp_process_queue *pqnode;
-
- if (bgp->process_queue == NULL)
- return;
+ /*
+ * Create a dummy dest, as the meta queue expects all of its elements
+ * to be dests.
+ */
+ struct bgp_dest *dummy_dest = XCALLOC(MTYPE_BGP_NODE, sizeof(struct bgp_dest));
- pqnode = bgp_processq_alloc(bgp);
+ struct bgp_eoiu_info *eoiu_info = XCALLOC(MTYPE_BGP_EOIU_MARKER_INFO,
+ sizeof(struct bgp_eoiu_info));
+ eoiu_info->bgp = bgp;
- SET_FLAG(pqnode->flags, BGP_PROCESS_QUEUE_EOIU_MARKER);
- work_queue_add(bgp->process_queue, pqnode);
+ bgp_dest_set_bgp_eoiu_info(dummy_dest, eoiu_info);
+ eoiu_marker_process(bgp, dummy_dest);
}
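Because the meta queue can only carry struct bgp_dest pointers, the EOIU marker travels as a heap-allocated dummy dest whose info field points at a struct bgp_eoiu_info holding the owning instance; eoiu_marker_queue_free() above releases that info on teardown. A hedged sketch of the same carry-context-in-a-void-pointer pattern (toy names, standard allocator in place of XCALLOC/XFREE, OOM handling omitted):

#include <stdlib.h>

struct demo_dest {
	void *info; /* stands in for bgp_dest->info */
};

struct demo_marker_info {
	const char *instance_name; /* stands in for the struct bgp pointer */
};

static struct demo_dest *demo_make_marker(const char *name)
{
	struct demo_dest *d = calloc(1, sizeof(*d));
	struct demo_marker_info *mi = calloc(1, sizeof(*mi));

	mi->instance_name = name;
	d->info = mi; /* like bgp_dest_set_bgp_eoiu_info() */
	return d;
}

static void demo_free_marker(struct demo_dest *d)
{
	free(d->info); /* like XFREE(MTYPE_BGP_EOIU_MARKER_INFO, dest->info) */
	free(d);
}

int main(void)
{
	struct demo_dest *m = demo_make_marker("VRF default");

	demo_free_marker(m);
	return 0;
}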
static void bgp_maximum_prefix_restart_timer(struct event *thread)
@@ -7411,7 +7637,7 @@ static void bgp_purge_af_static_redist_routes(struct bgp *bgp, afi_t afi,
{
struct bgp_table *table;
struct bgp_dest *dest;
- struct bgp_path_info *pi;
+ struct bgp_path_info *pi, *next;
/* Do not install the aggregate route if BGP is in the
* process of termination.
@@ -7422,7 +7648,8 @@ static void bgp_purge_af_static_redist_routes(struct bgp *bgp, afi_t afi,
table = bgp->rib[afi][safi];
for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
- for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(dest); (pi != NULL) && (next = pi->next, 1);
+ pi = next) {
if (pi->peer == bgp->peer_self
&& ((pi->type == ZEBRA_ROUTE_BGP
&& pi->sub_type == BGP_ROUTE_STATIC)
@@ -7922,7 +8149,7 @@ void bgp_aggregate_toggle_suppressed(struct bgp_aggregate *aggregate,
struct bgp_table *table = bgp->rib[afi][safi];
const struct prefix *dest_p;
struct bgp_dest *dest, *top;
- struct bgp_path_info *pi;
+ struct bgp_path_info *pi, *next;
/* We've found a different MED we must revert any suppressed routes. */
top = bgp_node_get(table, p);
@@ -7932,7 +8159,8 @@ void bgp_aggregate_toggle_suppressed(struct bgp_aggregate *aggregate,
if (dest_p->prefixlen <= p->prefixlen)
continue;
- for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(dest); (pi != NULL) && (next = pi->next, 1);
+ pi = next) {
if (BGP_PATH_HOLDDOWN(pi))
continue;
if (pi->sub_type == BGP_ROUTE_AGGREGATE)
@@ -8007,7 +8235,7 @@ bool bgp_aggregate_route(struct bgp *bgp, const struct prefix *p, afi_t afi,
struct community *community = NULL;
struct ecommunity *ecommunity = NULL;
struct lcommunity *lcommunity = NULL;
- struct bgp_path_info *pi;
+ struct bgp_path_info *pi, *next;
uint8_t atomic_aggregate = 0;
/* If the bgp instance is being deleted or self peer is deleted
@@ -8057,7 +8285,8 @@ bool bgp_aggregate_route(struct bgp *bgp, const struct prefix *p, afi_t afi,
if (!bgp_check_advertise(bgp, dest, safi))
continue;
- for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(dest); (pi != NULL) && (next = pi->next, 1);
+ pi = next) {
if (BGP_PATH_HOLDDOWN(pi))
continue;
@@ -8214,7 +8443,7 @@ void bgp_aggregate_delete(struct bgp *bgp, const struct prefix *p, afi_t afi,
struct bgp_table *table;
struct bgp_dest *top;
struct bgp_dest *dest;
- struct bgp_path_info *pi;
+ struct bgp_path_info *pi, *next;
table = bgp->rib[afi][safi];
@@ -8227,7 +8456,8 @@ void bgp_aggregate_delete(struct bgp *bgp, const struct prefix *p, afi_t afi,
if (dest_p->prefixlen <= p->prefixlen)
continue;
- for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
+ for (pi = bgp_dest_get_bgp_path_info(dest); (pi != NULL) && (next = pi->next, 1);
+ pi = next) {
if (BGP_PATH_HOLDDOWN(pi))
continue;
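The rewritten loop headers in these hunks, (pi != NULL) && (next = pi->next, 1), capture each path's successor before the body runs; the comma expression always yields 1 when pi is non-NULL, so the test reduces to the NULL check while next is saved as a side effect. That makes it safe for the body to unlink or free pi. A minimal sketch of the idiom on a toy list:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *n, *next;

	/* Build the list 3 -> 2 -> 1. */
	for (int i = 1; i <= 3; i++) {
		n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	/* Same shape as the diff's loops: next is captured before the body
	 * runs, so freeing n mid-walk cannot break the iteration. */
	for (n = head; (n != NULL) && (next = n->next, 1); n = next) {
		printf("freeing %d\n", n->val);
		free(n);
	}
	return 0;
}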
@@ -9331,9 +9561,18 @@ static void route_vty_short_status_out(struct vty *vty,
const struct prefix *p,
json_object *json_path)
{
- enum rpki_states rpki_state = RPKI_NOT_BEING_USED;
+ enum rpki_states rpki_state;
+
+ /* RPKI validation state */
+ rpki_state = hook_call(bgp_rpki_prefix_status, path->peer, path->attr, p);
if (json_path) {
+ if (rpki_state == RPKI_VALID)
+ json_object_boolean_true_add(json_path, "rpkiValid");
+ else if (rpki_state == RPKI_INVALID)
+ json_object_boolean_true_add(json_path, "rpkiInvalid");
+ else if (rpki_state == RPKI_NOTFOUND)
+ json_object_boolean_true_add(json_path, "rpkiNotFound");
/* Route status display. */
if (CHECK_FLAG(path->flags, BGP_PATH_REMOVED))
@@ -9381,10 +9620,6 @@ static void route_vty_short_status_out(struct vty *vty,
return;
}
- /* RPKI validation state */
- rpki_state =
- hook_call(bgp_rpki_prefix_status, path->peer, path->attr, p);
-
if (rpki_state == RPKI_VALID)
vty_out(vty, "V");
else if (rpki_state == RPKI_INVALID)
@@ -10522,14 +10757,13 @@ static void route_vty_out_detail_es_info(struct vty *vty,
}
void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
- const struct prefix *p, struct bgp_path_info *path,
- afi_t afi, safi_t safi,
- enum rpki_states rpki_curr_state,
- json_object *json_paths)
+ const struct prefix *p, struct bgp_path_info *path, afi_t afi,
+ safi_t safi, enum rpki_states rpki_curr_state, json_object *json_paths,
+ struct attr *pattr)
{
char buf[INET6_ADDRSTRLEN];
char vni_buf[30] = {};
- struct attr *attr = path->attr;
+ struct attr *attr = pattr ? pattr : path->attr;
time_t tbuf;
char timebuf[32];
json_object *json_bestpath = NULL;
@@ -11254,6 +11488,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
json_path, "community",
bgp_attr_get_community(attr)->json);
} else {
+ if (!bgp_attr_get_community(attr)->str)
+ community_str(bgp_attr_get_community(attr), true, true);
vty_out(vty, " Community: %s\n",
bgp_attr_get_community(attr)->str);
}
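The new guards in this hunk re-render an attribute's cached string form on demand: ->str can legitimately be NULL when the attribute has only ever been emitted as JSON, so the plain-text path calls community_str()/ecommunity_str()/lcommunity_str() before printing. A generic sketch of that render-once-then-cache pattern (toy type; the real signatures live in bgpd's community headers):

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the lazy ->str pattern: render once, cache, reuse. */
struct demo_community {
	unsigned int val;
	char *str; /* NULL until first rendered, like the attr's ->str */
};

static const char *demo_community_str(struct demo_community *c)
{
	if (!c->str) { /* mirrors: if (!...->str) community_str(...) */
		c->str = malloc(16);
		snprintf(c->str, 16, "%u:%u", c->val >> 16, c->val & 0xffff);
	}
	return c->str;
}

int main(void)
{
	struct demo_community c = { .val = (65000u << 16) | 100, .str = NULL };

	printf("Community: %s\n", demo_community_str(&c)); /* renders */
	printf("Community: %s\n", demo_community_str(&c)); /* cached */
	free(c.str);
	return 0;
}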
@@ -11261,6 +11497,9 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
/* Line 5 display Extended-community */
if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) {
+ if (!bgp_attr_get_ecommunity(attr)->str)
+ ecommunity_str(bgp_attr_get_ecommunity(attr));
+
if (json_paths) {
json_ext_community = json_object_new_object();
json_object_string_add(
@@ -11275,6 +11514,9 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
}
if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_IPV6_EXT_COMMUNITIES))) {
+ if (!bgp_attr_get_ipv6_ecommunity(attr)->str)
+ ecommunity_str(bgp_attr_get_ipv6_ecommunity(attr));
+
if (json_paths) {
json_ext_ipv6_community = json_object_new_object();
json_object_string_add(json_ext_ipv6_community, "string",
@@ -11300,6 +11542,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
json_path, "largeCommunity",
bgp_attr_get_lcommunity(attr)->json);
} else {
+ if (!bgp_attr_get_lcommunity(attr)->str)
+ lcommunity_str(bgp_attr_get_lcommunity(attr), true, true);
vty_out(vty, " Large Community: %s\n",
bgp_attr_get_lcommunity(attr)->str);
}
@@ -11482,11 +11726,11 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
json_last_update = json_object_new_object();
json_object_int_add(json_last_update, "epoch", tbuf);
json_object_string_add(json_last_update, "string",
- ctime_r(&tbuf, timebuf));
+ time_to_string_json(path->uptime, timebuf));
json_object_object_add(json_path, "lastUpdate",
json_last_update);
} else
- vty_out(vty, " Last update: %s", ctime_r(&tbuf, timebuf));
+ vty_out(vty, " Last update: %s", time_to_string(path->uptime, timebuf));
/* Line 10 display PMSI tunnel attribute, if present */
if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL))) {
@@ -11731,14 +11975,13 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t sa
continue;
}
- if (type == bgp_show_type_rpki) {
- if (dest_p->family == AF_INET
- || dest_p->family == AF_INET6)
- rpki_curr_state = hook_call(
- bgp_rpki_prefix_status,
- pi->peer, pi->attr, dest_p);
- if (rpki_target_state != RPKI_NOT_BEING_USED
- && rpki_curr_state != rpki_target_state)
+ if ((dest_p->family == AF_INET || dest_p->family == AF_INET6) &&
+ (detail_routes || detail_json || type == bgp_show_type_rpki)) {
+ rpki_curr_state = hook_call(bgp_rpki_prefix_status, pi->peer,
+ pi->attr, dest_p);
+ if (type == bgp_show_type_rpki &&
+ rpki_target_state != RPKI_NOT_BEING_USED &&
+ rpki_curr_state != rpki_target_state)
continue;
}
@@ -11967,11 +12210,9 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t sa
prd, table->afi, safi,
NULL, false, false);
- route_vty_out_detail(
- vty, bgp, dest, dest_p, pi,
- family2afi(dest_p->family),
- safi, RPKI_NOT_BEING_USED,
- json_paths);
+ route_vty_out_detail(vty, bgp, dest, dest_p, pi,
+ family2afi(dest_p->family), safi,
+ rpki_curr_state, json_paths, NULL);
} else {
route_vty_out(vty, dest_p, pi, display,
safi, json_paths, wide);
@@ -12084,8 +12325,13 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t sa
}
if (is_last) {
unsigned long i;
- for (i = 0; i < *json_header_depth; ++i)
+ for (i = 0; i < *json_header_depth; ++i) {
vty_out(vty, " } ");
+ /* Put this information before closing the last `}` */
+ if (i == *json_header_depth - 2)
+ vty_out(vty, ", \"totalRoutes\": %ld, \"totalPaths\": %ld",
+ output_count, total_count);
+ }
if (!all)
vty_out(vty, "\n");
}
@@ -12318,8 +12564,7 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp,
} else {
if (incremental_print) {
vty_out(vty, "\"prefix\": \"%pFX\",\n", p);
- vty_out(vty, "\"version\": \"%" PRIu64 "\",",
- dest->version);
+ vty_out(vty, "\"version\": %" PRIu64 ",", dest->version);
} else {
json_object_string_addf(json, "prefix", "%pFX",
p);
@@ -12482,11 +12727,10 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp,
}
}
-static void bgp_show_path_info(const struct prefix_rd *pfx_rd,
- struct bgp_dest *bgp_node, struct vty *vty,
- struct bgp *bgp, afi_t afi, safi_t safi,
- json_object *json, enum bgp_path_type pathtype,
- int *display, enum rpki_states rpki_target_state)
+static void bgp_show_path_info(const struct prefix_rd *pfx_rd, struct bgp_dest *bgp_node,
+ struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
+ json_object *json, enum bgp_path_type pathtype, int *display,
+ enum rpki_states rpki_target_state, struct attr *attr)
{
struct bgp_path_info *pi;
int header = 1;
@@ -12529,10 +12773,8 @@ static void bgp_show_path_info(const struct prefix_rd *pfx_rd,
|| (pathtype == BGP_PATH_SHOW_MULTIPATH
&& (CHECK_FLAG(pi->flags, BGP_PATH_MULTIPATH)
|| CHECK_FLAG(pi->flags, BGP_PATH_SELECTED))))
- route_vty_out_detail(vty, bgp, bgp_node,
- bgp_dest_get_prefix(bgp_node), pi,
- afi, safi, rpki_curr_state,
- json_paths);
+ route_vty_out_detail(vty, bgp, bgp_node, bgp_dest_get_prefix(bgp_node), pi,
+ afi, safi, rpki_curr_state, json_paths, attr);
}
if (json && json_paths) {
@@ -12619,9 +12861,8 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp,
continue;
}
- bgp_show_path_info((struct prefix_rd *)dest_p, rm, vty,
- bgp, afi, safi, json, pathtype,
- &display, rpki_target_state);
+ bgp_show_path_info((struct prefix_rd *)dest_p, rm, vty, bgp, afi, safi,
+ json, pathtype, &display, rpki_target_state, NULL);
bgp_dest_unlock_node(rm);
}
@@ -12680,9 +12921,8 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp,
rm = longest_pfx;
bgp_dest_lock_node(rm);
- bgp_show_path_info((struct prefix_rd *)dest_p, rm, vty,
- bgp, afi, safi, json, pathtype,
- &display, rpki_target_state);
+ bgp_show_path_info((struct prefix_rd *)dest_p, rm, vty, bgp, afi, safi,
+ json, pathtype, &display, rpki_target_state, NULL);
bgp_dest_unlock_node(rm);
}
@@ -12708,9 +12948,8 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp,
const struct prefix *dest_p = bgp_dest_get_prefix(dest);
if (!prefix_check
|| dest_p->prefixlen == match.prefixlen) {
- bgp_show_path_info(NULL, dest, vty, bgp, afi,
- safi, json, pathtype,
- &display, rpki_target_state);
+ bgp_show_path_info(NULL, dest, vty, bgp, afi, safi, json, pathtype,
+ &display, rpki_target_state, NULL);
}
bgp_dest_unlock_node(dest);
@@ -14604,10 +14843,8 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
if (use_json)
json_net = json_object_new_object();
- bgp_show_path_info(NULL /* prefix_rd */, dest, vty, bgp,
- afi, safi, json_net,
- BGP_PATH_SHOW_ALL, &display,
- RPKI_NOT_BEING_USED);
+ bgp_show_path_info(NULL /* prefix_rd */, dest, vty, bgp, afi, safi, json_net,
+ BGP_PATH_SHOW_ALL, &display, RPKI_NOT_BEING_USED, NULL);
if (use_json)
json_object_object_addf(json_ar, json_net,
"%pFX", rn_p);
@@ -14741,11 +14978,9 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
pass_in = &buildit;
} else
pass_in = dest;
- bgp_show_path_info(
- NULL, pass_in, vty, bgp, afi,
- safi, json_net,
- BGP_PATH_SHOW_ALL, &display,
- RPKI_NOT_BEING_USED);
+ bgp_show_path_info(NULL, pass_in, vty, bgp, afi, safi,
+ json_net, BGP_PATH_SHOW_ALL, &display,
+ RPKI_NOT_BEING_USED, NULL);
if (use_json)
json_object_object_addf(
json_ar, json_net,
@@ -14771,9 +15006,8 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
bgp_dest_get_prefix(dest);
attr = *adj->attr;
- ret = bgp_output_modifier(
- peer, rn_p, &attr, afi, safi,
- rmap_name);
+ ret = bgp_output_modifier(peer, rn_p, &attr, afi, safi,
+ rmap_name);
if (ret == RMAP_DENY) {
(*filtered_count)++;
@@ -14797,7 +15031,8 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
json_net = json_object_new_object();
bgp_show_path_info(NULL, dest, vty, bgp, afi, safi,
json_net, BGP_PATH_SHOW_ALL,
- &display, RPKI_NOT_BEING_USED);
+ &display, RPKI_NOT_BEING_USED,
+ adj->attr);
if (use_json)
json_object_object_addf(json_ar, json_net,
"%pFX", rn_p);
@@ -14810,7 +15045,7 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
*/
if (use_json) {
route_vty_out_tmp(vty, bgp, dest, rn_p,
- &attr, safi, use_json,
+ adj->attr, safi, use_json,
json_ar, wide);
} else {
for (bpi = bgp_dest_get_bgp_path_info(dest);
@@ -14843,11 +15078,9 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
if (use_json)
json_net =
json_object_new_object();
- bgp_show_path_info(
- NULL /* prefix_rd */, dest, vty,
- bgp, afi, safi, json_net,
- BGP_PATH_SHOW_BESTPATH,
- &display, RPKI_NOT_BEING_USED);
+ bgp_show_path_info(NULL /* prefix_rd */, dest, vty, bgp, afi,
+ safi, json_net, BGP_PATH_SHOW_BESTPATH,
+ &display, RPKI_NOT_BEING_USED, NULL);
if (use_json)
json_object_object_addf(
json_ar, json_net,
@@ -14873,6 +15106,8 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi,
json_object *json = NULL;
json_object *json_ar = NULL;
bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON);
+ bool first = true;
+ struct update_subgroup *subgrp;
/* Init BGP headers here so they're only displayed once
* even if 'table' is 2-tier (MPLS_VPN, ENCAP, EVPN).
@@ -14941,6 +15176,28 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi,
else
table = bgp->rib[afi][safi];
+ subgrp = peer_subgroup(peer, afi, safi);
+ if (use_json) {
+ if (type == bgp_show_adj_route_advertised || type == bgp_show_adj_route_received) {
+ if (header1) {
+ int version = table ? table->version : 0;
+ vty_out(vty, "\"bgpTableVersion\":%d", version);
+ vty_out(vty, ",\"bgpLocalRouterId\":\"%pI4\"", &bgp->router_id);
+ vty_out(vty, ",\"defaultLocPrf\":%u", bgp->default_local_pref);
+ vty_out(vty, ",\"localAS\":%u", bgp->as);
+ if (type == bgp_show_adj_route_advertised && subgrp &&
+ CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
+ vty_out(vty, ",\"bgpOriginatingDefaultNetwork\":\"%s\"",
+ (afi == AFI_IP) ? "0.0.0.0/0" : "::/0");
+ }
+
+ if (type == bgp_show_adj_route_advertised)
+ vty_out(vty, ",\"advertisedRoutes\": ");
+ if (type == bgp_show_adj_route_received)
+ vty_out(vty, ",\"receivedRoutes\": ");
+ }
+ }
+
if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP)
|| (safi == SAFI_EVPN)) {
@@ -14959,6 +15216,7 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi,
json_routes = json_object_new_object();
const struct prefix_rd *prd;
+
prd = (const struct prefix_rd *)bgp_dest_get_prefix(
dest);
@@ -14972,34 +15230,56 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi,
&filtered_count_per_rd);
/* Don't include an empty RD in the output! */
- if (json_routes && (output_count_per_rd > 0))
- json_object_object_add(json_ar, rd_str,
- json_routes);
+ if (json_routes && (output_count_per_rd > 0) && use_json) {
+ if (type == bgp_show_adj_route_advertised ||
+ type == bgp_show_adj_route_received) {
+ if (first) {
+ vty_out(vty, "\"%s\":", rd_str);
+ first = false;
+ } else {
+ vty_out(vty, ",\"%s\":", rd_str);
+ }
+ vty_json_no_pretty(vty, json_routes);
+ } else {
+ json_object_object_add(json_ar, rd_str, json_routes);
+ }
+ }
output_count += output_count_per_rd;
filtered_count += filtered_count_per_rd;
}
- } else
+ } else {
show_adj_route(vty, peer, table, afi, safi, type, rmap_name,
json, json_ar, show_flags, &header1, &header2,
rd_str, match, &output_count, &filtered_count);
+ if (use_json) {
+ if (type == bgp_show_adj_route_advertised ||
+ type == bgp_show_adj_route_received) {
+ vty_json_no_pretty(vty, json_ar);
+ }
+ }
+ }
+
if (use_json) {
- if (type == bgp_show_adj_route_advertised)
- json_object_object_add(json, "advertisedRoutes",
- json_ar);
- else
+ if (type == bgp_show_adj_route_advertised || type == bgp_show_adj_route_received) {
+ vty_out(vty, ",\"totalPrefixCounter\":%lu", output_count);
+ vty_out(vty, ",\"filteredPrefixCounter\":%lu", filtered_count);
+ json_object_free(json);
+ } else {
+ /* for the bgp_show_adj_route_filtered & bgp_show_adj_route_bestpath types */
json_object_object_add(json, "receivedRoutes", json_ar);
- json_object_int_add(json, "totalPrefixCounter", output_count);
- json_object_int_add(json, "filteredPrefixCounter",
- filtered_count);
-
- /*
- * This is an extremely expensive operation at scale
- * and non-pretty reduces memory footprint significantly.
- */
- vty_json_no_pretty(vty, json);
- } else if (output_count > 0) {
+ json_object_int_add(json, "totalPrefixCounter", output_count);
+ json_object_int_add(json, "filteredPrefixCounter", filtered_count);
+ }
+
+ /*
+ * This is an extremely expensive operation at scale
+ * and non-pretty reduces memory footprint significantly.
+ */
+ if ((type != bgp_show_adj_route_advertised) && (type != bgp_show_adj_route_received))
+ vty_json_no_pretty(vty, json);
+ } else if (output_count > 0) {
if (!match && filtered_count > 0)
vty_out(vty,
"\nTotal number of prefixes %ld (%ld filtered)\n",
@@ -15102,6 +15382,7 @@ DEFPY(show_ip_bgp_instance_neighbor_advertised_route,
uint16_t show_flags = 0;
struct listnode *node;
struct bgp *abgp;
+ int ret;
if (detail || prefix_str)
SET_FLAG(show_flags, BGP_SHOW_OPT_ROUTES_DETAIL);
@@ -15143,9 +15424,22 @@ DEFPY(show_ip_bgp_instance_neighbor_advertised_route,
else if (argv_find(argv, argc, "filtered-routes", &idx))
type = bgp_show_adj_route_filtered;
- if (!all)
- return peer_adj_routes(vty, peer, afi, safi, type, route_map,
- prefix_str ? prefix : NULL, show_flags);
+ if (!all) {
+ if (uj && (type == bgp_show_adj_route_advertised ||
+ type == bgp_show_adj_route_received))
+ vty_out(vty, "{\n");
+
+ ret = peer_adj_routes(vty, peer, afi, safi, type, route_map,
+ prefix_str ? prefix : NULL, show_flags);
+ if (uj && (type == bgp_show_adj_route_advertised ||
+ type == bgp_show_adj_route_received))
+ vty_out(vty, "}\n");
+
+ return ret;
+ }
+
if (uj)
vty_out(vty, "{\n");
@@ -15544,6 +15838,28 @@ static int bgp_distance_unset(struct vty *vty, const char *distance_str,
return CMD_SUCCESS;
}
+void bgp_address_family_distance_delete(void)
+{
+ afi_t afi = AFI_UNSPEC;
+ safi_t safi = SAFI_UNSPEC;
+ struct bgp_dest *dest = NULL;
+ struct bgp_distance *bdistance = NULL;
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ for (dest = bgp_table_top(bgp_distance_table[afi][safi]); dest;
+ dest = bgp_route_next(dest)) {
+ if (!bgp_dest_has_bgp_path_info_data(dest))
+ continue;
+ bdistance = bgp_dest_get_bgp_distance_info(dest);
+ XFREE(MTYPE_AS_LIST, bdistance->access_list);
+ bgp_distance_free(bdistance);
+
+ bgp_dest_set_bgp_distance_info(dest, NULL);
+ bgp_dest_unlock_node(dest);
+ }
+ }
+}
+
/* Apply BGP information to distance method. */
uint8_t bgp_distance_apply(const struct prefix *p, struct bgp_path_info *pinfo,
afi_t afi, safi_t safi, struct bgp *bgp)
diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h
index 1df0ffd300..474e229575 100644
--- a/bgpd/bgp_route.h
+++ b/bgpd/bgp_route.h
@@ -589,6 +589,42 @@ enum bgp_path_type {
BGP_PATH_SHOW_MULTIPATH
};
+/* meta-queue structure:
+ * sub-queue 0: early routes
+ * sub-queue 1: other routes
+ * sub-queue 2: EOIU marker
+ */
+#define MQ_SIZE 3
+
+/* For checking that an object has already been queued in some sub-queue */
+#define MQ_BIT_MASK ((1 << MQ_SIZE) - 1)
+
+struct meta_queue {
+ STAILQ_HEAD(bgp_dest_queue, bgp_dest) * subq[MQ_SIZE];
+ uint32_t size; /* sum of lengths of all subqueues */
+};
+
+/*
+ * When the update-delay expires, BGP inserts an EOIU (End-Of-Initial-Update) marker
+ * into the META_QUEUE_EOIU_MARKER sub-queue. The meta queue holds only
+ * bgp_dest structures. To process the EOIU marker, we need to call bgp_process_main_one()
+ * on the corresponding BGP instance. Since the marker itself isn't a real route
+ * (a dummy dest is created for it) and doesn't inherently carry the BGP instance pointer,
+ * we store the struct bgp pointer in the dest->info field. This ensures that, when processing
+ * the EOIU marker, we have the necessary context (the relevant BGP instance) available.
+ */
+struct bgp_eoiu_info {
+ struct bgp *bgp;
+};
+
+/*
+ * Meta queue sub-queue names
+ */
+enum meta_queue_indexes {
+ META_QUEUE_EARLY_ROUTE,
+ META_QUEUE_OTHER_ROUTE,
+ META_QUEUE_EOIU_MARKER,
+};
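The sub-queues declared above are BSD <sys/queue.h> singly-linked tail queues; each bgp_dest threads through them via its pq entry, which is why the enqueue paths assert STAILQ_NEXT(dest, pq) == NULL before inserting. A self-contained sketch of the same macros on a toy element type:

#include <stdio.h>
#include <sys/queue.h>

/* Toy stand-in for bgp_dest; pq plays the same role as in the diff. */
struct demo_dest {
	int id;
	STAILQ_ENTRY(demo_dest) pq;
};

STAILQ_HEAD(demo_queue, demo_dest);

int main(void)
{
	struct demo_queue q;
	struct demo_dest a = { .id = 1 }, b = { .id = 2 };
	struct demo_dest *d;

	STAILQ_INIT(&q);
	STAILQ_INSERT_TAIL(&q, &a, pq); /* like *_meta_queue_add() */
	STAILQ_INSERT_TAIL(&q, &b, pq);

	while (!STAILQ_EMPTY(&q)) {
		d = STAILQ_FIRST(&q);
		STAILQ_REMOVE_HEAD(&q, pq);
		STAILQ_NEXT(d, pq) = NULL; /* complete unlink, as in process_subq() */
		printf("dequeued %d\n", d->id);
	}
	return 0;
}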
+
static inline void bgp_bump_version(struct bgp_dest *dest)
{
dest->version = bgp_table_next_version(bgp_dest_table(dest));
@@ -795,6 +831,7 @@ extern void bgp_redistribute_withdraw(struct bgp *, afi_t, int, unsigned short);
extern void bgp_static_add(struct bgp *);
extern void bgp_static_delete(struct bgp *);
+extern void bgp_address_family_distance_delete(void);
extern void bgp_static_redo_import_check(struct bgp *);
extern void bgp_purge_static_redist_routes(struct bgp *bgp);
extern void bgp_static_update(struct bgp *bgp, const struct prefix *p,
@@ -932,11 +969,10 @@ extern void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp,
safi_t safi, json_object *json,
bool incremental_print,
bool local_table);
-extern void route_vty_out_detail(struct vty *vty, struct bgp *bgp,
- struct bgp_dest *bn, const struct prefix *p,
- struct bgp_path_info *path, afi_t afi,
- safi_t safi, enum rpki_states,
- json_object *json_paths);
+extern void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
+ const struct prefix *p, struct bgp_path_info *path, afi_t afi,
+ safi_t safi, enum rpki_states, json_object *json_paths,
+ struct attr *attr);
extern int bgp_show_table_rd(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
struct bgp_table *table, struct prefix_rd *prd,
enum bgp_show_type type, void *output_arg,
@@ -973,4 +1009,8 @@ extern int bgp_path_info_cmp(struct bgp *bgp, struct bgp_path_info *new,
#define bgp_path_info_add(A, B) \
bgp_path_info_add_with_caller(__func__, (A), (B))
#define bgp_path_info_free(B) bgp_path_info_free_with_caller(__func__, (B))
+extern void bgp_meta_queue_free(struct meta_queue *mq);
+extern int early_route_process(struct bgp *bgp, struct bgp_dest *dest);
+extern int other_route_process(struct bgp *bgp, struct bgp_dest *dest);
+extern int eoiu_marker_process(struct bgp *bgp, struct bgp_dest *dest);
#endif /* _QUAGGA_BGP_ROUTE_H */
diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c
index 347c5d02a1..04a709b350 100644
--- a/bgpd/bgp_rpki.c
+++ b/bgpd/bgp_rpki.c
@@ -155,7 +155,6 @@ static enum route_map_cmd_result_t route_match(void *rule,
void *object);
static void *route_match_compile(const char *arg);
static void revalidate_bgp_node(struct bgp_dest *dest, afi_t afi, safi_t safi);
-static void revalidate_all_routes(struct rpki_vrf *rpki_vrf);
static bool rpki_debug_conf, rpki_debug_term;
@@ -586,48 +585,10 @@ static void rpki_revalidate_prefix(struct event *thread)
XFREE(MTYPE_BGP_RPKI_REVALIDATE, rrp);
}
-static void bgpd_sync_callback(struct event *thread)
+static void revalidate_single_prefix(struct vrf *vrf, struct prefix prefix, afi_t afi)
{
struct bgp *bgp;
struct listnode *node;
- struct prefix prefix;
- struct pfx_record rec;
- struct rpki_vrf *rpki_vrf = EVENT_ARG(thread);
- struct vrf *vrf = NULL;
-
- event_add_read(bm->master, bgpd_sync_callback, rpki_vrf,
- rpki_vrf->rpki_sync_socket_bgpd, NULL);
-
- if (atomic_load_explicit(&rpki_vrf->rtr_update_overflow,
- memory_order_seq_cst)) {
- while (read(rpki_vrf->rpki_sync_socket_bgpd, &rec,
- sizeof(struct pfx_record)) != -1)
- ;
-
- atomic_store_explicit(&rpki_vrf->rtr_update_overflow, 0,
- memory_order_seq_cst);
- revalidate_all_routes(rpki_vrf);
- return;
- }
-
- int retval = read(rpki_vrf->rpki_sync_socket_bgpd, &rec,
- sizeof(struct pfx_record));
- if (retval != sizeof(struct pfx_record)) {
- RPKI_DEBUG("Could not read from rpki_sync_socket_bgpd");
- return;
- }
- pfx_record_to_prefix(&rec, &prefix);
-
- afi_t afi = (rec.prefix.ver == LRTR_IPV4) ? AFI_IP : AFI_IP6;
-
- if (rpki_vrf->vrfname) {
- vrf = vrf_lookup_by_name(rpki_vrf->vrfname);
- if (!vrf) {
- zlog_err("%s(): vrf for rpki %s not found", __func__,
- rpki_vrf->vrfname);
- return;
- }
- }
for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
safi_t safi;
@@ -655,101 +616,76 @@ static void bgpd_sync_callback(struct event *thread)
}
}
-static void revalidate_bgp_node(struct bgp_dest *bgp_dest, afi_t afi,
- safi_t safi)
+static void bgpd_sync_callback(struct event *thread)
{
- struct bgp_adj_in *ain;
- mpls_label_t *label;
- uint8_t num_labels;
-
- for (ain = bgp_dest->adj_in; ain; ain = ain->next) {
- struct bgp_path_info *path =
- bgp_dest_get_bgp_path_info(bgp_dest);
-
- num_labels = BGP_PATH_INFO_NUM_LABELS(path);
- label = num_labels ? path->extra->labels->label : NULL;
-
- (void)bgp_update(ain->peer, bgp_dest_get_prefix(bgp_dest),
- ain->addpath_rx_id, ain->attr, afi, safi,
- ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL, label,
- num_labels, 1, NULL);
- }
-}
-
-/*
- * The act of a soft reconfig in revalidation is really expensive
- * coupled with the fact that the download of a full rpki state
- * from a rpki server can be expensive, let's break up the revalidation
- * to a point in time in the future to allow other bgp events
- * to take place too.
- */
-struct rpki_revalidate_peer {
+ struct prefix prefix;
+ struct pfx_record rec;
+ struct rpki_vrf *rpki_vrf = EVENT_ARG(thread);
+ struct vrf *vrf = NULL;
afi_t afi;
- safi_t safi;
- struct peer *peer;
-};
+ int retval;
-static void bgp_rpki_revalidate_peer(struct event *thread)
-{
- struct rpki_revalidate_peer *rvp = EVENT_ARG(thread);
-
- /*
- * Here's the expensive bit of gnomish deviousness
- */
- bgp_soft_reconfig_in(rvp->peer, rvp->afi, rvp->safi);
-
- XFREE(MTYPE_BGP_RPKI_REVALIDATE, rvp);
-}
-
-static void revalidate_all_routes(struct rpki_vrf *rpki_vrf)
-{
- struct bgp *bgp;
- struct listnode *node;
- struct vrf *vrf = NULL;
+ event_add_read(bm->master, bgpd_sync_callback, rpki_vrf, rpki_vrf->rpki_sync_socket_bgpd,
+ NULL);
if (rpki_vrf->vrfname) {
vrf = vrf_lookup_by_name(rpki_vrf->vrfname);
if (!vrf) {
- zlog_err("%s(): vrf for rpki %s not found", __func__,
- rpki_vrf->vrfname);
+ zlog_err("%s(): vrf for rpki %s not found", __func__, rpki_vrf->vrfname);
return;
}
}
- for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
- struct peer *peer;
- struct listnode *peer_listnode;
+ if (atomic_load_explicit(&rpki_vrf->rtr_update_overflow, memory_order_seq_cst)) {
+ ssize_t size = 0;
- if (!vrf && bgp->vrf_id != VRF_DEFAULT)
- continue;
- if (vrf && bgp->vrf_id != vrf->vrf_id)
- continue;
+ retval = read(rpki_vrf->rpki_sync_socket_bgpd, &rec, sizeof(struct pfx_record));
+ while (retval != -1) {
+ if (retval != sizeof(struct pfx_record))
+ break;
- for (ALL_LIST_ELEMENTS_RO(bgp->peer, peer_listnode, peer)) {
- afi_t afi;
- safi_t safi;
+ size += retval;
+ pfx_record_to_prefix(&rec, &prefix);
+ afi = (rec.prefix.ver == LRTR_IPV4) ? AFI_IP : AFI_IP6;
+ revalidate_single_prefix(vrf, prefix, afi);
- FOREACH_AFI_SAFI (afi, safi) {
- struct rpki_revalidate_peer *rvp;
+ retval = read(rpki_vrf->rpki_sync_socket_bgpd, &rec,
+ sizeof(struct pfx_record));
+ }
- if (!bgp->rib[afi][safi])
- continue;
+ RPKI_DEBUG("Socket overflow detected (%zd bytes drained), revalidating affected prefixes", size);
- if (!peer_established(peer->connection))
- continue;
+ atomic_store_explicit(&rpki_vrf->rtr_update_overflow, 0, memory_order_seq_cst);
+ return;
+ }
- rvp = XCALLOC(MTYPE_BGP_RPKI_REVALIDATE,
- sizeof(*rvp));
- rvp->peer = peer;
- rvp->afi = afi;
- rvp->safi = safi;
+ retval = read(rpki_vrf->rpki_sync_socket_bgpd, &rec, sizeof(struct pfx_record));
+ if (retval != sizeof(struct pfx_record)) {
+ RPKI_DEBUG("Could not read from rpki_sync_socket_bgpd");
+ return;
+ }
+ pfx_record_to_prefix(&rec, &prefix);
- event_add_event(
- bm->master, bgp_rpki_revalidate_peer,
- rvp, 0,
- &peer->t_revalidate_all[afi][safi]);
- }
- }
+ afi = (rec.prefix.ver == LRTR_IPV4) ? AFI_IP : AFI_IP6;
+
+ revalidate_single_prefix(vrf, prefix, afi);
+}
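The reworked overflow branch now drains the sync socket record by record and revalidates each prefix it reads, rather than flushing everything and revalidating all routes as the deleted code did. A sketch of that drain shape, assuming a non-blocking descriptor (read() returning -1 with EAGAIN is what terminates the loop; the record type and fd setup below are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for struct pfx_record. */
struct demo_rec {
	unsigned int prefix;
};

static void demo_drain(int fd)
{
	struct demo_rec rec;
	ssize_t n;

	/* Assumes fd is non-blocking: read() returns -1/EAGAIN when empty. */
	while ((n = read(fd, &rec, sizeof(rec))) != -1) {
		if (n != (ssize_t)sizeof(rec))
			break; /* short read: stop, as the diff's loop does */
		printf("revalidate record %u\n", rec.prefix);
	}
}

int main(void)
{
	int fds[2];
	struct demo_rec r1 = { 1 }, r2 = { 2 };

	if (pipe2(fds, O_NONBLOCK) != 0)
		return 1;
	(void)write(fds[1], &r1, sizeof(r1));
	(void)write(fds[1], &r2, sizeof(r2));

	demo_drain(fds[0]); /* prints both records, then stops on EAGAIN */
	close(fds[0]);
	close(fds[1]);
	return 0;
}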
+
+static void revalidate_bgp_node(struct bgp_dest *bgp_dest, afi_t afi, safi_t safi)
+{
+ struct bgp_adj_in *ain;
+ mpls_label_t *label;
+ uint8_t num_labels;
+
+ for (ain = bgp_dest->adj_in; ain; ain = ain->next) {
+ struct bgp_path_info *path = bgp_dest_get_bgp_path_info(bgp_dest);
+
+ num_labels = BGP_PATH_INFO_NUM_LABELS(path);
+ label = num_labels ? path->extra->labels->label : NULL;
+
+ (void)bgp_update(ain->peer, bgp_dest_get_prefix(bgp_dest), ain->addpath_rx_id,
+ ain->attr, afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL,
+ label, num_labels, 1, NULL);
}
}
diff --git a/bgpd/bgp_table.h b/bgpd/bgp_table.h
index 130f5ca749..88276de848 100644
--- a/bgpd/bgp_table.h
+++ b/bgpd/bgp_table.h
@@ -391,6 +391,16 @@ static inline void bgp_dest_set_bgp_path_info(struct bgp_dest *dest,
dest->info = bi;
}
+static inline struct bgp_eoiu_info *bgp_dest_get_bgp_eoiu_info(struct bgp_dest *dest)
+{
+ return dest ? dest->info : NULL;
+}
+
+static inline void bgp_dest_set_bgp_eoiu_info(struct bgp_dest *dest, struct bgp_eoiu_info *eoiu_info)
+{
+ dest->info = eoiu_info;
+}
+
static inline struct bgp_table *
bgp_dest_get_bgp_table_info(struct bgp_dest *dest)
{
@@ -419,7 +429,6 @@ static inline unsigned int bgp_dest_get_lock_count(const struct bgp_dest *dest)
}
#ifdef _FRR_ATTRIBUTE_PRINTFRR
-#pragma FRR printfrr_ext "%pRN" (struct bgp_node *)
#pragma FRR printfrr_ext "%pBD" (struct bgp_dest *)
#endif
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index ef03606707..35ddfc34ff 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -757,7 +757,7 @@ static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
json_time = json_object_new_object();
json_object_int_add(json_time, "epoch", epoch_tbuf);
json_object_string_add(json_time, "epochString",
- ctime_r(&epoch_tbuf, timebuf));
+ time_to_string_json(updgrp->uptime, timebuf));
json_object_object_add(json_updgrp, "groupCreateTime",
json_time);
json_object_string_add(json_updgrp, "afi",
@@ -766,8 +766,7 @@ static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
safi2str(updgrp->safi));
} else {
vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
- vty_out(vty, " Created: %s",
- timestamp_string(updgrp->uptime, timebuf));
+ vty_out(vty, " Created: %s", time_to_string(updgrp->uptime, timebuf));
}
filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
@@ -835,15 +834,14 @@ static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
json_object_int_add(json_subgrp_time, "epoch",
epoch_tbuf);
json_object_string_add(json_subgrp_time, "epochString",
- ctime_r(&epoch_tbuf, timebuf));
+ time_to_string_json(subgrp->uptime, timebuf));
json_object_object_add(json_subgrp, "groupCreateTime",
json_subgrp_time);
} else {
vty_out(vty, "\n");
vty_out(vty, " Update-subgroup %" PRIu64 ":\n",
subgrp->id);
- vty_out(vty, " Created: %s",
- timestamp_string(subgrp->uptime, timebuf));
+ vty_out(vty, " Created: %s", time_to_string(subgrp->uptime, timebuf));
}
if (subgrp->split_from.update_group_id
diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c
index 1a66df59fc..a1bf9a4c61 100644
--- a/bgpd/bgp_updgrp_adv.c
+++ b/bgpd/bgp_updgrp_adv.c
@@ -228,6 +228,12 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
afi2str(afi), safi2str(safi), ctx->dest);
UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
+ /* Withdraw stale addpath routes without waiting for the coalesce timer
+ * to expire. Otherwise, since adj->addpath_tx_id is overwritten, the
+ * code never notices that it still has to send a withdrawal.
+ */
+ if (addpath_capable)
+ subgrp_withdraw_stale_addpath(ctx, subgrp);
/*
* Skip the subgroups that have coalesce timer running. We will
* walk the entire prefix table for those subgroups when the
@@ -237,8 +243,6 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
/* An update-group that uses addpath */
if (addpath_capable) {
- subgrp_withdraw_stale_addpath(ctx, subgrp);
-
subgrp_announce_addpath_best_selected(ctx->dest,
subgrp);
@@ -582,7 +586,7 @@ bool bgp_adj_out_set_subgroup(struct bgp_dest *dest,
bgp_dump_attr(attr, attr_str, sizeof(attr_str));
- zlog_debug("%s suppress UPDATE w/ attr: %s", peer->host,
+ zlog_debug("%s suppress UPDATE %pBD w/ attr: %s", peer->host, dest,
attr_str);
}
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index f4426a5a68..40b2847c6f 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -95,15 +95,15 @@ FRR_CFG_DEFAULT_BOOL(BGP_DETERMINISTIC_MED,
);
FRR_CFG_DEFAULT_ULONG(BGP_CONNECT_RETRY,
{ .val_ulong = 10, .match_profile = "datacenter", },
- { .val_ulong = 120 },
+ { .val_ulong = BGP_DEFAULT_CONNECT_RETRY },
);
FRR_CFG_DEFAULT_ULONG(BGP_HOLDTIME,
{ .val_ulong = 9, .match_profile = "datacenter", },
- { .val_ulong = 180 },
+ { .val_ulong = BGP_DEFAULT_HOLDTIME },
);
FRR_CFG_DEFAULT_ULONG(BGP_KEEPALIVE,
{ .val_ulong = 3, .match_profile = "datacenter", },
- { .val_ulong = 60 },
+ { .val_ulong = BGP_DEFAULT_KEEPALIVE },
);
FRR_CFG_DEFAULT_BOOL(BGP_EBGP_REQUIRES_POLICY,
{ .val_bool = false, .match_profile = "datacenter", },
@@ -1696,8 +1696,13 @@ DEFUN (no_router_bgp,
}
if (bgp->l3vni) {
- vty_out(vty, "%% Please unconfigure l3vni %u\n",
- bgp->l3vni);
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE))
+ vty_out(vty,
+ "%% L3VNI %u is scheduled to be deleted. Please give it few secs and retry the command\n",
+ bgp->l3vni);
+ else
+ vty_out(vty, "%% Please unconfigure l3vni %u\n", bgp->l3vni);
+
return CMD_WARNING_CONFIG_FAILED;
}
@@ -2940,9 +2945,7 @@ DEFUN(bgp_reject_as_sets, bgp_reject_as_sets_cmd,
*/
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
peer->last_reset = PEER_DOWN_AS_SETS_REJECT;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
}
return CMD_SUCCESS;
@@ -2965,9 +2968,7 @@ DEFUN(no_bgp_reject_as_sets, no_bgp_reject_as_sets_cmd,
*/
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
peer->last_reset = PEER_DOWN_AS_SETS_REJECT;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
}
return CMD_SUCCESS;
@@ -3519,11 +3520,6 @@ DEFUN (bgp_neighbor_graceful_restart_set,
peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg);
if (!peer)
return CMD_WARNING_CONFIG_FAILED;
- if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- vty_out(vty,
- "Per peer-group graceful-restart configuration is not yet supported\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
result = bgp_neighbor_graceful_restart(peer, PEER_GR_CMD);
if (result == BGP_GR_SUCCESS) {
@@ -3554,11 +3550,6 @@ DEFUN (no_bgp_neighbor_graceful_restart,
peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg);
if (!peer)
return CMD_WARNING_CONFIG_FAILED;
- if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- vty_out(vty,
- "Per peer-group graceful-restart configuration is not yet supported\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
result = bgp_neighbor_graceful_restart(peer, NO_PEER_GR_CMD);
if (ret == BGP_GR_SUCCESS) {
@@ -3588,11 +3579,6 @@ DEFUN (bgp_neighbor_graceful_restart_helper_set,
peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg);
if (!peer)
return CMD_WARNING_CONFIG_FAILED;
- if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- vty_out(vty,
- "Per peer-group graceful-restart configuration is not yet supported\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
ret = bgp_neighbor_graceful_restart(peer, PEER_HELPER_CMD);
if (ret == BGP_GR_SUCCESS) {
@@ -3623,11 +3609,6 @@ DEFUN (no_bgp_neighbor_graceful_restart_helper,
peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg);
if (!peer)
return CMD_WARNING_CONFIG_FAILED;
- if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- vty_out(vty,
- "Per peer-group graceful-restart configuration is not yet supported\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
ret = bgp_neighbor_graceful_restart(peer, NO_PEER_HELPER_CMD);
if (ret == BGP_GR_SUCCESS) {
@@ -3657,11 +3638,6 @@ DEFUN (bgp_neighbor_graceful_restart_disable_set,
peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg);
if (!peer)
return CMD_WARNING_CONFIG_FAILED;
- if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- vty_out(vty,
- "Per peer-group graceful-restart configuration is not yet supported\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
ret = bgp_neighbor_graceful_restart(peer, PEER_DISABLE_CMD);
if (ret == BGP_GR_SUCCESS) {
@@ -3693,11 +3669,6 @@ DEFUN (no_bgp_neighbor_graceful_restart_disable,
peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg);
if (!peer)
return CMD_WARNING_CONFIG_FAILED;
- if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- vty_out(vty,
- "Per peer-group graceful-restart configuration is not yet supported\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
ret = bgp_neighbor_graceful_restart(peer, NO_PEER_DISABLE_CMD);
if (ret == BGP_GR_SUCCESS) {
@@ -5130,10 +5101,7 @@ static int peer_conf_interface_get(struct vty *vty, const char *conf_if,
peer->last_reset = PEER_DOWN_V6ONLY_CHANGE;
/* v6only flag changed. Reset the BGP session. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
}
@@ -5265,7 +5233,7 @@ DEFUN (neighbor_peer_group,
DEFUN (no_neighbor,
no_neighbor_cmd,
- "no neighbor <WORD|<A.B.C.D|X:X::X:X> [remote-as <(1-4294967295)|internal|external|auto>]>",
+ "no neighbor <WORD|<A.B.C.D|X:X::X:X> [remote-as <ASNUM|internal|external|auto>]>",
NO_STR
NEIGHBOR_STR
NEIGHBOR_ADDR_STR2
@@ -5294,7 +5262,7 @@ DEFUN (no_neighbor,
* interface. */
if (peer->ifp)
bgp_zebra_terminate_radv(peer->bgp, peer);
- peer_notify_unconfig(peer);
+ peer_notify_unconfig(peer->connection);
peer_delete(peer);
return CMD_SUCCESS;
}
@@ -5330,10 +5298,10 @@ DEFUN (no_neighbor,
if (CHECK_FLAG(peer->flags, PEER_FLAG_CAPABILITY_ENHE))
bgp_zebra_terminate_radv(peer->bgp, peer);
- peer_notify_unconfig(peer);
+ peer_notify_unconfig(peer->connection);
peer_delete(peer);
if (other && other->connection->status != Deleted) {
- peer_notify_unconfig(other);
+ peer_notify_unconfig(other->connection);
peer_delete(other);
}
}
@@ -5344,7 +5312,7 @@ DEFUN (no_neighbor,
DEFUN (no_neighbor_interface_config,
no_neighbor_interface_config_cmd,
- "no neighbor WORD interface [v6only] [peer-group PGNAME] [remote-as <(1-4294967295)|internal|external|auto>]",
+ "no neighbor WORD interface [v6only] [peer-group PGNAME] [remote-as <ASNUM|internal|external|auto>]",
NO_STR
NEIGHBOR_STR
"Interface name\n"
@@ -5368,7 +5336,7 @@ DEFUN (no_neighbor_interface_config,
/* Request zebra to terminate IPv6 RAs on this interface. */
if (peer->ifp)
bgp_zebra_terminate_radv(peer->bgp, peer);
- peer_notify_unconfig(peer);
+ peer_notify_unconfig(peer->connection);
peer_delete(peer);
} else {
vty_out(vty, "%% Create the bgp interface first\n");
@@ -5776,7 +5744,7 @@ DEFUN (no_neighbor_set_peer_group,
if (CHECK_FLAG(peer->flags, PEER_FLAG_CAPABILITY_ENHE))
bgp_zebra_terminate_radv(peer->bgp, peer);
- peer_notify_unconfig(peer);
+ peer_notify_unconfig(peer->connection);
ret = peer_delete(peer);
return bgp_vty_return(vty, ret);
@@ -9838,6 +9806,8 @@ DEFPY (af_rd_vpn_export,
bgp_get_default(), bgp);
if (yes) {
+ if (bgp->vpn_policy[afi].tovpn_rd_pretty)
+ XFREE(MTYPE_BGP_NAME, bgp->vpn_policy[afi].tovpn_rd_pretty);
bgp->vpn_policy[afi].tovpn_rd_pretty = XSTRDUP(MTYPE_BGP_NAME,
rd_str);
bgp->vpn_policy[afi].tovpn_rd = prd;
@@ -9978,26 +9948,9 @@ DEFPY (af_label_vpn_export,
UNSET_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_LABEL_MANUAL_REG);
- } else if (CHECK_FLAG(bgp->vpn_policy[afi].flags,
- BGP_VPN_POLICY_TOVPN_LABEL_AUTO)) {
+ } else if (CHECK_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_LABEL_AUTO))
/* release any previous auto label */
- if (bgp->vpn_policy[afi].tovpn_label != MPLS_LABEL_NONE) {
-
- /*
- * label has previously been automatically
- * assigned by labelpool: release it
- *
- * NB if tovpn_label == MPLS_LABEL_NONE it
- * means the automatic assignment is in flight
- * and therefore the labelpool callback must
- * detect that the auto label is not needed.
- */
-
- bgp_lp_release(LP_TYPE_VRF,
- &bgp->vpn_policy[afi],
- bgp->vpn_policy[afi].tovpn_label);
- }
- }
+ bgp_vpn_release_label(bgp, afi, false);
if (yes) {
if (label_auto) {
@@ -11523,6 +11476,72 @@ DEFPY (show_bgp_vrfs,
return CMD_SUCCESS;
}
+DEFPY(show_bgp_router,
+ show_bgp_router_cmd,
+ "show bgp router [json]",
+ SHOW_STR
+ BGP_STR
+ "Overall BGP information\n"
+ JSON_STR)
+{
+ char timebuf[MONOTIME_STRLEN];
+ time_t unix_timestamp;
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
+
+ if (uj)
+ json = json_object_new_object();
+
+ time_to_string(bm->start_time, timebuf);
+
+ if (uj) {
+ unix_timestamp = time(NULL) - (monotime(NULL) - bm->start_time);
+ json_object_int_add(json, "bgpStartedAt", unix_timestamp);
+ json_object_boolean_add(json, "bgpStartedGracefully",
+ CHECK_FLAG(bm->flags, BM_FLAG_GRACEFUL_RESTART));
+ }
+
+ if (CHECK_FLAG(bm->flags, BM_FLAG_GRACEFUL_RESTART)) {
+ if (!uj)
+ vty_out(vty, "BGP started gracefully at %s", timebuf);
+ else
+ json_object_boolean_add(json, "grComplete",
+ CHECK_FLAG(bm->flags, BM_FLAG_GR_COMPLETE));
+
+ if (CHECK_FLAG(bm->flags, BM_FLAG_GR_COMPLETE)) {
+ time_to_string(bm->gr_completion_time, timebuf);
+ if (uj) {
+ unix_timestamp = time(NULL) -
+ (monotime(NULL) - bm->gr_completion_time);
+ json_object_int_add(json, "grCompletedAt", unix_timestamp);
+ } else
+ vty_out(vty, "Graceful restart completed at %s", timebuf);
+ } else {
+ if (!uj)
+ vty_out(vty, "Graceful restart is in progress\n");
+ }
+ } else {
+ if (!uj)
+ vty_out(vty, "BGP started at %s", timebuf);
+ }
+
+ if (uj) {
+ json_object_boolean_add(json, "bgpInMaintenanceMode",
+ (CHECK_FLAG(bm->flags, BM_FLAG_MAINTENANCE_MODE)));
+ json_object_int_add(json, "bgpInstanceCount", listcount(bm->bgp));
+
+ vty_json(vty, json);
+ } else {
+ if (CHECK_FLAG(bm->flags, BM_FLAG_MAINTENANCE_MODE))
+ vty_out(vty, "BGP is in Maintenance mode (BGP GSHUT is in effect)\n");
+
+ vty_out(vty, "Number of BGP instances (including default): %d\n",
+ listcount(bm->bgp));
+ }
+
+ return CMD_SUCCESS;
+}
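The JSON branch of this command maps a monotonic timestamp back to a wall-clock epoch with time(NULL) - (monotime(NULL) - bm->start_time): subtract the event's age on the monotonic clock from the current wall-clock time. A standalone sketch of that arithmetic (demo_monotime() is a local stand-in for the helper in lib/monotime.h):

#include <stdio.h>
#include <time.h>

/* Stand-in for FRR's monotime(): whole seconds on CLOCK_MONOTONIC. */
static time_t demo_monotime(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec;
}

int main(void)
{
	/* Pretend the daemon recorded this at startup. */
	time_t start_mono = demo_monotime();

	/* ... time passes ... */

	/* Same arithmetic as the diff: wall clock now, minus how long ago
	 * (on the monotonic clock) the event happened. */
	time_t started_at = time(NULL) - (demo_monotime() - start_mono);

	printf("started at (epoch): %lld\n", (long long)started_at);
	return 0;
}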
+
DEFUN (show_bgp_mac_hash,
show_bgp_mac_hash_cmd,
"show bgp mac hash",
@@ -14927,22 +14946,31 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV) ||
CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV)) {
if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV) &&
- CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV))
+ CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV)) {
json_object_string_add(
json_cap, "gracefulRestart",
"advertisedAndReceived");
- else if (CHECK_FLAG(p->cap,
- PEER_CAP_RESTART_ADV))
+ } else if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV)) {
+ json_object_string_add(json_cap, "gracefulRestart",
+ "advertised");
+#if CONFDATE > 20250525
+CPP_NOTICE("Remove `gracefulRestartCapability` JSON field")
+#endif
json_object_string_add(
json_cap,
"gracefulRestartCapability",
"advertised");
- else if (CHECK_FLAG(p->cap,
- PEER_CAP_RESTART_RCV))
+ } else if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV)) {
+ json_object_string_add(json_cap, "gracefulRestart",
+ "received");
+#if CONFDATE > 20250525
+CPP_NOTICE("Remove `gracefulRestartCapability` JSON field")
+#endif
json_object_string_add(
json_cap,
"gracefulRestartCapability",
"received");
+ }
if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV)) {
int restart_af_count = 0;
@@ -15502,9 +15530,12 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
if (use_json) {
json_object *json_stat = NULL;
+ json_object *json_pfx_stat = NULL;
+
json_stat = json_object_new_object();
- /* Packet counts. */
+ json_pfx_stat = json_object_new_object();
+ /* Packet counts. */
atomic_size_t outq_count, inq_count;
outq_count = atomic_load_explicit(&p->connection->obuf->count,
memory_order_relaxed);
@@ -15554,6 +15585,16 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_object_int_add(json_stat, "totalSent", PEER_TOTAL_TX(p));
json_object_int_add(json_stat, "totalRecv", PEER_TOTAL_RX(p));
json_object_object_add(json_neigh, "messageStats", json_stat);
+
+ /* Prefix statistics */
+ json_object_int_add(json_pfx_stat, "inboundFiltered", p->stat_pfx_filter);
+ json_object_int_add(json_pfx_stat, "aspathLoop", p->stat_pfx_aspath_loop);
+ json_object_int_add(json_pfx_stat, "originatorLoop", p->stat_pfx_originator_loop);
+ json_object_int_add(json_pfx_stat, "clusterLoop", p->stat_pfx_cluster_loop);
+ json_object_int_add(json_pfx_stat, "invalidNextHop", p->stat_pfx_nh_invalid);
+ json_object_int_add(json_pfx_stat, "withdrawn", p->stat_pfx_withdraw);
+ json_object_int_add(json_pfx_stat, "attributesDiscarded", p->stat_pfx_discard);
+ json_object_object_add(json_neigh, "prefixStats", json_pfx_stat);
} else {
atomic_size_t outq_count, inq_count, open_out, open_in,
notify_out, notify_in, update_out, update_in,
@@ -15605,8 +15646,18 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
refresh_in);
vty_out(vty, " Capability: %10zu %10zu\n",
dynamic_cap_out, dynamic_cap_in);
- vty_out(vty, " Total: %10u %10u\n",
- (uint32_t)PEER_TOTAL_TX(p), (uint32_t)PEER_TOTAL_RX(p));
+ vty_out(vty, " Total: %10u %10u\n\n", (uint32_t)PEER_TOTAL_TX(p),
+ (uint32_t)PEER_TOTAL_RX(p));
+
+ /* Prefix statistics */
+ vty_out(vty, " Prefix statistics:\n");
+ vty_out(vty, " Inbound filtered: %u\n", p->stat_pfx_filter);
+ vty_out(vty, " AS-PATH loop: %u\n", p->stat_pfx_aspath_loop);
+ vty_out(vty, " Originator loop: %u\n", p->stat_pfx_originator_loop);
+ vty_out(vty, " Cluster loop: %u\n", p->stat_pfx_cluster_loop);
+ vty_out(vty, " Invalid next-hop: %u\n", p->stat_pfx_nh_invalid);
+ vty_out(vty, " Withdrawn: %u\n", p->stat_pfx_withdraw);
+ vty_out(vty, " Attributes discarded: %u\n\n", p->stat_pfx_discard);
}
if (use_json) {
@@ -18804,7 +18855,11 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
/* enforce-first-as */
if (CHECK_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS)) {
- if (!peergroup_flag_check(peer, PEER_FLAG_ENFORCE_FIRST_AS))
+ /* The `no` form is printed because enforcement is enabled
+ * by default, so the flag must be written out inverted.
+ * See peer_new().
+ */
+ if (peergroup_flag_check(peer, PEER_FLAG_ENFORCE_FIRST_AS))
vty_out(vty, " no neighbor %s enforce-first-as\n", addr);
} else {
if (peergroup_flag_check(peer, PEER_FLAG_ENFORCE_FIRST_AS))
@@ -19132,9 +19187,7 @@ static void bgp_config_write_peer_af(struct vty *vty, struct bgp *bgp,
if (peergroup_af_flag_check(peer, afi, safi,
PEER_FLAG_SEND_EXT_COMMUNITY_RPKI))
- vty_out(vty,
- " no neighbor %s send-community extended rpki\n",
- addr);
+ vty_out(vty, " neighbor %s send-community extended rpki\n", addr);
}
/* Default information */
@@ -21940,6 +21993,9 @@ void bgp_vty_init(void)
/* "show [ip] bgp vrfs" commands. */
install_element(VIEW_NODE, &show_bgp_vrfs_cmd);
+ /* Some overall BGP information */
+ install_element(VIEW_NODE, &show_bgp_router_cmd);
+
/* Community-list. */
community_list_vty();
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 16f4a0d2df..e3465feda8 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -137,7 +137,7 @@ static void bgp_start_interface_nbrs(struct bgp *bgp, struct interface *ifp)
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (peer->conf_if && (strcmp(peer->conf_if, ifp->name) == 0) &&
!peer_established(peer->connection)) {
- if (peer_active(peer))
+ if (peer_active(peer->connection))
BGP_EVENT_ADD(peer->connection, BGP_Stop);
BGP_EVENT_ADD(peer->connection, BGP_Start);
}
@@ -421,11 +421,10 @@ static int bgp_interface_address_delete(ZAPI_CALLBACK_ARGS)
if (addr->family == AF_INET)
continue;
- if (!IN6_IS_ADDR_LINKLOCAL(&addr->u.prefix6)
- && memcmp(&peer->nexthop.v6_global,
- &addr->u.prefix6, 16)
- == 0) {
- memset(&peer->nexthop.v6_global, 0, 16);
+ if (!IN6_IS_ADDR_LINKLOCAL(&addr->u.prefix6) &&
+ memcmp(&peer->nexthop.v6_global, &addr->u.prefix6, IPV6_MAX_BYTELEN) ==
+ 0) {
+ memset(&peer->nexthop.v6_global, 0, IPV6_MAX_BYTELEN);
FOREACH_AFI_SAFI (afi, safi)
bgp_announce_route(peer, afi, safi,
true);
@@ -744,6 +743,7 @@ bool bgp_zebra_nexthop_set(union sockunion *local, union sockunion *remote,
int ret = 0;
struct interface *ifp = NULL;
bool v6_ll_avail = true;
+ bool shared_network_original = peer->shared_network;
memset(nexthop, 0, sizeof(struct bgp_nexthop));
@@ -838,9 +838,9 @@ bool bgp_zebra_nexthop_set(union sockunion *local, union sockunion *remote,
if (!v6_ll_avail && !peer->conf_if)
v6_ll_avail = true;
if (if_lookup_by_ipv4(&remote->sin.sin_addr, peer->bgp->vrf_id))
- peer->shared_network = 1;
+ peer->shared_network = true;
else
- peer->shared_network = 0;
+ peer->shared_network = false;
}
/* IPv6 connection, fetch and store IPv4 local address if any. */
@@ -903,11 +903,14 @@ bool bgp_zebra_nexthop_set(union sockunion *local, union sockunion *remote,
|| if_lookup_by_ipv6(&remote->sin6.sin6_addr,
remote->sin6.sin6_scope_id,
peer->bgp->vrf_id))
- peer->shared_network = 1;
+ peer->shared_network = true;
else
- peer->shared_network = 0;
+ peer->shared_network = false;
}
+ if (shared_network_original != peer->shared_network)
+ bgp_peer_bfd_update_source(peer);
+
/* KAME stack specific treatment. */
#ifdef KAME
if (IN6_IS_ADDR_LINKLOCAL(&nexthop->v6_global)
@@ -1187,9 +1190,10 @@ static bool update_ipv6nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp,
ifindex =
pi->peer->nexthop.ifp->ifindex;
if (!ifindex) {
- if (pi->peer->conf_if)
- ifindex = pi->peer->ifp->ifindex;
- else if (pi->peer->ifname)
+ if (pi->peer->conf_if) {
+ if (pi->peer->ifp)
+ ifindex = pi->peer->ifp->ifindex;
+ } else if (pi->peer->ifname)
ifindex = ifname2ifindex(
pi->peer->ifname,
pi->peer->bgp->vrf_id);
@@ -3025,6 +3029,48 @@ static void bgp_zebra_connected(struct zclient *zclient)
BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(bgp, bgp->peer);
}
+void bgp_zebra_process_remote_routes_for_l2vni(struct event *e)
+{
+ /*
+ * If we have learnt and retained remote routes (VTEPs, MACs)
+ * for this VNI, install them.
+ */
+ install_uninstall_routes_for_vni(NULL, NULL, true);
+
+ /*
+ * If there are VNIs still pending to be processed, schedule them
+ * after a short delay so that the CPU can be used for other purposes.
+ */
+ if (zebra_l2_vni_count(&bm->zebra_l2_vni_head))
+ event_add_timer_msec(bm->master, bgp_zebra_process_remote_routes_for_l2vni, NULL,
+ 20, &bm->t_bgp_zebra_l2_vni);
+}
+
+void bgp_zebra_process_remote_routes_for_l3vrf(struct event *e)
+{
+ /*
+ * Install/Uninstall all remote routes belonging to l3vni
+ *
+ * NOTE:
+ * - At this point it does not matter whether an install or an
+ *   uninstall is being requested.
+ * - Since we pass struct bgp as NULL,
+ *   * we iterate the bm FIFO list
+ *   * the second argument (true) is ignored as well; the action
+ *     is derived from each BGP-VRF's ADD/DELETE flags.
+ */
+ install_uninstall_routes_for_vrf(NULL, true);
+
+ /*
+ * If there are L3VNIs still pending to be processed, schedule them
+ * after a short delay so that the CPU can be used for other purposes.
+ */
+ if (zebra_l3_vni_count(&bm->zebra_l3_vni_head)) {
+ event_add_timer_msec(bm->master, bgp_zebra_process_remote_routes_for_l3vrf, NULL,
+ 20, &bm->t_bgp_zebra_l3_vni);
+ }
+}
+
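For illustration, both new callbacks share the same drain-in-slices idiom: do a
bounded chunk of FIFO work, then re-arm a 20 ms timer instead of looping, so the
event loop stays responsive. A minimal sketch (process_one_batch, work_remaining
and t_drain are hypothetical stand-ins, not part of the patch):

	static struct event *t_drain;

	static void drain_in_slices(struct event *e)
	{
		process_one_batch();	/* hypothetical: bounded chunk of work */
		if (work_remaining())	/* hypothetical: entries left in FIFO? */
			event_add_timer_msec(bm->master, drain_in_slices, NULL,
					     20, &t_drain);
	}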
static int bgp_zebra_process_local_es_add(ZAPI_CALLBACK_ARGS)
{
esi_t esi;
diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h
index 8deecba747..7e9d57cb85 100644
--- a/bgpd/bgp_zebra.h
+++ b/bgpd/bgp_zebra.h
@@ -135,4 +135,6 @@ extern void bgp_zebra_release_label_range(uint32_t start, uint32_t end);
extern enum zclient_send_status
bgp_zebra_withdraw_actual(struct bgp_dest *dest, struct bgp_path_info *info,
struct bgp *bgp);
+extern void bgp_zebra_process_remote_routes_for_l2vni(struct event *e);
+extern void bgp_zebra_process_remote_routes_for_l3vrf(struct event *e);
#endif /* _QUAGGA_BGP_ZEBRA_H */
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index aa2bd5c371..eda6bc31d2 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -309,9 +309,7 @@ static int bgp_router_id_set(struct bgp *bgp, const struct in_addr *id,
peer->last_reset = PEER_DOWN_RID_CHANGE;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
}
/* EVPN uses router id in RD, update them */
@@ -447,8 +445,7 @@ void bm_wait_for_fib_set(bool set)
peer->connection->status))
continue;
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
}
}
}
@@ -463,6 +460,10 @@ void bgp_suppress_fib_pending_set(struct bgp *bgp, bool set)
if (bgp->inst_type == BGP_INSTANCE_TYPE_VIEW)
return;
+ /* Do nothing if already in a desired state */
+ if (set == !!CHECK_FLAG(bgp->flags, BGP_FLAG_SUPPRESS_FIB_PENDING))
+ return;
+
if (set) {
SET_FLAG(bgp->flags, BGP_FLAG_SUPPRESS_FIB_PENDING);
/* Send msg to zebra for the first instance of bgp enabled
@@ -503,8 +504,7 @@ void bgp_suppress_fib_pending_set(struct bgp *bgp, bool set)
if (!BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
continue;
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
}
}
@@ -528,9 +528,7 @@ void bgp_cluster_id_set(struct bgp *bgp, struct in_addr *cluster_id)
peer->last_reset = PEER_DOWN_CLID_CHANGE;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
}
}
@@ -552,9 +550,7 @@ void bgp_cluster_id_unset(struct bgp *bgp)
peer->last_reset = PEER_DOWN_CLID_CHANGE;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
}
}
@@ -633,14 +629,10 @@ void bgp_confederation_id_set(struct bgp *bgp, as_t as, const char *as_str)
if (already_confed) {
if (ptype == BGP_PEER_EBGP) {
peer->local_as = as;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(
- peer->connection->status)) {
+ if (peer_notify_config_change(peer->connection))
peer->last_reset =
PEER_DOWN_CONFED_ID_CHANGE;
- bgp_notify_send(peer->connection,
- BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- } else
+ else
bgp_session_reset_safe(peer, &nnode);
}
} else {
@@ -651,14 +643,10 @@ void bgp_confederation_id_set(struct bgp *bgp, as_t as, const char *as_str)
/* Reset the local_as to be our EBGP one */
if (ptype == BGP_PEER_EBGP)
peer->local_as = as;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(
- peer->connection->status)) {
+ if (peer_notify_config_change(peer->connection))
peer->last_reset =
PEER_DOWN_CONFED_ID_CHANGE;
- bgp_notify_send(peer->connection,
- BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- } else
+ else
bgp_session_reset_safe(peer, &nnode);
}
}
@@ -680,12 +668,7 @@ void bgp_confederation_id_unset(struct bgp *bgp)
if (peer_sort(peer) != BGP_PEER_IBGP) {
peer->local_as = bgp->as;
peer->last_reset = PEER_DOWN_CONFED_ID_CHANGE;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(
- peer->connection->status))
- bgp_notify_send(peer->connection,
- BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset_safe(peer, &nnode);
}
}
@@ -732,14 +715,10 @@ void bgp_confederation_peers_add(struct bgp *bgp, as_t as, const char *as_str)
if (peer->as == as) {
peer->local_as = bgp->as;
(void)peer_sort(peer);
- if (BGP_IS_VALID_STATE_FOR_NOTIF(
- peer->connection->status)) {
+ if (peer_notify_config_change(peer->connection))
peer->last_reset =
PEER_DOWN_CONFED_PEER_CHANGE;
- bgp_notify_send(peer->connection,
- BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- } else
+ else
bgp_session_reset_safe(peer, &nnode);
}
}
@@ -789,14 +768,10 @@ void bgp_confederation_peers_remove(struct bgp *bgp, as_t as)
if (peer->as == as) {
peer->local_as = bgp->confed_id;
(void)peer_sort(peer);
- if (BGP_IS_VALID_STATE_FOR_NOTIF(
- peer->connection->status)) {
+ if (peer_notify_config_change(peer->connection))
peer->last_reset =
PEER_DOWN_CONFED_PEER_CHANGE;
- bgp_notify_send(peer->connection,
- BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- } else
+ else
bgp_session_reset_safe(peer, &nnode);
}
}
@@ -1252,8 +1227,6 @@ static void peer_free(struct peer *peer)
bgp_reads_off(peer->connection);
bgp_writes_off(peer->connection);
event_cancel_event_ready(bm->master, peer->connection);
- FOREACH_AFI_SAFI (afi, safi)
- EVENT_OFF(peer->t_revalidate_all[afi][safi]);
assert(!peer->connection->t_write);
assert(!peer->connection->t_read);
@@ -1565,19 +1538,13 @@ struct peer *peer_new(struct bgp *bgp)
/* Set default flags. */
FOREACH_AFI_SAFI (afi, safi) {
SET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SEND_COMMUNITY);
- SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_SEND_EXT_COMMUNITY);
- SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_SEND_EXT_COMMUNITY_RPKI);
+ SET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SEND_EXT_COMMUNITY);
SET_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_SEND_LARGE_COMMUNITY);
SET_FLAG(peer->af_flags_invert[afi][safi],
PEER_FLAG_SEND_COMMUNITY);
- SET_FLAG(peer->af_flags_invert[afi][safi],
- PEER_FLAG_SEND_EXT_COMMUNITY);
- SET_FLAG(peer->af_flags_invert[afi][safi],
- PEER_FLAG_SEND_EXT_COMMUNITY_RPKI);
+ SET_FLAG(peer->af_flags_invert[afi][safi], PEER_FLAG_SEND_EXT_COMMUNITY);
SET_FLAG(peer->af_flags_invert[afi][safi],
PEER_FLAG_SEND_LARGE_COMMUNITY);
peer->addpath_type[afi][safi] = BGP_ADDPATH_NONE;
@@ -1593,8 +1560,13 @@ struct peer *peer_new(struct bgp *bgp)
SET_FLAG(peer->sflags, PEER_STATUS_CAPABILITY_OPEN);
- if (CHECK_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS))
- peer_flag_set(peer, PEER_FLAG_ENFORCE_FIRST_AS);
+ /* This is enabled by default, thus the flag is marked as inverted
+ * so that it is displayed correctly in the configuration.
+ */
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_ENFORCE_FIRST_AS)) {
+ SET_FLAG(peer->flags_invert, PEER_FLAG_ENFORCE_FIRST_AS);
+ SET_FLAG(peer->flags, PEER_FLAG_ENFORCE_FIRST_AS);
+ }
if (CHECK_FLAG(bgp->flags, BGP_FLAG_SOFT_VERSION_CAPABILITY))
peer_flag_set(peer, PEER_FLAG_CAPABILITY_SOFT_VERSION);
@@ -2017,7 +1989,7 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
bgp->coalesce_time = MIN(BGP_MAX_SUBGROUP_COALESCE_TIME, ct);
}
- active = peer_active(peer);
+ active = peer_active(peer->connection);
if (!active) {
if (peer->connection->su.sa.sa_family == AF_UNSPEC)
peer->last_reset = PEER_DOWN_NBR_ADDR;
@@ -2050,7 +2022,7 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
if (bgp->autoshutdown)
peer_flag_set(peer, PEER_FLAG_SHUTDOWN);
/* Set up peer's events and timers. */
- else if (!active && peer_active(peer))
+ else if (!active && peer_active(peer->connection))
bgp_timer_set(peer->connection);
bgp_peer_gr_flags_update(peer);
@@ -2101,10 +2073,7 @@ void peer_as_change(struct peer *peer, as_t as, enum peer_asn_type as_type,
/* Stop peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
peer->last_reset = PEER_DOWN_REMOTE_AS_CHANGE;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
}
origtype = peer_sort_lookup(peer);
@@ -2192,8 +2161,7 @@ int peer_remote_as(struct bgp *bgp, union sockunion *su, const char *conf_if,
/* When this peer is a member of peer-group. */
if (peer->group) {
/* peer-group already has AS number/internal/external */
- if (peer->group->conf->as
- || peer->group->conf->as_type) {
+ if (peer->group->conf->as || peer->group->conf->as_type != AS_UNSPECIFIED) {
/* Return peer group's AS number. */
*as = peer->group->conf->as;
return BGP_ERR_PEER_GROUP_MEMBER;
@@ -2446,13 +2414,13 @@ static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi)
if (peer_af_create(peer, afi, safi) == NULL)
return 1;
- active = peer_active(peer);
+ active = peer_active(peer->connection);
peer->afc[afi][safi] = 1;
if (peer->group)
peer_group2peer_config_copy_af(peer->group, peer, afi, safi);
- if (!active && peer_active(peer)) {
+ if (!active && peer_active(peer->connection)) {
bgp_timer_set(peer->connection);
} else {
peer->last_reset = PEER_DOWN_AF_ACTIVATE;
@@ -2469,15 +2437,11 @@ static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi)
false);
}
} else {
- bgp_notify_send(peer->connection,
- BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
}
}
- if (peer->connection->status == OpenSent ||
- peer->connection->status == OpenConfirm)
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
+
/*
 * If we are turning on an AFI/SAFI locally and we've
* started bringing a peer up, we need to tell
@@ -2488,10 +2452,8 @@ static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi)
* activation.
*/
other = peer->doppelganger;
- if (other && (other->connection->status == OpenSent ||
- other->connection->status == OpenConfirm))
- bgp_notify_send(other->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ if (other)
+ peer_notify_config_change(other->connection);
}
return 0;
@@ -2598,14 +2560,10 @@ static bool non_peergroup_deactivate_af(struct peer *peer, afi_t afi,
bgp_clear_route(peer, afi, safi);
peer->pcount[afi][safi] = 0;
} else {
- bgp_notify_send(peer->connection,
- BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
}
- } else {
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- }
+ } else
+ peer_notify_config_change(peer->connection);
}
return false;
@@ -2721,8 +2679,6 @@ int peer_delete(struct peer *peer)
bgp_reads_off(peer->connection);
bgp_writes_off(peer->connection);
event_cancel_event_ready(bm->master, peer->connection);
- FOREACH_AFI_SAFI (afi, safi)
- EVENT_OFF(peer->t_revalidate_all[afi][safi]);
assert(!CHECK_FLAG(peer->connection->thread_flags,
PEER_THREAD_WRITES_ON));
assert(!CHECK_FLAG(peer->connection->thread_flags,
@@ -2903,6 +2859,7 @@ struct peer_group *peer_group_get(struct bgp *bgp, const char *name)
group->conf->host = XSTRDUP(MTYPE_BGP_PEER_HOST, name);
group->conf->group = group;
group->conf->as = 0;
+ group->conf->as_type = AS_UNSPECIFIED;
group->conf->ttl = BGP_DEFAULT_TTL;
group->conf->gtsm_hops = BGP_GTSM_HOPS_DISABLED;
group->conf->v_routeadv = BGP_DEFAULT_EBGP_ROUTEADV;
@@ -3024,6 +2981,7 @@ static void peer_group2peer_config_copy(struct peer_group *group,
PEER_ATTR_INHERIT(peer, group, local_role);
/* Update GR flags for the peer. */
+ PEER_ATTR_INHERIT(peer, group, peer_gr_new_status_flag);
bgp_peer_gr_flags_update(peer);
/* Apply BFD settings from group to peer if it exists. */
@@ -3077,11 +3035,20 @@ int peer_group_remote_as(struct bgp *bgp, const char *group_name, as_t *as,
return 0;
}
-void peer_notify_unconfig(struct peer *peer)
+bool peer_notify_config_change(struct peer_connection *connection)
{
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_PEER_UNCONFIG);
+ if (BGP_IS_VALID_STATE_FOR_NOTIF(connection->status)) {
+ bgp_notify_send(connection, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ return true;
+ }
+
+ return false;
+}
+
+void peer_notify_unconfig(struct peer_connection *connection)
+{
+ if (BGP_IS_VALID_STATE_FOR_NOTIF(connection->status))
+ bgp_notify_send(connection, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_PEER_UNCONFIG);
}
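The bool return is what lets the call sites in this patch collapse the old
notify-or-reset branching into a single line; the prevailing pattern is:

	if (!peer_notify_config_change(peer->connection))
		bgp_session_reset(peer);

i.e. send the CEASE/CONFIG_CHANGE notification when the session state allows it,
and fall back to a hard session reset otherwise.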
static void peer_notify_shutdown(struct peer *peer)
@@ -3108,9 +3075,9 @@ void peer_group_notify_unconfig(struct peer_group *group)
other = peer->doppelganger;
if (other && other->connection->status != Deleted) {
other->group = NULL;
- peer_notify_unconfig(other);
+ peer_notify_unconfig(other->connection);
} else
- peer_notify_unconfig(peer);
+ peer_notify_unconfig(peer->connection);
}
}
@@ -3357,10 +3324,7 @@ int peer_group_bind(struct bgp *bgp, union sockunion *su, struct peer *peer,
peer->last_reset = PEER_DOWN_RMAP_BIND;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
}
@@ -3395,7 +3359,7 @@ int peer_group_bind(struct bgp *bgp, union sockunion *su, struct peer *peer,
}
/* Set up peer's events and timers. */
- if (peer_active(peer))
+ if (peer_active(peer->connection))
bgp_timer_set(peer->connection);
}
@@ -4003,11 +3967,16 @@ int bgp_delete(struct bgp *bgp)
afi_t afi;
safi_t safi;
int i;
+ uint32_t vni_count;
+ struct bgpevpn *vpn = NULL;
struct bgp_dest *dest = NULL;
struct bgp_dest *dest_next = NULL;
struct bgp_table *dest_table = NULL;
struct graceful_restart_info *gr_info;
- uint32_t cnt_before, cnt_after;
+ uint32_t b_ann_cnt = 0, b_l2_cnt = 0, b_l3_cnt = 0;
+ uint32_t a_ann_cnt = 0, a_l2_cnt = 0, a_l3_cnt = 0;
+ struct bgp *bgp_to_proc = NULL;
+ struct bgp *bgp_to_proc_next = NULL;
assert(bgp);
@@ -4015,7 +3984,7 @@ int bgp_delete(struct bgp *bgp)
 * Iterate the pending dest list and remove all the dest pertaining to
* the bgp under delete.
*/
- cnt_before = zebra_announce_count(&bm->zebra_announce_head);
+ b_ann_cnt = zebra_announce_count(&bm->zebra_announce_head);
for (dest = zebra_announce_first(&bm->zebra_announce_head); dest;
dest = dest_next) {
dest_next = zebra_announce_next(&bm->zebra_announce_head, dest);
@@ -4027,10 +3996,36 @@ int bgp_delete(struct bgp *bgp)
}
}
- cnt_after = zebra_announce_count(&bm->zebra_announce_head);
- if (BGP_DEBUG(zebra, ZEBRA))
- zlog_debug("Zebra Announce Fifo cleanup count before %u and after %u during BGP %s deletion",
- cnt_before, cnt_after, bgp->name_pretty);
+ /*
+ * Pop all VPNs still pending remote-route installation if the
+ * bgp-evpn instance is being deleted.
+ */
+ if (bgp == bgp_get_evpn()) {
+ b_l2_cnt = zebra_l2_vni_count(&bm->zebra_l2_vni_head);
+ vni_count = b_l2_cnt;
+ while (vni_count) {
+ vpn = zebra_l2_vni_pop(&bm->zebra_l2_vni_head);
+ UNSET_FLAG(vpn->flags, VNI_FLAG_ADD);
+ vni_count--;
+ }
+ }
+
+ b_l3_cnt = zebra_l3_vni_count(&bm->zebra_l3_vni_head);
+ for (bgp_to_proc = zebra_l3_vni_first(&bm->zebra_l3_vni_head); bgp_to_proc;
+ bgp_to_proc = bgp_to_proc_next) {
+ bgp_to_proc_next = zebra_l3_vni_next(&bm->zebra_l3_vni_head, bgp_to_proc);
+ if (bgp_to_proc == bgp)
+ zebra_l3_vni_del(&bm->zebra_l3_vni_head, bgp_to_proc);
+ }
+
+ if (BGP_DEBUG(zebra, ZEBRA)) {
+ a_ann_cnt = zebra_announce_count(&bm->zebra_announce_head);
+ a_l2_cnt = zebra_l2_vni_count(&bm->zebra_l2_vni_head);
+ a_l3_cnt = zebra_l3_vni_count(&bm->zebra_l3_vni_head);
+ zlog_debug("BGP %s deletion FIFO cnt Zebra_Ann before %u after %u, L2_VNI before %u after %u, L3_VNI before %u after %u",
+ bgp->name_pretty, b_ann_cnt, a_ann_cnt, b_l2_cnt, a_l2_cnt, b_l3_cnt,
+ a_l3_cnt);
+ }
bgp_soft_reconfig_table_task_cancel(bgp, NULL, NULL);
@@ -4042,6 +4037,9 @@ int bgp_delete(struct bgp *bgp)
bgp_vpn_leak_unimport(bgp);
+ bgp_vpn_release_label(bgp, AFI_IP, true);
+ bgp_vpn_release_label(bgp, AFI_IP6, true);
+
hook_call(bgp_inst_delete, bgp);
FOREACH_AFI_SAFI (afi, safi)
@@ -4242,6 +4240,14 @@ int bgp_delete(struct bgp *bgp)
}
}
+ /* Clean BGP address family parameters */
+ bgp_mh_info->ead_evi_rx = BGP_EVPN_MH_EAD_EVI_RX_DEF;
+ bgp_evpn_switch_ead_evi_rx();
+ bgp_mh_info->ead_evi_tx = BGP_EVPN_MH_EAD_EVI_TX_DEF;
+ bgp_mh_info->evi_per_es_frag = BGP_EVPN_MAX_EVI_PER_ES_FRAG;
+
+ bgp_address_family_distance_delete();
+
return 0;
}
@@ -4323,6 +4329,9 @@ void bgp_free(struct bgp *bgp)
XFREE(MTYPE_BGP_NAME, bgp->snmp_stats);
XFREE(MTYPE_BGP_CONFED_LIST, bgp->confed_peers);
+ bgp_meta_queue_free(bgp->mq);
+ bgp->mq = NULL;
+
XFREE(MTYPE_BGP, bgp);
}
@@ -4636,9 +4645,11 @@ bool bgp_path_attribute_treat_as_withdraw(struct peer *peer, char *buf,
}
/* If the peer has at least one address family configured, return true. */
-bool peer_active(struct peer *peer)
+bool peer_active(struct peer_connection *connection)
{
- if (BGP_CONNECTION_SU_UNSPEC(peer->connection))
+ struct peer *peer = connection->peer;
+
+ if (BGP_CONNECTION_SU_UNSPEC(connection))
return false;
if (peer->bfd_config) {
@@ -4727,8 +4738,7 @@ void peer_change_action(struct peer *peer, afi_t afi, safi_t safi,
PEER_FLAG_CONFIG_NODE)))
peer_delete(peer->doppelganger);
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
} else if (type == peer_change_reset_in) {
if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_RCV))
bgp_route_refresh_send(peer, afi, safi, 0, 0, 0,
@@ -4740,8 +4750,7 @@ void peer_change_action(struct peer *peer, afi_t afi, safi_t safi,
PEER_FLAG_CONFIG_NODE)))
peer_delete(peer->doppelganger);
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
+ peer_notify_config_change(peer->connection);
}
} else if (type == peer_change_reset_out) {
paf = peer_af_find(peer, afi, safi);
@@ -4940,10 +4949,7 @@ static void peer_flag_modify_action(struct peer *peer, uint64_t flag)
peer->v_start = BGP_INIT_START_TIMER;
BGP_EVENT_ADD(peer->connection, BGP_Stop);
}
- } else if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) {
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- } else
+ } else if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
}
@@ -5427,12 +5433,7 @@ int peer_ebgp_multihop_set(struct peer *peer, int ttl)
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
if (peer->sort != BGP_PEER_IBGP) {
- if (BGP_IS_VALID_STATE_FOR_NOTIF(
- peer->connection->status))
- bgp_notify_send(peer->connection,
- BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
/* Reconfigure BFD peer with new TTL. */
@@ -5447,10 +5448,7 @@ int peer_ebgp_multihop_set(struct peer *peer, int ttl)
member->ttl = group->conf->ttl;
- if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status))
- bgp_notify_send(member->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(member->connection))
bgp_session_reset(member);
/* Reconfigure BFD peer with new TTL. */
@@ -5485,10 +5483,7 @@ int peer_ebgp_multihop_unset(struct peer *peer)
peer->ttl = ttl;
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
/* Reconfigure BFD peer with new TTL. */
@@ -5503,10 +5498,7 @@ int peer_ebgp_multihop_unset(struct peer *peer)
member->ttl = BGP_DEFAULT_TTL;
if (member->connection->fd >= 0) {
- if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status))
- bgp_notify_send(member->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(member->connection))
bgp_session_reset(member);
}
@@ -5658,10 +5650,7 @@ int peer_update_source_if_set(struct peer *peer, const char *ifname)
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
/* Send notification or reset peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
/* Apply new source configuration to BFD session. */
@@ -5696,10 +5685,7 @@ int peer_update_source_if_set(struct peer *peer, const char *ifname)
member->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
/* Send notification or reset peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status))
- bgp_notify_send(member->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(member->connection))
bgp_session_reset(member);
/* Apply new source configuration to BFD session. */
@@ -5729,10 +5715,7 @@ void peer_update_source_addr_set(struct peer *peer, const union sockunion *su)
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
/* Send notification or reset peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
/* Apply new source configuration to BFD session. */
@@ -5766,10 +5749,7 @@ void peer_update_source_addr_set(struct peer *peer, const union sockunion *su)
member->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
/* Send notification or reset peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status))
- bgp_notify_send(member->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(member->connection))
bgp_session_reset(member);
/* Apply new source configuration to BFD session. */
@@ -5817,10 +5797,7 @@ void peer_update_source_unset(struct peer *peer)
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
/* Send notification or reset peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
/* Apply new source configuration to BFD session. */
@@ -5853,10 +5830,7 @@ void peer_update_source_unset(struct peer *peer)
member->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE;
/* Send notification or reset peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status))
- bgp_notify_send(member->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(member->connection))
bgp_session_reset(member);
/* Apply new source configuration to BFD session. */
@@ -6370,7 +6344,7 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect)
/* Skip peer-group mechanics for regular peers. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
if (!peer_established(peer->connection)) {
- if (peer_active(peer))
+ if (peer_active(peer->connection))
BGP_EVENT_ADD(peer->connection, BGP_Stop);
BGP_EVENT_ADD(peer->connection, BGP_Start);
}
@@ -6391,7 +6365,7 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect)
member->v_connect = connect;
if (!peer_established(member->connection)) {
- if (peer_active(member))
+ if (peer_active(member->connection))
BGP_EVENT_ADD(member->connection, BGP_Stop);
BGP_EVENT_ADD(member->connection, BGP_Start);
}
@@ -6424,7 +6398,7 @@ int peer_timers_connect_unset(struct peer *peer)
/* Skip peer-group mechanics for regular peers. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
if (!peer_established(peer->connection)) {
- if (peer_active(peer))
+ if (peer_active(peer->connection))
BGP_EVENT_ADD(peer->connection, BGP_Stop);
BGP_EVENT_ADD(peer->connection, BGP_Start);
}
@@ -6445,7 +6419,7 @@ int peer_timers_connect_unset(struct peer *peer)
member->v_connect = peer->bgp->default_connect_retry;
if (!peer_established(member->connection)) {
- if (peer_active(member))
+ if (peer_active(member->connection))
BGP_EVENT_ADD(member->connection, BGP_Stop);
BGP_EVENT_ADD(member->connection, BGP_Start);
}
@@ -6712,7 +6686,7 @@ int peer_allowas_in_set(struct peer *peer, afi_t afi, safi_t safi,
SET_FLAG(member->af_flags[afi][safi],
PEER_FLAG_ALLOWAS_IN_ORIGIN);
member->allowas_in[afi][safi] = 0;
- peer_on_policy_change(peer, afi, safi, 0);
+ peer_on_policy_change(member, afi, safi, 0);
}
} else {
if (member->allowas_in[afi][safi] != allow_num
@@ -6721,7 +6695,7 @@ int peer_allowas_in_set(struct peer *peer, afi_t afi, safi_t safi,
UNSET_FLAG(member->af_flags[afi][safi],
PEER_FLAG_ALLOWAS_IN_ORIGIN);
member->allowas_in[afi][safi] = allow_num;
- peer_on_policy_change(peer, afi, safi, 0);
+ peer_on_policy_change(member, afi, safi, 0);
}
}
}
@@ -6886,10 +6860,7 @@ int peer_local_as_unset(struct peer *peer)
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
peer->last_reset = PEER_DOWN_LOCAL_AS_CHANGE;
/* Send notification or stop peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
BGP_EVENT_ADD(peer->connection, BGP_Stop);
/* Skip peer-group mechanics for regular peers. */
@@ -6915,10 +6886,7 @@ int peer_local_as_unset(struct peer *peer)
member->last_reset = PEER_DOWN_LOCAL_AS_CHANGE;
/* Send notification or stop peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status))
- bgp_notify_send(member->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(member->connection))
bgp_session_reset(member);
}
@@ -6947,10 +6915,7 @@ int peer_password_set(struct peer *peer, const char *password)
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
peer->last_reset = PEER_DOWN_PASSWORD_CHANGE;
/* Send notification or reset peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
/*
@@ -6985,10 +6950,7 @@ int peer_password_set(struct peer *peer, const char *password)
member->last_reset = PEER_DOWN_PASSWORD_CHANGE;
/* Send notification or reset peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status))
- bgp_notify_send(member->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(member->connection))
bgp_session_reset(member);
/* Attempt to install password on socket. */
@@ -7031,10 +6993,7 @@ int peer_password_unset(struct peer *peer)
/* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
/* Send notification or reset peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status))
- bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(peer->connection))
bgp_session_reset(peer);
/* Attempt to uninstall password on socket. */
@@ -7058,10 +7017,7 @@ int peer_password_unset(struct peer *peer)
XFREE(MTYPE_PEER_PASSWORD, member->password);
/* Send notification or reset peer depending on state. */
- if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status))
- bgp_notify_send(member->connection, BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_CONFIG_CHANGE);
- else
+ if (!peer_notify_config_change(member->connection))
bgp_session_reset(member);
/* Attempt to uninstall password on socket. */
@@ -8582,6 +8538,8 @@ void bgp_master_init(struct event_loop *master, const int buffer_size,
bm = &bgp_master;
zebra_announce_init(&bm->zebra_announce_head);
+ zebra_l2_vni_init(&bm->zebra_l2_vni_head);
+ zebra_l3_vni_init(&bm->zebra_l3_vni_head);
bm->bgp = list_new();
bm->listen_sockets = list_new();
bm->port = BGP_PORT_DEFAULT;
@@ -8605,6 +8563,8 @@ void bgp_master_init(struct event_loop *master, const int buffer_size,
bm->stalepath_time = BGP_DEFAULT_STALEPATH_TIME;
bm->select_defer_time = BGP_DEFAULT_SELECT_DEFERRAL_TIME;
bm->rib_stale_time = BGP_DEFAULT_RIB_STALE_TIME;
+ bm->t_bgp_zebra_l2_vni = NULL;
+ bm->t_bgp_zebra_l3_vni = NULL;
bgp_mac_init();
/* init the rd id space.
@@ -8738,8 +8698,7 @@ static int peer_unshut_after_cfg(struct bgp *bgp)
peer->host);
peer->shut_during_cfg = false;
- if (peer_active(peer) &&
- peer->connection->status != Established) {
+ if (peer_active(peer->connection) && peer->connection->status != Established) {
if (peer->connection->status != Idle)
BGP_EVENT_ADD(peer->connection, BGP_Stop);
BGP_EVENT_ADD(peer->connection, BGP_Start);
@@ -8842,11 +8801,7 @@ void bgp_terminate(void)
peer);
continue;
}
- if (BGP_IS_VALID_STATE_FOR_NOTIF(
- peer->connection->status))
- bgp_notify_send(peer->connection,
- BGP_NOTIFY_CEASE,
- BGP_NOTIFY_CEASE_PEER_UNCONFIG);
+ peer_notify_unconfig(peer->connection);
}
}
@@ -8857,6 +8812,8 @@ void bgp_terminate(void)
EVENT_OFF(bm->t_bgp_sync_label_manager);
EVENT_OFF(bm->t_bgp_start_label_manager);
EVENT_OFF(bm->t_bgp_zebra_route);
+ EVENT_OFF(bm->t_bgp_zebra_l2_vni);
+ EVENT_OFF(bm->t_bgp_zebra_l3_vni);
bgp_mac_finish();
}
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index f123188ae8..47214e52e5 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -19,6 +19,8 @@
#include "asn.h"
PREDECL_LIST(zebra_announce);
+PREDECL_LIST(zebra_l2_vni);
+PREDECL_LIST(zebra_l3_vni);
/* For union sockunion. */
#include "queue.h"
@@ -204,6 +206,14 @@ struct bgp_master {
/* To preserve ordering of installations into zebra across all Vrfs */
struct zebra_announce_head zebra_announce_head;
+ struct event *t_bgp_zebra_l2_vni;
+ /* To preserve ordering of processing of L2 VNIs in BGP */
+ struct zebra_l2_vni_head zebra_l2_vni_head;
+
+ struct event *t_bgp_zebra_l3_vni;
+ /* To preserve ordering of processing of BGP-VRFs for L3 VNIs */
+ struct zebra_l3_vni_head zebra_l3_vni_head;
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(bgp_master);
@@ -554,6 +564,8 @@ struct bgp {
#define BGP_FLAG_INSTANCE_HIDDEN (1ULL << 39)
/* Prohibit BGP from enabling IPv6 RA on interfaces */
#define BGP_FLAG_IPV6_NO_AUTO_RA (1ULL << 40)
+#define BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL (1ULL << 41)
+#define BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE (1ULL << 42)
/* BGP default address-families.
* New peers inherit enabled afi/safis from bgp instance.
@@ -830,6 +842,9 @@ struct bgp {
/* Process Queue for handling routes */
struct work_queue *process_queue;
+ /* Meta Queue Information */
+ struct meta_queue *mq;
+
bool fast_convergence;
/* BGP Conditional advertisement */
@@ -868,10 +883,14 @@ struct bgp {
uint64_t node_already_on_queue;
uint64_t node_deferred_on_queue;
+ struct zebra_l3_vni_item zl3vni;
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(bgp);
+DECLARE_LIST(zebra_l3_vni, struct bgp, zl3vni);
+
struct bgp_interface {
#define BGP_INTERFACE_MPLS_BGP_FORWARDING (1 << 0)
/* L3VPN multi domain switching */
@@ -1330,7 +1349,7 @@ struct peer {
union sockunion *su_local; /* Sockunion of local address. */
union sockunion *su_remote; /* Sockunion of remote address. */
- int shared_network; /* Is this peer shared same network. */
+ bool shared_network; /* Is this peer shared same network. */
struct bgp_nexthop nexthop; /* Nexthop */
/* Roles in bgp session */
@@ -1654,7 +1673,6 @@ struct peer {
/* Threads. */
struct event *t_llgr_stale[AFI_MAX][SAFI_MAX];
- struct event *t_revalidate_all[AFI_MAX][SAFI_MAX];
struct event *t_refresh_stalepath;
/* Thread flags. */
@@ -1710,7 +1728,8 @@ struct peer {
uint32_t stat_pfx_cluster_loop;
uint32_t stat_pfx_nh_invalid;
uint32_t stat_pfx_dup_withdraw;
- uint32_t stat_upd_7606; /* RFC7606: treat-as-withdraw */
+ uint32_t stat_pfx_withdraw; /* RFC7606: treat-as-withdraw */
+ uint32_t stat_pfx_discard; /* The number of prefixes with discarded attributes */
uint64_t stat_pfx_loc_rib; /* RFC7854 : Number of routes in Loc-RIB */
uint64_t stat_pfx_adj_rib_in; /* RFC7854 : Number of routes in Adj-RIBs-In */
@@ -2105,7 +2124,8 @@ struct bgp_nlri {
*/
#define BGP_DEFAULT_HOLDTIME 180
#define BGP_DEFAULT_KEEPALIVE 60
-#define BGP_DEFAULT_CONNECT_RETRY 120
+#define BGP_DEFAULT_CONNECT_RETRY 30
+#define BGP_MAX_CONNECT_RETRY 120
#define BGP_DEFAULT_EBGP_ROUTEADV 0
#define BGP_DEFAULT_IBGP_ROUTEADV 0
@@ -2295,7 +2315,7 @@ extern struct peer *peer_unlock_with_caller(const char *, struct peer *);
extern enum bgp_peer_sort peer_sort(struct peer *peer);
extern enum bgp_peer_sort peer_sort_lookup(struct peer *peer);
-extern bool peer_active(struct peer *);
+extern bool peer_active(struct peer_connection *connection);
extern bool peer_active_nego(struct peer *);
extern bool peer_afc_received(struct peer *peer);
extern bool peer_afc_advertised(struct peer *peer);
@@ -2385,7 +2405,8 @@ extern int peer_remote_as(struct bgp *bgp, union sockunion *su,
extern int peer_group_remote_as(struct bgp *bgp, const char *peer_str, as_t *as,
enum peer_asn_type as_type, const char *as_str);
extern int peer_delete(struct peer *peer);
-extern void peer_notify_unconfig(struct peer *peer);
+extern void peer_notify_unconfig(struct peer_connection *connection);
+extern bool peer_notify_config_change(struct peer_connection *connection);
extern int peer_group_delete(struct peer_group *);
extern int peer_group_remote_as_delete(struct peer_group *);
extern int peer_group_listen_range_add(struct peer_group *, struct prefix *);
@@ -2694,14 +2715,6 @@ static inline int peer_group_af_configured(struct peer_group *group)
return 0;
}
-static inline char *timestamp_string(time_t ts, char *timebuf)
-{
- time_t tbuf;
-
- tbuf = time(NULL) - (monotime(NULL) - ts);
- return ctime_r(&tbuf, timebuf);
-}
-
static inline bool peer_established(struct peer_connection *connection)
{
return connection->status == Established;
diff --git a/doc/developer/grpc.rst b/doc/developer/grpc.rst
index 4e81adf8b2..62d1594f4c 100644
--- a/doc/developer/grpc.rst
+++ b/doc/developer/grpc.rst
@@ -149,7 +149,6 @@ Below is how to compile and run the program, with the example output:
]
},
"frr-zebra:zebra": {
- "mcast-rpf-lookup": "mrib-then-urib",
"workqueue-hold-timer": 10,
"zapi-packets": 1000,
"import-kernel-table": {
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index 5077745a15..9e05a99474 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -881,7 +881,7 @@ commands:
.. code:: console
make topotests-build
- TOPOTEST_PULL=0 make topotests
+ make topotests
.. _topotests-guidelines:
diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst
index 5e22c4cb72..45142f7d83 100644
--- a/doc/developer/workflow.rst
+++ b/doc/developer/workflow.rst
@@ -167,15 +167,7 @@ as early as possible, i.e. the first 2-week window.
For reference, the expected release schedule according to the above is:
-+---------+------------+------------+------------+
-| Release | 2024-03-12 | 2024-07-02 | 2024-11-05 |
-+---------+------------+------------+------------+
-| RC | 2024-02-27 | 2024-06-18 | 2024-10-22 |
-+---------+------------+------------+------------+
-| dev/X.Y | 2024-02-13 | 2024-06-04 | 2024-10-08 |
-+---------+------------+------------+------------+
-| freeze | 2024-01-30 | 2024-05-21 | 2024-09-24 |
-+---------+------------+------------+------------+
+.. graphviz:: ../figures/releases.dot
Here is a hint on how to get the dates easily:
diff --git a/doc/figures/releases.dot b/doc/figures/releases.dot
new file mode 100644
index 0000000000..57d35987f8
--- /dev/null
+++ b/doc/figures/releases.dot
@@ -0,0 +1,44 @@
+digraph ReleaseTimeline {
+ rankdir=LR;
+ node [shape=box, style=rounded, fontsize=10, width=1.5, fontname="Helvetica"];
+
+ subgraph cluster_dev {
+ label="Development";
+ style=dashed;
+ color=blue;
+ node [fillcolor=lightblue, style=filled];
+ "dev/X.Y";
+ }
+
+ subgraph cluster_rc {
+ label="Release Candidate";
+ style=dashed;
+ color=orange;
+ node [fillcolor=orange, style=filled];
+ "RC";
+ }
+
+ subgraph cluster_stable {
+ label="Stable Release";
+ style=dashed;
+ color=green;
+ node [fillcolor=lightgreen, style=filled];
+ "release";
+ }
+
+ // Release steps with actions
+ "freeze" [label="Freeze", shape=ellipse, style=dotted, fontcolor=red];
+ "dev/X.Y" [label="dev/X.Y\n(Development)", fillcolor=lightblue];
+ "RC" [label="RC\n(Release Candidate)", fillcolor=orange];
+ "release" [label="Release\n(Final)", fillcolor=lightgreen];
+
+ // Connect the steps with actions
+ "freeze" -> "dev/X.Y" [label=" "];
+ "dev/X.Y" -> "RC" [label=" "];
+ "RC" -> "release" [label=" "];
+
+ // Date connections (freeze -> dev/X.Y -> RC -> release)
+ "2025-01-21" -> "2025-02-04" -> "2025-02-18" -> "2025-03-04";
+ "2025-05-20" -> "2025-06-03" -> "2025-06-17" -> "2025-07-01";
+ "2025-09-23" -> "2025-10-07" -> "2025-10-21" -> "2025-11-04";
+}
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 0c7fcecb9b..3642681765 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -1803,7 +1803,7 @@ Configuring Peers
Send the extended RPKI communities to the peer. RPKI extended community
   can be sent only to iBGP and eBGP-OAD peers.
- Default: enabled.
+ Default: disabled.
.. clicmd:: neighbor PEER weight WEIGHT
@@ -2938,6 +2938,7 @@ BGP Extended Communities in Route Map
match on to for the purpose of determining what type of SR-TE Policy Tunnel
a BGP route can resolve over, and it also shows the order for resolving the
BGP route if there are different tunnels.
+
- ``00`` Can match on a specific endpoint only which should be the nexthop
   of the route (Default Setting).
- ``01`` Can match on a specific endpoint or a null endpoint.
@@ -4349,6 +4350,10 @@ displays IPv6 routing table.
If ``detail`` option is specified after ``json``, more verbose JSON output
will be displayed.
+.. clicmd:: show bgp router [json]
+
+ This command displays information related to the BGP router and Graceful Restart.
+
Some other commands provide additional options for filtering the output.
.. clicmd:: show [ip] bgp regexp LINE
diff --git a/doc/user/filter.rst b/doc/user/filter.rst
index c1146e50aa..be63095166 100644
--- a/doc/user/filter.rst
+++ b/doc/user/filter.rst
@@ -9,9 +9,7 @@ defined, it can be applied in any direction.
IP Access List
==============
-.. clicmd:: access-list NAME [seq (1-4294967295)] permit IPV4-NETWORK
-
-.. clicmd:: access-list NAME [seq (1-4294967295)] deny IPV4-NETWORK
+.. clicmd:: access-list NAME [seq (1-4294967295)] <permit|deny> <A.B.C.D/M [exact-match]|any>
seq
seq `number` can be set either automatically or manually. In the
@@ -35,6 +33,29 @@ IP Access List
access-list filter permit 10.0.0.0/8
access-list filter seq 13 permit 10.0.0.0/7
+.. clicmd:: access-list NAME [seq (1-4294967295)] <deny|permit> ip <A.B.C.D A.B.C.D|host A.B.C.D|any> <A.B.C.D A.B.C.D|host A.B.C.D|any>
+
+ The extended access-list syntax enables filtering on both source and destination
+ IP addresses (or source and group, if used for multicast boundaries). The
+ source address comes first in the command.
+
+ If providing a mask, note that the access-lists use wildcard masks (inverse
+ matching logic of subnet masks). If specifying ``host``, only the single address
+ given will be matched.
+
+ A basic example is as follows:
+
+ .. code-block:: frr
+
+ access-list filter seq 5 permit ip host 10.0.20.2 232.1.1.0 0.0.0.128
+ access-list filter seq 10 deny ip 10.0.20.0 0.0.0.255 232.1.1.0 0.0.0.255
+ access-list filter seq 15 permit ip any any
+
+ .. note ::
+
+ If an access-list is specified but no match is found, the default verdict
+ is deny.
+
.. clicmd:: show <ip|ipv6> access-list [json]
Display all IPv4 or IPv6 access lists.
diff --git a/doc/user/pim.rst b/doc/user/pim.rst
index 0fe53247b0..ff45f21b56 100644
--- a/doc/user/pim.rst
+++ b/doc/user/pim.rst
@@ -6,9 +6,9 @@ PIM
PIM -- Protocol Independent Multicast
-*pimd* supports pim-sm as well as igmp v2 and v3. pim is
-vrf aware and can work within the context of vrf's in order to
-do S,G mrouting. Additionally PIM can be used in the EVPN underlay
+*pimd* supports PIM-SM as well as IGMP v2 and v3. PIM is
+VRF aware and can work within the context of VRFs in order to
+do S,G mrouting. Additionally, PIM can be used in the EVPN underlay
network for optimizing forwarding of overlay BUM traffic.
.. note::
@@ -217,32 +217,47 @@ PIM Routers
never do SM over. This command is vrf aware, to configure for a vrf, specify
the vrf in the router pim block.
-Global Multicast
-----------------
+.. clicmd:: rpf-lookup-mode MODE
-These commands are valid at the top-level of the configuration (or also per
-vrf where indicated), instead of under the 'router pim' submode.
+ MODE sets the method used to perform RPF lookups. Supported modes:
-.. clicmd:: ip multicast rpf-lookup-mode WORD
+ urib-only
+ Performs the lookup on the Unicast RIB. The Multicast RIB is never used.
- Modify how PIM does RPF lookups in the zebra routing table. You can use
- these choices:
+ mrib-only
+ Performs the lookup on the Multicast RIB. The Unicast RIB is never used.
- longer-prefix
- Lookup the RPF in both tables using the longer prefix as a match
+ mrib-then-urib
+ Tries to perform the lookup on the Multicast RIB. If any route is found,
+ that route is used. Otherwise, the Unicast RIB is tried.
lower-distance
- Lookup the RPF in both tables using the lower distance as a match
+ Performs a lookup on the Multicast RIB and Unicast RIB each. The result
+ with the lower administrative distance is used; if they're equal, the
+ Multicast RIB takes precedence.
- mrib-only
- Lookup in the Multicast RIB only
+ longer-prefix
+ Performs a lookup on the Multicast RIB and Unicast RIB each. The result
+ with the longer prefix length is used; if they're equal, the
+ Multicast RIB takes precedence.
- mrib-then-urib
- Lookup in the Multicast RIB then the Unicast Rib, returning first found.
- This is the default value for lookup if this command is not entered
+ The ``mrib-then-urib`` setting is the default behavior if nothing is
+ configured. If this is the desired behavior, it should be explicitly
+ configured to make the configuration immune against possible changes in
+ what the default behavior is.
- urib-only
- Lookup in the Unicast Rib only.
+.. warning::
+
+ Unreachable routes do not receive special treatment and do not cause
+ fallback to a second lookup.
+
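For illustration, a minimal configuration sketch selecting one of the modes above
under the PIM router node (mode choice arbitrary):

	router pim
	 rpf-lookup-mode mrib-only
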
+.. _pim-global-configuration:
+
+Global Multicast
+================
+
+These commands are valid at the top-level of the configuration (or also per
+vrf where indicated), instead of under the 'router pim' submode.
.. clicmd:: ip igmp generate-query-once [version (2-3)]
@@ -257,6 +272,70 @@ vrf where indicated), instead of under the 'router pim' submode.
'no' form of the command disables the warning generation. This command is
vrf aware. To configure per vrf, enter vrf submode.
+
+.. _pim-multicast-rib:
+
+Multicast RIB Commands
+----------------------
+
+The Multicast RIB provides a separate table of unicast destinations which
+is used for Multicast Reverse Path Forwarding decisions. It is used with
+a multicast source's IP address, hence it contains unicast addresses,
+not multicast group addresses.
+
+This table is fully separate from the default unicast table. However,
+RPF lookup can include the unicast table.
+
+.. clicmd:: ip mroute PREFIX NEXTHOP [DISTANCE]
+
+ Adds a static route entry to the Multicast RIB. This performs exactly as the
+ ``ip route`` command, except that it inserts the route in the Multicast RIB
+ instead of the Unicast RIB.
+ These routes are only used for RPF lookup and will not be used by zebra for
+ insertion into the kernel *or* for normal rib processing. As such it is
+ possible to create weird states with these commands. Use with caution. Most
+ of the time this will not be necessary.
+
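For example (prefix, nexthop and distance illustrative), a static Multicast RIB
entry would be configured as:

	ip mroute 192.0.2.0/24 198.51.100.1 10
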
+.. clicmd:: show [ip|ipv6] rpf
+
+ Prints the entire Multicast RIB. Note that this is independent of the
+ configured RPF lookup mode; the Multicast RIB may be printed yet not
+ used at all.
+
+.. clicmd:: show [ip|ipv6] rpf ADDR
+
+ Performs a Multicast RPF lookup using the Multicast RIB only.
+ ADDR specifies the multicast source address to look up. Note that this is
+ independent of the configured RPF lookup mode.
+
+ ::
+
+ > show ip rpf 192.0.2.1
+ Routing entry for 192.0.2.0/24 using Multicast RIB
+ Known via "kernel", distance 0, metric 0, best
+ * 198.51.100.1, via eth0
+
+
+ Indicates that a multicast source lookup for 192.0.2.1 against the
+ Multicast RIB would use an entry for 192.0.2.0/24 with a gateway of
+ 198.51.100.1.
+
+.. clicmd:: show ip pim [vrf NAME] nexthop-lookup ADDR [GROUP]
+
+ Performs a nexthop lookup according to the configured RPF lookup mode.
+ This performs the lookup for a given source address, and optionally with
+ a group address, which may affect the nexthop decision.
+
+ ::
+
+ > show ip pim nexthop-lookup 192.0.2.1
+ (192.0.2.1, *) --- Nexthop 198.10.10.1 Interface eth1
+
+
+ Indicates that a source lookup for 192.0.2.1 according to the configured RPF
+ lookup mode would use the gateway address 198.10.10.1 on interface eth1.
+
+
.. _pim-interface-configuration:
PIM Interface Configuration
@@ -348,10 +427,46 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
.. clicmd:: ip multicast boundary oil WORD
- Set a pim multicast boundary, based upon the WORD prefix-list. If a pim join
- or IGMP report is received on this interface and the Group is denied by the
+ Set a PIM multicast boundary, based upon the WORD prefix-list. If a PIM join
+ or IGMP report is received on this interface and the group is denied by the
prefix-list, PIM will ignore the join or report.
+ .. code-block:: frr
+
+ prefix-list multicast-acl seq 5 permit 232.1.1.1/32
+ prefix-list multicast-acl seq 10 deny 232.1.1.0/24
+ prefix-list multicast-acl seq 15 permit any
+ !
+ interface r1-eth0
+ ip pim
+ ip igmp
+ ip multicast boundary oil multicast-acl
+ exit
+
+.. clicmd:: ip multicast boundary ACCESS-LIST
+
+ Set a PIM multicast boundary, based upon the ACCESS-LIST. If a PIM join
+ or IGMP report is received on this interface and the (S,G) tuple is denied by the
+ access-list, PIM will ignore the join or report.
+
+ To filter on both source and group, the extended access-list syntax must be used.
+
+ If both a prefix-list and access-list are configured for multicast boundaries,
+ the prefix-list will be evaluated first (and must have a terminating "permit any"
+ in order to also evaluate against the access-list).
+
+ .. code-block:: frr
+
+ access-list multicast-acl seq 5 permit ip host 10.0.20.2 host 232.1.1.1
+ access-list multicast-acl seq 10 deny ip 10.0.20.0 0.0.0.255 232.1.1.0 0.0.0.255
+ access-list multicast-acl seq 15 permit ip any any
+ !
+ interface r1-eth0
+ ip pim
+ ip igmp
+ ip multicast boundary multicast-acl
+ exit
+
.. clicmd:: ip igmp last-member-query-count (1-255)
Set the IGMP last member query count. The default value is 2. 'no' form of
@@ -374,29 +489,6 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
:ref:`bfd-pim-peer-config`
-
-.. _pim-multicast-rib:
-
-PIM Multicast RIB
-=================
-
-In order to influence Multicast RPF lookup, it is possible to insert
-into zebra routes for the Multicast RIB. These routes are only
-used for RPF lookup and will not be used by zebra for insertion
-into the kernel *or* for normal rib processing. As such it is
-possible to create weird states with these commands. Use with
-caution. Most of the time this will not be necessary.
-
-.. clicmd:: ip mroute A.B.C.D/M A.B.C.D (1-255)
-
- Insert into the Multicast Rib Route A.B.C.D/M with specified nexthop. The
- distance can be specified as well if desired.
-
-.. clicmd:: ip mroute A.B.C.D/M INTERFACE (1-255)
-
- Insert into the Multicast Rib Route A.B.C.D/M using the specified INTERFACE.
- The distance can be specified as well if desired.
-
.. _msdp-configuration:
Multicast Source Discovery Protocol (MSDP) Configuration
@@ -467,6 +559,10 @@ Commands available for MSDP
The filtering will only take effect starting from the command
application.
+.. clicmd:: msdp peer A.B.C.D sa-limit <AMOUNT>
+
+ Configure the maximum number of SAs to learn from this peer.
+
.. clicmd:: msdp peer A.B.C.D password WORD
Use MD5 authentication to connect with the remote peer.
@@ -478,6 +574,14 @@ Commands available for MSDP
To apply it immediately call `clear ip msdp peer A.B.C.D`.
+.. clicmd:: msdp originator-id A.B.C.D
+
+ Use the specified originator ID in SA messages instead of the RP address.
+
+.. clicmd:: msdp shutdown
+
+ Shut down the MSDP sessions in this PIM instance.
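
Taken together, a small sketch of the new MSDP options (addresses and limit
illustrative):

	msdp originator-id 192.0.2.1
	msdp peer 192.0.2.2 sa-limit 1000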
+
.. _show-pim-information:
@@ -730,7 +834,7 @@ cause great confusion.
.. seealso::
- :ref:`multicast-rib-commands`
+ :ref:`pim-multicast-rib`
PIM Debug Commands
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index b862ba9f50..ac29b1c7d4 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -526,16 +526,6 @@ commands in relationship to VRF. Here is an extract of some of those commands:
The network administrator can however decide to provision this command in
configuration file to provide more clarity about the intended configuration.
-.. clicmd:: netns NAMESPACE
-
- This command is based on VRF configuration mode. This command is available
- when *Zebra* is run in :option:`-n` mode. This command reflects which *Linux
- network namespace* is to be mapped with *Zebra* VRF. It is to be noted that
- *Zebra* creates and detects added/suppressed VRFs from the Linux environment
- (in fact, those managed with iproute2). The network administrator can however
- decide to provision this command in configuration file to provide more clarity
- about the intended configuration.
-
.. clicmd:: show ip route vrf VRF
The show command permits dumping the routing table associated to the VRF. If
@@ -936,7 +926,7 @@ and this section also helps that case.
Create a new locator. If the name of an existing locator is specified,
move to specified locator's configuration node to change the settings it.
-.. clicmd:: prefix X:X::X:X/M [func-bits (0-64)] [block-len 40] [node-len 24]
+.. clicmd:: prefix X:X::X:X/M [block-len (16-64)] [node-len (16-64)] [func-bits (0-64)]
Set the ipv6 prefix block of the locator. SRv6 locator is defined by
RFC8986. The actual routing protocol specifies the locator and allocates a
@@ -1123,88 +1113,6 @@ and this section also helps that case.
!
...
-.. _multicast-rib-commands:
-
-Multicast RIB Commands
-======================
-
-The Multicast RIB provides a separate table of unicast destinations which
-is used for Multicast Reverse Path Forwarding decisions. It is used with
-a multicast source's IP address, hence contains not multicast group
-addresses but unicast addresses.
-
-This table is fully separate from the default unicast table. However,
-RPF lookup can include the unicast table.
-
-WARNING: RPF lookup results are non-responsive in this version of FRR,
-i.e. multicast routing does not actively react to changes in underlying
-unicast topology!
-
-.. clicmd:: ip multicast rpf-lookup-mode MODE
-
-
- MODE sets the method used to perform RPF lookups. Supported modes:
-
- urib-only
- Performs the lookup on the Unicast RIB. The Multicast RIB is never used.
-
- mrib-only
- Performs the lookup on the Multicast RIB. The Unicast RIB is never used.
-
- mrib-then-urib
- Tries to perform the lookup on the Multicast RIB. If any route is found,
- that route is used. Otherwise, the Unicast RIB is tried.
-
- lower-distance
- Performs a lookup on the Multicast RIB and Unicast RIB each. The result
- with the lower administrative distance is used; if they're equal, the
- Multicast RIB takes precedence.
-
- longer-prefix
- Performs a lookup on the Multicast RIB and Unicast RIB each. The result
- with the longer prefix length is used; if they're equal, the
- Multicast RIB takes precedence.
-
- The ``mrib-then-urib`` setting is the default behavior if nothing is
- configured. If this is the desired behavior, it should be explicitly
- configured to make the configuration immune against possible changes in
- what the default behavior is.
-
-.. warning::
-
- Unreachable routes do not receive special treatment and do not cause
- fallback to a second lookup.
-
-.. clicmd:: show [ip|ipv6] rpf ADDR
-
- Performs a Multicast RPF lookup, as configured with ``ip multicast
- rpf-lookup-mode MODE``. ADDR specifies the multicast source address to look
- up.
-
- ::
-
- > show ip rpf 192.0.2.1
- Routing entry for 192.0.2.0/24 using Unicast RIB
- Known via "kernel", distance 0, metric 0, best
- * 198.51.100.1, via eth0
-
-
- Indicates that a multicast source lookup for 192.0.2.1 would use an
- Unicast RIB entry for 192.0.2.0/24 with a gateway of 198.51.100.1.
-
-.. clicmd:: show [ip|ipv6] rpf
-
- Prints the entire Multicast RIB. Note that this is independent of the
- configured RPF lookup mode, the Multicast RIB may be printed yet not
- used at all.
-
-.. clicmd:: ip mroute PREFIX NEXTHOP [DISTANCE]
-
-
- Adds a static route entry to the Multicast RIB. This performs exactly as the
- ``ip route`` command, except that it inserts the route in the Multicast RIB
- instead of the Unicast RIB.
-
.. _zebra-route-filtering:
zebra Route Filtering
diff --git a/docker/ubuntu-ci/Dockerfile b/docker/ubuntu-ci/Dockerfile
index 5c4649dc32..aaad3bc172 100644
--- a/docker/ubuntu-ci/Dockerfile
+++ b/docker/ubuntu-ci/Dockerfile
@@ -84,10 +84,11 @@ RUN apt update && apt upgrade -y && \
python3 -m pip install xmltodict && \
python3 -m pip install git+https://github.com/Exa-Networks/exabgp@0659057837cd6c6351579e9f0fa47e9fb7de7311
+ARG UID=1000
RUN groupadd -r -g 92 frr && \
groupadd -r -g 85 frrvty && \
adduser --system --ingroup frr --home /home/frr \
- --gecos "FRR suite" --shell /bin/bash frr && \
+ --gecos "FRR suite" -u $UID --shell /bin/bash frr && \
usermod -a -G frrvty frr && \
useradd -d /var/run/exabgp/ -s /bin/false exabgp && \
echo 'frr ALL = NOPASSWD: ALL' | tee /etc/sudoers.d/frr && \
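
Note: the new UID build argument lets the in-container "frr" user match the
invoking host user, which avoids permission mismatches on bind-mounted source
trees. A hedged usage sketch (the image tag is illustrative):

    docker build --build-arg UID=$(id -u) -t frr-ubuntu-ci \
        -f docker/ubuntu-ci/Dockerfile .
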
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 057edb33dc..9efc42382f 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -1,16 +1,319 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_IF_PACKET_H
#define __LINUX_IF_PACKET_H
+#include <asm/byteorder.h>
#include <linux/types.h>
+struct sockaddr_pkt {
+ unsigned short spkt_family;
+ unsigned char spkt_device[14];
+ __be16 spkt_protocol;
+};
+
struct sockaddr_ll {
- unsigned short sll_family;
- __be16 sll_protocol;
- int sll_ifindex;
- unsigned short sll_hatype;
- unsigned char sll_pkttype;
- unsigned char sll_halen;
- unsigned char sll_addr[8];
+ unsigned short sll_family;
+ __be16 sll_protocol;
+ int sll_ifindex;
+ unsigned short sll_hatype;
+ unsigned char sll_pkttype;
+ unsigned char sll_halen;
+ unsigned char sll_addr[8];
+};
+
+/* Packet types */
+
+#define PACKET_HOST 0 /* To us */
+#define PACKET_BROADCAST 1 /* To all */
+#define PACKET_MULTICAST 2 /* To group */
+#define PACKET_OTHERHOST 3 /* To someone else */
+#define PACKET_OUTGOING 4 /* Outgoing of any type */
+#define PACKET_LOOPBACK 5 /* MC/BRD frame looped back */
+#define PACKET_USER 6 /* To user space */
+#define PACKET_KERNEL 7 /* To kernel space */
+/* Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space */
+#define PACKET_FASTROUTE 6 /* Fastrouted frame */
+
+/* Packet socket options */
+
+#define PACKET_ADD_MEMBERSHIP 1
+#define PACKET_DROP_MEMBERSHIP 2
+#define PACKET_RECV_OUTPUT 3
+/* Value 4 is still used by obsolete turbo-packet. */
+#define PACKET_RX_RING 5
+#define PACKET_STATISTICS 6
+#define PACKET_COPY_THRESH 7
+#define PACKET_AUXDATA 8
+#define PACKET_ORIGDEV 9
+#define PACKET_VERSION 10
+#define PACKET_HDRLEN 11
+#define PACKET_RESERVE 12
+#define PACKET_TX_RING 13
+#define PACKET_LOSS 14
+#define PACKET_VNET_HDR 15
+#define PACKET_TX_TIMESTAMP 16
+#define PACKET_TIMESTAMP 17
+#define PACKET_FANOUT 18
+#define PACKET_TX_HAS_OFF 19
+#define PACKET_QDISC_BYPASS 20
+#define PACKET_ROLLOVER_STATS 21
+#define PACKET_FANOUT_DATA 22
+#define PACKET_IGNORE_OUTGOING 23
+#define PACKET_VNET_HDR_SZ 24
+
+#define PACKET_FANOUT_HASH 0
+#define PACKET_FANOUT_LB 1
+#define PACKET_FANOUT_CPU 2
+#define PACKET_FANOUT_ROLLOVER 3
+#define PACKET_FANOUT_RND 4
+#define PACKET_FANOUT_QM 5
+#define PACKET_FANOUT_CBPF 6
+#define PACKET_FANOUT_EBPF 7
+#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
+#define PACKET_FANOUT_FLAG_UNIQUEID 0x2000
+#define PACKET_FANOUT_FLAG_IGNORE_OUTGOING 0x4000
+#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
+
+struct tpacket_stats {
+ unsigned int tp_packets;
+ unsigned int tp_drops;
+};
+
+struct tpacket_stats_v3 {
+ unsigned int tp_packets;
+ unsigned int tp_drops;
+ unsigned int tp_freeze_q_cnt;
+};
+
+struct tpacket_rollover_stats {
+ __aligned_u64 tp_all;
+ __aligned_u64 tp_huge;
+ __aligned_u64 tp_failed;
+};
+
+union tpacket_stats_u {
+ struct tpacket_stats stats1;
+ struct tpacket_stats_v3 stats3;
+};
+
+struct tpacket_auxdata {
+ __u32 tp_status;
+ __u32 tp_len;
+ __u32 tp_snaplen;
+ __u16 tp_mac;
+ __u16 tp_net;
+ __u16 tp_vlan_tci;
+ __u16 tp_vlan_tpid;
+};
+
+/* Rx ring - header status */
+#define TP_STATUS_KERNEL 0
+#define TP_STATUS_USER (1 << 0)
+#define TP_STATUS_COPY (1 << 1)
+#define TP_STATUS_LOSING (1 << 2)
+#define TP_STATUS_CSUMNOTREADY (1 << 3)
+#define TP_STATUS_VLAN_VALID (1 << 4) /* auxdata has valid tp_vlan_tci */
+#define TP_STATUS_BLK_TMO (1 << 5)
+#define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */
+#define TP_STATUS_CSUM_VALID (1 << 7)
+#define TP_STATUS_GSO_TCP (1 << 8)
+
+/* Tx ring - header status */
+#define TP_STATUS_AVAILABLE 0
+#define TP_STATUS_SEND_REQUEST (1 << 0)
+#define TP_STATUS_SENDING (1 << 1)
+#define TP_STATUS_WRONG_FORMAT (1 << 2)
+
+/* Rx and Tx ring - header status */
+#define TP_STATUS_TS_SOFTWARE (1 << 29)
+#define TP_STATUS_TS_SYS_HARDWARE (1 << 30) /* deprecated, never set */
+#define TP_STATUS_TS_RAW_HARDWARE (1U << 31)
+
+/* Rx ring - feature request bits */
+#define TP_FT_REQ_FILL_RXHASH 0x1
+
+struct tpacket_hdr {
+ unsigned long tp_status;
+ unsigned int tp_len;
+ unsigned int tp_snaplen;
+ unsigned short tp_mac;
+ unsigned short tp_net;
+ unsigned int tp_sec;
+ unsigned int tp_usec;
+};
+
+#define TPACKET_ALIGNMENT 16
+#define TPACKET_ALIGN(x) (((x)+TPACKET_ALIGNMENT-1)&~(TPACKET_ALIGNMENT-1))
+#define TPACKET_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket_hdr)) + sizeof(struct sockaddr_ll))
+
+struct tpacket2_hdr {
+ __u32 tp_status;
+ __u32 tp_len;
+ __u32 tp_snaplen;
+ __u16 tp_mac;
+ __u16 tp_net;
+ __u32 tp_sec;
+ __u32 tp_nsec;
+ __u16 tp_vlan_tci;
+ __u16 tp_vlan_tpid;
+ __u8 tp_padding[4];
+};
+
+struct tpacket_hdr_variant1 {
+ __u32 tp_rxhash;
+ __u32 tp_vlan_tci;
+ __u16 tp_vlan_tpid;
+ __u16 tp_padding;
+};
+
+struct tpacket3_hdr {
+ __u32 tp_next_offset;
+ __u32 tp_sec;
+ __u32 tp_nsec;
+ __u32 tp_snaplen;
+ __u32 tp_len;
+ __u32 tp_status;
+ __u16 tp_mac;
+ __u16 tp_net;
+ /* pkt_hdr variants */
+ union {
+ struct tpacket_hdr_variant1 hv1;
+ };
+ __u8 tp_padding[8];
+};
+
+struct tpacket_bd_ts {
+ unsigned int ts_sec;
+ union {
+ unsigned int ts_usec;
+ unsigned int ts_nsec;
+ };
+};
+
+struct tpacket_hdr_v1 {
+ __u32 block_status;
+ __u32 num_pkts;
+ __u32 offset_to_first_pkt;
+
+ /* Number of valid bytes (including padding)
+ * blk_len <= tp_block_size
+ */
+ __u32 blk_len;
+
+ /*
+ * Quite a few uses of sequence number:
+ * 1. Make sure cache flush etc worked.
+ * Well, one can argue - why not use the increasing ts below?
+ * But look at 2. below first.
+ * 2. When you pass around blocks to other user space decoders,
+ * you can see which blk[s] is[are] outstanding etc.
+ * 3. Validate kernel code.
+ */
+ __aligned_u64 seq_num;
+
+ /*
+ * ts_last_pkt:
+ *
+ * Case 1. Block has 'N'(N >=1) packets and TMO'd(timed out)
+ * ts_last_pkt == 'time-stamp of last packet' and NOT the
+ * time when the timer fired and the block was closed.
+ * By providing the ts of the last packet we can absolutely
+ * guarantee that time-stamp wise, the first packet in the
+ * next block will never precede the last packet of the
+ * previous block.
+ * Case 2. Block has zero packets and TMO'd
+ * ts_last_pkt = time when the timer fired and the block
+ * was closed.
+ * Case 3. Block has 'N' packets and NO TMO.
+ * ts_last_pkt = time-stamp of the last pkt in the block.
+ *
+ * ts_first_pkt:
+ * Is always the time-stamp when the block was opened.
+ * Case a) ZERO packets
+ * No packets to deal with but at least you know the
+ * time-interval of this block.
+ * Case b) Non-zero packets
+ * Use the ts of the first packet in the block.
+ *
+ */
+ struct tpacket_bd_ts ts_first_pkt, ts_last_pkt;
+};
+
+union tpacket_bd_header_u {
+ struct tpacket_hdr_v1 bh1;
+};
+
+struct tpacket_block_desc {
+ __u32 version;
+ __u32 offset_to_priv;
+ union tpacket_bd_header_u hdr;
+};
+
+#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
+#define TPACKET3_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket3_hdr)) + sizeof(struct sockaddr_ll))
+
+enum tpacket_versions {
+ TPACKET_V1,
+ TPACKET_V2,
+ TPACKET_V3
};
+/*
+ Frame structure:
+
+ - Start. Frame must be aligned to TPACKET_ALIGNMENT=16
+ - struct tpacket_hdr
+ - pad to TPACKET_ALIGNMENT=16
+ - struct sockaddr_ll
+ - Gap, chosen so that packet data (Start+tp_net) aligns to TPACKET_ALIGNMENT=16
+ - Start+tp_mac: [ Optional MAC header ]
+ - Start+tp_net: Packet data, aligned to TPACKET_ALIGNMENT=16.
+ - Pad to align to TPACKET_ALIGNMENT=16
+ */
+
+struct tpacket_req {
+ unsigned int tp_block_size; /* Minimal size of contiguous block */
+ unsigned int tp_block_nr; /* Number of blocks */
+ unsigned int tp_frame_size; /* Size of frame */
+ unsigned int tp_frame_nr; /* Total number of frames */
+};
+
+struct tpacket_req3 {
+ unsigned int tp_block_size; /* Minimal size of contiguous block */
+ unsigned int tp_block_nr; /* Number of blocks */
+ unsigned int tp_frame_size; /* Size of frame */
+ unsigned int tp_frame_nr; /* Total number of frames */
+ unsigned int tp_retire_blk_tov; /* timeout in msecs */
+ unsigned int tp_sizeof_priv; /* offset to private data area */
+ unsigned int tp_feature_req_word;
+};
+
+union tpacket_req_u {
+ struct tpacket_req req;
+ struct tpacket_req3 req3;
+};
+
+struct packet_mreq {
+ int mr_ifindex;
+ unsigned short mr_type;
+ unsigned short mr_alen;
+ unsigned char mr_address[8];
+};
+
+struct fanout_args {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 id;
+ __u16 type_flags;
+#else
+ __u16 type_flags;
+ __u16 id;
+#endif
+ __u32 max_num_members;
+};
+
+#define PACKET_MR_MULTICAST 0
+#define PACKET_MR_PROMISC 1
+#define PACKET_MR_ALLMULTI 2
+#define PACKET_MR_UNICAST 3
+
#endif
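
Note: besides tpacket_auxdata, which isis_pfpacket.c starts using below, the
imported header carries the full TPACKET_V3 ring interface (tpacket_req3,
tpacket_block_desc). A minimal sketch of how those definitions compose; the
socket setup, sizes, and error handling are illustrative assumptions, not FRR
code:

    #include <linux/if_packet.h>
    #include <linux/if_ether.h>
    #include <arpa/inet.h>
    #include <sys/mman.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Sketch: map a small TPACKET_V3 receive ring on a packet socket. */
    static int setup_rx_ring(void)
    {
            int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
            int ver = TPACKET_V3;
            struct tpacket_req3 req = {
                    .tp_block_size = 1 << 16,  /* 64 KiB per block */
                    .tp_block_nr = 4,
                    .tp_frame_size = 1 << 11,  /* 2 KiB per frame */
                    .tp_frame_nr = 4 * ((1 << 16) / (1 << 11)),
                    .tp_retire_blk_tov = 60,   /* retire a block after 60 ms */
            };
            void *ring;

            if (fd < 0)
                    return -1;
            if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) < 0 ||
                setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0) {
                    close(fd);
                    return -1;
            }
            /* The kernel fills tpacket_block_desc blocks inside this mapping. */
            ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (ring == MAP_FAILED) {
                    close(fd);
                    return -1;
            }
            return fd;
    }
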
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index 9a967bc1e3..9ea2cfd0a1 100644
--- a/isisd/isis_circuit.c
+++ b/isisd/isis_circuit.c
@@ -1008,45 +1008,40 @@ void isis_circuit_print_json(struct isis_circuit *circuit,
circuit_t2string(level));
if (circuit->area->newmetric)
json_object_int_add(level_json, "metric",
- circuit->te_metric[0]);
+ circuit->te_metric[level - 1]);
else
json_object_int_add(level_json, "metric",
- circuit->metric[0]);
+ circuit->metric[level - 1]);
if (!circuit->is_passive) {
- json_object_int_add(level_json,
- "active-neighbors",
- circuit->upadjcount[0]);
- json_object_int_add(level_json,
- "hello-interval",
- circuit->hello_interval[0]);
+ json_object_int_add(level_json, "active-neighbors",
+ circuit->upadjcount[level - 1]);
+ json_object_int_add(level_json, "hello-interval",
+ circuit->hello_interval[level - 1]);
hold_json = json_object_new_object();
json_object_object_add(level_json, "holddown",
hold_json);
- json_object_int_add(
- hold_json, "count",
- circuit->hello_multiplier[0]);
+ json_object_int_add(hold_json, "count",
+ circuit->hello_multiplier[level - 1]);
json_object_string_add(
hold_json, "pad",
isis_hello_padding2string(
circuit->pad_hellos));
json_object_int_add(level_json, "cnsp-interval",
- circuit->csnp_interval[0]);
+ circuit->csnp_interval[level - 1]);
json_object_int_add(level_json, "psnp-interval",
- circuit->psnp_interval[0]);
+ circuit->psnp_interval[level - 1]);
if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
lan_prio_json =
json_object_new_object();
json_object_object_add(level_json,
"lan",
lan_prio_json);
- json_object_int_add(
- lan_prio_json, "priority",
- circuit->priority[0]);
- json_object_string_add(
- lan_prio_json, "is-dis",
- (circuit->u.bc.is_dr[0]
- ? "yes"
- : "no"));
+ json_object_int_add(lan_prio_json, "priority",
+ circuit->priority[level - 1]);
+ json_object_string_add(lan_prio_json, "is-dis",
+ (circuit->u.bc.is_dr[level - 1]
+ ? "yes"
+ : "no"));
}
}
json_object_array_add(levels_json, level_json);
diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c
index 93f7bbf753..652efee89a 100644
--- a/isisd/isis_cli.c
+++ b/isisd/isis_cli.c
@@ -2065,12 +2065,6 @@ void cli_show_isis_srv6_locator(struct vty *vty, const struct lyd_node *dnode,
vty_out(vty, " locator %s\n", yang_dnode_get_string(dnode, NULL));
}
-void cli_show_isis_srv6_locator_end(struct vty *vty,
- const struct lyd_node *dnode)
-{
- vty_out(vty, " exit\n");
-}
-
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/enabled
*/
@@ -2118,6 +2112,11 @@ void cli_show_isis_srv6_enabled(struct vty *vty, const struct lyd_node *dnode,
vty_out(vty, " segment-routing srv6\n");
}
+void cli_show_isis_srv6_end(struct vty *vty, const struct lyd_node *dnode)
+{
+ vty_out(vty, " exit\n");
+}
+
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/msd/node-msd
*/
@@ -2248,6 +2247,11 @@ void cli_show_isis_srv6_node_msd(struct vty *vty, const struct lyd_node *dnode,
yang_dnode_get_uint8(dnode, "max-end-d"));
}
+void cli_show_isis_srv6_node_msd_end(struct vty *vty, const struct lyd_node *dnode)
+{
+ vty_out(vty, " exit\n");
+}
+
/*
* XPath: /frr-isisd:isis/instance/segment-routing-srv6/interface
*/
diff --git a/isisd/isis_nb.c b/isisd/isis_nb.c
index 8608d2b9bd..3024bb57ea 100644
--- a/isisd/isis_nb.c
+++ b/isisd/isis_nb.c
@@ -861,6 +861,12 @@ const struct frr_yang_module_info frr_isisd_info = {
},
},
{
+ .xpath = "/frr-isisd:isis/instance/segment-routing-srv6",
+ .cbs = {
+ .cli_show_end = cli_show_isis_srv6_end,
+ },
+ },
+ {
.xpath = "/frr-isisd:isis/instance/segment-routing-srv6/enabled",
.cbs = {
.modify = isis_instance_segment_routing_srv6_enabled_modify,
@@ -873,7 +879,6 @@ const struct frr_yang_module_info frr_isisd_info = {
.modify = isis_instance_segment_routing_srv6_locator_modify,
.destroy = isis_instance_segment_routing_srv6_locator_destroy,
.cli_show = cli_show_isis_srv6_locator,
- .cli_show_end = cli_show_isis_srv6_locator_end,
},
},
{
@@ -904,6 +909,7 @@ const struct frr_yang_module_info frr_isisd_info = {
.xpath = "/frr-isisd:isis/instance/segment-routing-srv6/msd/node-msd",
.cbs = {
.cli_show = cli_show_isis_srv6_node_msd,
+ .cli_show_end = cli_show_isis_srv6_node_msd_end,
},
},
{
diff --git a/isisd/isis_nb.h b/isisd/isis_nb.h
index 1bf95e3db3..10b3bd4009 100644
--- a/isisd/isis_nb.h
+++ b/isisd/isis_nb.h
@@ -322,6 +322,7 @@ int isis_instance_flex_algo_affinity_mapping_value_modify(
struct nb_cb_modify_args *args);
int isis_instance_flex_algo_affinity_mapping_value_destroy(
struct nb_cb_destroy_args *args);
+void cli_show_isis_srv6_end(struct vty *vty, const struct lyd_node *dnode);
int isis_instance_segment_routing_srv6_enabled_modify(
struct nb_cb_modify_args *args);
void cli_show_isis_srv6_enabled(struct vty *vty, const struct lyd_node *dnode,
@@ -332,8 +333,6 @@ int isis_instance_segment_routing_srv6_locator_destroy(
struct nb_cb_destroy_args *args);
void cli_show_isis_srv6_locator(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults);
-void cli_show_isis_srv6_locator_end(struct vty *vty,
- const struct lyd_node *dnode);
int isis_instance_segment_routing_srv6_msd_node_msd_max_segs_left_modify(
struct nb_cb_modify_args *args);
int isis_instance_segment_routing_srv6_msd_node_msd_max_end_pop_modify(
@@ -344,6 +343,7 @@ int isis_instance_segment_routing_srv6_msd_node_msd_max_end_d_modify(
struct nb_cb_modify_args *args);
void cli_show_isis_srv6_node_msd(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults);
+void cli_show_isis_srv6_node_msd_end(struct vty *vty, const struct lyd_node *dnode);
int isis_instance_segment_routing_srv6_interface_modify(
struct nb_cb_modify_args *args);
void cli_show_isis_srv6_interface(struct vty *vty, const struct lyd_node *dnode,
diff --git a/isisd/isis_pdu.c b/isisd/isis_pdu.c
index 23238d314a..c2ada459eb 100644
--- a/isisd/isis_pdu.c
+++ b/isisd/isis_pdu.c
@@ -231,7 +231,8 @@ static int process_p2p_hello(struct iih_info *iih)
return ISIS_OK;
}
}
- if (!adj || adj->level != iih->calculated_type) {
+ if (!adj || adj->level != iih->calculated_type ||
+ !(iih->circuit->is_type & iih->circ_type)) {
if (!adj) {
adj = isis_new_adj(iih->sys_id, NULL,
iih->calculated_type, iih->circuit);
diff --git a/isisd/isis_pfpacket.c b/isisd/isis_pfpacket.c
index af69fac1cd..634bdca7cb 100644
--- a/isisd/isis_pfpacket.c
+++ b/isisd/isis_pfpacket.c
@@ -10,7 +10,7 @@
#include <zebra.h>
#if ISIS_METHOD == ISIS_METHOD_PFPACKET
#include <net/ethernet.h> /* the L2 protocols */
-#include <netpacket/packet.h>
+#include "linux/if_packet.h"
#include <linux/filter.h>
@@ -134,6 +134,13 @@ static int open_packet_socket(struct isis_circuit *circuit)
return ISIS_WARNING;
}
+ int val = 1;
+ if (setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &val, sizeof(val)) == -1 &&
+ errno != ENOPROTOOPT) {
+ zlog_warn("%s: PACKET_AUXDATA failed: %s", __func__,
+ safe_strerror(errno));
+ }
+
if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf))) {
zlog_warn("%s: SO_ATTACH_FILTER failed: %s", __func__,
safe_strerror(errno));
@@ -284,13 +291,54 @@ int isis_recv_pdu_bcast(struct isis_circuit *circuit, uint8_t *ssnpa)
? circuit->interface->mtu
: circuit->interface->mtu6;
uint8_t temp_buff[max_size];
- bytesread =
- recvfrom(circuit->fd, temp_buff, max_size, MSG_DONTWAIT,
- (struct sockaddr *)&s_addr, (socklen_t *)&addr_len);
+
+ union {
+ struct cmsghdr cmsg;
+ char buf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
+ } cmsg_buf;
+ struct iovec iov;
+ struct msghdr msg;
+ memset(&cmsg_buf, 0x00, sizeof(cmsg_buf));
+ memset(&iov, 0x00, sizeof(iov));
+ memset(&msg, 0x00, sizeof(msg));
+
+ iov.iov_base = temp_buff;
+ iov.iov_len = max_size;
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ msg.msg_name = &s_addr;
+ msg.msg_namelen = addr_len;
+
+ msg.msg_control = &cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ bytesread = recvmsg(circuit->fd, &msg, MSG_DONTWAIT);
if (bytesread < 0) {
zlog_warn("%s: recvfrom() failed", __func__);
return ISIS_WARNING;
}
+
+ bool vlan_packet = false;
+
+ for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); cmsg;
+ cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ if (cmsg->cmsg_len >= CMSG_LEN(sizeof(struct tpacket_auxdata)) &&
+ cmsg->cmsg_level == SOL_PACKET &&
+ cmsg->cmsg_type == PACKET_AUXDATA) {
+ struct tpacket_auxdata *aux =
+ (struct tpacket_auxdata *)CMSG_DATA(cmsg);
+
+ if (aux && (aux->tp_status & TP_STATUS_VLAN_VALID))
+ vlan_packet = true;
+ break;
+ }
+ }
+
+ if (vlan_packet)
+ return ISIS_WARNING;
+
/* then we lose the LLC */
stream_write(circuit->rcv_stream, temp_buff + LLC_LEN,
bytesread - LLC_LEN);
diff --git a/lib/bfd.c b/lib/bfd.c
index 4535fc1233..bc4b1c5b51 100644
--- a/lib/bfd.c
+++ b/lib/bfd.c
@@ -32,6 +32,8 @@ enum bfd_session_event {
BSE_UNINSTALL,
/** Install the BFD session configuration. */
BSE_INSTALL,
+ /** We should install, but couldn't because of an error talking to zebra. */
+ BSE_VALID_FOR_INSTALL,
};
/**
@@ -527,6 +529,10 @@ static void _bfd_sess_send(struct event *t)
vrf_id_to_name(bsp->args.vrf_id), bsp->args.vrf_id,
bsp->lastev == BSE_INSTALL ? "installed"
: "uninstalled");
+
+ bsp->installed = false;
+ if (bsp->lastev == BSE_INSTALL)
+ bsp->lastev = BSE_VALID_FOR_INSTALL;
}
}
@@ -883,7 +889,7 @@ int zclient_bfd_session_replay(ZAPI_CALLBACK_ARGS)
/* Replay all activated peers. */
TAILQ_FOREACH (bsp, &bsglobal.bsplist, entry) {
/* Skip not installed sessions. */
- if (!bsp->installed)
+ if (!bsp->installed && bsp->lastev != BSE_VALID_FOR_INSTALL)
continue;
/* We are reconnecting, so we must send installation. */
diff --git a/lib/darr.c b/lib/darr.c
index 7a01274104..0cffa64425 100644
--- a/lib/darr.c
+++ b/lib/darr.c
@@ -8,6 +8,7 @@
#include <zebra.h>
#include "darr.h"
#include "memory.h"
+#include "printfrr.h"
DEFINE_MTYPE(LIB, DARR, "Dynamic Array");
DEFINE_MTYPE(LIB, DARR_STR, "Dynamic Array String");
@@ -70,7 +71,7 @@ char *__darr_in_vsprintf(char **sp, bool concat, const char *fmt, va_list ap)
*darr_append(*sp) = 0;
again:
va_copy(ap_copy, ap);
- len = vsnprintf(darr_last(*sp), darr_avail(*sp) + 1, fmt, ap_copy);
+ len = vsnprintfrr(darr_last(*sp), darr_avail(*sp) + 1, fmt, ap_copy);
va_end(ap_copy);
if (len < 0)
darr_in_strcat(*sp, fmt);
diff --git a/lib/darr.h b/lib/darr.h
index 2b9a0a0c02..121e3dd14e 100644
--- a/lib/darr.h
+++ b/lib/darr.h
@@ -272,10 +272,10 @@ void *__darr_resize(void *a, uint count, size_t esize, struct memtype *mt);
*/
#define darr_ensure_avail_mt(A, S, MT) \
({ \
- ssize_t need = (ssize_t)(S) - \
- (ssize_t)(darr_cap(A) - darr_len(A)); \
- if (need > 0) \
- _darr_resize_mt((A), darr_cap(A) + need, MT); \
+ ssize_t __dea_need = (ssize_t)(S) - \
+ (ssize_t)(darr_cap(A) - darr_len(A)); \
+ if (__dea_need > 0) \
+ _darr_resize_mt((A), darr_cap(A) + __dea_need, MT); \
(A); \
})
#define darr_ensure_avail(A, S) darr_ensure_avail_mt(A, S, MTYPE_DARR)
@@ -301,9 +301,9 @@ void *__darr_resize(void *a, uint count, size_t esize, struct memtype *mt);
#define darr_ensure_cap_mt(A, C, MT) \
({ \
/* Cast to avoid warning when C == 0 */ \
- uint _c = (C) > 0 ? (C) : 1; \
- if ((size_t)darr_cap(A) < _c) \
- _darr_resize_mt((A), _c, MT); \
+ uint __dec_c = (C) > 0 ? (C) : 1; \
+ if ((size_t)darr_cap(A) < __dec_c) \
+ _darr_resize_mt((A), __dec_c, MT); \
(A); \
})
#define darr_ensure_cap(A, C) darr_ensure_cap_mt(A, C, MTYPE_DARR)
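
Note: the "__dea_"/"__dec_" prefixes are not cosmetic. A statement-expression
macro that declares a bare identifier silently captures a caller variable of
the same name passed as an argument. A self-contained illustration of the
hazard; the macros and names are invented for the example:

    #include <stdio.h>

    /* BAD: the macro-local 'len' can capture a caller variable passed as N */
    #define ADD_BAD(BASE, N)  ({ int len = (N); (BASE) + len; })
    /* GOOD: an improbable prefixed name cannot collide */
    #define ADD_GOOD(BASE, N) ({ int __ag_len = (N); (BASE) + __ag_len; })

    int main(void)
    {
            int len = 5;

            /* Expands to 'int len = (len);': the initializer reads the new,
             * uninitialized inner 'len', not the caller's, so the result is
             * garbage. */
            printf("bad:  %d\n", ADD_BAD(10, len));
            /* Expands safely and prints 15. */
            printf("good: %d\n", ADD_GOOD(10, len));
            return 0;
    }
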
@@ -428,12 +428,12 @@ void *__darr_resize(void *a, uint count, size_t esize, struct memtype *mt);
#define _darr_append_n(A, N, Z, MT) \
({ \
- uint __len = darr_len(A); \
- darr_ensure_cap_mt(A, __len + (N), MT); \
- _darr_len(A) = __len + (N); \
+ uint __da_len = darr_len(A); \
+ darr_ensure_cap_mt(A, __da_len + (N), MT); \
+ _darr_len(A) = __da_len + (N); \
if (Z) \
- memset(&(A)[__len], 0, (N)*_darr_esize(A)); \
- &(A)[__len]; \
+ memset(&(A)[__da_len], 0, (N)*_darr_esize(A)); \
+ &(A)[__da_len]; \
})
/**
* Extending the array's length by N.
diff --git a/lib/log.c b/lib/log.c
index 2b049cebe4..bc1ed5c5cc 100644
--- a/lib/log.c
+++ b/lib/log.c
@@ -358,7 +358,7 @@ static const struct zebra_desc_table command_types[] = {
DESC_ENTRY(ZEBRA_BFD_CLIENT_DEREGISTER),
DESC_ENTRY(ZEBRA_INTERFACE_ENABLE_RADV),
DESC_ENTRY(ZEBRA_INTERFACE_DISABLE_RADV),
- DESC_ENTRY(ZEBRA_NEXTHOP_LOOKUP_MRIB),
+ DESC_ENTRY(ZEBRA_NEXTHOP_LOOKUP),
DESC_ENTRY(ZEBRA_INTERFACE_LINK_PARAMS),
DESC_ENTRY(ZEBRA_MPLS_LABELS_ADD),
DESC_ENTRY(ZEBRA_MPLS_LABELS_DELETE),
diff --git a/lib/mgmt_msg_native.h b/lib/mgmt_msg_native.h
index ef03b66edc..587a002801 100644
--- a/lib/mgmt_msg_native.h
+++ b/lib/mgmt_msg_native.h
@@ -554,8 +554,8 @@ extern int vmgmt_msg_native_send_error(struct msg_conn *conn,
*/
#define mgmt_msg_native_alloc_msg(msg_type, var_len, mem_type) \
({ \
- uint8_t *buf = NULL; \
- (msg_type *)darr_append_nz_mt(buf, \
+ uint8_t *__nam_buf = NULL; \
+ (msg_type *)darr_append_nz_mt(__nam_buf, \
sizeof(msg_type) + (var_len), \
mem_type); \
})
@@ -590,10 +590,10 @@ extern int vmgmt_msg_native_send_error(struct msg_conn *conn,
*/
#define mgmt_msg_native_append(msg, data, len) \
({ \
- uint8_t **darrp = mgmt_msg_native_get_darrp(msg); \
- uint8_t *p = darr_append_n(*darrp, len); \
- memcpy(p, data, len); \
- p; \
+ uint8_t **__na_darrp = mgmt_msg_native_get_darrp(msg); \
+ uint8_t *__na_p = darr_append_n(*__na_darrp, len); \
+ memcpy(__na_p, data, len); \
+ __na_p; \
})
/**
@@ -611,8 +611,8 @@ extern int vmgmt_msg_native_send_error(struct msg_conn *conn,
*/
#define mgmt_msg_native_add_str(msg, s) \
do { \
- int __len = strlen(s) + 1; \
- mgmt_msg_native_append(msg, s, __len); \
+ int __nas_len = strlen(s) + 1; \
+ mgmt_msg_native_append(msg, s, __nas_len); \
} while (0)
/**
diff --git a/lib/monotime.h b/lib/monotime.h
index f7ae1bbbe1..5e1bfe754e 100644
--- a/lib/monotime.h
+++ b/lib/monotime.h
@@ -129,6 +129,22 @@ static inline char *time_to_string(time_t ts, char *buf)
return ctime_r(&tbuf, buf);
}
+/* A wrapper for time_to_string() which removes the trailing newline.
+ * This is needed for JSON output, where a newline is not expected.
+ */
+static inline char *time_to_string_json(time_t ts, char *buf)
+{
+ size_t len;
+
+ time_to_string(ts, buf);
+ len = strlen(buf);
+
+ if (len && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ return buf;
+}
+
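
Note: ctime_r() always terminates its output with '\n', which would otherwise
land inside JSON string values. A small usage sketch; the include path is an
assumption:

    #include <stdio.h>
    #include <time.h>
    #include "lib/monotime.h"  /* time_to_string(), time_to_string_json() */

    int main(void)
    {
            char buf[64];  /* ctime_r() needs at least 26 bytes */
            time_t now = time(NULL);

            /* prints the timestamp with its embedded trailing newline */
            printf("[%s]\n", time_to_string(now, buf));
            /* prints e.g. [Mon Jun  3 12:00:00 2024], safe inside JSON */
            printf("[%s]\n", time_to_string_json(now, buf));
            return 0;
    }
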
/* Convert interval to human-friendly string, used in cli output e.g. */
static inline const char *frrtime_to_interval(time_t t, char *buf,
size_t buflen)
diff --git a/lib/nexthop.c b/lib/nexthop.c
index 98b05295b9..332581fbd8 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -581,6 +581,32 @@ void nexthop_del_labels(struct nexthop *nexthop)
nexthop->nh_label_type = ZEBRA_LSP_NONE;
}
+void nexthop_change_labels(struct nexthop *nexthop, struct mpls_label_stack *new_stack)
+{
+ struct mpls_label_stack *nh_label_tmp;
+ uint32_t i;
+
+ /* Enforce limit on label stack size */
+ if (new_stack->num_labels > MPLS_MAX_LABELS)
+ new_stack->num_labels = MPLS_MAX_LABELS;
+
+ /* Resize the array to accommodate the new label stack */
+ if (new_stack->num_labels > nexthop->nh_label->num_labels) {
+ nh_label_tmp = XREALLOC(MTYPE_NH_LABEL, nexthop->nh_label,
+ sizeof(struct mpls_label_stack) +
+ new_stack->num_labels * sizeof(mpls_label_t));
+ if (nh_label_tmp) {
+ nexthop->nh_label = nh_label_tmp;
+ nexthop->nh_label->num_labels = new_stack->num_labels;
+ } else
+ new_stack->num_labels = nexthop->nh_label->num_labels;
+ }
+
+ /* Copy the label stack into the array */
+ for (i = 0; i < new_stack->num_labels; i++)
+ nexthop->nh_label->label[i] = new_stack->label[i];
+}
+
void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action,
const struct seg6local_context *ctx)
{
diff --git a/lib/nexthop.h b/lib/nexthop.h
index 02ea4d96f2..5dfb58d846 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -154,6 +154,8 @@ void nexthops_free(struct nexthop *nexthop);
void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype,
uint8_t num_labels, const mpls_label_t *labels);
void nexthop_del_labels(struct nexthop *);
+void nexthop_change_labels(struct nexthop *nexthop, struct mpls_label_stack *new_stack);
+
void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action,
const struct seg6local_context *ctx);
void nexthop_del_srv6_seg6local(struct nexthop *nexthop);
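
Note on the new helper: nexthop_change_labels() grows nh_label via XREALLOC
only when the incoming stack is larger; otherwise it overwrites the first
new_stack->num_labels entries and leaves the stored count unchanged, so it
assumes the nexthop already carries a label stack (for example from
nexthop_add_labels()). A hedged usage sketch with invented label values:

    /* 'nexthop' must already have labels, e.g. via nexthop_add_labels() */
    struct mpls_label_stack *ns;

    ns = XCALLOC(MTYPE_TMP, sizeof(*ns) + 2 * sizeof(mpls_label_t));
    ns->num_labels = 2;
    ns->label[0] = 16001;  /* illustrative label values */
    ns->label[1] = 16002;

    nexthop_change_labels(nexthop, ns);
    XFREE(MTYPE_TMP, ns);
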
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index f9794bee3c..b199dd61f8 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -83,6 +83,7 @@ static int nb_cli_classic_commit(struct vty *vty)
static void nb_cli_pending_commit_clear(struct vty *vty)
{
vty->pending_commit = 0;
+ vty->buffer_cmd_count = 0;
XFREE(MTYPE_TMP, vty->pending_cmds_buf);
vty->pending_cmds_buflen = 0;
vty->pending_cmds_bufpos = 0;
@@ -102,12 +103,19 @@ int nb_cli_pending_commit_check(struct vty *vty)
static int nb_cli_schedule_command(struct vty *vty)
{
- /* Append command to dynamically sized buffer of scheduled commands. */
+ /* Append command to dynamically sized buffer of scheduled commands.
+ * vty->buf - incoming config command
+ * vty->pending_cmds_buf - pending buffer where incoming commands are
+ * accumulated for later processing
+ * vty->pending_cmds_bufpos - bytes accumulated in the pending buffer so far
+ *
+ */
if (!vty->pending_cmds_buf) {
vty->pending_cmds_buflen = 4096;
vty->pending_cmds_buf =
XCALLOC(MTYPE_TMP, vty->pending_cmds_buflen);
}
+
if ((strlen(vty->buf) + 3)
> (vty->pending_cmds_buflen - vty->pending_cmds_bufpos)) {
vty->pending_cmds_buflen *= 2;
@@ -121,6 +129,9 @@ static int nb_cli_schedule_command(struct vty *vty)
/* Schedule the commit operation. */
vty->pending_commit = 1;
+ vty->buffer_cmd_count++;
+ if (vty->buffer_cmd_count == NB_CMD_BATCH_SIZE)
+ nb_cli_pending_commit_check(vty);
return CMD_SUCCESS;
}
diff --git a/lib/northbound_cli.h b/lib/northbound_cli.h
index 4c8dc50bd2..43c40f49e1 100644
--- a/lib/northbound_cli.h
+++ b/lib/northbound_cli.h
@@ -20,6 +20,9 @@ enum nb_cfg_format {
NB_CFG_FMT_XML,
};
+/* Maximum config commands in a batch */
+#define NB_CMD_BATCH_SIZE 5000
+
extern struct nb_config *vty_shared_candidate_config;
/*
diff --git a/lib/plist.c b/lib/plist.c
index 2cfaa7d81d..6950ab5761 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -1136,8 +1136,10 @@ static int vty_show_prefix_list_prefix(struct vty *vty, afi_t afi,
match = 0;
if (type == normal_display || type == first_match_display)
- if (prefix_same(&p, &pentry->prefix))
+ if (prefix_list_entry_match(pentry, &p, false)) {
+ pentry->hitcnt++;
match = 1;
+ }
if (type == longer_display) {
if ((p.family == pentry->prefix.family)
diff --git a/lib/routemap.c b/lib/routemap.c
index 120731fa61..7aaa5d3be8 100644
--- a/lib/routemap.c
+++ b/lib/routemap.c
@@ -875,6 +875,28 @@ void route_map_walk_update_list(void (*route_map_update_fn)(char *name))
}
}
+static const char *route_map_action_reason2str(enum route_map_action_reason reason)
+{
+ switch (reason) {
+ case route_map_action_none:
+ return "none";
+ case route_map_action_map_null:
+ return "route-map is null";
+ case route_map_action_no_index:
+ return "no index";
+ case route_map_action_next_deny:
+ return "next statement is deny";
+ case route_map_action_exit:
+ return "exit policy";
+ case route_map_action_goto_null:
+ return "goto index is null";
+ case route_map_action_index_deny:
+ return "deny index";
+ }
+
+ return "Invalid reason";
+}
+
/* Return route map's type string. */
static const char *route_map_type_str(enum route_map_type type)
{
@@ -2554,6 +2576,7 @@ route_map_result_t route_map_apply_ext(struct route_map *map,
RUSAGE_T mbefore, mafter;
RUSAGE_T ibefore, iafter;
unsigned long cputime;
+ enum route_map_action_reason reason = route_map_action_none;
if (recursion > RMAP_RECURSION_LIMIT) {
if (map)
@@ -2571,6 +2594,7 @@ route_map_result_t route_map_apply_ext(struct route_map *map,
if (map)
map->applied++;
ret = RMAP_DENYMATCH;
+ reason = route_map_action_map_null;
goto route_map_apply_end;
}
@@ -2614,6 +2638,7 @@ route_map_result_t route_map_apply_ext(struct route_map *map,
ret = RMAP_PERMITMATCH;
else
ret = RMAP_DENYMATCH;
+ reason = route_map_action_no_index;
goto route_map_apply_end;
}
@@ -2701,12 +2726,15 @@ route_map_result_t route_map_apply_ext(struct route_map *map,
}
/* If nextrm returned 'deny', finish. */
- if (ret == RMAP_DENYMATCH)
+ if (ret == RMAP_DENYMATCH) {
+ reason = route_map_action_next_deny;
goto route_map_apply_end;
+ }
}
switch (index->exitpolicy) {
case RMAP_EXIT:
+ reason = route_map_action_exit;
goto route_map_apply_end;
case RMAP_NEXT:
continue;
@@ -2722,6 +2750,7 @@ route_map_result_t route_map_apply_ext(struct route_map *map,
}
if (next == NULL) {
/* No clauses match! */
+ reason = route_map_action_goto_null;
goto route_map_apply_end;
}
}
@@ -2730,6 +2759,7 @@ route_map_result_t route_map_apply_ext(struct route_map *map,
/* 'deny' */
{
ret = RMAP_DENYMATCH;
+ reason = route_map_action_index_deny;
goto route_map_apply_end;
}
}
@@ -2741,9 +2771,9 @@ route_map_result_t route_map_apply_ext(struct route_map *map,
route_map_apply_end:
if (unlikely(CHECK_FLAG(rmap_debug, DEBUG_ROUTEMAP)))
- zlog_debug("Route-map: %s, prefix: %pFX, result: %s",
- (map ? map->name : "null"), prefix,
- route_map_result_str(ret));
+ zlog_debug("Route-map: %s, prefix: %pFX, result: %s, reason: %s",
+ (map ? map->name : "null"), prefix, route_map_result_str(ret),
+ route_map_action_reason2str(reason));
if (pref) {
if (index != NULL && ret == RMAP_PERMITMATCH)
diff --git a/lib/routemap.h b/lib/routemap.h
index e0f738502b..8dcc17ecc3 100644
--- a/lib/routemap.h
+++ b/lib/routemap.h
@@ -29,6 +29,17 @@ extern uint32_t rmap_debug;
/* Route map's type. */
enum route_map_type { RMAP_PERMIT, RMAP_DENY, RMAP_ANY };
+/* Route-map's action reason */
+enum route_map_action_reason {
+ route_map_action_none,
+ route_map_action_map_null,
+ route_map_action_no_index,
+ route_map_action_next_deny,
+ route_map_action_exit,
+ route_map_action_goto_null,
+ route_map_action_index_deny,
+};
+
typedef enum {
RMAP_DENYMATCH,
RMAP_PERMITMATCH
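
Note: combined with the existing result logging in route_map_apply_ext(), the
new reason string makes a denied run self-explanatory. An illustrative
rendering of the extended debug line (map name and prefix invented):

    Route-map: EXPORT-POLICY, prefix: 192.0.2.0/24, result: deny, reason: deny index
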
diff --git a/lib/sockopt.c b/lib/sockopt.c
index 74bc034ccd..003ddb72dc 100644
--- a/lib/sockopt.c
+++ b/lib/sockopt.c
@@ -19,7 +19,7 @@
#define HAVE_BSD_STRUCT_IP_MREQ_HACK
#endif
-void setsockopt_so_recvbuf(int sock, int size)
+int setsockopt_so_recvbuf(int sock, int size)
{
int orig_req = size;
@@ -34,9 +34,11 @@ void setsockopt_so_recvbuf(int sock, int size)
flog_err(EC_LIB_SOCKET,
"%s: fd %d: SO_RCVBUF set to %d (requested %d)",
__func__, sock, size, orig_req);
+
+ return size;
}
-void setsockopt_so_sendbuf(const int sock, int size)
+int setsockopt_so_sendbuf(const int sock, int size)
{
int orig_req = size;
@@ -51,6 +53,8 @@ void setsockopt_so_sendbuf(const int sock, int size)
flog_err(EC_LIB_SOCKET,
"%s: fd %d: SO_SNDBUF set to %d (requested %d)",
__func__, sock, size, orig_req);
+
+ return size;
}
int getsockopt_so_sendbuf(const int sock)
diff --git a/lib/sockopt.h b/lib/sockopt.h
index e6fb78d5e4..cbf988cbe7 100644
--- a/lib/sockopt.h
+++ b/lib/sockopt.h
@@ -12,8 +12,8 @@
extern "C" {
#endif
-extern void setsockopt_so_recvbuf(int sock, int size);
-extern void setsockopt_so_sendbuf(const int sock, int size);
+extern int setsockopt_so_recvbuf(int sock, int size);
+extern int setsockopt_so_sendbuf(const int sock, int size);
extern int getsockopt_so_sendbuf(const int sock);
extern int getsockopt_so_recvbuf(const int sock);
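
Note: returning the final size matters because the helpers halve the request
until setsockopt() succeeds, so the granted value can be far below what was
asked for. A hedged caller sketch, mirroring the vty.c change below
('flush_threshold' is an invented name):

    int granted = setsockopt_so_sendbuf(sock, 1024 * 1024);

    if (granted <= 0) {
            /* setsockopt() never succeeded at any size */
            close(sock);
            return;
    }
    /* derive internal thresholds from what the kernel actually granted */
    if (granted < VTY_MAX_INTERMEDIATE_FLUSH)
            flush_threshold = granted / 2;
    else
            flush_threshold = VTY_MAX_INTERMEDIATE_FLUSH;
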
diff --git a/lib/table.c b/lib/table.c
index 3bf93894ec..cf185de226 100644
--- a/lib/table.c
+++ b/lib/table.c
@@ -208,32 +208,6 @@ struct route_node *route_node_match(struct route_table *table,
return NULL;
}
-struct route_node *route_node_match_ipv4(struct route_table *table,
- const struct in_addr *addr)
-{
- struct prefix_ipv4 p;
-
- memset(&p, 0, sizeof(p));
- p.family = AF_INET;
- p.prefixlen = IPV4_MAX_BITLEN;
- p.prefix = *addr;
-
- return route_node_match(table, (struct prefix *)&p);
-}
-
-struct route_node *route_node_match_ipv6(struct route_table *table,
- const struct in6_addr *addr)
-{
- struct prefix_ipv6 p;
-
- memset(&p, 0, sizeof(p));
- p.family = AF_INET6;
- p.prefixlen = IPV6_MAX_BITLEN;
- p.prefix = *addr;
-
- return route_node_match(table, &p);
-}
-
/* Lookup same prefix node. Return NULL when we can't find route. */
struct route_node *route_node_lookup(struct route_table *table,
union prefixconstptr pu)
diff --git a/lib/table.h b/lib/table.h
index acfc876154..c31be2b688 100644
--- a/lib/table.h
+++ b/lib/table.h
@@ -195,10 +195,6 @@ extern struct route_node *route_node_lookup_maynull(struct route_table *table,
union prefixconstptr pu);
extern struct route_node *route_node_match(struct route_table *table,
union prefixconstptr pu);
-extern struct route_node *route_node_match_ipv4(struct route_table *table,
- const struct in_addr *addr);
-extern struct route_node *route_node_match_ipv6(struct route_table *table,
- const struct in6_addr *addr);
extern unsigned long route_table_count(struct route_table *table);
diff --git a/lib/vty.c b/lib/vty.c
index 256a3bb3f5..1d04e75bf4 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -43,6 +43,7 @@
#include "northbound_cli.h"
#include "printfrr.h"
#include "json.h"
+#include "sockopt.h"
#include <arpa/telnet.h>
#include <termios.h>
@@ -352,7 +353,7 @@ int vty_out(struct vty *vty, const char *format, ...)
* put the data of collective vty->obuf Linked List items on the
* socket and free the vty->obuf data.
*/
- if (vty->vty_buf_size_accumulated >= VTY_MAX_INTERMEDIATE_FLUSH) {
+ if (vty->vty_buf_size_accumulated >= vty->buf_size_intermediate) {
vty->vty_buf_size_accumulated = 0;
vtysh_flush(vty);
}
@@ -2157,15 +2158,15 @@ static void vtysh_accept(struct event *thread)
* Increasing the SEND socket buffer size so that the socket can hold
* before sending it to VTY shell.
*/
- ret = setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (char *)&sndbufsize,
- sizeof(sndbufsize));
- if (ret < 0) {
+ ret = setsockopt_so_sendbuf(sock, sndbufsize);
+ if (ret <= 0) {
flog_err(EC_LIB_SOCKET,
"Cannot set socket %d send buffer size, %s", sock,
safe_strerror(errno));
close(sock);
return;
}
+
set_cloexec(sock);
#ifdef VTYSH_DEBUG
@@ -2173,6 +2174,13 @@ static void vtysh_accept(struct event *thread)
#endif /* VTYSH_DEBUG */
vty = vty_new();
+
+ vty->buf_size_set = ret;
+ if (vty->buf_size_set < VTY_MAX_INTERMEDIATE_FLUSH)
+ vty->buf_size_intermediate = vty->buf_size_set / 2;
+ else
+ vty->buf_size_intermediate = VTY_MAX_INTERMEDIATE_FLUSH;
+
vty->fd = sock;
vty->wfd = sock;
vty->type = VTY_SHELL_SERV;
diff --git a/lib/vty.h b/lib/vty.h
index e511e8e79a..c6f9f5a3a7 100644
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -149,6 +149,7 @@ struct vty {
struct nb_config *candidate_config_base;
/* Dynamic transaction information. */
+ size_t buffer_cmd_count;
bool pending_allowed;
bool pending_commit;
char *pending_cmds_buf;
@@ -237,6 +238,9 @@ struct vty {
bool mgmt_locked_candidate_ds;
bool mgmt_locked_running_ds;
uint64_t vty_buf_size_accumulated;
+
+ int buf_size_set;
+ uint64_t buf_size_intermediate;
};
static inline void vty_push_context(struct vty *vty, int node, uint64_t id)
diff --git a/lib/zclient.c b/lib/zclient.c
index 557d9c3eb9..063944fd3b 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -4693,6 +4693,9 @@ void zclient_redistribute_default(int command, struct zclient *zclient,
zebra_redistribute_default_send(command, zclient, afi, vrf_id);
}
+#define ZCLIENT_QUICK_RECONNECT 1
+#define ZCLIENT_SLOW_RECONNECT 5
+#define ZCLIENT_SWITCH_TO_SLOW 30
static void zclient_event(enum zclient_event event, struct zclient *zclient)
{
switch (event) {
@@ -4702,11 +4705,13 @@ static void zclient_event(enum zclient_event event, struct zclient *zclient)
break;
case ZCLIENT_CONNECT:
if (zclient_debug)
- zlog_debug(
- "zclient connect failures: %d schedule interval is now %d",
- zclient->fail, zclient->fail < 3 ? 10 : 60);
+ zlog_debug("zclient connect failures: %d schedule interval is now %d",
+ zclient->fail,
+ zclient->fail < ZCLIENT_SWITCH_TO_SLOW ? ZCLIENT_QUICK_RECONNECT
+ : ZCLIENT_SLOW_RECONNECT);
event_add_timer(zclient->master, zclient_connect, zclient,
- zclient->fail < 3 ? 10 : 60,
+ zclient->fail < ZCLIENT_SWITCH_TO_SLOW ? ZCLIENT_QUICK_RECONNECT
+ : ZCLIENT_SLOW_RECONNECT,
&zclient->t_connect);
break;
case ZCLIENT_READ:
diff --git a/lib/zclient.h b/lib/zclient.h
index 6da9558aa5..2385a8a219 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -131,7 +131,7 @@ typedef enum {
ZEBRA_BFD_CLIENT_DEREGISTER,
ZEBRA_INTERFACE_ENABLE_RADV,
ZEBRA_INTERFACE_DISABLE_RADV,
- ZEBRA_NEXTHOP_LOOKUP_MRIB,
+ ZEBRA_NEXTHOP_LOOKUP,
ZEBRA_INTERFACE_LINK_PARAMS,
ZEBRA_MPLS_LABELS_ADD,
ZEBRA_MPLS_LABELS_DELETE,
diff --git a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c
index 93c9bcac44..45e154d83b 100644
--- a/mgmtd/mgmt_be_adapter.c
+++ b/mgmtd/mgmt_be_adapter.c
@@ -83,7 +83,7 @@ static const char *const zebra_oper_xpaths[] = {
NULL,
};
-#if HAVE_RIPD
+#ifdef HAVE_RIPD
static const char *const ripd_config_xpaths[] = {
"/frr-filter:lib",
"/frr-interface:lib/interface",
@@ -104,7 +104,7 @@ static const char *const ripd_rpc_xpaths[] = {
};
#endif
-#if HAVE_RIPNGD
+#ifdef HAVE_RIPNGD
static const char *const ripngd_config_xpaths[] = {
"/frr-filter:lib",
"/frr-interface:lib/interface",
@@ -123,7 +123,7 @@ static const char *const ripngd_rpc_xpaths[] = {
};
#endif
-#if HAVE_STATICD
+#ifdef HAVE_STATICD
static const char *const staticd_config_xpaths[] = {
"/frr-vrf:lib",
"/frr-interface:lib",
diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c
index 32f28a5774..7f7a5d9a8e 100644
--- a/mgmtd/mgmt_fe_adapter.c
+++ b/mgmtd/mgmt_fe_adapter.c
@@ -190,7 +190,7 @@ static void mgmt_fe_cleanup_session(struct mgmt_fe_session_ctx **sessionp)
assert(session->adapter->refcount > 1);
mgmt_fe_adapter_unlock(&session->adapter);
}
-
+ darr_free_free(session->notify_xpaths);
hash_release(mgmt_fe_sessions, session);
XFREE(MTYPE_MGMTD_FE_SESSION, session);
*sessionp = NULL;
diff --git a/nhrpd/nhrp_vty.c b/nhrpd/nhrp_vty.c
index 199f4d75d4..cae93c6e53 100644
--- a/nhrpd/nhrp_vty.c
+++ b/nhrpd/nhrp_vty.c
@@ -933,6 +933,10 @@ static void show_ip_opennhrp_cache(struct nhrp_cache *c, void *pctx)
if (ctx->afi != family2afi(sockunion_family(&c->remote_addr)))
return;
+ if (ctx->count && !ctx->json)
+ vty_out(ctx->vty, "\n");
+ ctx->count++;
+
sockunion2str(&c->remote_addr, buf[0], sizeof(buf[0]));
if (c->cur.peer)
sockunion2str(&c->cur.peer->vc->remote.nbma, buf[1],
@@ -985,8 +989,6 @@ static void show_ip_opennhrp_cache(struct nhrp_cache *c, void *pctx)
if (sockunion_family(&c->cur.remote_nbma_natoa) != AF_UNSPEC)
vty_out(ctx->vty, "NBMA-NAT-OA-Address: %s\n", buf[2]);
-
- vty_out(ctx->vty, "\n\n");
}
DEFUN(show_ip_nhrp, show_ip_nhrp_cmd,
@@ -1030,7 +1032,6 @@ DEFUN(show_ip_nhrp, show_ip_nhrp_cmd,
else
json_object_string_add(json_vrf, "status", "ok");
- ctx.count++;
FOR_ALL_INTERFACES (vrf, ifp)
nhrp_cache_foreach(ifp, show_ip_opennhrp_cache, &ctx);
}
diff --git a/ospfd/ospf_abr.c b/ospfd/ospf_abr.c
index 93779991b5..eed1bfcb30 100644
--- a/ospfd/ospf_abr.c
+++ b/ospfd/ospf_abr.c
@@ -1823,7 +1823,7 @@ static void ospf_abr_nssa_type7_default_create(struct ospf *ospf,
"Announcing Type-7 default route into NSSA area %pI4",
&area->area_id);
- /* Prepare the extrenal_info for aggregator */
+ /* Prepare the external_info for aggregator */
memset(&ei, 0, sizeof(struct external_info));
ei.p.family = AF_INET;
ei.p.prefixlen = 0;
diff --git a/ospfd/ospf_asbr.c b/ospfd/ospf_asbr.c
index b47c390088..aa11467027 100644
--- a/ospfd/ospf_asbr.c
+++ b/ospfd/ospf_asbr.c
@@ -168,6 +168,38 @@ void ospf_external_info_delete(struct ospf *ospf, uint8_t type,
}
}
+/*
+ * ospf_external_info_delete_multi_instance
+ *
+ * Delete instances of the external route information for a given route type.
+ * The preserve_instance parameter may be used to prevent the current instance
+ * from being deleted.
+ */
+void ospf_external_info_delete_multi_instance(struct ospf *ospf, uint8_t type, struct prefix_ipv4 p,
+ unsigned long preserve_instance)
+{
+ struct route_node *rn;
+ struct ospf_external *ext;
+ struct list *ext_list;
+ struct listnode *node;
+
+ ext_list = ospf->external[type];
+ if (!ext_list)
+ return;
+
+ for (ALL_LIST_ELEMENTS_RO(ext_list, node, ext)) {
+ if (ext->instance != preserve_instance) {
+ rn = route_node_lookup(EXTERNAL_INFO(ext), (struct prefix *)&p);
+ if (rn) {
+ ospf_external_info_free(rn->info);
+ rn->info = NULL;
+ route_unlock_node(rn);
+ route_unlock_node(rn);
+ }
+ }
+ }
+}
+
struct external_info *ospf_external_info_lookup(struct ospf *ospf, uint8_t type,
unsigned short instance,
struct prefix_ipv4 *p)
@@ -189,6 +221,44 @@ struct external_info *ospf_external_info_lookup(struct ospf *ospf, uint8_t type,
return NULL;
}
+/*
+ * ospf_external_info_default_lookup
+ *
+ * For default information criteria, we really don't care about the
+ * source of the route and there should only be one.
+ */
+struct external_info *ospf_external_info_default_lookup(struct ospf *ospf)
+{
+ struct ospf_external *ext;
+ struct external_info *ei;
+ struct list *ext_list;
+ struct listnode *node;
+ struct route_node *rn;
+ struct prefix_ipv4 p = {
+ .family = AF_INET,
+ .prefixlen = 0,
+ .prefix.s_addr = INADDR_ANY,
+ };
+
+ ext_list = ospf->external[DEFAULT_ROUTE];
+ if (!ext_list)
+ return (NULL);
+
+ for (ALL_LIST_ELEMENTS_RO(ext_list, node, ext)) {
+ rn = route_node_lookup(EXTERNAL_INFO(ext), (struct prefix *)&p);
+ if (rn) {
+ route_unlock_node(rn);
+ if (rn->info) {
+ ei = rn->info;
+ if (ei->type != ZEBRA_ROUTE_OSPF || ei->instance != ospf->instance)
+ return ei;
+ }
+ }
+ }
+
+ return NULL;
+}
+
struct ospf_lsa *ospf_external_info_find_lsa(struct ospf *ospf,
struct prefix_ipv4 *p)
{
@@ -422,7 +492,7 @@ static void ospf_aggr_handle_external_info(void *data)
ei->to_be_processed = true;
if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR))
- zlog_debug("%s: Handle extrenal route(%pI4/%d)", __func__,
+ zlog_debug("%s: Handle external route(%pI4/%d)", __func__,
&ei->p.prefix, ei->p.prefixlen);
assert(ospf);
@@ -501,7 +571,7 @@ static void ospf_external_aggr_delete(struct ospf *ospf, struct route_node *rn)
}
struct ospf_external_aggr_rt *
-ospf_extrenal_aggregator_lookup(struct ospf *ospf, struct prefix_ipv4 *p)
+ospf_external_aggregator_lookup(struct ospf *ospf, struct prefix_ipv4 *p)
{
struct route_node *rn;
struct ospf_external_aggr_rt *summary_rt = NULL;
@@ -547,7 +617,7 @@ void ospf_unlink_ei_from_aggr(struct ospf *ospf,
{
if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR))
zlog_debug(
- "%s: Unlinking extrenal route(%pI4/%d) from aggregator(%pI4/%d), external route count:%ld",
+ "%s: Unlinking external route(%pI4/%d) from aggregator(%pI4/%d), external route count:%ld",
__func__, &ei->p.prefix, ei->p.prefixlen,
&aggr->p.prefix, aggr->p.prefixlen,
OSPF_EXTERNAL_RT_COUNT(aggr));
@@ -578,7 +648,7 @@ static void ospf_link_ei_to_aggr(struct ospf_external_aggr_rt *aggr,
{
if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR))
zlog_debug(
- "%s: Linking extrenal route(%pI4/%d) to aggregator(%pI4/%d)",
+ "%s: Linking external route(%pI4/%d) to aggregator(%pI4/%d)",
__func__, &ei->p.prefix, ei->p.prefixlen,
&aggr->p.prefix, aggr->p.prefixlen);
(void)hash_get(aggr->match_extnl_hash, ei, hash_alloc_intern);
@@ -633,7 +703,7 @@ struct ospf_lsa *ospf_originate_summary_lsa(struct ospf *ospf,
return NULL;
}
- /* Prepare the extrenal_info for aggregator */
+ /* Prepare the external_info for aggregator */
memset(&ei_aggr, 0, sizeof(ei_aggr));
ei_aggr.p = aggr->p;
ei_aggr.tag = aggr->tag;
@@ -993,7 +1063,7 @@ static void ospf_handle_external_aggr_update(struct ospf *ospf)
aggr->action = OSPF_ROUTE_AGGR_NONE;
- /* Prepare the extrenal_info for aggregator */
+ /* Prepare the external_info for aggregator */
memset(&ei_aggr, 0, sizeof(ei_aggr));
ei_aggr.p = aggr->p;
ei_aggr.tag = aggr->tag;
@@ -1106,7 +1176,7 @@ int ospf_asbr_external_aggregator_set(struct ospf *ospf, struct prefix_ipv4 *p,
{
struct ospf_external_aggr_rt *aggregator;
- aggregator = ospf_extrenal_aggregator_lookup(ospf, p);
+ aggregator = ospf_external_aggregator_lookup(ospf, p);
if (aggregator) {
if (CHECK_FLAG(aggregator->flags,
@@ -1166,7 +1236,7 @@ int ospf_asbr_external_rt_no_advertise(struct ospf *ospf, struct prefix_ipv4 *p)
struct ospf_external_aggr_rt *aggr;
route_tag_t tag = 0;
- aggr = ospf_extrenal_aggregator_lookup(ospf, p);
+ aggr = ospf_external_aggregator_lookup(ospf, p);
if (aggr) {
if (CHECK_FLAG(aggr->flags, OSPF_EXTERNAL_AGGRT_NO_ADVERTISE))
return OSPF_SUCCESS;
diff --git a/ospfd/ospf_asbr.h b/ospfd/ospf_asbr.h
index 6158d65f22..0b3b695f3e 100644
--- a/ospfd/ospf_asbr.h
+++ b/ospfd/ospf_asbr.h
@@ -109,6 +109,10 @@ ospf_external_info_add(struct ospf *, uint8_t, unsigned short,
route_tag_t, uint32_t metric);
extern void ospf_external_info_delete(struct ospf *, uint8_t, unsigned short,
struct prefix_ipv4);
+extern void ospf_external_info_delete_multi_instance(struct ospf *ospf, uint8_t type,
+ struct prefix_ipv4 p,
+ unsigned long preserve_instance);
+#define OSPF_DELETE_ANY_INSTANCE 0xffffffff
extern struct external_info *ospf_external_info_lookup(struct ospf *, uint8_t,
unsigned short,
struct prefix_ipv4 *);
@@ -140,7 +144,7 @@ extern int ospf_external_aggregator_timer_set(struct ospf *ospf,
extern void ospf_external_aggrigator_free(struct ospf_external_aggr_rt *aggr);
extern struct ospf_external_aggr_rt *
-ospf_extrenal_aggregator_lookup(struct ospf *ospf, struct prefix_ipv4 *p);
+ospf_external_aggregator_lookup(struct ospf *ospf, struct prefix_ipv4 *p);
void ospf_unset_all_aggr_flag(struct ospf *ospf);
diff --git a/ospfd/ospf_flood.c b/ospfd/ospf_flood.c
index e3398af74b..bcb35315d8 100644
--- a/ospfd/ospf_flood.c
+++ b/ospfd/ospf_flood.c
@@ -325,7 +325,7 @@ static void ospf_process_self_originated_lsa(struct ospf *ospf,
LSA_REFRESH_FORCE, false);
} else {
aggr = (struct ospf_external_aggr_rt *)
- ospf_extrenal_aggregator_lookup(ospf, &p);
+ ospf_external_aggregator_lookup(ospf, &p);
if (aggr) {
struct external_info ei_aggr;
diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c
index 1350487898..ac53f3a19f 100644
--- a/ospfd/ospf_lsa.c
+++ b/ospfd/ospf_lsa.c
@@ -2407,15 +2407,10 @@ struct ospf_lsa *ospf_nssa_lsa_refresh(struct ospf_area *area,
static struct external_info *ospf_default_external_info(struct ospf *ospf)
{
int type;
- struct prefix_ipv4 p;
struct external_info *default_ei;
int ret = 0;
- p.family = AF_INET;
- p.prefix.s_addr = 0;
- p.prefixlen = 0;
-
- default_ei = ospf_external_info_lookup(ospf, DEFAULT_ROUTE, 0, &p);
+ default_ei = ospf_external_info_default_lookup(ospf);
if (!default_ei)
return NULL;
@@ -4069,7 +4064,7 @@ struct ospf_lsa *ospf_lsa_refresh(struct ospf *ospf, struct ospf_lsa *lsa)
ospf, lsa, ei, LSA_REFRESH_FORCE, false);
else {
aggr = (struct ospf_external_aggr_rt *)
- ospf_extrenal_aggregator_lookup(ospf, &p);
+ ospf_external_aggregator_lookup(ospf, &p);
if (aggr) {
struct external_info ei_aggr;
diff --git a/ospfd/ospf_sr.c b/ospfd/ospf_sr.c
index 97dc578679..89db97922f 100644
--- a/ospfd/ospf_sr.c
+++ b/ospfd/ospf_sr.c
@@ -1334,6 +1334,12 @@ static void update_out_nhlfe(struct hash_bucket *bucket, void *args)
continue;
for (ALL_LIST_ELEMENTS_RO(srp->route->paths, pnode, path)) {
+ /* Compute NHLFE if the path has not been initialized */
+ if (!path->srni.nexthop) {
+ compute_prefix_nhlfe(srp);
+ continue;
+ }
+
/* Skip path that has not next SR-Node as nexthop */
if (path->srni.nexthop != srnext)
continue;
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index 0457b13337..27528f6594 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -7347,6 +7347,9 @@ DEFPY (show_ip_ospf_database,
struct in_addr *adv_router_p = NULL;
json_object *json = NULL;
+ if (instance_id != ospf_instance)
+ return CMD_NOT_MY_INSTANCE;
+
if (uj)
json = json_object_new_object();
if (lsid_str)
diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c
index c7cba1e20f..b718d498ae 100644
--- a/ospfd/ospf_zebra.c
+++ b/ospfd/ospf_zebra.c
@@ -1292,15 +1292,14 @@ static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS)
* originate)ZEBRA_ROUTE_MAX is used to delete the ex-info.
* Resolved this inconsistency by maintaining same route type.
*/
- if ((is_default_prefix(&pgen)) && (api.type != ZEBRA_ROUTE_OSPF))
+ if ((is_default_prefix(&pgen)) &&
+ ((api.type != ZEBRA_ROUTE_OSPF) || (api.instance != ospf->instance)))
rt_type = DEFAULT_ROUTE;
if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE))
- zlog_debug("%s: cmd %s from client %s: vrf %s(%u), p %pFX, metric %d",
- __func__, zserv_command_string(cmd),
- zebra_route_string(api.type),
- ospf_vrf_id_to_name(vrf_id), vrf_id, &api.prefix,
- api.metric);
+ zlog_debug("%s: cmd %s from client %s-%d: vrf %s(%u), p %pFX, metric %d", __func__,
+ zserv_command_string(cmd), zebra_route_string(api.type), api.instance,
+ ospf_vrf_id_to_name(vrf_id), vrf_id, &api.prefix, api.metric);
if (cmd == ZEBRA_REDISTRIBUTE_ROUTE_ADD) {
/* XXX|HACK|TODO|FIXME:
@@ -1315,16 +1314,17 @@ static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS)
api.tag = ospf->dtag[rt_type];
/*
- * Given zebra sends update for a prefix via ADD message, it
- * should
- * be considered as an implicit DEL for that prefix with other
- * source
- * types.
+ * Given zebra sends an update for a prefix via an ADD message, it
+ * is treated as an implicit DELETE for that prefix for all types
+ * and instances other than the type and instance associated with
+ * the prefix.
*/
- for (i = 0; i <= ZEBRA_ROUTE_MAX; i++)
- if (i != rt_type)
- ospf_external_info_delete(ospf, i, api.instance,
- p);
+ for (i = 0; i <= ZEBRA_ROUTE_MAX; i++) {
+ unsigned long preserve_instance;
+
+ preserve_instance = (i == rt_type) ? api.instance : OSPF_DELETE_ANY_INSTANCE;
+ ospf_external_info_delete_multi_instance(ospf, i, p, preserve_instance);
+ }
ei = ospf_external_info_add(ospf, rt_type, api.instance, p,
ifindex, nexthop, api.tag,
diff --git a/ospfd/ospf_zebra.h b/ospfd/ospf_zebra.h
index 86a5678fc4..b83524303f 100644
--- a/ospfd/ospf_zebra.h
+++ b/ospfd/ospf_zebra.h
@@ -47,6 +47,9 @@ extern uint8_t ospf_distance_apply(struct ospf *ospf, struct prefix_ipv4 *,
struct ospf_route *);
extern struct ospf_external *ospf_external_lookup(struct ospf *, uint8_t,
unsigned short);
+
+extern struct external_info *ospf_external_info_default_lookup(struct ospf *ospf);
+
extern struct ospf_external *ospf_external_add(struct ospf *, uint8_t,
unsigned short);
diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c
index d72afec1e4..90330d368d 100644
--- a/ospfd/ospfd.c
+++ b/ospfd/ospfd.c
@@ -843,7 +843,7 @@ static void ospf_finish_final(struct ospf *ospf)
ospf_distance_reset(ospf);
route_table_finish(ospf->distance_table);
- /* Release extrenal Aggregator table */
+ /* Release external Aggregator table */
for (rn = route_top(ospf->rt_aggr_tbl); rn; rn = route_next(rn)) {
struct ospf_external_aggr_rt *aggr;
diff --git a/pathd/path_pcep_debug.c b/pathd/path_pcep_debug.c
index 7bff9c7b9c..89e7574324 100644
--- a/pathd/path_pcep_debug.c
+++ b/pathd/path_pcep_debug.c
@@ -1321,8 +1321,7 @@ void _format_pcep_event(int ps, pcep_event *event)
PATHD_FORMAT("\n");
PATHD_FORMAT("%*sevent_type: %s\n", ps2, "",
pcep_event_type_name(event->event_type));
- PATHD_FORMAT("%*sevent_time: %s", ps2, "",
- ctime_r(&event->event_time, buf));
+ PATHD_FORMAT("%*sevent_time: %s", ps2, "", time_to_string(event->event_time, buf));
if (event->session == NULL) {
PATHD_FORMAT("%*ssession: NULL\n", ps2, "");
} else {
diff --git a/pbrd/pbr_map.c b/pbrd/pbr_map.c
index 8f7a46377c..ea0e2e4eaf 100644
--- a/pbrd/pbr_map.c
+++ b/pbrd/pbr_map.c
@@ -732,6 +732,14 @@ void pbr_map_schedule_policy_from_nhg(const char *nh_group, bool installed)
pbr_map_check(pbrms, false);
}
+
+ /*
+ * vrf_unchanged pbrms have no nhg but their
+ * installation is contingent on other sequences which
+ * may...
+ */
+ if (pbrms->vrf_unchanged)
+ pbr_map_check(pbrms, false);
}
}
}
diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c
index b44169c522..acfb0c3af3 100644
--- a/pimd/pim6_mld.c
+++ b/pimd/pim6_mld.c
@@ -449,7 +449,7 @@ static void gm_sg_update(struct gm_sg *sg, bool has_expired)
* this data structure.
*/
if (sg->oil)
- pim_channel_oil_del(sg->oil, __func__);
+ sg->oil = pim_channel_oil_del(sg->oil, __func__);
/* multiple paths can lead to the last state going away;
* t_sg_expire can still be running if we're arriving from
diff --git a/pimd/pim_autorp.c b/pimd/pim_autorp.c
index d36b792e39..caed914a87 100644
--- a/pimd/pim_autorp.c
+++ b/pimd/pim_autorp.c
@@ -19,6 +19,7 @@
#include "lib/json.h"
#include "pimd.h"
+#include "pim_util.h"
#include "pim_iface.h"
#include "pim_rp.h"
#include "pim_sock.h"
@@ -26,61 +27,90 @@
#include "pim_autorp.h"
DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP, "PIM AutoRP info");
-DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP_RP, "PIM AutoRP advertised RP info");
-DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP_CRP, "PIM AutoRP candidate RP info");
+DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP_RP, "PIM AutoRP discovered RP info");
DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP_ANNOUNCE, "PIM AutoRP announcement packet");
+DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP_GRPPFIX, "PIM AutoRP group prefix list");
static const char *PIM_AUTORP_ANNOUNCEMENT_GRP = "224.0.1.39";
static const char *PIM_AUTORP_DISCOVERY_GRP = "224.0.1.40";
static const in_port_t PIM_AUTORP_PORT = 496;
-static int pim_autorp_rp_cmp(const struct pim_autorp_rp *l,
- const struct pim_autorp_rp *r)
+static int pim_autorp_rp_cmp(const struct pim_autorp_rp *l, const struct pim_autorp_rp *r)
{
return pim_addr_cmp(l->addr, r->addr);
}
-DECLARE_SORTLIST_UNIQ(pim_autorp_rp, struct pim_autorp_rp, list,
- pim_autorp_rp_cmp);
+DECLARE_SORTLIST_UNIQ(pim_autorp_rp, struct pim_autorp_rp, item, pim_autorp_rp_cmp);
-static void pim_autorp_rp_free(struct pim_autorp_rp *rp)
+static int pim_autorp_grppfix_cmp(const struct pim_autorp_grppfix *l,
+ const struct pim_autorp_grppfix *r)
{
- event_cancel(&rp->hold_timer);
+ return prefix_cmp(&l->grp, &r->grp);
+}
- /* Clean up installed RP info */
- if (pim_rp_del(rp->autorp->pim, rp->addr, rp->grp,
- (strlen(rp->grplist) ? rp->grplist : NULL),
- RP_SRC_AUTORP))
- if (PIM_DEBUG_AUTORP)
- zlog_err("%s: Failed to delete RP %pI4", __func__,
- &rp->addr);
+DECLARE_SORTLIST_UNIQ(pim_autorp_grppfix, struct pim_autorp_grppfix, item, pim_autorp_grppfix_cmp);
- XFREE(MTYPE_PIM_AUTORP_RP, rp);
+static void pim_autorp_grppfix_free(struct pim_autorp_grppfix_head *head)
+{
+ struct pim_autorp_grppfix *grp;
+
+ while ((grp = pim_autorp_grppfix_pop(head)))
+ XFREE(MTYPE_PIM_AUTORP_GRPPFIX, grp);
}
-static void pim_autorp_rplist_free(struct pim_autorp_rp_head *head)
+static void pim_autorp_rp_free(struct pim_autorp_rp *rp, bool installed)
{
- struct pim_autorp_rp *rp;
+ event_cancel(&rp->hold_timer);
- while ((rp = pim_autorp_rp_pop(head)))
- pim_autorp_rp_free(rp);
+ /* Clean up installed RP info */
+ if (installed) {
+ if (pim_rp_del(rp->autorp->pim, rp->addr, rp->grp,
+ (strlen(rp->grplist) ? rp->grplist : NULL), RP_SRC_AUTORP)) {
+ zlog_warn("%s: Failed to delete RP %pI4", __func__, &rp->addr);
+ }
+
+ if (strlen(rp->grplist)) {
+ struct prefix_list *pl;
+
+ pl = prefix_list_lookup(AFI_IP, rp->grplist);
+ if (pl)
+ prefix_list_delete(pl);
+ }
+ }
+
+ pim_autorp_grppfix_free(&rp->grp_pfix_list);
+ pim_autorp_grppfix_fini(&rp->grp_pfix_list);
+
+ XFREE(MTYPE_PIM_AUTORP_RP, rp);
}
-static void pim_autorp_rplist_cfree(struct pim_autorp_rp_head *head)
+static void pim_autorp_rplist_free(struct pim_autorp_rp_head *head, bool installed)
{
struct pim_autorp_rp *rp;
while ((rp = pim_autorp_rp_pop(head)))
- XFREE(MTYPE_PIM_AUTORP_CRP, rp);
+ pim_autorp_rp_free(rp, installed);
}
static void pim_autorp_free(struct pim_autorp *autorp)
{
- pim_autorp_rplist_free(&(autorp->discovery_rp_list));
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Freeing PIM AutoRP", __func__);
+
+ pim_autorp_rplist_free(&(autorp->discovery_rp_list), true);
pim_autorp_rp_fini(&(autorp->discovery_rp_list));
- pim_autorp_rplist_cfree(&(autorp->candidate_rp_list));
+ pim_autorp_rplist_free(&(autorp->candidate_rp_list), false);
pim_autorp_rp_fini(&(autorp->candidate_rp_list));
+
+ pim_autorp_rplist_free(&(autorp->mapping_rp_list), false);
+ pim_autorp_rp_fini(&(autorp->mapping_rp_list));
+
+ pim_autorp_rplist_free(&(autorp->advertised_rp_list), false);
+ pim_autorp_rp_fini(&(autorp->advertised_rp_list));
+
+ if (autorp->announce_pkt)
+ XFREE(MTYPE_PIM_AUTORP_ANNOUNCE, autorp->announce_pkt);
}
static bool pim_autorp_join_groups(struct interface *ifp)
@@ -97,26 +127,22 @@ static bool pim_autorp_join_groups(struct interface *ifp)
inet_pton(PIM_AF, PIM_AUTORP_DISCOVERY_GRP, &grp);
if (pim_socket_join(autorp->sock, grp, pim_ifp->primary_address,
ifp->ifindex, pim_ifp)) {
- zlog_err("Failed to join group %pI4 on interface %s", &grp,
- ifp->name);
+ zlog_warn("Failed to join group %pI4 on interface %s", &grp, ifp->name);
return false;
}
- /* TODO: Future Mapping agent implementation
- * Join announcement group for AutoRP mapping agent
- * inet_pton(PIM_AF, PIM_AUTORP_ANNOUNCEMENT_GRP, &grp);
- * if (pim_socket_join(pim->autorp->sock, grp,
- * pim_ifp->primary_address,
- * ifp->ifindex, pim_ifp)) {
- * zlog_err("Failed to join group %pI4 on interface %s",
- * &grp, ifp->name);
- * return errno;
- * }
- */
+ zlog_info("%s: Joined AutoRP discovery group %pPA on interface %s", __func__, &grp,
+ ifp->name);
- if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Joined AutoRP groups on interface %s", __func__,
- ifp->name);
+ inet_pton(PIM_AF, PIM_AUTORP_ANNOUNCEMENT_GRP, &grp);
+ if (pim_socket_join(pim->autorp->sock, grp, pim_ifp->primary_address, ifp->ifindex,
+ pim_ifp)) {
+ zlog_warn("Failed to join group %pI4 on interface %s", &grp, ifp->name);
+ return false;
+ }
+
+ zlog_info("%s: Joined AutoRP announcement group %pPA on interface %s", __func__, &grp,
+ ifp->name);
return true;
}
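
pim_socket_join() hides the actual membership plumbing; as a hypothetical sketch, a helper like it reduces to roughly the following for the two well-known AutoRP groups (Linux ip_mreqn form; the helper name and error handling are illustrative, not FRR's API):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int join_autorp_group(int fd, const char *group, int ifindex)
{
	struct ip_mreqn mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_ifindex = ifindex; /* bind the membership to one interface */
	inet_pton(AF_INET, group, &mreq.imr_multiaddr);
	return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
}

/* join_autorp_group(fd, "224.0.1.40", ifindex);  discovery, all PIM routers
 * join_autorp_group(fd, "224.0.1.39", ifindex);  announcements, mapping agents */
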
@@ -135,31 +161,26 @@ static bool pim_autorp_leave_groups(struct interface *ifp)
inet_pton(PIM_AF, PIM_AUTORP_DISCOVERY_GRP, &grp);
if (pim_socket_leave(autorp->sock, grp, pim_ifp->primary_address,
ifp->ifindex, pim_ifp)) {
- zlog_err("Failed to leave group %pI4 on interface %s", &grp,
- ifp->name);
+ zlog_warn("Failed to leave group %pI4 on interface %s", &grp, ifp->name);
return false;
}
- /* TODO: Future Mapping agent implementation
- * Leave announcement group for AutoRP mapping agent
- * inet_pton(PIM_AF, PIM_AUTORP_ANNOUNCEMENT_GRP, &grp);
- * if (pim_socket_leave(pim->autorp->sock, grp,
- * pim_ifp->primary_address,
- * ifp->ifindex, pim_ifp)) {
- * zlog_err("Failed to leave group %pI4 on interface %s",
- * &grp, ifp->name);
- * return errno;
- * }
- */
+ zlog_info("%s: Left AutoRP discovery group %pPA on interface %s", __func__, &grp, ifp->name);
- if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Left AutoRP groups on interface %s", __func__,
- ifp->name);
+ inet_pton(PIM_AF, PIM_AUTORP_ANNOUNCEMENT_GRP, &grp);
+ if (pim_socket_leave(pim->autorp->sock, grp, pim_ifp->primary_address, ifp->ifindex,
+ pim_ifp)) {
+ zlog_warn("Failed to leave group %pI4 on interface %s", &grp, ifp->name);
+ return false;
+ }
+
+ zlog_info("%s: Left AutoRP announcement group %pPA on interface %s", __func__, &grp,
+ ifp->name);
return true;
}
-static bool pim_autorp_setup(struct pim_autorp *autorp)
+static bool pim_autorp_setup(int fd)
{
#if defined(HAVE_IP_PKTINFO)
int data;
@@ -170,35 +191,39 @@ static bool pim_autorp_setup(struct pim_autorp *autorp)
.sin_addr = { .s_addr = INADDR_ANY },
.sin_port = htons(PIM_AUTORP_PORT) };
- setsockopt_so_recvbuf(autorp->sock, 1024 * 1024 * 8);
+ setsockopt_so_recvbuf(fd, 1024 * 1024 * 8);
#if defined(HAVE_IP_PKTINFO)
/* Linux and Solaris IP_PKTINFO */
data = 1;
- if (setsockopt(autorp->sock, PIM_IPPROTO, IP_PKTINFO, &data, data_len)) {
- zlog_err("Could not set IP_PKTINFO on socket fd=%d: errno=%d: %s",
- autorp->sock, errno, safe_strerror(errno));
+ if (setsockopt(fd, PIM_IPPROTO, IP_PKTINFO, &data, data_len)) {
+ zlog_warn("%s: Could not set IP_PKTINFO on socket fd=%d: errno=%d: %s", __func__,
+ fd, errno, safe_strerror(errno));
return false;
}
#endif
- if (set_nonblocking(autorp->sock) < 0) {
- zlog_err("Could not set non blocking on socket fd=%d: errno=%d: %s",
- autorp->sock, errno, safe_strerror(errno));
+ if (set_nonblocking(fd) < 0) {
+ zlog_warn("%s: Could not set non blocking on socket fd=%d: errno=%d: %s", __func__,
+ fd, errno, safe_strerror(errno));
+ return false;
+ }
+
+ if (sockopt_reuseaddr(fd)) {
+ zlog_warn("%s: Could not set reuse addr on socket fd=%d: errno=%d: %s", __func__,
+ fd, errno, safe_strerror(errno));
return false;
}
- if (sockopt_reuseaddr(autorp->sock)) {
- zlog_err("Could not set reuse addr on socket fd=%d: errno=%d: %s",
- autorp->sock, errno, safe_strerror(errno));
+ if (setsockopt_ipv4_multicast_loop(fd, 1) < 0) {
+ zlog_warn("%s: Could not enable multicast loopback on socket fd=%d: errno=%d: %s",
+ __func__, fd, errno, safe_strerror(errno));
return false;
}
- if (bind(autorp->sock, (const struct sockaddr *)&autorp_addr,
- sizeof(autorp_addr)) < 0) {
- zlog_err("Could not bind socket: %pSUp, fd=%d, errno=%d, %s",
- (union sockunion *)&autorp_addr, autorp->sock, errno,
- safe_strerror(errno));
+ if (bind(fd, (const struct sockaddr *)&autorp_addr, sizeof(autorp_addr)) < 0) {
+ zlog_warn("%s: Could not bind socket: %pSUp, fd=%d, errno=%d, %s", __func__,
+ (union sockunion *)&autorp_addr, fd, errno, safe_strerror(errno));
return false;
}
@@ -208,20 +233,148 @@ static bool pim_autorp_setup(struct pim_autorp *autorp)
return true;
}
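
Taken together, the setup above amounts to the following standalone sketch with plain POSIX calls in place of FRR's sockopt wrappers (Linux-flavored, error handling trimmed; 496 is the AutoRP port bound by this function):

#include <fcntl.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int autorp_sock_prepare(int fd)
{
	int on = 1;
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(496);               /* AutoRP UDP port */
	sin.sin_addr.s_addr = htonl(INADDR_ANY);

	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));   /* learn dst addr */
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &on, sizeof(on));
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
	return bind(fd, (struct sockaddr *)&sin, sizeof(sin));
}
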
-static bool pim_autorp_announcement(struct pim_autorp *autorp, uint8_t rpcnt,
- uint16_t holdtime, char *buf,
- size_t buf_size)
+static void autorp_ma_rp_holdtime(struct event *evt)
{
- /* TODO: Future Mapping agent implementation
- * Implement AutoRP mapping agent logic using received announcement messages
- */
+ /* Mapping agent RP hold time expired, remove the RP */
+ struct pim_autorp_rp *rp = EVENT_ARG(evt);
+
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP hold time expired, RP removed from mapping agent: addr=%pI4, grp=%pFX, grplist=%s",
+ __func__, &rp->addr, &rp->grp,
+ (strlen(rp->grplist) ? rp->grplist : "NONE"));
+
+ pim_autorp_rp_del(&(rp->autorp->mapping_rp_list), rp);
+ pim_autorp_rp_free(rp, false);
+}
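
This hold-timer shape recurs throughout the file; schematically, using only the event API calls already visible in this diff (assumes FRR's lib/frrevent.h, where the scheduler clears the caller's event pointer before running the callback, so the callback only cleans up the entry itself):

static void rp_expire(struct event *evt)
{
	struct pim_autorp_rp *rp = EVENT_ARG(evt);

	/* remove rp from its list, then free it */
}

/* (re)arming, safe whether or not a timer is already pending:
 *	event_cancel(&rp->hold_timer);
 *	if (holdtime > 0)
 *		event_add_timer(router->master, rp_expire, rp, holdtime,
 *				&rp->hold_timer);
 */
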
+
+static bool autorp_recv_announcement(struct pim_autorp *autorp, uint8_t rpcnt, uint16_t holdtime,
+ char *buf, size_t buf_size)
+{
+ int i, j;
+ struct autorp_pkt_rp *rp;
+ struct autorp_pkt_grp *grp;
+ size_t offset = 0;
+ pim_addr rp_addr;
+ struct pim_autorp_rp *ma_rp;
+ struct pim_autorp_rp *trp;
+
if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: AutoRP processed announcement message",
- __func__);
+ zlog_debug("%s: Processing AutoRP Announcement (rpcnt=%u, holdtime=%u)", __func__,
+ rpcnt, holdtime);
+
+ for (i = 0; i < rpcnt; ++i) {
+ if ((buf_size - offset) < AUTORP_RPLEN) {
+ zlog_warn("%s: Failed to parse AutoRP Announcement RP, invalid buffer size (%u < %u)",
+ __func__, (uint32_t)(buf_size - offset), AUTORP_RPLEN);
+ return false;
+ }
+
+ rp = (struct autorp_pkt_rp *)(buf + offset);
+ offset += AUTORP_RPLEN;
+
+ rp_addr.s_addr = rp->addr;
+
+ /* Ignore RPs limited to PIM version 1 or with an unknown version */
+ if (rp->pimver == AUTORP_PIM_V1 || rp->pimver == AUTORP_PIM_VUNKNOWN) {
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Ignoring unsupported PIM version (%u) in AutoRP Announcement for RP %pI4",
+ __func__, rp->pimver, (in_addr_t *)&(rp->addr));
+ /* Update the offset to skip past the groups advertised for this RP */
+ offset += (AUTORP_GRPLEN * rp->grpcnt);
+ continue;
+ }
+
+ if (rp->grpcnt == 0) {
+ /* No groups?? */
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Announcement message has no groups for RP %pI4",
+ __func__, (in_addr_t *)&(rp->addr));
+ continue;
+ }
+
+ if ((buf_size - offset) < AUTORP_GRPLEN) {
+ zlog_warn("%s: Buffer underrun parsing groups for RP %pI4", __func__,
+ (in_addr_t *)&(rp->addr));
+ return false;
+ }
+
+ /* Store all announced RPs; what goes into a discovery message is computed when it is sent. */
+ ma_rp = XCALLOC(MTYPE_PIM_AUTORP_RP, sizeof(struct pim_autorp_rp));
+ memcpy(&(ma_rp->addr), &rp_addr, sizeof(pim_addr));
+ trp = pim_autorp_rp_add(&(autorp->mapping_rp_list), ma_rp);
+ if (trp == NULL) {
+ /* RP was brand new, finish initializing */
+ ma_rp->autorp = autorp;
+ ma_rp->holdtime = holdtime;
+ ma_rp->hold_timer = NULL;
+ ma_rp->grplist[0] = '\0';
+ memset(&(ma_rp->grp), 0, sizeof(ma_rp->grp));
+ pim_autorp_grppfix_init(&ma_rp->grp_pfix_list);
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: New candidate RP learned (%pPA)", __func__,
+ &rp_addr);
+ } else {
+ /* Returned an existing entry, free allocated RP */
+ XFREE(MTYPE_PIM_AUTORP_RP, ma_rp);
+ ma_rp = trp;
+ /* Free the existing group prefix list, in case the advertised groups changed */
+ pim_autorp_grppfix_free(&ma_rp->grp_pfix_list);
+ }
+
+ /* Refresh the hold time, then cancel any existing timer and restart it */
+ ma_rp->holdtime = holdtime;
+ event_cancel(&ma_rp->hold_timer);
+ if (holdtime > 0)
+ event_add_timer(router->master, autorp_ma_rp_holdtime, ma_rp,
+ ma_rp->holdtime, &(ma_rp->hold_timer));
+
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Parsing %u group(s) for candidate RP %pPA", __func__,
+ rp->grpcnt, &rp_addr);
+
+ for (j = 0; j < rp->grpcnt; ++j) {
+ /* Parse the next group entry at the current offset */
+ struct pim_autorp_grppfix *lgrp;
+ struct pim_autorp_grppfix *tgrp;
+
+ if ((buf_size - offset) < AUTORP_GRPLEN) {
+ zlog_warn("%s: Failed parsing AutoRP announcement, RP(%pI4), invalid buffer size (%u < %u)",
+ __func__, &rp_addr, (uint32_t)(buf_size - offset),
+ AUTORP_GRPLEN);
+ return false;
+ }
+
+ grp = (struct autorp_pkt_grp *)(buf + offset);
+ offset += AUTORP_GRPLEN;
+
+ lgrp = XCALLOC(MTYPE_PIM_AUTORP_GRPPFIX, sizeof(struct pim_autorp_grppfix));
+ lgrp->grp.family = AF_INET;
+ lgrp->grp.prefixlen = grp->masklen;
+ lgrp->grp.u.prefix4.s_addr = grp->addr;
+ lgrp->negative = grp->negprefix;
+
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: %s%pFX added to candidate RP %pPA", __func__,
+ (lgrp->negative ? "!" : ""), &lgrp->grp, &rp_addr);
+
+ tgrp = pim_autorp_grppfix_add(&ma_rp->grp_pfix_list, lgrp);
+ if (tgrp != NULL) {
+ /* This should never happen but if there was an existing entry just free the
+ * allocated group prefix
+ */
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: %pFX was duplicated in AutoRP announcement",
+ __func__, &lgrp->grp);
+ XFREE(MTYPE_PIM_AUTORP_GRPPFIX, lgrp);
+ }
+ }
+ }
+
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP processed announcement message", __func__);
return true;
}
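
The parser above implies a packed wire layout roughly like the sketch below. The real definitions live in pim_autorp.h; the field widths, bit order, and type values here are assumptions for illustration only:

#include <stdint.h>

struct autorp_pkt_hdr {		/* AUTORP_HDRLEN bytes */
	uint8_t version : 4;	/* AUTORP_VERSION */
	uint8_t type : 4;	/* announcement or discovery */
	uint8_t rpcnt;		/* RP entries that follow */
	uint16_t holdtime;	/* network byte order */
	uint32_t reserved;
} __attribute__((packed));

struct autorp_pkt_rp {		/* AUTORP_RPLEN bytes, repeated rpcnt times */
	uint32_t addr;		/* RP address, network byte order */
	uint8_t pimver : 2;	/* v1-only/unknown RPs are skipped on receive */
	uint8_t reserved : 6;
	uint8_t grpcnt;		/* group entries following this RP */
} __attribute__((packed));

struct autorp_pkt_grp {		/* AUTORP_GRPLEN bytes, repeated grpcnt times */
	uint8_t negprefix : 1;	/* set: denied ("!") group range */
	uint8_t reserved : 7;
	uint8_t masklen;
	uint32_t addr;		/* group prefix, network byte order */
} __attribute__((packed));
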
-static void autorp_rp_holdtime(struct event *evt)
+static void autorp_cand_rp_holdtime(struct event *evt)
{
/* RP hold time expired, remove the RP */
struct pim_autorp_rp *rp = EVENT_ARG(evt);
@@ -232,79 +385,320 @@ static void autorp_rp_holdtime(struct event *evt)
(strlen(rp->grplist) ? rp->grplist : "NONE"));
pim_autorp_rp_del(&(rp->autorp->discovery_rp_list), rp);
- pim_autorp_rp_free(rp);
+ pim_autorp_rp_free(rp, true);
}
-static bool pim_autorp_add_rp(struct pim_autorp *autorp, pim_addr rpaddr,
- struct prefix grp, char *listname,
- uint16_t holdtime)
+static bool pim_autorp_add_rp(struct pim_autorp *autorp, pim_addr rpaddr, struct prefix grp,
+ char *listname, uint16_t holdtime)
{
struct pim_autorp_rp *rp;
struct pim_autorp_rp *trp = NULL;
int ret;
ret = pim_rp_new(autorp->pim, rpaddr, grp, listname, RP_SRC_AUTORP);
+
/* There may not be a path to the RP right now, but that doesn't mean it failed to add the RP */
if (ret != PIM_SUCCESS && ret != PIM_RP_NO_PATH) {
- zlog_err("%s: Failed to add new RP addr=%pI4, grp=%pFX, grplist=%s",
- __func__, &rpaddr, &grp,
- (listname ? listname : "NONE"));
+ zlog_warn("%s: Failed to add active RP addr=%pI4, grp=%pFX, grplist=%s", __func__,
+ &rpaddr, &grp, (listname ? listname : "NONE"));
return false;
}
- if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Added new AutoRP learned RP addr=%pI4, grp=%pFX, grplist=%s",
- __func__, &rpaddr, &grp,
- (listname ? listname : "NONE"));
-
rp = XCALLOC(MTYPE_PIM_AUTORP_RP, sizeof(*rp));
rp->autorp = autorp;
memcpy(&(rp->addr), &rpaddr, sizeof(pim_addr));
- prefix_copy(&(rp->grp), &grp);
- if (listname)
- snprintf(rp->grplist, sizeof(rp->grplist), "%s", listname);
- else
- rp->grplist[0] = '\0';
-
- rp->holdtime = holdtime;
- rp->hold_timer = NULL;
trp = pim_autorp_rp_add(&(autorp->discovery_rp_list), rp);
if (trp == NULL) {
/* RP was brand new */
trp = pim_autorp_rp_find(&(autorp->discovery_rp_list),
(const struct pim_autorp_rp *)rp);
+ /* Make sure the timer is NULL so the cancel below is a no-op on a fresh entry */
+ trp->hold_timer = NULL;
+ zlog_info("%s: Added new AutoRP learned RP addr=%pI4, grp=%pFX, grplist=%s",
+ __func__, &rpaddr, &grp, (listname ? listname : "NONE"));
} else {
- /* RP already existed */
+ /* RP already existed, free the temp one */
XFREE(MTYPE_PIM_AUTORP_RP, rp);
- event_cancel(&trp->hold_timer);
-
- /* We know the address matches, but these values may have changed */
- trp->holdtime = holdtime;
- prefix_copy(&(trp->grp), &grp);
- if (listname) {
- snprintf(trp->grplist, sizeof(trp->grplist), "%s",
- listname);
- } else {
- trp->grplist[0] = '\0';
- }
}
+ /* Cancel any existing timer before restarting it */
+ event_cancel(&trp->hold_timer);
+ trp->holdtime = holdtime;
+ prefix_copy(&(trp->grp), &grp);
+ if (listname)
+ snprintf(trp->grplist, sizeof(trp->grplist), "%s", listname);
+ else
+ trp->grplist[0] = '\0';
+
if (holdtime > 0) {
- event_add_timer(router->master, autorp_rp_holdtime, trp,
- holdtime, &(trp->hold_timer));
+ event_add_timer(router->master, autorp_cand_rp_holdtime, trp, holdtime,
+ &(trp->hold_timer));
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: Started %u second hold timer for RP %pI4", __func__,
holdtime, &trp->addr);
- } else {
- /* If hold time is zero, make sure there doesn't exist a hold timer for it already */
- event_cancel(&trp->hold_timer);
}
return true;
}
-static bool pim_autorp_discovery(struct pim_autorp *autorp, uint8_t rpcnt,
- uint16_t holdtime, char *buf, size_t buf_size)
+static size_t autorp_build_disc_rps(struct pim_autorp *autorp, uint8_t *buf, size_t buf_sz,
+ size_t *sz)
+{
+ /* Header has already been added, fill in starting with the address of RP1
+ * buf_sz is the max size of the buf
+ * sz is the current size of the packet, update as buf is filled
+ * return the total number of RPs added
+ *
+ * We need to resolve the announced RPs following these rules:
+ * 1) Co-existence of longer and shorter group prefixes, from different RPs. E.g. when RP1
+ * announces 224.2.*.*, and RP2 announces 224.2.2.*, both are accepted;
+ * 2) For announcements for identical group prefixes from two different RPs, the one from the
+ * RP with the higher IP address is accepted;
+ * 3) No duplicates are sent to the AUTORP-DISCOVERY address. E.g. if an RP announces both
+ * 224.2.2.* and 224.2.*.*, the former group-prefix is not sent and only 224.2.*.* is sent
+ * to the AUTORP-DISCOVERY address.
+ *
+ * Resolution happens in two passes. First, loop the stored RPs and extract their group
+ * prefixes into one sorted list, ordered from least specific to most specific
+ * (0.0.0.0/0 -> 239.255.255.255/32); each group prefix records the RP advertising it, and
+ * the higher RP address wins on duplicates, which resolves 2. Second, loop that group
+ * prefix list and fold the prefixes back into a list keyed by RP address, keeping only the
+ * least specific prefix per RP, which resolves 3. Rule 1 is about what is allowed: in the
+ * example above the two prefixes stay unique in the group prefix list, and since they fold
+ * back to different RPs, both are sent.
+ */
+
+ struct pim_autorp_rp *rp;
+ struct pim_autorp_rp *trp;
+ struct pim_autorp_grppfix *grp;
+ struct pim_autorp_grppfix *grp2;
+ struct pim_autorp_grppfix *tgrp;
+ struct pim_autorp_grppfix_head grplist;
+ bool skip = false;
+ size_t rpcnt = 0;
+ size_t bsz = 0;
+
+ /* Initialize the lists, grplist is temporary, disc rp list is stored long term for
+ * show output, so make sure it's empty
+ */
+ pim_autorp_grppfix_init(&grplist);
+ pim_autorp_rplist_free(&autorp->advertised_rp_list, false);
+
+ /* Loop the advertised RPs and their group prefixes to build a unique list of group
+ * prefixes, keeping just the highest-IP RP for each group prefix
+ */
+ frr_each (pim_autorp_rp, &autorp->mapping_rp_list, rp) {
+ frr_each (pim_autorp_grppfix, &rp->grp_pfix_list, grp) {
+ grp2 = XCALLOC(MTYPE_PIM_AUTORP_GRPPFIX, sizeof(struct pim_autorp_grppfix));
+ prefix_copy(&grp2->grp, &grp->grp);
+ grp2->negative = grp->negative;
+ grp2->rp = rp->addr;
+ tgrp = pim_autorp_grppfix_add(&grplist, grp2);
+ if (tgrp != NULL) {
+ /* Returned an existing entry. Keep the higher RP addr (rule 2) and free allocated object */
+ if (IPV4_ADDR_CMP(&tgrp->rp, &grp2->rp) < 0)
+ tgrp->rp = grp2->rp;
+ XFREE(MTYPE_PIM_AUTORP_GRPPFIX, grp2);
+ }
+ }
+ }
+
+ /* Now loop the unique group prefixes and put it back into an RP list */
+ frr_each (pim_autorp_grppfix, &grplist, grp) {
+ rp = XCALLOC(MTYPE_PIM_AUTORP_RP, sizeof(struct pim_autorp_rp));
+ rp->addr = grp->rp;
+ trp = pim_autorp_rp_add(&autorp->advertised_rp_list, rp);
+ if (trp == NULL) {
+ /* RP was brand new, finish initializing */
+ rp->autorp = NULL;
+ rp->holdtime = 0;
+ rp->hold_timer = NULL;
+ rp->grplist[0] = '\0';
+ memset(&(rp->grp), 0, sizeof(rp->grp));
+ pim_autorp_grppfix_init(&rp->grp_pfix_list);
+ } else {
+ /* Returned an existing entry, free allocated RP */
+ XFREE(MTYPE_PIM_AUTORP_RP, rp);
+ rp = trp;
+ }
+
+ /* Groups are in order from least specific to most, so go through the existing
+ * groups for this RP and see if the current group is within the prefix of one that
+ * is already in the list, if so, skip it, if not, add it
+ * If one is a positive match and the other is negative, then still include it.
+ */
+ skip = false;
+ frr_each (pim_autorp_grppfix, &rp->grp_pfix_list, grp2) {
+ if (prefix_match(&grp2->grp, &grp->grp) && grp->negative == grp2->negative) {
+ skip = true;
+ break;
+ }
+ }
+
+ if (skip)
+ continue;
+
+ /* add the group to the RP's group list */
+ grp2 = XCALLOC(MTYPE_PIM_AUTORP_GRPPFIX, sizeof(struct pim_autorp_grppfix));
+ prefix_copy(&grp2->grp, &grp->grp);
+ grp2->negative = grp->negative;
+ tgrp = pim_autorp_grppfix_add(&rp->grp_pfix_list, grp2);
+ assert(tgrp == NULL);
+ }
+
+ /* Done with temporary group prefix list, so free and finish */
+ pim_autorp_grppfix_free(&grplist);
+ pim_autorp_grppfix_fini(&grplist);
+
+ /* Now finally we can loop the disc rp list and build the packet */
+ frr_each (pim_autorp_rp, &autorp->advertised_rp_list, rp) {
+ struct autorp_pkt_rp *brp;
+ struct autorp_pkt_grp *bgrp;
+ size_t rp_sz;
+ size_t grpcnt;
+
+ grpcnt = pim_autorp_grppfix_count(&rp->grp_pfix_list);
+ rp_sz = sizeof(struct autorp_pkt_rp) + (grpcnt * sizeof(struct autorp_pkt_grp));
+ if (buf_sz < *sz + rp_sz) {
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Failed to pack AutoRP discovery packet, buffer overrun, (%u < %u)",
+ __func__, (uint32_t)buf_sz, (uint32_t)(*sz + rp_sz));
+ break;
+ }
+
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Add RP %pI4 (grpcnt=%u) to discovery message", __func__,
+ &rp->addr, (uint32_t)grpcnt);
+
+ rpcnt++;
+
+ brp = (struct autorp_pkt_rp *)(buf + bsz);
+ bsz += sizeof(struct autorp_pkt_rp);
+
+ /* Since this is an in_addr, assume it's already the right byte order */
+ brp->addr = rp->addr.s_addr;
+ brp->pimver = AUTORP_PIM_V2;
+ brp->reserved = 0;
+ brp->grpcnt = grpcnt;
+
+ frr_each (pim_autorp_grppfix, &rp->grp_pfix_list, grp) {
+ bgrp = (struct autorp_pkt_grp *)(buf + bsz);
+ bsz += sizeof(struct autorp_pkt_grp);
+
+ bgrp->addr = grp->grp.u.prefix4.s_addr;
+ bgrp->masklen = grp->grp.prefixlen;
+ bgrp->negprefix = grp->negative;
+
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Add group %s%pFX for RP %pI4 to discovery message",
+ __func__, (grp->negative ? "!" : ""), &grp->grp,
+ &rp->addr);
+ }
+
+ /* Update the size with just this RP's bytes (bsz accumulates across RPs) */
+ *sz += rp_sz;
+ }
+
+ return rpcnt;
+}
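
A worked example of the two passes (the RP addresses are illustrative):

    announced:  RP 10.0.0.1 -> 224.2.0.0/16 and 224.2.2.0/24
                RP 10.0.0.2 -> 224.2.2.0/24

    pass 1, unique group prefixes, higher RP wins duplicates (rule 2):
                224.2.0.0/16 -> 10.0.0.1
                224.2.2.0/24 -> 10.0.0.2

    pass 2, fold back per RP, dropping covered prefixes (rule 3):
                10.0.0.1 advertises 224.2.0.0/16
                10.0.0.2 advertises 224.2.2.0/24   (rule 1: both coexist)

Had both prefixes resolved to 10.0.0.1, 224.2.2.0/24 would have been dropped as covered by 224.2.0.0/16.
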
+
+static size_t autorp_build_disc_packet(struct pim_autorp *autorp, uint8_t *buf, size_t buf_sz)
+{
+ size_t sz = 0;
+ struct autorp_pkt_hdr *hdr;
+
+ if (buf_sz >= AUTORP_HDRLEN) {
+ hdr = (struct autorp_pkt_hdr *)buf;
+ hdr->version = AUTORP_VERSION;
+ hdr->type = AUTORP_DISCOVERY_TYPE;
+ hdr->holdtime = htons(autorp->discovery_holdtime);
+ hdr->reserved = 0;
+ sz += AUTORP_HDRLEN;
+ hdr->rpcnt = autorp_build_disc_rps(autorp, buf + sizeof(struct autorp_pkt_hdr),
+ (buf_sz - AUTORP_HDRLEN), &sz);
+ if (hdr->rpcnt == 0)
+ sz = 0;
+ }
+ return sz;
+}
+
+static void autorp_send_discovery(struct event *evt)
+{
+ struct pim_autorp *autorp = EVENT_ARG(evt);
+ struct sockaddr_in discGrp;
+ size_t disc_sz;
+ size_t buf_sz = 65535;
+ uint8_t buf[65535] = { 0 };
+
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP sending discovery info", __func__);
+
+ /* Mark true, even if nothing is sent */
+ autorp->mapping_agent_active = true;
+ disc_sz = autorp_build_disc_packet(autorp, buf, buf_sz);
+
+ if (disc_sz > 0) {
+ discGrp.sin_family = AF_INET;
+ discGrp.sin_port = htons(PIM_AUTORP_PORT);
+ inet_pton(PIM_AF, PIM_AUTORP_DISCOVERY_GRP, &discGrp.sin_addr);
+
+ if (setsockopt(autorp->sock, IPPROTO_IP, IP_MULTICAST_TTL,
+ &(autorp->discovery_scope), sizeof(autorp->discovery_scope)) == 0) {
+ if (setsockopt(autorp->sock, IPPROTO_IP, IP_MULTICAST_IF,
+ &(autorp->mapping_agent_addrsel.run_addr),
+ sizeof(autorp->mapping_agent_addrsel.run_addr)) == 0) {
+ if (sendto(autorp->sock, buf, disc_sz, 0,
+ (struct sockaddr *)&discGrp, sizeof(discGrp)) > 0) {
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP discovery message sent",
+ __func__);
+ } else if (PIM_DEBUG_AUTORP)
+ zlog_warn("%s: Failed to send AutoRP discovery message, errno=%d, %s",
+ __func__, errno, safe_strerror(errno));
+ } else if (PIM_DEBUG_AUTORP)
+ zlog_warn("%s: Failed to set Multicast Interface for sending AutoRP discovery message, errno=%d, %s",
+ __func__, errno, safe_strerror(errno));
+ } else if (PIM_DEBUG_AUTORP)
+ zlog_warn("%s: Failed to set Multicast TTL for sending AutoRP discovery message, errno=%d, %s",
+ __func__, errno, safe_strerror(errno));
+ }
+
+ /* Start the new timer for the entire send discovery interval */
+ event_add_timer(router->master, autorp_send_discovery, autorp, autorp->discovery_interval,
+ &(autorp->send_discovery_timer));
+}
+
+static void autorp_send_discovery_on(struct pim_autorp *autorp)
+{
+ int interval = 5;
+
+ /* Send the first discovery shortly after being enabled.
+ * If the configured interval is less than 5 seconds, then just use that.
+ */
+ if (interval > autorp->discovery_interval)
+ interval = autorp->discovery_interval;
+
+ if (autorp->send_discovery_timer == NULL)
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP discovery sending enabled in %u seconds", __func__,
+ interval);
+
+ event_add_timer(router->master, autorp_send_discovery, autorp, interval,
+ &(autorp->send_discovery_timer));
+}
+
+static void autorp_send_discovery_off(struct pim_autorp *autorp)
+{
+ if (autorp->send_discovery_timer)
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP discovery sending disabled", __func__);
+ event_cancel(&(autorp->send_discovery_timer));
+}
+
+static bool autorp_recv_discovery(struct pim_autorp *autorp, uint8_t rpcnt, uint16_t holdtime,
+ char *buf, size_t buf_size, pim_addr src)
{
int i, j;
struct autorp_pkt_rp *rp;
@@ -318,65 +712,122 @@ static bool pim_autorp_discovery(struct pim_autorp *autorp, uint8_t rpcnt,
int64_t seq = 1;
bool success = true;
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Received AutoRP discovery message (src=%pI4, rpcnt=%u, holdtime=%u)",
+ __func__, &src, rpcnt, holdtime);
+
+ if (autorp->send_rp_discovery &&
+ (pim_addr_cmp(autorp->mapping_agent_addrsel.run_addr, src) < 0)) {
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP send discovery suppressed -- Discovery received with higher IP address",
+ __func__);
+
+ /* Cancel the existing send timer and restart for 3X the send discovery interval */
+ event_cancel(&(autorp->send_discovery_timer));
+ event_add_timer(router->master, autorp_send_discovery, autorp,
+ (autorp->discovery_interval * 3), &(autorp->send_discovery_timer));
+
+ /* Clear the last sent discovery RPs, since they are no longer valid */
+ pim_autorp_rplist_free(&autorp->advertised_rp_list, false);
+ /* Unset flag indicating we are active */
+ autorp->mapping_agent_active = false;
+ }
+
for (i = 0; i < rpcnt; ++i) {
- if ((buf_size - offset) < AUTORP_RPLEN)
+ if ((buf_size - offset) < AUTORP_RPLEN) {
+ zlog_warn("%s: Failed to parse AutoRP discovery message, invalid buffer size (%u < %u)",
+ __func__, (uint32_t)(buf_size - offset), AUTORP_RPLEN);
return false;
+ }
rp = (struct autorp_pkt_rp *)(buf + offset);
offset += AUTORP_RPLEN;
rp_addr.s_addr = rp->addr;
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Parsing RP %pI4 (grpcnt=%u)", __func__,
+ (in_addr_t *)&rp->addr, rp->grpcnt);
+
/* Ignore RP's limited to PIM version 1 or with an unknown version */
- if (rp->pimver == PIM_V1 || rp->pimver == PIM_VUNKNOWN) {
- zlog_warn("%s: Ignoring unsupported PIM version in AutoRP Discovery for RP %pI4",
- __func__, (in_addr_t *)&(rp->addr));
+ if (rp->pimver == AUTORP_PIM_V1 || rp->pimver == AUTORP_PIM_VUNKNOWN) {
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Ignoring unsupported PIM version in AutoRP Discovery for RP %pI4",
+ __func__, (in_addr_t *)&(rp->addr));
/* Update the offset to skip past the groups advertised for this RP */
offset += (AUTORP_GRPLEN * rp->grpcnt);
continue;
}
-
if (rp->grpcnt == 0) {
/* No groups?? */
- zlog_warn("%s: Discovery message has no groups for RP %pI4",
- __func__, (in_addr_t *)&(rp->addr));
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Discovery message has no groups for RP %pI4",
+ __func__, (in_addr_t *)&(rp->addr));
continue;
}
- if ((buf_size - offset) < AUTORP_GRPLEN) {
- zlog_warn("%s: Buffer underrun parsing groups for RP %pI4",
- __func__, (in_addr_t *)&(rp->addr));
+ /* Make sure there is enough buffer to parse all the groups */
+ if ((buf_size - offset) < (AUTORP_GRPLEN * rp->grpcnt)) {
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Buffer underrun parsing groups for RP %pI4 (%u < %u)",
+ __func__, (in_addr_t *)&(rp->addr),
+ (uint32_t)(buf_size - offset),
+ (uint32_t)(AUTORP_GRPLEN * rp->grpcnt));
return false;
}
+ /* Peek at the first group to check for a negative prefix; don't advance
+ * offset yet so the multi-group loop below stays simple
+ */
grp = (struct autorp_pkt_grp *)(buf + offset);
- offset += AUTORP_GRPLEN;
if (rp->grpcnt == 1 && grp->negprefix == 0) {
/* Only one group with positive prefix, we can use the standard RP API */
+ offset += AUTORP_GRPLEN;
grppfix.family = AF_INET;
grppfix.prefixlen = grp->masklen;
grppfix.u.prefix4.s_addr = grp->addr;
- if (!pim_autorp_add_rp(autorp, rp_addr, grppfix, NULL,
- holdtime))
+
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Parsing group %s%pFX for RP %pI4", __func__,
+ (grp->negprefix ? "!" : ""), &grppfix,
+ (in_addr_t *)&rp->addr);
+
+ if (!pim_autorp_add_rp(autorp, rp_addr, grppfix, NULL, holdtime))
success = false;
} else {
- /* More than one grp, or the only group is a negative prefix, need to make a prefix list for this RP */
- snprintfrr(plname, sizeof(plname), "__AUTORP_%pI4__",
- &rp_addr);
+ /* More than one grp, or the only group is a negative prefix.
+ * Need to make a prefix list for this RP
+ */
+ snprintfrr(plname, sizeof(plname), "__AUTORP_%pI4__", &rp_addr);
+ pl = prefix_list_lookup(AFI_IP, plname);
+
+ if (pl) {
+ /* Existing prefix list found, delete it first */
+ /* TODO: Instead of deleting completely, maybe we can just clear it and re-add entries */
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Found existing prefix list %s, replacing it",
+ __func__, plname);
+ prefix_list_delete(pl);
+ }
+
+ /* Now get a new prefix list */
pl = prefix_list_get(AFI_IP, 0, plname);
for (j = 0; j < rp->grpcnt; ++j) {
- /* grp is already pointing at the first group in the buffer */
+ /* The first iteration re-reads the group peeked at above; offset then
+ * advances by one entry per loop while parsing
+ */
+ grp = (struct autorp_pkt_grp *)(buf + offset);
+ offset += AUTORP_GRPLEN;
+
ple = prefix_list_entry_new();
ple->pl = pl;
ple->seq = seq;
seq += 5;
memset(&ple->prefix, 0, sizeof(ple->prefix));
prefix_list_entry_update_start(ple);
- ple->type = (grp->negprefix ? PREFIX_DENY
- : PREFIX_PERMIT);
+ ple->type = (grp->negprefix ? PREFIX_DENY : PREFIX_PERMIT);
ple->prefix.family = AF_INET;
ple->prefix.prefixlen = grp->masklen;
ple->prefix.u.prefix4.s_addr = grp->addr;
@@ -385,60 +836,59 @@ static bool pim_autorp_discovery(struct pim_autorp *autorp, uint8_t rpcnt,
ple->le = 32;
prefix_list_entry_update_finish(ple);
- if ((buf_size - offset) < AUTORP_GRPLEN)
- return false;
-
- grp = (struct autorp_pkt_grp *)(buf + offset);
- offset += AUTORP_GRPLEN;
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Parsing group %s%pFX for RP %pI4", __func__,
+ (grp->negprefix ? "!" : ""), &ple->prefix,
+ (in_addr_t *)&rp->addr);
}
- if (!pim_autorp_add_rp(autorp, rp_addr, grppfix, plname,
- holdtime))
+ if (!pim_autorp_add_rp(autorp, rp_addr, grppfix, plname, holdtime))
success = false;
}
}
- if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Processed AutoRP Discovery message", __func__);
-
return success;
}
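
The preemption check near the top of autorp_recv_discovery() is the whole mapping-agent election; schematically (my_addr stands in for autorp->mapping_agent_addrsel.run_addr, everything else is taken from the diff):

/* only the highest-addressed active agent keeps sending; a losing agent
 * re-arms its timer for 3x the interval so it resumes if the winner goes
 * quiet, and drops its advertised state since it is no longer authoritative */
if (autorp->send_rp_discovery && pim_addr_cmp(my_addr, src) < 0) {
	event_cancel(&autorp->send_discovery_timer);
	event_add_timer(router->master, autorp_send_discovery, autorp,
			autorp->discovery_interval * 3,
			&autorp->send_discovery_timer);
	pim_autorp_rplist_free(&autorp->advertised_rp_list, false);
	autorp->mapping_agent_active = false;
}
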
-static bool pim_autorp_msg(struct pim_autorp *autorp, char *buf, size_t buf_size)
+static bool autorp_recv_msg(struct pim_autorp *autorp, char *buf, size_t buf_size, pim_addr src)
{
struct autorp_pkt_hdr *h;
- if (buf_size < AUTORP_HDRLEN)
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Received AutoRP message", __func__);
+
+ if (buf_size < AUTORP_HDRLEN) {
+ zlog_warn("%s: Invalid AutoRP Header size (%u < %u)", __func__, (uint32_t)buf_size,
+ AUTORP_HDRLEN);
return false;
+ }
h = (struct autorp_pkt_hdr *)buf;
- if (h->version != AUTORP_VERSION)
+ if (h->version != AUTORP_VERSION) {
+ zlog_warn("%s: Unsupported AutoRP version (%u != %u)", __func__, h->version,
+ AUTORP_VERSION);
return false;
+ }
- if (h->type == AUTORP_ANNOUNCEMENT_TYPE &&
- !pim_autorp_announcement(autorp, h->rpcnt, htons(h->holdtime),
- buf + AUTORP_HDRLEN,
- buf_size - AUTORP_HDRLEN))
- return false;
+ if (h->type == AUTORP_ANNOUNCEMENT_TYPE)
+ return autorp_recv_announcement(autorp, h->rpcnt, htons(h->holdtime),
+ buf + AUTORP_HDRLEN, buf_size - AUTORP_HDRLEN);
- if (h->type == AUTORP_DISCOVERY_TYPE &&
- !pim_autorp_discovery(autorp, h->rpcnt, htons(h->holdtime),
- buf + AUTORP_HDRLEN, buf_size - AUTORP_HDRLEN))
- return false;
+ if (h->type == AUTORP_DISCOVERY_TYPE)
+ return autorp_recv_discovery(autorp, h->rpcnt, htons(h->holdtime),
+ buf + AUTORP_HDRLEN, buf_size - AUTORP_HDRLEN, src);
- if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: Processed AutoRP packet", __func__);
+ zlog_warn("%s: Unknown AutoRP message type (%u)", __func__, h->type);
- return true;
+ return false;
}
static void autorp_read(struct event *t);
static void autorp_read_on(struct pim_autorp *autorp)
{
- event_add_read(router->master, autorp_read, autorp, autorp->sock,
- &(autorp->read_event));
+ event_add_read(router->master, autorp_read, autorp, autorp->sock, &(autorp->read_event));
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: AutoRP socket read enabled", __func__);
}
@@ -456,26 +906,35 @@ static void autorp_read(struct event *evt)
int fd = evt->u.fd;
char buf[10000];
int rd;
+ struct sockaddr_storage from;
+ socklen_t fromlen = sizeof(from);
+ pim_addr src;
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: Reading from AutoRP socket", __func__);
while (1) {
- rd = pim_socket_recvfromto(fd, (uint8_t *)buf, sizeof(buf),
- NULL, NULL, NULL, NULL, NULL);
+ rd = pim_socket_recvfromto(fd, (uint8_t *)buf, sizeof(buf), &from, &fromlen, NULL,
+ NULL, NULL);
if (rd <= 0) {
if (errno == EINTR)
continue;
if (errno == EWOULDBLOCK || errno == EAGAIN)
break;
+ zlog_warn("%s: Failure reading rd=%d: fd=%d: errno=%d: %s", __func__, rd,
+ fd, errno, safe_strerror(errno));
+ goto err;
+ }
- zlog_warn("%s: Failure reading rd=%d: fd=%d: errno=%d: %s",
- __func__, rd, fd, errno, safe_strerror(errno));
+ if (from.ss_family == AF_INET)
+ src.s_addr = ((struct sockaddr_in *)&from)->sin_addr.s_addr;
+ else {
+ zlog_warn("%s: AutoRP message is not IPV4", __func__);
goto err;
}
- if (!pim_autorp_msg(autorp, buf, rd))
- zlog_err("%s: Failure parsing AutoRP message", __func__);
+ if (!autorp_recv_msg(autorp, buf, rd, src))
+ zlog_warn("%s: Failure parsing AutoRP message", __func__);
/* Keep reading until would block */
}
@@ -493,23 +952,23 @@ static bool pim_autorp_socket_enable(struct pim_autorp *autorp)
frr_with_privs (&pimd_privs) {
fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
if (fd < 0) {
- zlog_warn("Could not create autorp socket: errno=%d: %s",
- errno, safe_strerror(errno));
+ zlog_warn("Could not create autorp socket: errno=%d: %s", errno,
+ safe_strerror(errno));
return false;
}
- autorp->sock = fd;
- if (!pim_autorp_setup(autorp)) {
- zlog_warn("Could not setup autorp socket fd=%d: errno=%d: %s",
- fd, errno, safe_strerror(errno));
+ if (!pim_autorp_setup(fd)) {
+ zlog_warn("Could not setup autorp socket fd=%d: errno=%d: %s", fd, errno,
+ safe_strerror(errno));
close(fd);
- autorp->sock = -1;
return false;
}
}
+ autorp->sock = fd;
+
if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: AutoRP socket enabled", __func__);
+ zlog_debug("%s: AutoRP socket enabled (fd=%u)", __func__, fd);
return true;
}
@@ -517,8 +976,8 @@ static bool pim_autorp_socket_enable(struct pim_autorp *autorp)
static bool pim_autorp_socket_disable(struct pim_autorp *autorp)
{
if (close(autorp->sock)) {
- zlog_warn("Failure closing autorp socket: fd=%d errno=%d: %s",
- autorp->sock, errno, safe_strerror(errno));
+ zlog_warn("Failure closing autorp socket: fd=%d errno=%d: %s", autorp->sock, errno,
+ safe_strerror(errno));
return false;
}
@@ -542,13 +1001,15 @@ static void autorp_send_announcement(struct event *evt)
announceGrp.sin_port = htons(PIM_AUTORP_PORT);
inet_pton(PIM_AF, PIM_AUTORP_ANNOUNCEMENT_GRP, &announceGrp.sin_addr);
- if (autorp->annouce_pkt_sz >= MIN_AUTORP_PKT_SZ) {
+ if (autorp->announce_pkt_sz >= MIN_AUTORP_PKT_SZ) {
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Sending AutoRP announcement", __func__);
+
if (setsockopt(autorp->sock, IPPROTO_IP, IP_MULTICAST_TTL,
- &(autorp->announce_scope),
- sizeof(autorp->announce_scope)) < 0) {
- if (PIM_DEBUG_AUTORP)
- zlog_err("%s: Failed to set Multicast TTL for sending AutoRP announcement message, errno=%d, %s",
- __func__, errno, safe_strerror(errno));
+ &(autorp->announce_scope), sizeof(autorp->announce_scope)) < 0) {
+ zlog_warn("%s: Failed to set Multicast TTL for sending AutoRP announcement message, errno=%d, %s",
+ __func__, errno, safe_strerror(errno));
+ return;
}
FOR_ALL_INTERFACES (autorp->pim->vrf, ifp) {
@@ -556,57 +1017,56 @@ static void autorp_send_announcement(struct event *evt)
/* Only send on active interfaces with full pim enabled, non-passive
* and have a primary address set.
*/
- if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) &&
- pim_ifp && pim_ifp->pim_enable &&
- !pim_ifp->pim_passive_enable &&
+ if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && pim_ifp &&
+ pim_ifp->pim_enable && !pim_ifp->pim_passive_enable &&
!pim_addr_is_any(pim_ifp->primary_address)) {
- if (setsockopt(autorp->sock, IPPROTO_IP,
- IP_MULTICAST_IF,
+ if (setsockopt(autorp->sock, IPPROTO_IP, IP_MULTICAST_IF,
&(pim_ifp->primary_address),
- sizeof(pim_ifp->primary_address)) <
- 0) {
- if (PIM_DEBUG_AUTORP)
- zlog_err("%s: Failed to set Multicast Interface for sending AutoRP announcement message, errno=%d, %s",
- __func__, errno,
- safe_strerror(errno));
+ sizeof(pim_ifp->primary_address)) < 0) {
+ zlog_warn("%s: Failed to set Multicast Interface for sending AutoRP announcement message, errno=%d, %s",
+ __func__, errno, safe_strerror(errno));
+ continue;
}
- if (sendto(autorp->sock, autorp->annouce_pkt,
- autorp->annouce_pkt_sz, 0,
+
+ if (sendto(autorp->sock, autorp->announce_pkt,
+ autorp->announce_pkt_sz, 0,
(struct sockaddr *)&announceGrp,
- sizeof(announceGrp)) <= 0) {
- if (PIM_DEBUG_AUTORP)
- zlog_err("%s: Failed to send AutoRP announcement message, errno=%d, %s",
- __func__, errno,
- safe_strerror(errno));
- }
+ sizeof(announceGrp)) <= 0)
+ zlog_warn("%s: Failed to send AutoRP announcement message, errno=%d, %s",
+ __func__, errno, safe_strerror(errno));
}
}
}
/* Start the new timer for the entire announce interval */
- event_add_timer(router->master, autorp_send_announcement, autorp,
- autorp->announce_interval, &(autorp->announce_timer));
+ event_add_timer(router->master, autorp_send_announcement, autorp, autorp->announce_interval,
+ &(autorp->announce_timer));
}
static void autorp_announcement_on(struct pim_autorp *autorp)
{
int interval = 5;
- if (interval > autorp->announce_interval) {
- /* If the configured interval is less than 5 seconds, then just use that */
+ /* Send the first announcement shortly after being enabled.
+ * If the configured interval is less than 5 seconds, then just use that.
+ */
+ if (interval > autorp->announce_interval)
interval = autorp->announce_interval;
- }
- event_add_timer(router->master, autorp_send_announcement, autorp,
- interval, &(autorp->announce_timer));
- if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: AutoRP announcement sending enabled", __func__);
+
+ if (autorp->announce_timer == NULL)
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP announcement sending enabled", __func__);
+
+ event_add_timer(router->master, autorp_send_announcement, autorp, interval,
+ &(autorp->announce_timer));
}
static void autorp_announcement_off(struct pim_autorp *autorp)
{
+ if (autorp->announce_timer != NULL)
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP announcement sending disabled", __func__);
event_cancel(&(autorp->announce_timer));
- if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: AutoRP announcement sending disabled", __func__);
}
/* Pack the groups of the RP
@@ -614,34 +1074,31 @@ static void autorp_announcement_off(struct pim_autorp *autorp)
* buf - Pointer to the buffer where to start packing groups
* returns - Total group count packed
*/
-static uint8_t pim_autorp_new_announcement_rp_grps(struct pim_autorp_rp *rp,
- uint8_t *buf)
+static uint8_t pim_autorp_new_announcement_rp_grps(struct pim_autorp_rp *rp, uint8_t *buf)
{
- struct prefix_list *plist;
- struct prefix_list_entry *ple;
struct autorp_pkt_grp *grpp = (struct autorp_pkt_grp *)buf;
uint8_t cnt = 0;
- in_addr_t taddr;
if (is_default_prefix(&(rp->grp))) {
/* No group so pack from the prefix list
* The grplist should be set and the prefix list exist with at least one group address
*/
+ struct prefix_list *plist;
+ struct prefix_list_entry *ple;
+
plist = prefix_list_lookup(AFI_IP, rp->grplist);
for (ple = plist->head; ple; ple = ple->next) {
- taddr = ntohl(ple->prefix.u.prefix4.s_addr);
- if ((taddr & 0xF0000000) == 0xE0000000) {
+ if (pim_addr_is_multicast(ple->prefix.u.prefix4) &&
+ ple->prefix.prefixlen >= 4) {
grpp->addr = ple->prefix.u.prefix4.s_addr;
grpp->masklen = ple->prefix.prefixlen;
- grpp->negprefix =
- (ple->type == PREFIX_PERMIT ? 0 : 1);
+ grpp->negprefix = (ple->type == PREFIX_PERMIT ? 0 : 1);
grpp->reserved = 0;
++cnt;
- grpp = (struct autorp_pkt_grp
- *)(buf +
- (sizeof(struct autorp_pkt_grp) *
- cnt));
+ grpp = (struct autorp_pkt_grp *)(buf +
+ (sizeof(struct autorp_pkt_grp) *
+ cnt));
}
}
@@ -661,20 +1118,16 @@ static uint8_t pim_autorp_new_announcement_rp_grps(struct pim_autorp_rp *rp,
* buf - Pointer to the buffer where to start packing the RP
* returns - Buffer pointer pointing to the start of the next RP
*/
-static uint8_t *pim_autorp_new_announcement_rp(struct pim_autorp_rp *rp,
- uint8_t *buf)
+static uint8_t *pim_autorp_new_announcement_rp(struct pim_autorp_rp *rp, uint8_t *buf)
{
struct autorp_pkt_rp *brp = (struct autorp_pkt_rp *)buf;
/* Since this is an in_addr, assume it's already the right byte order */
brp->addr = rp->addr.s_addr;
- brp->pimver = PIM_V2;
+ brp->pimver = AUTORP_PIM_V2;
brp->reserved = 0;
- brp->grpcnt =
- pim_autorp_new_announcement_rp_grps(rp,
- buf + sizeof(struct autorp_pkt_rp));
- return buf + sizeof(struct autorp_pkt_rp) +
- (brp->grpcnt * sizeof(struct autorp_pkt_grp));
+ brp->grpcnt = pim_autorp_new_announcement_rp_grps(rp, buf + sizeof(struct autorp_pkt_rp));
+ return buf + sizeof(struct autorp_pkt_rp) + (brp->grpcnt * sizeof(struct autorp_pkt_grp));
}
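
The packers above follow a cursor convention: each one writes at the pointer it is given and returns the advanced pointer, so callers chain packers without separate size bookkeeping. The generic shape, as a sketch rather than FRR code:

#include <stdint.h>
#include <string.h>

static uint8_t *pack_bytes(uint8_t *cur, const void *src, size_t len)
{
	memcpy(cur, src, len);
	return cur + len;	/* caller keeps writing from here */
}
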
/* Pack the candidate RP's on the announcement packet
@@ -683,36 +1136,45 @@ static uint8_t *pim_autorp_new_announcement_rp(struct pim_autorp_rp *rp,
* bufsz - Output parameter to track size of packed bytes
* returns - Total count of RP's packed
*/
-static int pim_autorp_new_announcement_rps(struct pim_autorp *autorp,
- uint8_t *buf, uint16_t *bufsz)
+static int pim_autorp_new_announcement_rps(struct pim_autorp *autorp, uint8_t *buf, uint16_t *bufsz)
{
int cnt = 0;
struct pim_autorp_rp *rp;
/* Keep the original buffer pointer to calculate final size after packing */
uint8_t *obuf = buf;
- struct prefix_list *plist;
- struct prefix_list_entry *ple;
- in_addr_t taddr;
frr_each_safe (pim_autorp_rp, &(autorp->candidate_rp_list), rp) {
- /* We must have an rp address and either group or list in order to pack this RP, so skip this one */
+ /* We must have an RP address and either a group or a group list to pack
+ * this RP; otherwise skip it
+ */
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: Evaluating AutoRP candidate %pI4, group range %pFX, group list %s",
+ __func__, &rp->addr, &rp->grp, rp->grplist);
+
if (pim_addr_is_any(rp->addr) ||
- (is_default_prefix(&(rp->grp)) && strlen(rp->grplist) == 0))
+ (is_default_prefix(&rp->grp) && strlen(rp->grplist) == 0))
continue;
- /* Group is net set, so list must be set, make sure the prefix list exists and has valid multicast groups */
- if (is_default_prefix(&(rp->grp))) {
+ /* Make sure that either group prefix is set, or that the prefix list exists and has at
+ * least one valid multicast prefix in it. Only multicast prefixes will be used.
+ */
+ if (is_default_prefix(&rp->grp)) {
+ struct prefix_list *plist;
+ struct prefix_list_entry *ple;
+
plist = prefix_list_lookup(AFI_IP, rp->grplist);
if (plist == NULL)
continue;
for (ple = plist->head; ple; ple = ple->next) {
- taddr = ntohl(ple->prefix.u.prefix4.s_addr);
- if ((taddr & 0xF0000000) == 0xE0000000)
+ if (pim_addr_is_multicast(ple->prefix.u.prefix4) &&
+ ple->prefix.prefixlen >= 4)
break;
}
- /* If we went through the entire list without finding a multicast prefix, then skip this RP */
+ /* If we went through the entire list without finding a multicast prefix,
+ * then skip this RP
+ */
if (ple == NULL)
continue;
}
@@ -721,6 +1183,10 @@ static int pim_autorp_new_announcement_rps(struct pim_autorp *autorp,
++cnt;
/* This will return the buffer pointer at the location to start packing the next RP */
buf = pim_autorp_new_announcement_rp(rp, buf);
+
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP candidate %pI4 added to announcement", __func__,
+ &rp->addr);
}
if (cnt > 0)
@@ -729,7 +1195,9 @@ static int pim_autorp_new_announcement_rps(struct pim_autorp *autorp,
return cnt;
}
-/* Build the new announcement packet. If there is a packet to send, restart the send timer with a short wait */
+/* Build the new announcement packet. If there is a packet to send, restart the send timer
+ * with a short wait
+ */
static void pim_autorp_new_announcement(struct pim_instance *pim)
{
struct pim_autorp *autorp = pim->autorp;
@@ -739,70 +1207,87 @@ static void pim_autorp_new_announcement(struct pim_instance *pim)
/* First disable any existing send timer */
autorp_announcement_off(autorp);
- if (!autorp->annouce_pkt) {
- /*
- * First time building, allocate the space
- * Allocate the max packet size of 65536 so we don't need to resize later.
- * This should be ok since we are only allocating the memory once for a single packet (potentially per vrf)
- */
- autorp->annouce_pkt = XCALLOC(MTYPE_PIM_AUTORP_ANNOUNCE, 65536);
- }
+ /*
+ * First time building, allocate the space
+ * Allocate the max packet size of 65536 so we don't need to resize later.
+ * This should be ok since we are only allocating the memory once for a single packet
+ * (potentially per vrf)
+ */
+ if (!autorp->announce_pkt)
+ autorp->announce_pkt = XCALLOC(MTYPE_PIM_AUTORP_ANNOUNCE, 65536);
- autorp->annouce_pkt_sz = 0;
+ autorp->announce_pkt_sz = 0;
holdtime = autorp->announce_holdtime;
- if (holdtime == DEFAULT_ANNOUNCE_HOLDTIME)
+ if (holdtime == DEFAULT_AUTORP_ANNOUNCE_HOLDTIME)
holdtime = autorp->announce_interval * 3;
if (holdtime > UINT16_MAX)
holdtime = UINT16_MAX;
- hdr = (struct autorp_pkt_hdr *)autorp->annouce_pkt;
+ hdr = (struct autorp_pkt_hdr *)autorp->announce_pkt;
hdr->version = AUTORP_VERSION;
hdr->type = AUTORP_ANNOUNCEMENT_TYPE;
hdr->holdtime = htons((uint16_t)holdtime);
hdr->reserved = 0;
- hdr->rpcnt =
- pim_autorp_new_announcement_rps(autorp,
- autorp->annouce_pkt +
- sizeof(struct autorp_pkt_hdr),
- &(autorp->annouce_pkt_sz));
+ hdr->rpcnt = pim_autorp_new_announcement_rps(autorp,
+ autorp->announce_pkt +
+ sizeof(struct autorp_pkt_hdr),
+ &(autorp->announce_pkt_sz));
/* Still need to add on the size of the header */
- autorp->annouce_pkt_sz += sizeof(struct autorp_pkt_hdr);
+ autorp->announce_pkt_sz += sizeof(struct autorp_pkt_hdr);
/* Only turn on the announcement timer if we have a packet to send */
- if (autorp->annouce_pkt_sz >= MIN_AUTORP_PKT_SZ)
+ if (autorp->announce_pkt_sz >= MIN_AUTORP_PKT_SZ)
autorp_announcement_on(autorp);
}
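
The holdtime defaulting buried in the function above, pulled out as a standalone sketch: a holdtime left at its "unset" sentinel advertises three times the announce interval, clamped to the 16-bit field carried in the packet header (the parameter types are assumptions):

#include <stdint.h>

static uint16_t effective_holdtime(int32_t configured, uint16_t interval,
				   int32_t unset_sentinel)
{
	int64_t ht = configured;

	if (ht == unset_sentinel)	/* DEFAULT_AUTORP_ANNOUNCE_HOLDTIME */
		ht = (int64_t)interval * 3;
	if (ht > UINT16_MAX)
		ht = UINT16_MAX;
	return (uint16_t)ht;		/* caller stores htons() of this */
}
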
+void pim_autorp_prefix_list_update(struct pim_instance *pim, struct prefix_list *plist)
+{
+ struct pim_autorp_rp *rp = NULL;
+ struct pim_autorp *autorp = NULL;
+
+ autorp = pim->autorp;
+ if (autorp == NULL)
+ return;
+
+ /* Search for a candidate RP using this prefix list */
+ frr_each_safe (pim_autorp_rp, &(autorp->candidate_rp_list), rp) {
+ if (strmatch(rp->grplist, plist->name))
+ break;
+ }
+
+ /* If we broke out of the loop early because we found a match, then rebuild the announcement */
+ if (rp != NULL)
+ pim_autorp_new_announcement(pim);
+}
+
bool pim_autorp_rm_candidate_rp(struct pim_instance *pim, pim_addr rpaddr)
{
struct pim_autorp *autorp = pim->autorp;
struct pim_autorp_rp *rp;
struct pim_autorp_rp find = { .addr = rpaddr };
- rp = pim_autorp_rp_find(&(autorp->candidate_rp_list),
- (const struct pim_autorp_rp *)&find);
+ rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), (const struct pim_autorp_rp *)&find);
if (!rp)
return false;
pim_autorp_rp_del(&(autorp->candidate_rp_list), rp);
- pim_autorp_rp_free(rp);
+ pim_autorp_rp_free(rp, false);
pim_autorp_new_announcement(pim);
return true;
}
-void pim_autorp_add_candidate_rp_group(struct pim_instance *pim,
- pim_addr rpaddr, struct prefix group)
+void pim_autorp_add_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr,
+ struct prefix group)
{
struct pim_autorp *autorp = pim->autorp;
struct pim_autorp_rp *rp;
struct pim_autorp_rp find = { .addr = rpaddr };
- rp = pim_autorp_rp_find(&(autorp->candidate_rp_list),
- (const struct pim_autorp_rp *)&find);
+ rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), (const struct pim_autorp_rp *)&find);
if (!rp) {
- rp = XCALLOC(MTYPE_PIM_AUTORP_CRP, sizeof(*rp));
+ rp = XCALLOC(MTYPE_PIM_AUTORP_RP, sizeof(*rp));
memset(rp, 0, sizeof(struct pim_autorp_rp));
rp->autorp = autorp;
memcpy(&(rp->addr), &rpaddr, sizeof(pim_addr));
@@ -817,15 +1302,13 @@ void pim_autorp_add_candidate_rp_group(struct pim_instance *pim,
pim_autorp_new_announcement(pim);
}
-bool pim_autorp_rm_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr,
- struct prefix group)
+bool pim_autorp_rm_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr, struct prefix group)
{
struct pim_autorp *autorp = pim->autorp;
struct pim_autorp_rp *rp;
struct pim_autorp_rp find = { .addr = rpaddr };
- rp = pim_autorp_rp_find(&(autorp->candidate_rp_list),
- (const struct pim_autorp_rp *)&find);
+ rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), (const struct pim_autorp_rp *)&find);
if (!rp)
return false;
@@ -834,17 +1317,15 @@ bool pim_autorp_rm_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr,
return true;
}
-void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim,
- pim_addr rpaddr, const char *plist)
+void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr, const char *plist)
{
struct pim_autorp *autorp = pim->autorp;
struct pim_autorp_rp *rp;
struct pim_autorp_rp find = { .addr = rpaddr };
- rp = pim_autorp_rp_find(&(autorp->candidate_rp_list),
- (const struct pim_autorp_rp *)&find);
+ rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), (const struct pim_autorp_rp *)&find);
if (!rp) {
- rp = XCALLOC(MTYPE_PIM_AUTORP_CRP, sizeof(*rp));
+ rp = XCALLOC(MTYPE_PIM_AUTORP_RP, sizeof(*rp));
memset(rp, 0, sizeof(struct pim_autorp_rp));
rp->autorp = autorp;
memcpy(&(rp->addr), &rpaddr, sizeof(pim_addr));
@@ -859,15 +1340,13 @@ void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim,
pim_autorp_new_announcement(pim);
}
-bool pim_autorp_rm_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr,
- const char *plist)
+bool pim_autorp_rm_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr, const char *plist)
{
struct pim_autorp *autorp = pim->autorp;
struct pim_autorp_rp *rp;
struct pim_autorp_rp find = { .addr = rpaddr };
- rp = pim_autorp_rp_find(&(autorp->candidate_rp_list),
- (const struct pim_autorp_rp *)&find);
+ rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), (const struct pim_autorp_rp *)&find);
if (!rp)
return false;
@@ -880,7 +1359,7 @@ void pim_autorp_announce_scope(struct pim_instance *pim, uint8_t scope)
{
struct pim_autorp *autorp = pim->autorp;
- scope = (scope == 0 ? DEFAULT_ANNOUNCE_SCOPE : scope);
+ scope = (scope == 0 ? DEFAULT_AUTORP_ANNOUNCE_SCOPE : scope);
if (autorp->announce_scope != scope) {
autorp->announce_scope = scope;
pim_autorp_new_announcement(pim);
@@ -891,7 +1370,7 @@ void pim_autorp_announce_interval(struct pim_instance *pim, uint16_t interval)
{
struct pim_autorp *autorp = pim->autorp;
- interval = (interval == 0 ? DEFAULT_ANNOUNCE_INTERVAL : interval);
+ interval = (interval == 0 ? DEFAULT_AUTORP_ANNOUNCE_INTERVAL : interval);
if (autorp->announce_interval != interval) {
autorp->announce_interval = interval;
pim_autorp_new_announcement(pim);
@@ -908,6 +1387,16 @@ void pim_autorp_announce_holdtime(struct pim_instance *pim, int32_t holdtime)
}
}
+void pim_autorp_send_discovery_apply(struct pim_autorp *autorp)
+{
+ if (!autorp->mapping_agent_addrsel.run || !autorp->send_rp_discovery) {
+ autorp_send_discovery_off(autorp);
+ return;
+ }
+
+ autorp_send_discovery_on(autorp);
+}
+
void pim_autorp_add_ifp(struct interface *ifp)
{
/* Add a new interface for autorp
@@ -923,17 +1412,15 @@ void pim_autorp_add_ifp(struct interface *ifp)
struct pim_interface *pim_ifp;
pim_ifp = ifp->info;
- if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && pim_ifp &&
- pim_ifp->pim_enable) {
+ if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && pim_ifp && pim_ifp->pim_enable) {
pim = pim_ifp->pim;
if (pim && pim->autorp && pim->autorp->do_discovery) {
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: Adding interface %s to AutoRP, joining AutoRP groups",
__func__, ifp->name);
- if (!pim_autorp_join_groups(ifp)) {
- zlog_err("Could not join AutoRP groups, errno=%d, %s",
- errno, safe_strerror(errno));
- }
+ if (!pim_autorp_join_groups(ifp))
+ zlog_warn("Could not join AutoRP groups, errno=%d, %s", errno,
+ safe_strerror(errno));
}
}
}
@@ -954,10 +1441,9 @@ void pim_autorp_rm_ifp(struct interface *ifp)
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: Removing interface %s from AutoRP, leaving AutoRP groups",
__func__, ifp->name);
- if (!pim_autorp_leave_groups(ifp)) {
- zlog_err("Could not leave AutoRP groups, errno=%d, %s",
- errno, safe_strerror(errno));
- }
+ if (!pim_autorp_leave_groups(ifp))
+ zlog_warn("Could not leave AutoRP groups, errno=%d, %s", errno,
+ safe_strerror(errno));
}
}
}
@@ -1008,18 +1494,28 @@ void pim_autorp_init(struct pim_instance *pim)
autorp->read_event = NULL;
autorp->announce_timer = NULL;
autorp->do_discovery = false;
+ autorp->send_discovery_timer = NULL;
+ autorp->send_rp_discovery = false;
pim_autorp_rp_init(&(autorp->discovery_rp_list));
pim_autorp_rp_init(&(autorp->candidate_rp_list));
- autorp->announce_scope = DEFAULT_ANNOUNCE_SCOPE;
- autorp->announce_interval = DEFAULT_ANNOUNCE_INTERVAL;
- autorp->announce_holdtime = DEFAULT_ANNOUNCE_HOLDTIME;
+ pim_autorp_rp_init(&(autorp->mapping_rp_list));
+ pim_autorp_rp_init(&autorp->advertised_rp_list);
+ autorp->announce_scope = DEFAULT_AUTORP_ANNOUNCE_SCOPE;
+ autorp->announce_interval = DEFAULT_AUTORP_ANNOUNCE_INTERVAL;
+ autorp->announce_holdtime = DEFAULT_AUTORP_ANNOUNCE_HOLDTIME;
+ autorp->discovery_scope = DEFAULT_AUTORP_DISCOVERY_SCOPE;
+ autorp->discovery_interval = DEFAULT_AUTORP_DISCOVERY_INTERVAL;
+ autorp->discovery_holdtime = DEFAULT_AUTORP_DISCOVERY_HOLDTIME;
+ cand_addrsel_clear(&(autorp->mapping_agent_addrsel));
+
+ pim->autorp = autorp;
if (!pim_autorp_socket_enable(autorp)) {
- zlog_err("%s: AutoRP failed to initialize", __func__);
+ zlog_warn("%s: AutoRP failed to initialize, feature will not work correctly",
+ __func__);
return;
}
- pim->autorp = autorp;
if (PIM_DEBUG_AUTORP)
zlog_debug("%s: AutoRP Initialized", __func__);
@@ -1032,24 +1528,20 @@ void pim_autorp_finish(struct pim_instance *pim)
struct pim_autorp *autorp = pim->autorp;
autorp_read_off(autorp);
+ autorp_announcement_off(autorp);
+ autorp_send_discovery_off(autorp);
pim_autorp_free(autorp);
- if (pim_autorp_socket_disable(autorp)) {
- if (PIM_DEBUG_AUTORP)
- zlog_debug("%s: AutoRP Finished", __func__);
- } else
- zlog_err("%s: AutoRP failed to finish", __func__);
-
+ pim_autorp_socket_disable(autorp);
XFREE(MTYPE_PIM_AUTORP, pim->autorp);
+
+ if (PIM_DEBUG_AUTORP)
+ zlog_debug("%s: AutoRP Finished", __func__);
}
int pim_autorp_config_write(struct pim_instance *pim, struct vty *vty)
{
struct pim_autorp_rp *rp;
struct pim_autorp *autorp = pim->autorp;
- char interval_str[16] = { 0 };
- char scope_str[16] = { 0 };
- char holdtime_str[32] = { 0 };
- char grp_str[64] = { 0 };
int writes = 0;
if (!autorp->do_discovery) {
@@ -1057,24 +1549,17 @@ int pim_autorp_config_write(struct pim_instance *pim, struct vty *vty)
++writes;
}
- if (autorp->announce_interval != DEFAULT_ANNOUNCE_INTERVAL) {
- snprintf(interval_str, sizeof(interval_str), " interval %u",
- autorp->announce_interval);
- }
-
- if (autorp->announce_scope != DEFAULT_ANNOUNCE_SCOPE) {
- snprintf(scope_str, sizeof(scope_str), " scope %u",
- autorp->announce_scope);
- }
-
- if (autorp->announce_holdtime != DEFAULT_ANNOUNCE_HOLDTIME) {
- snprintf(holdtime_str, sizeof(holdtime_str), " holdtime %u",
- autorp->announce_holdtime);
- }
-
- if (strlen(interval_str) || strlen(scope_str) || strlen(holdtime_str)) {
- vty_out(vty, " autorp announce%s%s%s\n", interval_str,
- scope_str, holdtime_str);
+ if (autorp->announce_interval != DEFAULT_AUTORP_ANNOUNCE_INTERVAL ||
+ autorp->announce_scope != DEFAULT_AUTORP_ANNOUNCE_SCOPE ||
+ autorp->announce_holdtime != DEFAULT_AUTORP_ANNOUNCE_HOLDTIME) {
+ vty_out(vty, " autorp announce");
+ if (autorp->announce_interval != DEFAULT_AUTORP_ANNOUNCE_INTERVAL)
+ vty_out(vty, " interval %u", autorp->announce_interval);
+ if (autorp->announce_scope != DEFAULT_AUTORP_ANNOUNCE_SCOPE)
+ vty_out(vty, " scope %u", autorp->announce_scope);
+ if (autorp->announce_holdtime != DEFAULT_AUTORP_ANNOUNCE_HOLDTIME)
+ vty_out(vty, " holdtime %u", autorp->announce_holdtime);
+ vty_out(vty, "\n");
++writes;
}
@@ -1084,83 +1569,371 @@ int pim_autorp_config_write(struct pim_instance *pim, struct vty *vty)
(is_default_prefix(&(rp->grp)) && strlen(rp->grplist) == 0))
continue;
- /* Don't make sure the prefix list has multicast groups, user may not have created it yet */
-
+ vty_out(vty, " autorp announce %pI4", &(rp->addr));
if (!is_default_prefix(&(rp->grp)))
- snprintfrr(grp_str, sizeof(grp_str), "%pFX", &(rp->grp));
+ vty_out(vty, " %pFX", &(rp->grp));
else
- snprintfrr(grp_str, sizeof(grp_str), "group-list %s",
- rp->grplist);
-
- vty_out(vty, " autorp announce %pI4 %s\n", &(rp->addr), grp_str);
+ vty_out(vty, " group-list %s", rp->grplist);
+ vty_out(vty, "\n");
++writes;
}
+ if (autorp->send_rp_discovery) {
+ if (autorp->mapping_agent_addrsel.cfg_enable) {
+ vty_out(vty, " autorp send-rp-discovery");
+ switch (autorp->mapping_agent_addrsel.cfg_mode) {
+ case CAND_ADDR_LO:
+ break;
+ case CAND_ADDR_ANY:
+ vty_out(vty, " source any");
+ break;
+ case CAND_ADDR_IFACE:
+ vty_out(vty, " source interface %s",
+ autorp->mapping_agent_addrsel.cfg_ifname);
+ break;
+ case CAND_ADDR_EXPLICIT:
+ vty_out(vty, " source address %pPA",
+ &autorp->mapping_agent_addrsel.cfg_addr);
+ break;
+ }
+ vty_out(vty, "\n");
+ ++writes;
+ }
+
+ if (autorp->discovery_interval != DEFAULT_AUTORP_DISCOVERY_INTERVAL ||
+ autorp->discovery_scope != DEFAULT_AUTORP_DISCOVERY_SCOPE ||
+ autorp->discovery_holdtime != DEFAULT_AUTORP_DISCOVERY_HOLDTIME) {
+ vty_out(vty, " autorp send-rp-discovery");
+ if (autorp->discovery_interval != DEFAULT_AUTORP_DISCOVERY_INTERVAL)
+ vty_out(vty, " interval %u", autorp->discovery_interval);
+ if (autorp->discovery_scope != DEFAULT_AUTORP_DISCOVERY_SCOPE)
+ vty_out(vty, " scope %u", autorp->discovery_scope);
+ if (autorp->discovery_holdtime != DEFAULT_AUTORP_DISCOVERY_HOLDTIME)
+ vty_out(vty, " holdtime %u", autorp->discovery_holdtime);
+ vty_out(vty, "\n");
+ ++writes;
+ }
+ }
+
return writes;
}
-void pim_autorp_show_autorp(struct vty *vty, struct pim_instance *pim,
- json_object *json)
+static void pim_autorp_show_autorp_json(struct pim_autorp *autorp, const char *component,
+ json_object *json, struct ttable *cand_table)
{
struct pim_autorp_rp *rp;
+
+ if (!component || strmatch(component, "discovery")) {
+ json_object *disc_obj;
+
+ disc_obj = json_object_new_object();
+ json_object_boolean_add(disc_obj, "enabled", autorp->do_discovery);
+ if (autorp->do_discovery) {
+ json_object *rplist_obj;
+
+ rplist_obj = json_object_new_object();
+ frr_each (pim_autorp_rp, &(autorp->discovery_rp_list), rp) {
+ json_object *rp_obj;
+ json_object *grp_arr;
+
+ rp_obj = json_object_new_object();
+ json_object_string_addf(rp_obj, "rpAddress", "%pI4", &rp->addr);
+ json_object_int_add(rp_obj, "holdtime", rp->holdtime);
+ grp_arr = json_object_new_array();
+
+ if (strlen(rp->grplist)) {
+ struct prefix_list *pl;
+ struct prefix_list_entry *ple;
+
+ pl = prefix_list_lookup(AFI_IP, rp->grplist);
+ if (pl == NULL)
+ continue;
+
+ for (ple = pl->head; ple != NULL; ple = ple->next) {
+ json_object *grp_obj;
+
+ grp_obj = json_object_new_object();
+ json_object_boolean_add(grp_obj, "negative",
+ ple->type == PREFIX_DENY);
+ json_object_string_addf(grp_obj, "prefix", "%pFX",
+ &ple->prefix);
+ json_object_array_add(grp_arr, grp_obj);
+ }
+ } else {
+ json_object *grp_obj;
+
+ grp_obj = json_object_new_object();
+ json_object_boolean_add(grp_obj, "negative", false);
+ json_object_string_addf(grp_obj, "prefix", "%pFX", &rp->grp);
+ json_object_array_add(grp_arr, grp_obj);
+ }
+
+ json_object_object_add(rp_obj, "groupRanges", grp_arr);
+ json_object_object_addf(rplist_obj, rp_obj, "%pI4", &rp->addr);
+ }
+ json_object_object_add(disc_obj, "rpList", rplist_obj);
+ }
+ json_object_object_add(json, "discovery", disc_obj);
+ }
+
+ if (!component || strmatch(component, "candidate")) {
+ json_object *announce_obj;
+
+ announce_obj = json_object_new_object();
+ json_object_boolean_add(announce_obj, "enabled",
+ pim_autorp_rp_count(&autorp->candidate_rp_list) > 0);
+ if (pim_autorp_rp_count(&autorp->candidate_rp_list) > 0) {
+ json_object_int_add(announce_obj, "scope", autorp->announce_scope);
+ json_object_int_add(announce_obj, "interval", autorp->announce_interval);
+ json_object_int_add(announce_obj, "holdtime",
+ (autorp->announce_holdtime ==
+ DEFAULT_AUTORP_ANNOUNCE_HOLDTIME
+ ? (autorp->announce_interval * 3)
+ : autorp->announce_holdtime));
+ json_object_object_add(announce_obj, "rpList",
+ ttable_json_with_json_text(cand_table, "sss",
+ "rpAddress|groupRange|prefixList"));
+ }
+ json_object_object_add(json, "announce", announce_obj);
+ }
+
+ if (!component || strmatch(component, "mapping-agent")) {
+ json_object *adv_obj;
+
+ adv_obj = json_object_new_object();
+ json_object_boolean_add(adv_obj, "enabled", autorp->send_rp_discovery);
+ if (autorp->send_rp_discovery) {
+ json_object *rplist_obj;
+
+ json_object_boolean_add(adv_obj, "active", autorp->mapping_agent_active);
+ json_object_int_add(adv_obj, "scope", autorp->discovery_scope);
+ json_object_int_add(adv_obj, "interval", autorp->discovery_interval);
+ json_object_int_add(adv_obj, "holdtime", autorp->discovery_holdtime);
+ switch (autorp->mapping_agent_addrsel.cfg_mode) {
+ case CAND_ADDR_LO:
+ json_object_string_add(adv_obj, "source", "loopback");
+ break;
+ case CAND_ADDR_ANY:
+ json_object_string_add(adv_obj, "source", "any");
+ break;
+ case CAND_ADDR_IFACE:
+ json_object_string_add(adv_obj, "source", "interface");
+ json_object_string_add(adv_obj, "interface",
+ autorp->mapping_agent_addrsel.cfg_ifname);
+ break;
+ case CAND_ADDR_EXPLICIT:
+ json_object_string_add(adv_obj, "source", "address");
+ break;
+ }
+ json_object_string_addf(adv_obj, "address", "%pPA",
+ &autorp->mapping_agent_addrsel.run_addr);
+
+ rplist_obj = json_object_new_object();
+ frr_each (pim_autorp_rp, &(autorp->advertised_rp_list), rp) {
+ json_object *rp_obj;
+ json_object *grp_arr;
+ struct pim_autorp_grppfix *grppfix;
+
+ rp_obj = json_object_new_object();
+ json_object_string_addf(rp_obj, "rpAddress", "%pI4", &rp->addr);
+ grp_arr = json_object_new_array();
+ frr_each (pim_autorp_grppfix, &rp->grp_pfix_list, grppfix) {
+ json_object *grp_obj;
+
+ grp_obj = json_object_new_object();
+ json_object_boolean_add(grp_obj, "negative",
+ grppfix->negative);
+ json_object_string_addf(grp_obj, "prefix", "%pFX",
+ &grppfix->grp);
+ json_object_array_add(grp_arr, grp_obj);
+ }
+ json_object_object_add(rp_obj, "groupRanges", grp_arr);
+ json_object_object_addf(rplist_obj, rp_obj, "%pI4", &rp->addr);
+ }
+ json_object_object_add(adv_obj, "rpList", rplist_obj);
+ }
+ json_object_object_add(json, "mapping-agent", adv_obj);
+ }
+}
+
+void pim_autorp_show_autorp(struct vty *vty, struct pim_instance *pim, const char *component,
+ json_object *json)
+{
struct pim_autorp *autorp = pim->autorp;
- struct ttable *tt = NULL;
- char *table = NULL;
- char grp_str[64] = { 0 };
- char plist_str[64] = { 0 };
- json_object *annouce_jobj;
-
- /* Prepare table. */
- tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
- ttable_add_row(tt, "RP address|group|prefix-list");
- tt->style.cell.rpad = 2;
- tt->style.corner = '+';
- ttable_restyle(tt);
+ struct pim_autorp_rp *rp;
+ struct ttable *cand_table = NULL;
+ struct ttable *adv_table = NULL;
+ struct ttable *disc_table = NULL;
+ char *tmp;
- frr_each_safe (pim_autorp_rp, &(autorp->candidate_rp_list), rp) {
- if (!is_default_prefix(&(rp->grp)))
- snprintfrr(grp_str, sizeof(grp_str), "%pFX", &(rp->grp));
- else
- snprintfrr(plist_str, sizeof(plist_str), "%s",
- rp->grplist);
+ if (autorp == NULL)
+ return;
- ttable_add_row(tt, "%pI4|%s|%s", &(rp->addr), grp_str,
- plist_str);
+ /* We may use the candidate table in the json output, so prepare it first. */
+ if (!component || strmatch(component, "candidate")) {
+ cand_table = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(cand_table, "RP address|Group Range|Prefix-List");
+ cand_table->style.cell.rpad = 2;
+ cand_table->style.corner = '+';
+ ttable_restyle(cand_table);
+
+ frr_each (pim_autorp_rp, &(autorp->candidate_rp_list), rp) {
+ if (strlen(rp->grplist))
+ ttable_add_row(cand_table, "%pI4|%s|%s", &(rp->addr), "-",
+ rp->grplist);
+ else
+ ttable_add_row(cand_table, "%pI4|%pFX|%s", &(rp->addr), &(rp->grp),
+ "-");
+ }
}
if (json) {
- json_object_boolean_add(json, "discoveryEnabled",
- autorp->do_discovery);
-
- annouce_jobj = json_object_new_object();
- json_object_int_add(annouce_jobj, "scope",
- autorp->announce_scope);
- json_object_int_add(annouce_jobj, "interval",
- autorp->announce_interval);
- json_object_int_add(annouce_jobj, "holdtime",
- autorp->announce_holdtime);
- json_object_object_add(annouce_jobj, "rpList",
- ttable_json_with_json_text(
- tt, "sss",
- "rpAddress|group|prefixList"));
-
- json_object_object_add(json, "announce", annouce_jobj);
- } else {
- vty_out(vty, "AutoRP Discovery is %sabled\n",
- (autorp->do_discovery ? "en" : "dis"));
- vty_out(vty, "AutoRP Candidate RPs\n");
- vty_out(vty, " interval %us, scope %u, holdtime %us\n",
- autorp->announce_interval, autorp->announce_scope,
- (autorp->announce_holdtime == DEFAULT_ANNOUNCE_HOLDTIME
- ? (autorp->announce_interval * 3)
- : autorp->announce_holdtime));
+ pim_autorp_show_autorp_json(autorp, component, json, cand_table);
+ if (cand_table)
+ ttable_del(cand_table);
+ return;
+ }
- vty_out(vty, "\n");
+ /* Prepare discovered RP's table. */
+ if (!component || strmatch(component, "discovery")) {
+ disc_table = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(disc_table, "RP address|Group Range");
+ disc_table->style.cell.rpad = 2;
+ disc_table->style.corner = '+';
+ ttable_restyle(disc_table);
+
+ frr_each (pim_autorp_rp, &(autorp->discovery_rp_list), rp) {
+ if (strlen(rp->grplist)) {
+ struct prefix_list *pl;
+ struct prefix_list_entry *ple;
+ bool first = true;
+
+ pl = prefix_list_lookup(AFI_IP, rp->grplist);
+
+ if (pl == NULL) {
+ ttable_add_row(disc_table,
+ "%pI4|failed to find prefix list %s",
+ &(rp->addr), rp->grplist);
+ continue;
+ }
- table = ttable_dump(tt, "\n");
- vty_out(vty, "%s\n", table);
- XFREE(MTYPE_TMP_TTABLE, table);
+ for (ple = pl->head; ple != NULL; ple = ple->next) {
+ if (first)
+ ttable_add_row(disc_table, "%pI4|%s%pFX",
+ &(rp->addr),
+ (ple->type == PREFIX_DENY ? "!"
+ : " "),
+ &ple->prefix);
+ else
+ ttable_add_row(disc_table, "%s|%s%pFX", " ",
+ (ple->type == PREFIX_DENY ? "!"
+ : " "),
+ &ple->prefix);
+ first = false;
+ }
+ } else
+ ttable_add_row(disc_table, "%pI4| %pFX", &(rp->addr), &(rp->grp));
+ }
+ }
+
+ /* Prepare advertised RP's table (mapping-agent). */
+ if (!component || strmatch(component, "mapping-agent")) {
+ adv_table = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(adv_table, "RP address|Group Range");
+ adv_table->style.cell.rpad = 2;
+ adv_table->style.corner = '+';
+ ttable_restyle(adv_table);
+
+ frr_each (pim_autorp_rp, &(autorp->advertised_rp_list), rp) {
+ struct pim_autorp_grppfix *grppfix;
+ bool first = true;
+
+ frr_each (pim_autorp_grppfix, &rp->grp_pfix_list, grppfix) {
+ if (first)
+ ttable_add_row(adv_table, "%pI4|%s%pFX", &rp->addr,
+ grppfix->negative ? "!" : " ", &grppfix->grp);
+ else
+ ttable_add_row(adv_table, "%s|%s%pFX", " ",
+ grppfix->negative ? "!" : " ", &grppfix->grp);
+ first = false;
+ }
+ }
+ }
+
+ if (!component || strmatch(component, "discovery")) {
+ vty_out(vty, "AutoRP Discovery is %sabled\n", (autorp->do_discovery ? "en" : "dis"));
+ if (autorp->do_discovery) {
+ tmp = ttable_dump(disc_table, "\n");
+ vty_out(vty, "\n");
+ vty_out(vty, "Discovered RP's (count=%u)\n",
+ (uint32_t)pim_autorp_rp_count(&autorp->discovery_rp_list));
+ vty_out(vty, "%s\n", tmp);
+ XFREE(MTYPE_TMP_TTABLE, tmp);
+ } else
+ vty_out(vty, "\n");
+ }
+
+ if (!component || strmatch(component, "candidate")) {
+ vty_out(vty, "AutoRP Announcement is %sabled\n",
+ (pim_autorp_rp_count(&autorp->candidate_rp_list) > 0 ? "en" : "dis"));
+ if (pim_autorp_rp_count(&autorp->candidate_rp_list) > 0) {
+ tmp = ttable_dump(cand_table, "\n");
+ vty_out(vty, " interval %us scope %u holdtime %us\n",
+ autorp->announce_interval, autorp->announce_scope,
+ (autorp->announce_holdtime == DEFAULT_AUTORP_ANNOUNCE_HOLDTIME
+ ? (autorp->announce_interval * 3)
+ : autorp->announce_holdtime));
+ vty_out(vty, "\n");
+ vty_out(vty, "Candidate RP's (count=%u)\n",
+ (uint32_t)pim_autorp_rp_count(&autorp->candidate_rp_list));
+ vty_out(vty, "%s\n", tmp);
+ XFREE(MTYPE_TMP_TTABLE, tmp);
+ } else
+ vty_out(vty, "\n");
+ }
+
+ if (!component || strmatch(component, "mapping-agent")) {
+ vty_out(vty, "AutoRP Mapping-Agent is %sabled\n",
+ (autorp->send_rp_discovery ? "en" : "dis"));
+ if (autorp->send_rp_discovery) {
+ vty_out(vty, " interval %us scope %u holdtime %us\n",
+ autorp->discovery_interval, autorp->discovery_scope,
+ autorp->discovery_holdtime);
+ vty_out(vty, " source %pPA", &autorp->mapping_agent_addrsel.run_addr);
+ switch (autorp->mapping_agent_addrsel.cfg_mode) {
+ case CAND_ADDR_LO:
+ vty_out(vty, " (loopback)");
+ break;
+ case CAND_ADDR_ANY:
+ vty_out(vty, " (any)");
+ break;
+ case CAND_ADDR_IFACE:
+ vty_out(vty, " (interface %s)",
+ autorp->mapping_agent_addrsel.cfg_ifname);
+ break;
+ case CAND_ADDR_EXPLICIT:
+ vty_out(vty, " (explicit address)");
+ break;
+ }
+ vty_out(vty, "\n");
+
+ if (autorp->mapping_agent_active) {
+ tmp = ttable_dump(adv_table, "\n");
+ vty_out(vty, "\n");
+ vty_out(vty, "Advertised RP's (count=%u)\n",
+ (uint32_t)pim_autorp_rp_count(&autorp->advertised_rp_list));
+ vty_out(vty, "%s\n", tmp);
+ XFREE(MTYPE_TMP_TTABLE, tmp);
+ } else
+ vty_out(vty, " Mapping agent is inactive\n");
+ } else
+ vty_out(vty, "\n");
}
- ttable_del(tt);
+ if (cand_table)
+ ttable_del(cand_table);
+ if (adv_table)
+ ttable_del(adv_table);
+ if (disc_table)
+ ttable_del(disc_table);
}
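The rewritten pim_autorp_config_write() above drops the per-token snprintf() buffers and streams each non-default keyword straight to vty_out(). A minimal standalone sketch of that pattern, with hypothetical DEF_* values mirroring the header defaults and printf in place of vty_out:

```c
#include <stdio.h>

#define DEF_INTERVAL 60
#define DEF_SCOPE 31
#define DEF_HOLDTIME -1

/* Emit one " autorp announce ..." line only when at least one value is
 * non-default, printing only the non-default keywords. */
static int write_announce_line(unsigned interval, unsigned scope, int holdtime)
{
	if (interval == DEF_INTERVAL && scope == DEF_SCOPE && holdtime == DEF_HOLDTIME)
		return 0; /* all defaults: nothing to write */

	printf(" autorp announce");
	if (interval != DEF_INTERVAL)
		printf(" interval %u", interval);
	if (scope != DEF_SCOPE)
		printf(" scope %u", scope);
	if (holdtime != DEF_HOLDTIME)
		printf(" holdtime %d", holdtime);
	printf("\n");
	return 1; /* one config line written */
}

int main(void)
{
	write_announce_line(60, 31, -1); /* prints nothing */
	write_announce_line(30, 31, 90); /* " autorp announce interval 30 holdtime 90" */
	return 0;
}
```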
diff --git a/pimd/pim_autorp.h b/pimd/pim_autorp.h
index a0b029d00a..e4c6530109 100644
--- a/pimd/pim_autorp.h
+++ b/pimd/pim_autorp.h
@@ -14,16 +14,21 @@
#define AUTORP_VERSION 1
#define AUTORP_ANNOUNCEMENT_TYPE 1
#define AUTORP_DISCOVERY_TYPE 2
-#define PIM_VUNKNOWN 0
-#define PIM_V1 1
-#define PIM_V2 2
-#define PIM_V1_2 3
+#define AUTORP_PIM_VUNKNOWN 0
+#define AUTORP_PIM_V1 1
+#define AUTORP_PIM_V2 2
+#define AUTORP_PIM_V1_2 3
-#define DEFAULT_ANNOUNCE_INTERVAL 60
-#define DEFAULT_ANNOUNCE_SCOPE 31
-#define DEFAULT_ANNOUNCE_HOLDTIME -1
+#define DEFAULT_AUTORP_ANNOUNCE_INTERVAL 60
+#define DEFAULT_AUTORP_ANNOUNCE_SCOPE 31
+#define DEFAULT_AUTORP_ANNOUNCE_HOLDTIME -1
+
+#define DEFAULT_AUTORP_DISCOVERY_INTERVAL 60
+#define DEFAULT_AUTORP_DISCOVERY_SCOPE 31
+#define DEFAULT_AUTORP_DISCOVERY_HOLDTIME 180
PREDECL_SORTLIST_UNIQ(pim_autorp_rp);
+PREDECL_SORTLIST_UNIQ(pim_autorp_grppfix);
struct autorp_pkt_grp {
#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -79,7 +84,15 @@ struct pim_autorp_rp {
struct event *hold_timer;
struct prefix grp;
char grplist[32];
- struct pim_autorp_rp_item list;
+ struct pim_autorp_grppfix_head grp_pfix_list;
+ struct pim_autorp_rp_item item;
+};
+
+struct pim_autorp_grppfix {
+ struct prefix grp;
+ struct in_addr rp;
+ bool negative;
+ struct pim_autorp_grppfix_item item;
};
struct pim_autorp {
@@ -96,13 +109,18 @@ struct pim_autorp {
struct event *announce_timer;
/* Event for sending discovery packets*/
- /* struct event *discovery_timer; */
+ struct event *send_discovery_timer;
/* Flag enabling reading discovery packets */
bool do_discovery;
/* Flag enabling mapping agent (reading announcements and sending discovery)*/
- /* bool do_mapping; */
+ bool send_rp_discovery;
+
+ /* Flag indicating if we are sending discovery messages (true) or if a higher IP mapping
+ * agent preempted our sending (false)
+ */
+ bool mapping_agent_active;
/* List of RP's in received discovery packets */
struct pim_autorp_rp_head discovery_rp_list;
@@ -111,7 +129,12 @@ struct pim_autorp {
struct pim_autorp_rp_head candidate_rp_list;
/* List of announced RP's to send in discovery packets */
- /* struct pim_autorp_rp_head mapping_rp_list; */
+ struct pim_autorp_rp_head mapping_rp_list;
+
+ /* List of the last advertised RP's, via mapping agent discovery
+ * This is only filled if a discovery message was sent
+ */
+ struct pim_autorp_rp_head advertised_rp_list;
/* Packet parameters for sending announcement packets */
uint8_t announce_scope;
@@ -119,32 +142,32 @@ struct pim_autorp {
int32_t announce_holdtime;
/* Pre-built announcement packet, only changes when configured RP's or packet parameters change */
- uint8_t *annouce_pkt;
- uint16_t annouce_pkt_sz;
-
- /* TODO: Packet parameters for sending discovery packets
- * int discovery_scope;
- * int discovery_interval;
- * int discovery_holdtime;
- */
+ uint8_t *announce_pkt;
+ uint16_t announce_pkt_sz;
+
+ /* Packet parameters for sending discovery packets */
+ uint8_t discovery_scope;
+ uint16_t discovery_interval;
+ uint16_t discovery_holdtime;
+ struct cand_addrsel mapping_agent_addrsel;
};
#define AUTORP_GRPLEN 6
#define AUTORP_RPLEN 6
#define AUTORP_HDRLEN 8
+void pim_autorp_prefix_list_update(struct pim_instance *pim, struct prefix_list *plist);
bool pim_autorp_rm_candidate_rp(struct pim_instance *pim, pim_addr rpaddr);
-void pim_autorp_add_candidate_rp_group(struct pim_instance *pim,
- pim_addr rpaddr, struct prefix group);
+void pim_autorp_add_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr,
+ struct prefix group);
bool pim_autorp_rm_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr,
struct prefix group);
-void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim,
- pim_addr rpaddr, const char *plist);
-bool pim_autorp_rm_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr,
- const char *plist);
+void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr, const char *plist);
+bool pim_autorp_rm_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr, const char *plist);
void pim_autorp_announce_scope(struct pim_instance *pim, uint8_t scope);
void pim_autorp_announce_interval(struct pim_instance *pim, uint16_t interval);
void pim_autorp_announce_holdtime(struct pim_instance *pim, int32_t holdtime);
+void pim_autorp_send_discovery_apply(struct pim_autorp *autorp);
void pim_autorp_add_ifp(struct interface *ifp);
void pim_autorp_rm_ifp(struct interface *ifp);
void pim_autorp_start_discovery(struct pim_instance *pim);
@@ -152,7 +175,7 @@ void pim_autorp_stop_discovery(struct pim_instance *pim);
void pim_autorp_init(struct pim_instance *pim);
void pim_autorp_finish(struct pim_instance *pim);
int pim_autorp_config_write(struct pim_instance *pim, struct vty *vty);
-void pim_autorp_show_autorp(struct vty *vty, struct pim_instance *pim,
+void pim_autorp_show_autorp(struct vty *vty, struct pim_instance *pim, const char *component,
json_object *json);
#endif
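Both the text and JSON show paths treat DEFAULT_AUTORP_ANNOUNCE_HOLDTIME (-1) as an "unset" sentinel and fall back to three announce intervals. A small sketch of that computation, assuming only what the header defines:

```c
#include <stdio.h>

#define DEFAULT_AUTORP_ANNOUNCE_HOLDTIME -1

/* Effective holdtime as the show/JSON output computes it: the -1
 * sentinel means "not configured", so three intervals are advertised. */
static unsigned effective_holdtime(unsigned interval, int configured)
{
	if (configured == DEFAULT_AUTORP_ANNOUNCE_HOLDTIME)
		return interval * 3;
	return (unsigned)configured;
}

int main(void)
{
	printf("%u\n", effective_holdtime(60, -1)); /* 180 */
	printf("%u\n", effective_holdtime(60, 45)); /* 45 */
	return 0;
}
```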
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
index a44e4e08f3..6c4d649235 100644
--- a/pimd/pim_bsm.c
+++ b/pimd/pim_bsm.c
@@ -480,9 +480,7 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
pend = bsm_rpinfos_first(bsgrp_node->partial_bsrp_list);
- if (!pim_get_all_mcast_group(&group_all))
- return;
-
+ pim_get_all_mcast_group(&group_all);
rp_all = pim_rp_find_match_group(pim, &group_all);
rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
@@ -727,11 +725,9 @@ void pim_bsm_clear(struct pim_instance *pim)
__func__, &nht_p);
}
- pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
-
- if (!pim_get_all_mcast_group(&g_all))
- return;
+ pim_nht_delete_tracked(pim, nht_p, NULL, rp_info);
+ pim_get_all_mcast_group(&g_all);
rp_all = pim_rp_find_match_group(pim, &g_all);
if (rp_all == rp_info) {
@@ -1769,14 +1765,14 @@ static inline pim_addr if_highest_addr(pim_addr cur, struct interface *ifp)
return cur;
}
-static void cand_addrsel_clear(struct cand_addrsel *asel)
+void cand_addrsel_clear(struct cand_addrsel *asel)
{
asel->run = false;
asel->run_addr = PIMADDR_ANY;
}
/* returns whether address or active changed */
-static bool cand_addrsel_update(struct cand_addrsel *asel, struct vrf *vrf)
+bool cand_addrsel_update(struct cand_addrsel *asel, struct vrf *vrf)
{
bool is_any = false, prev_run = asel->run;
struct interface *ifp = NULL;
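Making cand_addrsel_clear()/cand_addrsel_update() non-static lets the AutoRP mapping agent reuse the Cand-RP/Cand-BSR source-address selection. A simplified sketch of the four cfg_mode cases; the address values and the "interface scan" are stand-ins, since the real update walks the VRF's interface list and tracks whether the result changed:

```c
#include <stdio.h>
#include <string.h>

enum cand_addr {
	CAND_ADDR_LO,       /* highest loopback address (default) */
	CAND_ADDR_ANY,      /* highest address on any interface */
	CAND_ADDR_IFACE,    /* highest address on a named interface */
	CAND_ADDR_EXPLICIT, /* fixed, explicitly configured address */
};

/* Stand-in for cand_addrsel_update(): pick a source per the configured
 * mode. Addresses are strings here purely for illustration. */
static const char *select_source(enum cand_addr mode, const char *ifname,
				 const char *explicit_addr)
{
	switch (mode) {
	case CAND_ADDR_LO:
		return "10.0.0.1"; /* pretend: best loopback address */
	case CAND_ADDR_ANY:
		return "192.0.2.9"; /* pretend: best address overall */
	case CAND_ADDR_IFACE:
		return strcmp(ifname, "eth0") == 0 ? "192.0.2.1" : NULL;
	case CAND_ADDR_EXPLICIT:
		return explicit_addr;
	}
	return NULL;
}

int main(void)
{
	printf("%s\n", select_source(CAND_ADDR_EXPLICIT, NULL, "203.0.113.5"));
	printf("%s\n", select_source(CAND_ADDR_IFACE, "eth0", NULL));
	return 0;
}
```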
diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h
index 1eacc1be57..b1582d0dfa 100644
--- a/pimd/pim_bsm.h
+++ b/pimd/pim_bsm.h
@@ -64,7 +64,7 @@ enum cand_addr {
CAND_ADDR_EXPLICIT,
};
-/* used separately for Cand-RP and Cand-BSR */
+/* used separately for Cand-RP, Cand-BSR, and AutoRP mapping agent */
struct cand_addrsel {
bool cfg_enable;
enum cand_addr cfg_mode : 8;
@@ -369,6 +369,9 @@ void pim_cand_rp_trigger(struct bsm_scope *scope);
void pim_cand_rp_grp_add(struct bsm_scope *scope, const prefix_pim *p);
void pim_cand_rp_grp_del(struct bsm_scope *scope, const prefix_pim *p);
+void cand_addrsel_clear(struct cand_addrsel *asel);
+bool cand_addrsel_update(struct cand_addrsel *asel, struct vrf *vrf);
+
void pim_cand_addrs_changed(void);
int pim_crp_process(struct interface *ifp, pim_sgaddr *src_dst, uint8_t *buf,
diff --git a/pimd/pim_bsr_rpdb.c b/pimd/pim_bsr_rpdb.c
index 6e93b65f4b..02e7a69ff1 100644
--- a/pimd/pim_bsr_rpdb.c
+++ b/pimd/pim_bsr_rpdb.c
@@ -413,11 +413,11 @@ void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc)
struct bsr_crp_rp *rp, ref;
bool ok;
- ref.addr = pnc->rpf.rpf_addr;
+ ref.addr = pnc->addr;
rp = bsr_crp_rps_find(scope->ebsr_rps, &ref);
assertf(rp, "addr=%pPA", &ref.addr);
- ok = CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID);
+ ok = pim_nht_pnc_is_valid(pim, pnc);
if (ok == rp->nht_ok)
return;
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index 934da2d53e..a34fb344fe 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -2822,65 +2822,49 @@ DEFPY (show_ip_pim_rp_vrf_all,
DEFPY (show_ip_pim_autorp,
show_ip_pim_autorp_cmd,
- "show ip pim [vrf NAME] autorp [json$json]",
+ "show ip pim [vrf <NAME|all>] autorp [discovery|candidate|mapping-agent]$component [json$json]",
SHOW_STR
IP_STR
PIM_STR
VRF_CMD_HELP_STR
+ "All VRF's\n"
"PIM AutoRP information\n"
+ "RP Discovery details\n"
+ "Candidate RP details\n"
+ "Mapping Agent details\n"
JSON_STR)
{
- struct vrf *v;
json_object *json_parent = NULL;
-
- v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
- if (!v || !v->info) {
- if (!json)
- vty_out(vty, "%% Unable to find pim instance\n");
- return CMD_WARNING;
- }
+ struct vrf *v;
if (json)
json_parent = json_object_new_object();
- pim_autorp_show_autorp(vty, v->info, json_parent);
-
- if (json)
- vty_json(vty, json_parent);
-
- return CMD_SUCCESS;
-}
+ if (vrf && strmatch(vrf, "all")) {
+ json_object *json_vrf = NULL;
-DEFPY (show_ip_pim_autorp_vrf_all,
- show_ip_pim_autorp_vrf_all_cmd,
- "show ip pim vrf all autorp [json$json]",
- SHOW_STR
- IP_STR
- PIM_STR
- VRF_CMD_HELP_STR
- "PIM AutoRP information\n"
- JSON_STR)
-{
- struct vrf *vrf;
- json_object *json_parent = NULL;
- json_object *json_vrf = NULL;
-
- if (json)
- json_parent = json_object_new_object();
+ RB_FOREACH (v, vrf_name_head, &vrfs_by_name) {
+ if (!v || !v->info)
+ continue;
- RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if (vrf->info) {
- if (!json)
- vty_out(vty, "VRF: %s\n", vrf->name);
- else
+ if (json)
json_vrf = json_object_new_object();
+ else
+ vty_out(vty, "VRF: %s\n", v->name);
- pim_autorp_show_autorp(vty, vrf->info, json_vrf);
+ pim_autorp_show_autorp(vty, v->info, component, json_vrf);
if (json)
- json_object_object_add(json_parent, vrf->name,
- json_vrf);
+ json_object_object_add(json_parent, v->name, json_vrf);
}
+ } else {
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+ if (!v || !v->info) {
+ if (!json)
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
+ }
+ pim_autorp_show_autorp(vty, v->info, component, json_parent);
}
if (json)
@@ -2930,7 +2914,7 @@ DEFPY (show_ip_pim_nexthop,
DEFPY (show_ip_pim_nexthop_lookup,
show_ip_pim_nexthop_lookup_cmd,
- "show ip pim [vrf NAME] nexthop-lookup A.B.C.D$source A.B.C.D$group",
+ "show ip pim [vrf NAME] nexthop-lookup A.B.C.D$source [A.B.C.D$group]",
SHOW_STR
IP_STR
PIM_STR
@@ -2942,6 +2926,14 @@ DEFPY (show_ip_pim_nexthop_lookup,
return pim_show_nexthop_lookup_cmd_helper(vrf, vty, source, group);
}
+ALIAS_DEPRECATED (show_ip_pim_nexthop_lookup,
+ show_ip_rpf_source_cmd,
+ "show ip rpf A.B.C.D$source",
+ SHOW_STR
+ IP_STR
+ "Display RPF information for multicast source\n"
+ "Nexthop lookup for specific source address\n");
+
DEFPY (show_ip_pim_interface_traffic,
show_ip_pim_interface_traffic_cmd,
"show ip pim [vrf NAME] interface traffic [WORD$if_name] [json$json]",
@@ -3304,7 +3296,7 @@ DEFUN (show_ip_rib,
return CMD_WARNING;
}
- if (!pim_nexthop_lookup(vrf->info, &nexthop, addr, 0)) {
+ if (!pim_nht_lookup(vrf->info, &nexthop, addr, 0)) {
vty_out(vty,
"Failure querying RIB nexthop for unicast address %s\n",
addr_str);
@@ -4609,13 +4601,17 @@ DEFPY (pim_autorp_announce_rp,
"Prefix list\n"
"List name\n")
{
- return pim_process_autorp_candidate_rp_cmd(vty, no, rpaddr_str, (grp_str ? grp : NULL),
- plist);
+ if (grp_str && (!pim_addr_is_multicast(grp->prefix) || grp->prefixlen < 4)) {
+ vty_out(vty, "%% group prefix %pFX is not a valid multicast range\n", grp);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ return pim_process_autorp_candidate_rp_cmd(vty, no, rpaddr_str, grp_str, plist);
}
DEFPY (pim_autorp_announce_scope_int,
pim_autorp_announce_scope_int_cmd,
- "[no] autorp announce ![{scope (1-255) | interval (1-65535) | holdtime (0-65535)}]",
+ "[no] autorp announce {scope (1-255) | interval (1-65535) | holdtime (0-65535)}",
NO_STR
"AutoRP\n"
"AutoRP Candidate RP announcement\n"
@@ -4626,11 +4622,44 @@ DEFPY (pim_autorp_announce_scope_int,
"Announcement holdtime\n"
"Time in seconds\n")
{
- return pim_process_autorp_announce_scope_int_cmd(vty, no, scope_str,
- interval_str,
+ return pim_process_autorp_announce_scope_int_cmd(vty, no, scope_str, interval_str,
holdtime_str);
}
+DEFPY (pim_autorp_send_rp_discovery,
+ pim_autorp_send_rp_discovery_cmd,
+ "[no] autorp send-rp-discovery [source <address A.B.C.D | interface IFNAME | loopback$loopback | any$any>]",
+ NO_STR
+ "AutoRP\n"
+ "Enable AutoRP mapping agent\n"
+ "Specify AutoRP discovery source\n"
+ "Local address\n"
+ IP_ADDR_STR
+ "Local Interface (uses highest address)\n"
+ IFNAME_STR
+ "Highest loopback address (default)\n"
+ "Highest address of any interface\n")
+{
+ return pim_process_autorp_send_rp_discovery_cmd(vty, no, any, loopback, ifname, address_str);
+}
+
+DEFPY (pim_autorp_send_rp_discovery_scope_int,
+ pim_autorp_send_rp_discovery_scope_int_cmd,
+ "[no] autorp send-rp-discovery {scope (0-255) | interval (1-65535) | holdtime (0-65535)}",
+ NO_STR
+ "AutoRP\n"
+ "Enable AutoRP mapping agent\n"
+ "Packet scope (TTL)\n"
+ "TTL value\n"
+ "Discovery TX interval\n"
+ "Time in seconds\n"
+ "Announcement holdtime\n"
+ "Time in seconds\n")
+{
+ return pim_process_autorp_send_rp_discovery_scope_int_cmd(vty, no, scope_str, interval_str,
+ holdtime_str);
+}
+
DEFPY (pim_bsr_candidate_bsr,
pim_bsr_candidate_bsr_cmd,
"[no] bsr candidate-bsr [{priority (0-255)|source <address A.B.C.D|interface IFNAME|loopback$loopback|any$any>}]",
@@ -5850,6 +5879,21 @@ DEFUN(interface_no_ip_pim_boundary_oil,
return pim_process_no_ip_pim_boundary_oil_cmd(vty);
}
+DEFPY_YANG(interface_ip_pim_boundary_acl,
+ interface_ip_pim_boundary_acl_cmd,
+ "[no] ip multicast boundary ACCESSLIST4_NAME$name",
+ NO_STR
+ IP_STR
+ "Generic multicast configuration options\n"
+ "Define multicast boundary\n"
+ "Access-list to filter OIL with by source and group\n")
+{
+ nb_cli_enqueue_change(vty, "./multicast-boundary-acl",
+ (!!no ? NB_OP_DESTROY : NB_OP_MODIFY), name);
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH, FRR_PIM_AF_XPATH_VAL);
+}
+
DEFUN (interface_ip_mroute,
interface_ip_mroute_cmd,
"ip mroute INTERFACE A.B.C.D [A.B.C.D]",
@@ -7539,6 +7583,65 @@ DEFPY_ATTR(no_ip_pim_msdp_mesh_group,
return ret;
}
+DEFPY(msdp_shutdown,
+ msdp_shutdown_cmd,
+ "[no] msdp shutdown",
+ NO_STR
+ CFG_MSDP_STR
+ "Shutdown MSDP operation\n")
+{
+ char xpath_value[XPATH_MAXLEN];
+
+ snprintf(xpath_value, sizeof(xpath_value), "./msdp/shutdown");
+ if (no)
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
+ else
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY(msdp_peer_sa_limit, msdp_peer_sa_limit_cmd,
+ "[no] msdp peer A.B.C.D$peer sa-limit ![(1-4294967294)$sa_limit]",
+ NO_STR
+ CFG_MSDP_STR
+ "Configure MSDP peer\n"
+ "MSDP peer address\n"
+ "Limit amount of SA\n"
+ "Maximum number of SA\n")
+{
+ const struct lyd_node *peer_node;
+ char xpath[XPATH_MAXLEN + 24];
+
+ snprintf(xpath, sizeof(xpath), "%s/msdp-peer[peer-ip='%s']", VTY_CURR_XPATH, peer_str);
+ peer_node = yang_dnode_get(vty->candidate_config->dnode, xpath);
+ if (peer_node == NULL) {
+ vty_out(vty, "%% MSDP peer %s not yet configured\n", peer_str);
+ return CMD_SUCCESS;
+ }
+
+ nb_cli_enqueue_change(vty, "./sa-limit", NB_OP_MODIFY, sa_limit_str);
+ return nb_cli_apply_changes(vty, "%s", xpath);
+}
+
+DEFPY(msdp_originator_id, msdp_originator_id_cmd,
+ "[no] msdp originator-id ![A.B.C.D$originator_id]",
+ NO_STR
+ CFG_MSDP_STR
+ "Configure MSDP RP originator\n"
+ "MSDP RP originator identifier\n")
+{
+ char xpath_value[XPATH_MAXLEN];
+
+ snprintf(xpath_value, sizeof(xpath_value), "./msdp/originator-id");
+ if (no)
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
+ else
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, originator_id_str);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
static void ip_msdp_show_mesh_group(struct vty *vty, struct pim_msdp_mg *mg,
struct json_object *json)
{
@@ -8267,6 +8370,37 @@ DEFUN (show_ip_msdp_sa_sg_vrf_all,
return CMD_SUCCESS;
}
+DEFPY(msdp_log_neighbor_changes, msdp_log_neighbor_changes_cmd,
+ "[no] msdp log neighbor-events",
+ NO_STR
+ MSDP_STR
+ "MSDP log messages\n"
+ "MSDP log neighbor event messages\n")
+{
+ char xpath_value[XPATH_MAXLEN + 32];
+
+ snprintf(xpath_value, sizeof(xpath_value), "%s/msdp/log-neighbor-events", VTY_CURR_XPATH);
+ nb_cli_enqueue_change(vty, xpath_value, no ? NB_OP_DESTROY : NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY(msdp_log_sa_changes, msdp_log_sa_changes_cmd,
+ "[no] msdp log sa-events",
+ NO_STR
+ MSDP_STR
+ "MSDP log messages\n"
+ "MSDP log SA event messages\n")
+{
+ char xpath_value[XPATH_MAXLEN + 32];
+
+ snprintf(xpath_value, sizeof(xpath_value), "%s/msdp/log-sa-events", VTY_CURR_XPATH);
+ nb_cli_enqueue_change(vty, xpath_value, no ? NB_OP_DESTROY : NB_OP_MODIFY, "true");
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+
struct pim_sg_cache_walk_data {
struct vty *vty;
json_object *json;
@@ -8743,6 +8877,24 @@ done:
return ret;
}
+DEFPY_YANG(pim_rpf_lookup_mode, pim_rpf_lookup_mode_cmd,
+ "[no] rpf-lookup-mode ![urib-only|mrib-only|mrib-then-urib|lower-distance|longer-prefix]$mode",
+ NO_STR
+ "RPF lookup behavior\n"
+ "Lookup in unicast RIB only\n"
+ "Lookup in multicast RIB only\n"
+ "Try multicast RIB first, fall back to unicast RIB\n"
+ "Lookup both, use entry with lower distance\n"
+ "Lookup both, use entry with longer prefix\n")
+{
+ if (no)
+ nb_cli_enqueue_change(vty, "./mcast-rpf-lookup", NB_OP_DESTROY, NULL);
+ else
+ nb_cli_enqueue_change(vty, "./mcast-rpf-lookup", NB_OP_MODIFY, mode);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
struct cmd_node pim_node = {
.name = "pim",
.node = PIM_NODE,
@@ -8855,6 +9007,8 @@ void pim_cmd_init(void)
install_element(PIM_NODE, &pim_autorp_discovery_cmd);
install_element(PIM_NODE, &pim_autorp_announce_rp_cmd);
install_element(PIM_NODE, &pim_autorp_announce_scope_int_cmd);
+ install_element(PIM_NODE, &pim_autorp_send_rp_discovery_cmd);
+ install_element(PIM_NODE, &pim_autorp_send_rp_discovery_scope_int_cmd);
install_element(PIM_NODE, &no_pim_ssm_prefix_list_cmd);
install_element(PIM_NODE, &no_pim_ssm_prefix_list_name_cmd);
install_element(PIM_NODE, &pim_ssm_prefix_list_cmd);
@@ -8898,11 +9052,18 @@ void pim_cmd_init(void)
install_element(PIM_NODE, &pim_msdp_mesh_group_source_cmd);
install_element(PIM_NODE, &no_pim_msdp_mesh_group_source_cmd);
install_element(PIM_NODE, &no_pim_msdp_mesh_group_cmd);
+ install_element(PIM_NODE, &msdp_log_neighbor_changes_cmd);
+ install_element(PIM_NODE, &msdp_log_sa_changes_cmd);
+ install_element(PIM_NODE, &msdp_shutdown_cmd);
+ install_element(PIM_NODE, &msdp_peer_sa_limit_cmd);
+ install_element(PIM_NODE, &msdp_originator_id_cmd);
install_element(PIM_NODE, &pim_bsr_candidate_rp_cmd);
install_element(PIM_NODE, &pim_bsr_candidate_rp_group_cmd);
install_element(PIM_NODE, &pim_bsr_candidate_bsr_cmd);
+ install_element(PIM_NODE, &pim_rpf_lookup_mode_cmd);
+
install_element(INTERFACE_NODE, &interface_ip_igmp_cmd);
install_element(INTERFACE_NODE, &interface_no_ip_igmp_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_join_cmd);
@@ -8943,6 +9104,7 @@ void pim_cmd_init(void)
install_element(INTERFACE_NODE, &interface_no_ip_pim_hello_cmd);
install_element(INTERFACE_NODE, &interface_ip_pim_boundary_oil_cmd);
install_element(INTERFACE_NODE, &interface_no_ip_pim_boundary_oil_cmd);
+ install_element(INTERFACE_NODE, &interface_ip_pim_boundary_acl_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_query_generate_cmd);
// Static mroutes NEB
@@ -9010,7 +9172,6 @@ void pim_cmd_init(void)
install_element(VIEW_NODE, &show_ip_pim_rp_cmd);
install_element(VIEW_NODE, &show_ip_pim_rp_vrf_all_cmd);
install_element(VIEW_NODE, &show_ip_pim_autorp_cmd);
- install_element(VIEW_NODE, &show_ip_pim_autorp_vrf_all_cmd);
install_element(VIEW_NODE, &show_ip_pim_bsr_cmd);
install_element(VIEW_NODE, &show_ip_multicast_cmd);
install_element(VIEW_NODE, &show_ip_multicast_vrf_all_cmd);
@@ -9026,6 +9187,7 @@ void pim_cmd_init(void)
install_element(VIEW_NODE, &show_ip_ssmpingd_cmd);
install_element(VIEW_NODE, &show_ip_pim_nexthop_cmd);
install_element(VIEW_NODE, &show_ip_pim_nexthop_lookup_cmd);
+ install_element(VIEW_NODE, &show_ip_rpf_source_cmd);
install_element(VIEW_NODE, &show_ip_pim_bsrp_cmd);
install_element(VIEW_NODE, &show_ip_pim_bsm_db_cmd);
install_element(VIEW_NODE, &show_ip_pim_bsr_rpinfo_cmd);
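Folding the separate vrf-all DEFPY into one command follows the usual FRR show-command shape: under `vrf all`, each VRF's result is attached to the parent JSON object keyed by the VRF name. A minimal sketch of that aggregation using raw json-c (which FRR's json wrappers sit on top of); the VRF names and the per-VRF field are fabricated for illustration:

```c
#include <stdio.h>
#include <json-c/json.h>

int main(void)
{
	const char *vrfs[] = { "default", "red", "blue" };
	struct json_object *parent = json_object_new_object();

	for (size_t i = 0; i < sizeof(vrfs) / sizeof(vrfs[0]); i++) {
		/* One child object per VRF, filled by the per-VRF show
		 * helper in the real code; here just a marker field. */
		struct json_object *vrf_obj = json_object_new_object();

		json_object_object_add(vrf_obj, "discoveryEnabled",
				       json_object_new_boolean(1));
		json_object_object_add(parent, vrfs[i], vrf_obj);
	}

	puts(json_object_to_json_string_ext(parent, JSON_C_TO_STRING_PRETTY));
	json_object_put(parent); /* releases the whole tree */
	return 0;
}
```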
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index 02ddea8252..8aebce7d27 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -630,139 +630,88 @@ int pim_process_no_autorp_cmd(struct vty *vty)
return nb_cli_apply_changes(vty, NULL);
}
-int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no,
- const char *rpaddr_str,
- const struct prefix_ipv4 *grp,
- const char *plist)
+int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no, const char *rpaddr_str,
+ const char *grp, const char *plist)
{
- char xpath[XPATH_MAXLEN];
- char grpstr[64];
-
if (no) {
- if ((grp && !is_default_prefix((const struct prefix *)grp)) || plist) {
+ if (grp || plist) {
/* If any single values are set, only destroy those */
- if (grp && !is_default_prefix((const struct prefix *)grp)) {
- snprintfrr(xpath, sizeof(xpath),
- "%s/candidate-rp-list[rp-address='%s']/group",
- FRR_PIM_AUTORP_XPATH, rpaddr_str);
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY,
- NULL);
- }
- if (plist) {
- snprintfrr(xpath, sizeof(xpath),
- "%s/candidate-rp-list[rp-address='%s']/prefix-list",
- FRR_PIM_AUTORP_XPATH, rpaddr_str);
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY,
- NULL);
- }
- } else {
+ if (grp)
+ nb_cli_enqueue_change(vty, "./group", NB_OP_DESTROY, NULL);
+ if (plist)
+ nb_cli_enqueue_change(vty, "./prefix-list", NB_OP_DESTROY, NULL);
+ } else
/* No values set, remove the entire RP */
- snprintfrr(xpath, sizeof(xpath),
- "%s/candidate-rp-list[rp-address='%s']",
- FRR_PIM_AUTORP_XPATH, rpaddr_str);
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
- }
+ nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
} else {
- if ((grp && !is_default_prefix((const struct prefix *)grp)) || plist) {
- snprintfrr(xpath, sizeof(xpath),
- "%s/candidate-rp-list[rp-address='%s']",
- FRR_PIM_AUTORP_XPATH, rpaddr_str);
- nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
- if (grp && !is_default_prefix((const struct prefix *)grp)) {
- snprintfrr(xpath, sizeof(xpath),
- "%s/candidate-rp-list[rp-address='%s']/group",
- FRR_PIM_AUTORP_XPATH, rpaddr_str);
- nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY,
- prefix2str(grp, grpstr,
- sizeof(grpstr)));
- }
- if (plist) {
- snprintfrr(xpath, sizeof(xpath),
- "%s/candidate-rp-list[rp-address='%s']/prefix-list",
- FRR_PIM_AUTORP_XPATH, rpaddr_str);
- nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY,
- plist);
- }
- } else {
- return CMD_WARNING_CONFIG_FAILED;
- }
+ nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, NULL);
+ if (grp)
+ nb_cli_enqueue_change(vty, "./group", NB_OP_MODIFY, grp);
+ if (plist)
+ nb_cli_enqueue_change(vty, "./prefix-list", NB_OP_MODIFY, plist);
}
- return nb_cli_apply_changes(vty, NULL);
+ return nb_cli_apply_changes(vty, "%s/candidate-rp-list[rp-address='%s']",
+ FRR_PIM_AUTORP_XPATH, rpaddr_str);
}
-int pim_process_autorp_announce_scope_int_cmd(struct vty *vty, bool no,
- const char *scope,
- const char *interval,
- const char *holdtime)
+int pim_process_autorp_announce_scope_int_cmd(struct vty *vty, bool no, const char *scope,
+ const char *interval, const char *holdtime)
{
- char xpath[XPATH_MAXLEN];
+ /* At least one value is required, so set/delete anything defined */
+ enum nb_operation op = (no ? NB_OP_DESTROY : NB_OP_MODIFY);
+
+ if (scope)
+ nb_cli_enqueue_change(vty, "./announce-scope", op, scope);
+ if (interval)
+ nb_cli_enqueue_change(vty, "./announce-interval", op, interval);
+ if (holdtime)
+ nb_cli_enqueue_change(vty, "./announce-holdtime", op, holdtime);
+ return nb_cli_apply_changes(vty, "%s", FRR_PIM_AUTORP_XPATH);
+}
+
+int pim_process_autorp_send_rp_discovery_cmd(struct vty *vty, bool no, bool any, bool loopback,
+ const char *ifname, const char *addr)
+{
+ /* Just take any "no" version of this command as disable the mapping agent */
+ nb_cli_enqueue_change(vty, "./send-rp-discovery", NB_OP_MODIFY, (no ? "false" : "true"));
if (no) {
- if (scope || interval || holdtime) {
- /* If any single values are set, only destroy those */
- if (scope) {
- snprintfrr(xpath, sizeof(xpath), "%s/%s",
- FRR_PIM_AUTORP_XPATH,
- "announce-scope");
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY,
- NULL);
- }
- if (interval) {
- snprintfrr(xpath, sizeof(xpath), "%s/%s",
- FRR_PIM_AUTORP_XPATH,
- "announce-interval");
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY,
- NULL);
- }
- if (holdtime) {
- snprintfrr(xpath, sizeof(xpath), "%s/%s",
- FRR_PIM_AUTORP_XPATH,
- "announce-holdtime");
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY,
- NULL);
- }
- } else {
- /* No values set, remove all */
- snprintfrr(xpath, sizeof(xpath), "%s/%s",
- FRR_PIM_AUTORP_XPATH, "announce-scope");
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
- snprintfrr(xpath, sizeof(xpath), "%s/%s",
- FRR_PIM_AUTORP_XPATH, "announce-interval");
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
- snprintfrr(xpath, sizeof(xpath), "%s/%s",
- FRR_PIM_AUTORP_XPATH, "announce-holdtime");
- nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
- }
+ nb_cli_enqueue_change(vty, "./if-any", NB_OP_DESTROY, NULL);
+ nb_cli_enqueue_change(vty, "./interface", NB_OP_DESTROY, NULL);
+ nb_cli_enqueue_change(vty, "./address", NB_OP_DESTROY, NULL);
+ nb_cli_enqueue_change(vty, "./if-loopback", NB_OP_DESTROY, NULL);
} else {
- if (scope || interval || holdtime) {
- if (scope) {
- snprintfrr(xpath, sizeof(xpath), "%s/%s",
- FRR_PIM_AUTORP_XPATH,
- "announce-scope");
- nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY,
- scope);
- }
- if (interval) {
- snprintfrr(xpath, sizeof(xpath), "%s/%s",
- FRR_PIM_AUTORP_XPATH,
- "announce-interval");
- nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY,
- interval);
- }
- if (holdtime) {
- snprintfrr(xpath, sizeof(xpath), "%s/%s",
- FRR_PIM_AUTORP_XPATH,
- "announce-holdtime");
- nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY,
- holdtime);
- }
- } else {
- return CMD_WARNING_CONFIG_FAILED;
- }
+ /* Enabling mapping agent. Loopback is default, so any non-no form of the command will
+ * enable the mapping agent.
+ */
+ if (any)
+ nb_cli_enqueue_change(vty, "./if-any", NB_OP_CREATE, NULL);
+ else if (ifname)
+ nb_cli_enqueue_change(vty, "./interface", NB_OP_MODIFY, ifname);
+ else if (addr)
+ nb_cli_enqueue_change(vty, "./address", NB_OP_MODIFY, addr);
+ else
+ nb_cli_enqueue_change(vty, "./if-loopback", NB_OP_CREATE, NULL);
}
- return nb_cli_apply_changes(vty, NULL);
+ return nb_cli_apply_changes(vty, "%s/%s", FRR_PIM_AUTORP_XPATH, "mapping-agent");
+}
+
+int pim_process_autorp_send_rp_discovery_scope_int_cmd(struct vty *vty, bool no, const char *scope,
+ const char *interval, const char *holdtime)
+{
+ /* At least one value is required, so only set/delete the values specified */
+ enum nb_operation op = (no ? NB_OP_DESTROY : NB_OP_MODIFY);
+
+ if (scope)
+ nb_cli_enqueue_change(vty, "./discovery-scope", op, scope);
+ if (interval)
+ nb_cli_enqueue_change(vty, "./discovery-interval", op, interval);
+ if (holdtime)
+ nb_cli_enqueue_change(vty, "./discovery-holdtime", op, holdtime);
+
+ return nb_cli_apply_changes(vty, "%s/%s", FRR_PIM_AUTORP_XPATH, "mapping-agent");
}
bool pim_sgaddr_match(pim_sgaddr item, pim_sgaddr match)
@@ -2876,31 +2825,39 @@ static int pim_print_vty_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg)
struct vty *vty = cwd->vty;
struct pim_instance *pim = cwd->pim;
struct nexthop *nh_node = NULL;
- ifindex_t first_ifindex;
struct interface *ifp = NULL;
struct ttable *tt = NULL;
char *table = NULL;
/* Prepare table. */
tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
- ttable_add_row(tt, "Address|Interface|Nexthop");
+ ttable_add_row(tt, "Address|Interface|Nexthop|Table");
tt->style.cell.rpad = 2;
tt->style.corner = '+';
ttable_restyle(tt);
- for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
- first_ifindex = nh_node->ifindex;
-
- ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
+ for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) {
+ ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+#if PIM_IPV == 4
+ ttable_add_row(tt, "%pPA|%s|%pI4|%s", &pnc->addr, ifp ? ifp->name : "NULL",
+ &nh_node->gate.ipv4, "MRIB");
+#else
+ ttable_add_row(tt, "%pPA|%s|%pI6|%s", &pnc->addr, ifp ? ifp->name : "NULL",
+ &nh_node->gate.ipv6, "MRIB");
+#endif
+ }
+ for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) {
+ ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
#if PIM_IPV == 4
- ttable_add_row(tt, "%pPA|%s|%pI4", &pnc->rpf.rpf_addr,
- ifp ? ifp->name : "NULL", &nh_node->gate.ipv4);
+ ttable_add_row(tt, "%pPA|%s|%pI4|%s", &pnc->addr, ifp ? ifp->name : "NULL",
+ &nh_node->gate.ipv4, "URIB");
#else
- ttable_add_row(tt, "%pPA|%s|%pI6", &pnc->rpf.rpf_addr,
- ifp ? ifp->name : "NULL", &nh_node->gate.ipv6);
+ ttable_add_row(tt, "%pPA|%s|%pI6|%s", &pnc->addr, ifp ? ifp->name : "NULL",
+ &nh_node->gate.ipv6, "URIB");
#endif
}
+
/* Dump the generated table. */
table = ttable_dump(tt, "\n");
vty_out(vty, "%s\n", table);
@@ -2910,56 +2867,58 @@ static int pim_print_vty_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg)
return CMD_SUCCESS;
}
-static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet,
- void *arg)
+static void pim_print_json_nexthop(json_object *json_obj, struct nexthop *nh_node,
+ struct interface *ifp, char *addr_str, const char *type)
{
- struct pim_nexthop_cache *pnc = backet->data;
- struct json_pnc_cache_walk_data *cwd = arg;
- struct pim_instance *pim = cwd->pim;
- struct nexthop *nh_node = NULL;
- ifindex_t first_ifindex;
- struct interface *ifp = NULL;
- char addr_str[PIM_ADDRSTRLEN];
json_object *json_row = NULL;
json_object *json_ifp = NULL;
json_object *json_arr = NULL;
struct pim_interface *pim_ifp = NULL;
- bool pim_enable = false;
-
- for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
- first_ifindex = nh_node->ifindex;
- ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
- snprintfrr(addr_str, sizeof(addr_str), "%pPA",
- &pnc->rpf.rpf_addr);
- json_object_object_get_ex(cwd->json_obj, addr_str, &json_row);
- if (!json_row) {
- json_row = json_object_new_object();
- json_object_string_addf(json_row, "address", "%pPA",
- &pnc->rpf.rpf_addr);
- json_object_object_addf(cwd->json_obj, json_row, "%pPA",
- &pnc->rpf.rpf_addr);
- json_arr = json_object_new_array();
- json_object_object_add(json_row, "nexthops", json_arr);
- }
- json_ifp = json_object_new_object();
- json_object_string_add(json_ifp, "interface",
- ifp ? ifp->name : "NULL");
- if (ifp)
- pim_ifp = ifp->info;
+ if (ifp)
+ pim_ifp = ifp->info;
+
+ json_object_object_get_ex(json_obj, addr_str, &json_row);
- if (pim_ifp && pim_ifp->pim_enable)
- pim_enable = true;
+ if (!json_row) {
+ json_row = json_object_new_object();
+ json_object_string_addf(json_row, "address", "%s", addr_str);
+ json_object_object_addf(json_obj, json_row, "%s", addr_str);
+ json_arr = json_object_new_array();
+ json_object_object_add(json_row, "nexthops", json_arr);
+ }
- json_object_boolean_add(json_ifp, "pimEnabled", pim_enable);
+ json_ifp = json_object_new_object();
+ json_object_string_add(json_ifp, "interface", ifp ? ifp->name : "NULL");
+ json_object_boolean_add(json_ifp, "pimEnabled", (pim_ifp && pim_ifp->pim_enable));
#if PIM_IPV == 4
- json_object_string_addf(json_ifp, "nexthop", "%pI4",
- &nh_node->gate.ipv4);
+ json_object_string_addf(json_ifp, "nexthop", "%pI4", &nh_node->gate.ipv4);
#else
- json_object_string_addf(json_ifp, "nexthop", "%pI6",
- &nh_node->gate.ipv6);
+ json_object_string_addf(json_ifp, "nexthop", "%pI6", &nh_node->gate.ipv6);
#endif
- json_object_array_add(json_arr, json_ifp);
+ json_object_string_add(json_ifp, "table", type);
+ json_object_array_add(json_arr, json_ifp);
+}
+
+static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet, void *arg)
+{
+ struct pim_nexthop_cache *pnc = backet->data;
+ struct json_pnc_cache_walk_data *cwd = arg;
+ json_object *json_obj = cwd->json_obj;
+ struct pim_instance *pim = cwd->pim;
+ char addr_str[PIM_ADDRSTRLEN];
+ struct nexthop *nh_node = NULL;
+ struct interface *ifp = NULL;
+
+ snprintfrr(addr_str, sizeof(addr_str), "%pPA", &pnc->addr);
+ for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) {
+ ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+ pim_print_json_nexthop(json_obj, nh_node, ifp, addr_str, "MRIB");
+ }
+
+ for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) {
+ ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+ pim_print_json_nexthop(json_obj, nh_node, ifp, addr_str, "URIB");
}
return CMD_SUCCESS;
}
@@ -2967,7 +2926,6 @@ static int pim_print_json_pnc_cache_walkcb(struct hash_bucket *backet,
int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
pim_addr source, pim_addr group)
{
- int result = 0;
pim_addr vif_source;
struct prefix grp;
struct pim_nexthop nexthop;
@@ -2980,34 +2938,36 @@ int pim_show_nexthop_lookup_cmd_helper(const char *vrf, struct vty *vty,
#if PIM_IPV == 4
if (pim_is_group_224_4(source)) {
- vty_out(vty,
- "Invalid argument. Expected Valid Source Address.\n");
+ vty_out(vty, "Invalid argument. Expected Valid Source Address.\n");
return CMD_WARNING;
}
-
- if (!pim_is_group_224_4(group)) {
- vty_out(vty,
- "Invalid argument. Expected Valid Multicast Group Address.\n");
+ /* Only require group if source is not provided */
+ if (pim_addr_is_any(source) && !pim_is_group_224_4(group)) {
+ vty_out(vty, "Invalid argument. Expected Valid Multicast Group Address.\n");
return CMD_WARNING;
}
#endif
- if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group))
+ /* This call sets vif_source=source if source is not ANY. Otherwise vif_source
+ * is set to the RP address for the group address. If no RP is configured
+ * for the group, it returns 0 and sets vif_source to ANY.
+ */
+ if (!pim_rp_set_upstream_addr(v->info, &vif_source, source, group)) {
+ vty_out(vty, "(%pPAs, %pPA) --- Nexthop Lookup failed, no RP.\n", &source, &group);
return CMD_SUCCESS;
+ }
+
pim_addr_to_prefix(&grp, group);
memset(&nexthop, 0, sizeof(nexthop));
- result =
- pim_ecmp_nexthop_lookup(v->info, &nexthop, vif_source, &grp, 0);
-
- if (!result) {
- vty_out(vty,
- "Nexthop Lookup failed, no usable routes returned.\n");
+ if (!pim_nht_lookup_ecmp(v->info, &nexthop, vif_source, &grp, false)) {
+ vty_out(vty, "(%pPAs, %pPA) --- Nexthop Lookup failed, no usable routes returned.\n",
+ &source, &group);
return CMD_SUCCESS;
}
- vty_out(vty, "Group %pFXh --- Nexthop %pPAs Interface %s\n", &grp,
+ vty_out(vty, "(%pPAs, %pPAs) --- Nexthop %pPAs Interface %s\n", &source, &group,
&nexthop.mrib_nexthop_addr, nexthop.interface->name);
return CMD_SUCCESS;
@@ -3036,19 +2996,16 @@ void pim_show_nexthop(struct pim_instance *pim, struct vty *vty, bool uj)
cwd.pim = pim;
jcwd.pim = pim;
- if (uj) {
+ if (uj)
jcwd.json_obj = json_object_new_object();
- } else {
- vty_out(vty, "Number of registered addresses: %lu\n",
- pim->rpf_hash->count);
- }
+ else
+ vty_out(vty, "Number of registered addresses: %lu\n", pim->nht_hash->count);
if (uj) {
- hash_walk(pim->rpf_hash, pim_print_json_pnc_cache_walkcb,
- &jcwd);
+ hash_walk(pim->nht_hash, pim_print_json_pnc_cache_walkcb, &jcwd);
vty_json(vty, jcwd.json_obj);
} else
- hash_walk(pim->rpf_hash, pim_print_vty_pnc_cache_walkcb, &cwd);
+ hash_walk(pim->nht_hash, pim_print_vty_pnc_cache_walkcb, &cwd);
}
int pim_show_neighbors_cmd_helper(const char *vrf, struct vty *vty,
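With the rpf_hash replaced by the nht_hash, the pnc cache walker above prints MRIB and URIB nexthops side by side with an explicit Table column. A stripped-down sketch of the two-pass walk over singly linked nexthop lists, with string addresses standing in for the real types:

```c
#include <stdio.h>

struct nexthop {
	const char *gate;
	const char *ifname;
	struct nexthop *next;
};

/* Print every nexthop of one RIB with its table label, mirroring the
 * MRIB-then-URIB passes in the updated cache walker. */
static void dump_rib(const char *addr, struct nexthop *nh, const char *table)
{
	for (; nh; nh = nh->next)
		printf("%-12s %-8s %-14s %s\n", addr, nh->ifname, nh->gate, table);
}

int main(void)
{
	struct nexthop m = { "192.0.2.1", "eth0", NULL };
	struct nexthop u = { "198.51.100.1", "eth1", NULL };

	printf("%-12s %-8s %-14s %s\n", "Address", "Interface", "Nexthop", "Table");
	dump_rib("10.1.1.1", &m, "MRIB");
	dump_rib("10.1.1.1", &u, "URIB");
	return 0;
}
```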
diff --git a/pimd/pim_cmd_common.h b/pimd/pim_cmd_common.h
index d7c97e31d4..a067647113 100644
--- a/pimd/pim_cmd_common.h
+++ b/pimd/pim_cmd_common.h
@@ -37,14 +37,14 @@ int pim_process_no_rp_plist_cmd(struct vty *vty, const char *rp_str,
const char *prefix_list);
int pim_process_autorp_cmd(struct vty *vty);
int pim_process_no_autorp_cmd(struct vty *vty);
-int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no,
- const char *rpaddr_str,
- const struct prefix_ipv4 *grp,
- const char *plist);
-int pim_process_autorp_announce_scope_int_cmd(struct vty *vty, bool no,
- const char *scope,
- const char *interval,
- const char *holdtime);
+int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no, const char *rpaddr_str,
+ const char *grp, const char *plist);
+int pim_process_autorp_announce_scope_int_cmd(struct vty *vty, bool no, const char *scope,
+ const char *interval, const char *holdtime);
+int pim_process_autorp_send_rp_discovery_cmd(struct vty *vty, bool no, bool any, bool loopback,
+ const char *ifname, const char *addr);
+int pim_process_autorp_send_rp_discovery_scope_int_cmd(struct vty *vty, bool no, const char *scope,
+ const char *interval, const char *holdtime);
int pim_process_ip_pim_cmd(struct vty *vty);
int pim_process_no_ip_pim_cmd(struct vty *vty);
int pim_process_ip_pim_passive_cmd(struct vty *vty, bool enable);
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 20e3ba184b..9316cebc0a 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -38,6 +38,7 @@
#include "pim_igmp_join.h"
#include "pim_vxlan.h"
#include "pim_tib.h"
+#include "pim_util.h"
#include "pim6_mld.h"
@@ -215,7 +216,6 @@ void pim_if_delete(struct interface *ifp)
if (pim_ifp->bfd_config.profile)
XFREE(MTYPE_TMP, pim_ifp->bfd_config.profile);
- XFREE(MTYPE_PIM_INTERFACE, pim_ifp->boundary_oil_plist);
XFREE(MTYPE_PIM_INTERFACE, pim_ifp);
ifp->info = NULL;
@@ -601,26 +601,13 @@ void pim_if_addr_add(struct connected *ifc)
ifp->name);
}
}
- struct pim_nexthop_cache *pnc = NULL;
- struct pim_rpf rpf;
- struct zclient *zclient = NULL;
-
- zclient = pim_zebra_zclient_get();
- /* RP config might come prior to (local RP's interface)
- IF UP event.
- In this case, pnc would not have pim enabled
- nexthops.
- Once Interface is UP and pim info is available,
- reregister
- with RNH address to receive update and add the
- interface as nexthop. */
- memset(&rpf, 0, sizeof(struct pim_rpf));
- rpf.rpf_addr = pim_addr_from_prefix(ifc->address);
- pnc = pim_nexthop_cache_find(pim_ifp->pim, &rpf);
- if (pnc)
- pim_sendmsg_zebra_rnh(pim_ifp->pim, zclient,
- pnc,
- ZEBRA_NEXTHOP_REGISTER);
+
+ /* RP config might come prior to the local RP interface's IF UP event.
+ * In this case, pnc would not have PIM-enabled nexthops. Once the
+ * interface is UP and PIM info is available, re-register the RNH
+ * address to receive updates and add the interface as nexthop.
+ */
+ pim_nht_get(pim_ifp->pim, pim_addr_from_prefix(ifc->address));
}
} /* pim */
@@ -1258,6 +1245,14 @@ static int gm_join_sock(const char *ifname, ifindex_t ifindex,
{
int join_fd;
+ if (pim_is_group_filtered(pim_ifp, &group_addr, &source_addr)) {
+ if (PIM_DEBUG_GM_EVENTS) {
+ zlog_debug("%s: join failed for (S,G)=(%pPAs,%pPAs) due to multicast boundary filtering",
+ __func__, &source_addr, &group_addr);
+ }
+ return -1;
+ }
+
pim_ifp->igmp_ifstat_joins_sent++;
join_fd = pim_socket_raw(IPPROTO_GM);
@@ -1464,8 +1459,7 @@ static void pim_if_gm_join_del_all(struct interface *ifp)
return;
for (ALL_LIST_ELEMENTS(pim_ifp->gm_join_list, node, nextnode, ij))
- pim_if_gm_join_del(ifp, ij->group_addr, ij->source_addr,
- GM_JOIN_STATIC);
+ pim_if_gm_join_del(ifp, ij->group_addr, ij->source_addr, ij->join_type);
}
ferr_r pim_if_static_group_add(struct interface *ifp, pim_addr group_addr,
@@ -2036,7 +2030,7 @@ void pim_pim_interface_delete(struct interface *ifp)
* pim_ifp->pim_neighbor_list.
*/
pim_sock_delete(ifp, "pim unconfigured on interface");
- pim_upstream_nh_if_update(pim_ifp->pim, ifp);
+ pim_nht_upstream_if_update(pim_ifp->pim, ifp);
if (!pim_ifp->gm_enable) {
pim_if_addr_del_all(ifp);
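pim_is_group_filtered() now takes an optional source so the new boundary access-list can match (S,G) as well as plain group prefixes, and gm_join_sock() refuses joins the boundary would filter. A hedged sketch of the combined predicate with stub matchers; the NULL-source handling here is an assumption for illustration, not a statement of exactly what pimd does:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stub matchers; pimd consults the interface's boundary prefix-list
 * (group only) and boundary access-list (source and group). */
static bool plist_denies_group(const char *group)
{
	(void)group;
	return false; /* pretend: no prefix-list match */
}

static bool acl_denies_sg(const char *source, const char *group)
{
	/* pretend: the ACL blocks all sources toward 239.0.0.0/8 */
	return source != NULL && group[0] == '2' && group[1] == '3' &&
	       group[2] == '9';
}

/* Combined boundary check in the shape of the extended signature:
 * a NULL source (e.g. IGMPv2 reports) skips the (S,G) ACL check. */
static bool is_group_filtered(const char *group, const char *source)
{
	if (plist_denies_group(group))
		return true;
	return acl_denies_sg(source, group);
}

int main(void)
{
	printf("%d\n", is_group_filtered("239.1.1.1", "10.0.0.1")); /* 1 */
	printf("%d\n", is_group_filtered("224.1.1.1", NULL));       /* 0 */
	return 0;
}
```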
diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h
index 95bac084d2..90a81a21d0 100644
--- a/pimd/pim_iface.h
+++ b/pimd/pim_iface.h
@@ -133,8 +133,10 @@ struct pim_interface {
uint32_t pim_dr_priority; /* config */
int pim_dr_num_nondrpri_neighbors; /* neighbors without dr_pri */
- /* boundary prefix-list */
- char *boundary_oil_plist;
+ /* boundary prefix-list (group) */
+ struct prefix_list *boundary_oil_plist;
+ /* boundary access-list (source and group) */
+ struct access_list *boundary_acl;
/* Turn on Active-Active for this interface */
bool activeactive;
diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c
index 1ba9bc45a2..12f424248f 100644
--- a/pimd/pim_igmp.c
+++ b/pimd/pim_igmp.c
@@ -666,7 +666,7 @@ static int igmp_v1_recv_report(struct gm_sock *igmp, struct in_addr from,
memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
- if (pim_is_group_filtered(ifp->info, &group_addr))
+ if (pim_is_group_filtered(ifp->info, &group_addr, NULL))
return -1;
/* non-existent group is created as INCLUDE {empty} */
diff --git a/pimd/pim_igmp_mtrace.c b/pimd/pim_igmp_mtrace.c
index 309da138d2..ad6f265101 100644
--- a/pimd/pim_igmp_mtrace.c
+++ b/pimd/pim_igmp_mtrace.c
@@ -16,6 +16,7 @@
#include "pim_oil.h"
#include "pim_ifchannel.h"
#include "pim_macro.h"
+#include "pim_nht.h"
#include "pim_igmp_mtrace.h"
static struct in_addr mtrace_primary_address(struct interface *ifp)
@@ -58,14 +59,14 @@ static bool mtrace_fwd_info_weak(struct pim_instance *pim,
memset(&nexthop, 0, sizeof(nexthop));
- if (!pim_nexthop_lookup(pim, &nexthop, mtracep->src_addr, 1)) {
+ if (!pim_nht_lookup(pim, &nexthop, mtracep->src_addr, 1)) {
if (PIM_DEBUG_MTRACE)
zlog_debug("mtrace not found neighbor");
return false;
}
if (PIM_DEBUG_MTRACE)
- zlog_debug("mtrace pim_nexthop_lookup OK");
+ zlog_debug("mtrace pim_nht_lookup OK");
if (PIM_DEBUG_MTRACE)
zlog_debug("mtrace next_hop=%pPAs", &nexthop.mrib_nexthop_addr);
@@ -353,7 +354,7 @@ static int mtrace_un_forward_packet(struct pim_instance *pim, struct ip *ip_hdr,
if (interface == NULL) {
memset(&nexthop, 0, sizeof(nexthop));
- if (!pim_nexthop_lookup(pim, &nexthop, ip_hdr->ip_dst, 0)) {
+ if (!pim_nht_lookup(pim, &nexthop, ip_hdr->ip_dst, 0)) {
if (PIM_DEBUG_MTRACE)
zlog_debug(
"Dropping mtrace packet, no route to destination");
@@ -535,7 +536,7 @@ static int mtrace_send_response(struct pim_instance *pim,
} else {
memset(&nexthop, 0, sizeof(nexthop));
/* TODO: should use unicast rib lookup */
- if (!pim_nexthop_lookup(pim, &nexthop, mtracep->rsp_addr, 1)) {
+ if (!pim_nht_lookup(pim, &nexthop, mtracep->rsp_addr, 1)) {
if (PIM_DEBUG_MTRACE)
zlog_debug(
"Dropped response qid=%ud, no route to response address",
diff --git a/pimd/pim_igmpv2.c b/pimd/pim_igmpv2.c
index 944dffdc33..720a4944fe 100644
--- a/pimd/pim_igmpv2.c
+++ b/pimd/pim_igmpv2.c
@@ -134,6 +134,9 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from,
ifp->name, group_str);
}
+ if (pim_is_group_filtered(pim_ifp, &group_addr, NULL))
+ return -1;
+
/*
* RFC 4604
* section 2.2.1
diff --git a/pimd/pim_igmpv3.c b/pimd/pim_igmpv3.c
index 2c5ad4d44b..7348d8130f 100644
--- a/pimd/pim_igmpv3.c
+++ b/pimd/pim_igmpv3.c
@@ -9,6 +9,8 @@
#include "memory.h"
#include "if.h"
#include "lib_errors.h"
+#include "plist.h"
+#include "plist_int.h"
#include "pimd.h"
#include "pim_instance.h"
@@ -507,6 +509,8 @@ static void allow(struct gm_sock *igmp, struct in_addr from,
struct in_addr *src_addr;
src_addr = sources + i;
+ if (pim_is_group_filtered(igmp->interface->info, &group_addr, src_addr))
+ continue;
source = igmp_get_source_by_addr(group, *src_addr, NULL);
if (!source)
@@ -646,7 +650,7 @@ void igmpv3_report_isex(struct gm_sock *igmp, struct in_addr from,
on_trace(__func__, ifp, from, group_addr, num_sources, sources);
- if (pim_is_group_filtered(ifp->info, &group_addr))
+ if (pim_is_group_filtered(ifp->info, &group_addr, NULL))
return;
/* non-existent group is created as INCLUDE {empty} */
@@ -1809,12 +1813,14 @@ static bool igmp_pkt_grp_addr_ok(struct interface *ifp, const char *from_str,
pim_ifp = ifp->info;
/* determine filtering status for group */
- if (pim_is_group_filtered(pim_ifp, &grp)) {
+ if (pim_is_group_filtered(pim_ifp, &grp, NULL)) {
if (PIM_DEBUG_GM_PACKETS) {
- zlog_debug(
- "Filtering IGMPv3 group record %pI4 from %s on %s per prefix-list %s",
- &grp.s_addr, from_str, ifp->name,
- pim_ifp->boundary_oil_plist);
+ zlog_debug("Filtering IGMPv3 group record %pI4 from %s on %s per prefix-list %s or access-list %s",
+ &grp.s_addr, from_str, ifp->name,
+ (pim_ifp->boundary_oil_plist ? pim_ifp->boundary_oil_plist->name
+ : "(not found)"),
+ (pim_ifp->boundary_acl ? pim_ifp->boundary_acl->name
+ : "(not found)"));
}
return false;
}
@@ -1943,11 +1949,9 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from,
sizeof(struct in_addr));
if (PIM_DEBUG_GM_PACKETS) {
- zlog_debug(
- " Recv IGMP report v3 from %s on %s: record=%d type=%d auxdatalen=%d sources=%d group=%pI4",
- from_str, ifp->name, i, rec_type,
- rec_auxdatalen, rec_num_sources,
- &rec_group);
+ zlog_debug(" Recv IGMP report v3 (type %d) from %s on %s: record=%d type=%d auxdatalen=%d sources=%d group=%pI4",
+ rec_type, from_str, ifp->name, i, rec_type, rec_auxdatalen,
+ rec_num_sources, &rec_group);
}
/* Scan sources */
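pim_is_group_filtered() gains an optional source argument, so a single helper now covers both group-wide boundaries and exact (S,G) filtering; the two call patterns used in the hunks above, as a sketch:

	/* group-only boundary check: pass NULL for the source */
	if (pim_is_group_filtered(pim_ifp, &group_addr, NULL))
		return;

	/* exact (S,G) check, e.g. per source record */
	if (pim_is_group_filtered(pim_ifp, &group_addr, src_addr))
		continue;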
diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c
index e18ae97d6f..3945c5923d 100644
--- a/pimd/pim_instance.c
+++ b/pimd/pim_instance.c
@@ -15,6 +15,7 @@
#include "pim_ssm.h"
#include "pim_rpf.h"
#include "pim_rp.h"
+#include "pim_nht.h"
#include "pim_mroute.h"
#include "pim_oil.h"
#include "pim_static.h"
@@ -46,14 +47,15 @@ static void pim_instance_terminate(struct pim_instance *pim)
pim_bsm_proc_free(pim);
- /* Traverse and cleanup rpf_hash */
- hash_clean_and_free(&pim->rpf_hash, (void *)pim_rp_list_hash_clean);
+ pim_nht_terminate(pim);
pim_if_terminate(pim);
pim_oil_terminate(pim);
+#if PIM_IPV == 4
pim_msdp_exit(pim);
+#endif /* PIM_IPV == 4 */
close(pim->reg_sock);
@@ -73,7 +75,6 @@ static void pim_instance_terminate(struct pim_instance *pim)
static struct pim_instance *pim_instance_init(struct vrf *vrf)
{
struct pim_instance *pim;
- char hash_name[64];
pim = XCALLOC(MTYPE_PIM_PIM_INSTANCE, sizeof(struct pim_instance));
@@ -91,15 +92,12 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf)
pim->spt.switchover = PIM_SPT_IMMEDIATE;
pim->spt.plist = NULL;
+#if PIM_IPV == 4
pim_msdp_init(pim, router->master);
+#endif /* PIM_IPV == 4 */
pim_vxlan_init(pim);
- snprintf(hash_name, sizeof(hash_name), "PIM %s RPF Hash", vrf->name);
- pim->rpf_hash = hash_create_size(256, pim_rpf_hash_key, pim_rpf_equal,
- hash_name);
-
- if (PIM_DEBUG_ZEBRA)
- zlog_debug("%s: NHT rpf hash init ", __func__);
+ pim_nht_init(pim);
pim->ssm_info = pim_ssm_init();
@@ -126,11 +124,6 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf)
if (pim->reg_sock < 0)
assert(0);
- /* MSDP global timer defaults. */
- pim->msdp.hold_time = PIM_MSDP_PEER_HOLD_TIME;
- pim->msdp.keep_alive = PIM_MSDP_PEER_KA_TIME;
- pim->msdp.connection_retry = PIM_MSDP_PEER_CONNECT_RETRY_TIME;
-
#if PIM_IPV == 4
pim_autorp_init(pim);
#endif
@@ -272,3 +265,13 @@ void pim_vrf_terminate(void)
vrf_terminate();
}
+
+bool pim_msdp_log_neighbor_events(const struct pim_instance *pim)
+{
+ return (pim->log_flags & PIM_MSDP_LOG_NEIGHBOR_EVENTS);
+}
+
+bool pim_msdp_log_sa_events(const struct pim_instance *pim)
+{
+ return (pim->log_flags & PIM_MSDP_LOG_SA_EVENTS);
+}
diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h
index 1b7815d86c..7f022111bc 100644
--- a/pimd/pim_instance.h
+++ b/pimd/pim_instance.h
@@ -115,7 +115,8 @@ struct pim_instance {
/* The name of the register-accept prefix-list */
char *register_plist;
- struct hash *rpf_hash;
+ struct hash *nht_hash;
+ enum pim_rpf_lookup_mode rpf_mode;
void *ssm_info; /* per-vrf SSM configuration */
@@ -150,7 +151,9 @@ struct pim_instance {
struct rb_pim_oil_head channel_oil_head;
+#if PIM_IPV == 4
struct pim_msdp msdp;
+#endif /* PIM_IPV == 4 */
struct pim_vxlan_instance vxlan;
struct pim_autorp *autorp;
@@ -192,6 +195,13 @@ struct pim_instance {
uint64_t gm_rx_drop_sys;
+ /** Log information flags. */
+ uint32_t log_flags;
+/** Log neighbor event messages. */
+#define PIM_MSDP_LOG_NEIGHBOR_EVENTS 0x01
+/** Log SA event messages. */
+#define PIM_MSDP_LOG_SA_EVENTS 0x02
+
bool stopping;
#if PIM_IPV == 6
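The new log_flags word and its wrappers replace the PIM_DEBUG_MSDP_EVENTS guard for operator-facing MSDP messages; a sketch of how a flag is set (the northbound callbacks appear later in this diff) and consumed:

	/* from the 'msdp log sa-events' northbound callback */
	SET_FLAG(pim->log_flags, PIM_MSDP_LOG_SA_EVENTS);

	/* at the logging sites, now emitted at INFO severity */
	if (pim_msdp_log_sa_events(pim))
		zlog_info("MSDP SA %s created", sa->sg_str);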
diff --git a/pimd/pim_join.c b/pimd/pim_join.c
index 2feafabb4d..7796e8b951 100644
--- a/pimd/pim_join.c
+++ b/pimd/pim_join.c
@@ -245,7 +245,7 @@ int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
uint16_t msg_num_pruned_sources;
int source;
struct pim_ifchannel *starg_ch = NULL, *sg_ch = NULL;
- bool filtered = false;
+ bool group_filtered = false;
memset(&sg, 0, sizeof(sg));
addr_offset = pim_parse_addr_group(&sg, buf, pastend - buf);
@@ -275,7 +275,7 @@ int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
&src_addr, ifp->name);
/* boundary check */
- filtered = pim_is_group_filtered(pim_ifp, &sg.grp);
+ group_filtered = pim_is_group_filtered(pim_ifp, &sg.grp, NULL);
/* Scan joined sources */
for (source = 0; source < msg_num_joined_sources; ++source) {
@@ -287,8 +287,8 @@ int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
buf += addr_offset;
- /* if we are filtering this group, skip the join */
- if (filtered)
+ /* if we are filtering this group or (S,G), skip the join */
+ if (group_filtered || pim_is_group_filtered(pim_ifp, &sg.grp, &sg.src))
continue;
recv_join(ifp, neigh, msg_holdtime, msg_upstream_addr,
@@ -312,10 +312,6 @@ int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
buf += addr_offset;
- /* if we are filtering this group, skip the prune */
- if (filtered)
- continue;
-
recv_prune(ifp, neigh, msg_holdtime, msg_upstream_addr,
&sg, msg_source_flags);
/*
@@ -361,7 +357,7 @@ int pim_joinprune_recv(struct interface *ifp, struct pim_neighbor *neigh,
}
}
}
- if (starg_ch && !filtered)
+ if (starg_ch && !group_filtered)
pim_ifchannel_set_star_g_join_state(starg_ch, 1, 0);
starg_ch = NULL;
} /* scan groups */
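Net effect of the hunks above, as a sketch: joins are skipped when either the whole group or the exact (S,G) is filtered, while prunes are now processed unconditionally (presumably so existing state can still be torn down; the diff does not state the rationale):

	/* join: honor both the group-wide and the (S,G) filter */
	if (group_filtered || pim_is_group_filtered(pim_ifp, &sg.grp, &sg.src))
		continue;
	recv_join(ifp, neigh, msg_holdtime, msg_upstream_addr, &sg, msg_source_flags);

	/* prune: no filter check */
	recv_prune(ifp, neigh, msg_holdtime, msg_upstream_addr, &sg, msg_source_flags);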
diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c
index 9d290c3c6f..93bdd8dac9 100644
--- a/pimd/pim_mroute.c
+++ b/pimd/pim_mroute.c
@@ -35,6 +35,8 @@
#include "pim_sock.h"
#include "pim_vxlan.h"
#include "pim_msg.h"
+#include "pim_util.h"
+#include "pim_nht.h"
static void mroute_read_on(struct pim_instance *pim);
static int pim_upstream_mroute_update(struct channel_oil *c_oil,
@@ -271,7 +273,9 @@ int pim_mroute_msg_nocache(int fd, struct interface *ifp, const kernmsg *msg)
*oil_incoming_vif(up->channel_oil) >= MAXVIFS) {
pim_upstream_mroute_iif_update(up->channel_oil, __func__);
}
- pim_register_join(up);
+
+ if (!pim_is_group_filtered(pim_ifp, &sg.grp, &sg.src))
+ pim_register_join(up);
/* if we have receiver, inherit from parent */
pim_upstream_inherited_olist_decide(pim_ifp->pim, up);
@@ -563,8 +567,7 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
* setting the SPTBIT to true
*/
if (!(pim_addr_is_any(up->upstream_register)) &&
- pim_nexthop_lookup(pim_ifp->pim, &source,
- up->upstream_register, 0)) {
+ pim_nht_lookup(pim_ifp->pim, &source, up->upstream_register, 0)) {
pim_register_stop_send(source.interface, &sg,
pim_ifp->primary_address,
up->upstream_register);
@@ -577,9 +580,7 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
__func__);
} else {
if (I_am_RP(pim_ifp->pim, up->sg.grp)) {
- if (pim_nexthop_lookup(pim_ifp->pim, &source,
- up->upstream_register,
- 0))
+ if (pim_nht_lookup(pim_ifp->pim, &source, up->upstream_register, 0))
pim_register_stop_send(
source.interface, &sg,
pim_ifp->primary_address,
@@ -632,7 +633,8 @@ int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
pim_upstream_keep_alive_timer_start(
up, pim_ifp->pim->keep_alive_time);
up->channel_oil->cc.pktcnt++;
- pim_register_join(up);
+ if (!pim_is_group_filtered(pim_ifp, &sg.grp, &sg.src))
+ pim_register_join(up);
pim_upstream_inherited_olist(pim_ifp->pim, up);
if (!up->channel_oil->installed)
pim_upstream_mroute_add(up->channel_oil, __func__);
diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c
index 215cc3c502..5e5ee5e91f 100644
--- a/pimd/pim_msdp.c
+++ b/pimd/pim_msdp.c
@@ -26,13 +26,12 @@
#include "pim_time.h"
#include "pim_upstream.h"
#include "pim_oil.h"
+#include "pim_nht.h"
#include "pim_msdp.h"
#include "pim_msdp_packet.h"
#include "pim_msdp_socket.h"
-// struct pim_msdp pim_msdp, *msdp = &pim_msdp;
-
static void pim_msdp_peer_listen(struct pim_msdp_peer *mp);
static void pim_msdp_peer_cr_timer_setup(struct pim_msdp_peer *mp, bool start);
static void pim_msdp_peer_ka_timer_setup(struct pim_msdp_peer *mp, bool start);
@@ -46,21 +45,34 @@ static void pim_msdp_sa_deref(struct pim_msdp_sa *sa,
static int pim_msdp_mg_mbr_comp(const void *p1, const void *p2);
static void pim_msdp_mg_mbr_free(struct pim_msdp_mg_mbr *mbr);
-/************************ SA cache management ******************************/
-static void pim_msdp_sa_timer_expiry_log(struct pim_msdp_sa *sa,
- const char *timer_str)
+void pim_msdp_originator_id(struct pim_instance *pim, const struct prefix *group,
+ struct in_addr *originator_id)
{
- zlog_debug("MSDP SA %s %s timer expired", sa->sg_str, timer_str);
+ struct rp_info *rp_info;
+
+ originator_id->s_addr = INADDR_ANY;
+
+ /* Originator ID was configured, use it. */
+ if (pim->msdp.originator_id.s_addr != INADDR_ANY) {
+ *originator_id = pim->msdp.originator_id;
+ return;
+ }
+
+ rp_info = pim_rp_find_match_group(pim, group);
+ if (rp_info) {
+ *originator_id = rp_info->rp.rpf_addr;
+ return;
+ }
}
+/************************ SA cache management ******************************/
/* RFC-3618:Sec-5.1 - global active source advertisement timer */
static void pim_msdp_sa_adv_timer_cb(struct event *t)
{
struct pim_instance *pim = EVENT_ARG(t);
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA advertisement timer expired");
- }
+ if (pim_msdp_log_sa_events(pim))
+ zlog_info("MSDP SA advertisement timer expired");
pim_msdp_sa_adv_timer_setup(pim, true /* start */);
pim_msdp_pkt_sa_tx(pim);
@@ -83,9 +95,8 @@ static void pim_msdp_sa_state_timer_cb(struct event *t)
sa = EVENT_ARG(t);
- if (PIM_DEBUG_MSDP_EVENTS) {
- pim_msdp_sa_timer_expiry_log(sa, "state");
- }
+ if (pim_msdp_log_sa_events(sa->pim))
+ zlog_info("MSDP SA %s state timer expired", sa->sg_str);
pim_msdp_sa_deref(sa, PIM_MSDP_SAF_PEER);
}
@@ -120,9 +131,8 @@ static void pim_msdp_sa_upstream_del(struct pim_msdp_sa *sa)
sa->flags &= ~PIM_MSDP_SAF_UP_DEL_IN_PROG;
}
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA %s de-referenced SPT", sa->sg_str);
- }
+ if (pim_msdp_log_sa_events(sa->pim))
+ zlog_info("MSDP SA %s de-referenced SPT", sa->sg_str);
}
static bool pim_msdp_sa_upstream_add_ok(struct pim_msdp_sa *sa,
@@ -185,10 +195,8 @@ static void pim_msdp_sa_upstream_update(struct pim_msdp_sa *sa,
if (up && (PIM_UPSTREAM_FLAG_TEST_SRC_MSDP(up->flags))) {
/* somehow we lost track of the upstream ptr? best log it */
sa->up = up;
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA %s SPT reference missing",
- sa->sg_str);
- }
+ if (pim_msdp_log_sa_events(sa->pim))
+ zlog_info("MSDP SA %s SPT reference missing", sa->sg_str);
return;
}
@@ -204,14 +212,11 @@ static void pim_msdp_sa_upstream_update(struct pim_msdp_sa *sa,
/* should we also start the kat in parallel? we will need it
* when the
* SA ages out */
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA %s referenced SPT", sa->sg_str);
- }
+ if (pim_msdp_log_sa_events(sa->pim))
+ zlog_info("MSDP SA %s referenced SPT", sa->sg_str);
} else {
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA %s SPT reference failed",
- sa->sg_str);
- }
+ if (pim_msdp_log_sa_events(sa->pim))
+ zlog_info("MSDP SA %s SPT reference failed", sa->sg_str);
}
}
@@ -240,9 +245,8 @@ static struct pim_msdp_sa *pim_msdp_sa_new(struct pim_instance *pim,
sa = hash_get(pim->msdp.sa_hash, sa, hash_alloc_intern);
listnode_add_sort(pim->msdp.sa_list, sa);
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA %s created", sa->sg_str);
- }
+ if (pim_msdp_log_sa_events(pim))
+ zlog_info("MSDP SA %s created", sa->sg_str);
return sa;
}
@@ -282,9 +286,8 @@ static void pim_msdp_sa_del(struct pim_msdp_sa *sa)
listnode_delete(sa->pim->msdp.sa_list, sa);
hash_release(sa->pim->msdp.sa_hash, sa);
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA %s deleted", sa->sg_str);
- }
+ if (pim_msdp_log_sa_events(sa->pim))
+ zlog_info("MSDP SA %s deleted", sa->sg_str);
/* free up any associated memory */
pim_msdp_sa_free(sa);
@@ -333,10 +336,9 @@ static void pim_msdp_sa_deref(struct pim_msdp_sa *sa,
if ((sa->flags & PIM_MSDP_SAF_LOCAL)) {
if (flags & PIM_MSDP_SAF_LOCAL) {
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA %s local reference removed",
- sa->sg_str);
- }
+ if (pim_msdp_log_sa_events(sa->pim))
+ zlog_info("MSDP SA %s local reference removed", sa->sg_str);
+
if (sa->pim->msdp.local_cnt)
--sa->pim->msdp.local_cnt;
}
@@ -346,10 +348,9 @@ static void pim_msdp_sa_deref(struct pim_msdp_sa *sa,
if (flags & PIM_MSDP_SAF_PEER) {
struct in_addr rp;
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA %s peer reference removed",
- sa->sg_str);
- }
+ if (pim_msdp_log_sa_events(sa->pim))
+ zlog_info("MSDP SA %s peer reference removed", sa->sg_str);
+
pim_msdp_sa_state_timer_setup(sa, false /* start */);
rp.s_addr = INADDR_ANY;
pim_msdp_sa_peer_ip_set(sa, NULL /* mp */, rp);
@@ -374,9 +375,17 @@ void pim_msdp_sa_ref(struct pim_instance *pim, struct pim_msdp_peer *mp,
pim_sgaddr *sg, struct in_addr rp)
{
struct pim_msdp_sa *sa;
- struct rp_info *rp_info;
struct prefix grp;
+ /* Check peer SA limit. */
+ if (mp && mp->sa_limit && mp->sa_cnt >= mp->sa_limit) {
+ if (pim_msdp_log_sa_events(pim))
+ zlog_debug("MSDP peer %pI4 reject SA (%pI4, %pI4): SA limit %u of %u",
+ &mp->peer, &sg->src, &sg->grp, mp->sa_cnt, mp->sa_limit);
+
+ return;
+ }
+
sa = pim_msdp_sa_add(pim, sg, rp);
if (!sa) {
return;
@@ -386,10 +395,8 @@ void pim_msdp_sa_ref(struct pim_instance *pim, struct pim_msdp_peer *mp,
if (mp) {
if (!(sa->flags & PIM_MSDP_SAF_PEER)) {
sa->flags |= PIM_MSDP_SAF_PEER;
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA %s added by peer",
- sa->sg_str);
- }
+ if (pim_msdp_log_sa_events(pim))
+ zlog_info("MSDP SA %s added by peer", sa->sg_str);
}
pim_msdp_sa_peer_ip_set(sa, mp, rp);
/* start/re-start the state timer to prevent cache expiry */
@@ -403,18 +410,12 @@ void pim_msdp_sa_ref(struct pim_instance *pim, struct pim_msdp_peer *mp,
if (!(sa->flags & PIM_MSDP_SAF_LOCAL)) {
sa->flags |= PIM_MSDP_SAF_LOCAL;
++sa->pim->msdp.local_cnt;
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP SA %s added locally",
- sa->sg_str);
- }
+ if (pim_msdp_log_sa_events(pim))
+ zlog_info("MSDP SA %s added locally", sa->sg_str);
+
/* send an immediate SA update to peers */
pim_addr_to_prefix(&grp, sa->sg.grp);
- rp_info = pim_rp_find_match_group(pim, &grp);
- if (rp_info) {
- sa->rp = rp_info->rp.rpf_addr;
- } else {
- sa->rp = pim->msdp.originator_id;
- }
+ pim_msdp_originator_id(pim, &grp, &sa->rp);
pim_msdp_pkt_sa_tx_one(sa);
}
sa->flags &= ~PIM_MSDP_SAF_STALE;
@@ -705,11 +706,14 @@ bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp)
}
/* check if the MSDP peer is the nexthop for the RP */
- if (pim_nexthop_lookup(mp->pim, &nexthop, rp, 0) &&
+ if (pim_nht_lookup(mp->pim, &nexthop, rp, 0) &&
nexthop.mrib_nexthop_addr.s_addr == mp->peer.s_addr) {
return true;
}
+ if (pim_msdp_log_sa_events(mp->pim))
+ zlog_info("MSDP peer %pI4 RPF failure for %pI4", &mp->peer, &rp);
+
return false;
}
@@ -744,7 +748,7 @@ static void pim_msdp_peer_state_chg_log(struct pim_msdp_peer *mp)
char state_str[PIM_MSDP_STATE_STRLEN];
pim_msdp_state_dump(mp->state, state_str, sizeof(state_str));
- zlog_debug("MSDP peer %s state chg to %s", mp->key_str, state_str);
+ zlog_info("MSDP peer %s state changed to %s", mp->key_str, state_str);
}
/* MSDP Connection State Machine actions (defined in RFC-3618:Sec-11.2) */
@@ -752,10 +756,13 @@ static void pim_msdp_peer_state_chg_log(struct pim_msdp_peer *mp)
* a tcp connection will be made */
static void pim_msdp_peer_connect(struct pim_msdp_peer *mp)
{
+ /* Stop here if we are shut down. */
+ if (mp->pim->msdp.shutdown)
+ return;
+
mp->state = PIM_MSDP_CONNECTING;
- if (PIM_DEBUG_MSDP_EVENTS) {
+ if (pim_msdp_log_neighbor_events(mp->pim))
pim_msdp_peer_state_chg_log(mp);
- }
pim_msdp_peer_cr_timer_setup(mp, true /* start */);
}
@@ -763,10 +770,13 @@ static void pim_msdp_peer_connect(struct pim_msdp_peer *mp)
/* 11.2.A3: passive peer - just listen for connections */
static void pim_msdp_peer_listen(struct pim_msdp_peer *mp)
{
+ /* Stop here if we are shut down. */
+ if (mp->pim->msdp.shutdown)
+ return;
+
mp->state = PIM_MSDP_LISTEN;
- if (PIM_DEBUG_MSDP_EVENTS) {
+ if (pim_msdp_log_neighbor_events(mp->pim))
pim_msdp_peer_state_chg_log(mp);
- }
/* this is intentionally asymmetric i.e. we set up listen-socket when
* the
@@ -790,9 +800,8 @@ void pim_msdp_peer_established(struct pim_msdp_peer *mp)
mp->state = PIM_MSDP_ESTABLISHED;
mp->uptime = pim_time_monotonic_sec();
- if (PIM_DEBUG_MSDP_EVENTS) {
+ if (pim_msdp_log_neighbor_events(mp->pim))
pim_msdp_peer_state_chg_log(mp);
- }
/* stop retry timer on active peers */
pim_msdp_peer_cr_timer_setup(mp, false /* start */);
@@ -816,9 +825,9 @@ void pim_msdp_peer_stop_tcp_conn(struct pim_msdp_peer *mp, bool chg_state)
++mp->est_flaps;
}
mp->state = PIM_MSDP_INACTIVE;
- if (PIM_DEBUG_MSDP_EVENTS) {
+
+ if (pim_msdp_log_neighbor_events(mp->pim))
pim_msdp_peer_state_chg_log(mp);
- }
}
if (PIM_DEBUG_MSDP_INTERNAL) {
@@ -851,10 +860,10 @@ void pim_msdp_peer_stop_tcp_conn(struct pim_msdp_peer *mp, bool chg_state)
/* RFC-3618:Sec-5.6 - stop the peer tcp connection and startover */
void pim_msdp_peer_reset_tcp_conn(struct pim_msdp_peer *mp, const char *rc_str)
{
- if (PIM_DEBUG_EVENTS) {
- zlog_debug("MSDP peer %s tcp reset %s", mp->key_str, rc_str);
- snprintf(mp->last_reset, sizeof(mp->last_reset), "%s", rc_str);
- }
+ if (pim_msdp_log_neighbor_events(mp->pim))
+ zlog_info("MSDP peer %s tcp reset %s", mp->key_str, rc_str);
+
+ snprintf(mp->last_reset, sizeof(mp->last_reset), "%s", rc_str);
/* close the connection and transition to listening or connecting */
pim_msdp_peer_stop_tcp_conn(mp, true /* chg_state */);
@@ -865,12 +874,6 @@ void pim_msdp_peer_reset_tcp_conn(struct pim_msdp_peer *mp, const char *rc_str)
}
}
-static void pim_msdp_peer_timer_expiry_log(struct pim_msdp_peer *mp,
- const char *timer_str)
-{
- zlog_debug("MSDP peer %s %s timer expired", mp->key_str, timer_str);
-}
-
/* RFC-3618:Sec-5.4 - peer hold timer */
static void pim_msdp_peer_hold_timer_cb(struct event *t)
{
@@ -878,17 +881,16 @@ static void pim_msdp_peer_hold_timer_cb(struct event *t)
mp = EVENT_ARG(t);
- if (PIM_DEBUG_MSDP_EVENTS) {
- pim_msdp_peer_timer_expiry_log(mp, "hold");
- }
+ if (pim_msdp_log_neighbor_events(mp->pim))
+ zlog_info("MSDP peer %s hold timer expired", mp->key_str);
if (mp->state != PIM_MSDP_ESTABLISHED) {
return;
}
- if (PIM_DEBUG_MSDP_EVENTS) {
+ if (pim_msdp_log_neighbor_events(mp->pim))
pim_msdp_peer_state_chg_log(mp);
- }
+
pim_msdp_peer_reset_tcp_conn(mp, "ht-expired");
}
@@ -910,9 +912,8 @@ static void pim_msdp_peer_ka_timer_cb(struct event *t)
mp = EVENT_ARG(t);
- if (PIM_DEBUG_MSDP_EVENTS) {
- pim_msdp_peer_timer_expiry_log(mp, "ka");
- }
+ if (pim_msdp_log_neighbor_events(mp->pim))
+ zlog_info("MSDP peer %s keep alive timer expired", mp->key_str);
pim_msdp_pkt_ka_tx(mp);
pim_msdp_peer_ka_timer_setup(mp, true /* start */);
@@ -970,9 +971,8 @@ static void pim_msdp_peer_cr_timer_cb(struct event *t)
mp = EVENT_ARG(t);
- if (PIM_DEBUG_MSDP_EVENTS) {
- pim_msdp_peer_timer_expiry_log(mp, "connect-retry");
- }
+ if (pim_msdp_log_neighbor_events(mp->pim))
+ zlog_info("MSDP peer %s connection retry timer expired", mp->key_str);
if (mp->state != PIM_MSDP_CONNECTING || PIM_MSDP_PEER_IS_LISTENER(mp)) {
return;
@@ -1012,16 +1012,6 @@ void pim_msdp_peer_pkt_txed(struct pim_msdp_peer *mp)
}
}
-static void pim_msdp_addr2su(union sockunion *su, struct in_addr addr)
-{
- sockunion_init(su);
- su->sin.sin_addr = addr;
- su->sin.sin_family = AF_INET;
-#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
- su->sin.sin_len = sizeof(struct sockaddr_in);
-#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
-}
-
/* 11.2.A1: create a new peer and transition state to listen or connecting */
struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim,
const struct in_addr *peer,
@@ -1037,11 +1027,7 @@ struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim,
mp->pim = pim;
mp->peer = *peer;
pim_inet4_dump("<peer?>", mp->peer, mp->key_str, sizeof(mp->key_str));
- pim_msdp_addr2su(&mp->su_peer, mp->peer);
mp->local = *local;
- /* XXX: originator_id setting needs to move to the mesh group */
- pim->msdp.originator_id = *local;
- pim_msdp_addr2su(&mp->su_local, mp->local);
if (mesh_group_name)
mp->mesh_group_name =
XSTRDUP(MTYPE_PIM_MSDP_MG_NAME, mesh_group_name);
@@ -1063,8 +1049,8 @@ struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim,
mp = hash_get(pim->msdp.peer_hash, mp, hash_alloc_intern);
listnode_add_sort(pim->msdp.peer_list, mp);
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP peer %s created", mp->key_str);
+ if (pim_msdp_log_neighbor_events(pim)) {
+ zlog_info("MSDP peer %s created", mp->key_str);
pim_msdp_peer_state_chg_log(mp);
}
@@ -1078,8 +1064,7 @@ struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim,
return mp;
}
-struct pim_msdp_peer *pim_msdp_peer_find(struct pim_instance *pim,
- struct in_addr peer_addr)
+struct pim_msdp_peer *pim_msdp_peer_find(const struct pim_instance *pim, struct in_addr peer_addr)
{
struct pim_msdp_peer lookup;
@@ -1129,9 +1114,8 @@ void pim_msdp_peer_del(struct pim_msdp_peer **mp)
listnode_delete((*mp)->pim->msdp.peer_list, *mp);
hash_release((*mp)->pim->msdp.peer_hash, *mp);
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP peer %s deleted", (*mp)->key_str);
- }
+ if (pim_msdp_log_neighbor_events((*mp)->pim))
+ zlog_info("MSDP peer %s deleted", (*mp)->key_str);
/* free up any associated memory */
pim_msdp_peer_free(*mp);
@@ -1206,10 +1190,8 @@ void pim_msdp_mg_free(struct pim_instance *pim, struct pim_msdp_mg **mgp)
for (ALL_LIST_ELEMENTS((*mgp)->mbr_list, n, nn, mbr))
pim_msdp_mg_mbr_del((*mgp), mbr);
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP mesh-group %s deleted",
- (*mgp)->mesh_group_name);
- }
+ if (pim_msdp_log_neighbor_events(pim))
+ zlog_info("MSDP mesh-group %s deleted", (*mgp)->mesh_group_name);
XFREE(MTYPE_PIM_MSDP_MG_NAME, (*mgp)->mesh_group_name);
@@ -1226,15 +1208,14 @@ struct pim_msdp_mg *pim_msdp_mg_new(struct pim_instance *pim,
struct pim_msdp_mg *mg;
mg = XCALLOC(MTYPE_PIM_MSDP_MG, sizeof(*mg));
-
+ mg->pim = pim;
mg->mesh_group_name = XSTRDUP(MTYPE_PIM_MSDP_MG_NAME, mesh_group_name);
mg->mbr_list = list_new();
mg->mbr_list->del = (void (*)(void *))pim_msdp_mg_mbr_free;
mg->mbr_list->cmp = (int (*)(void *, void *))pim_msdp_mg_mbr_comp;
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP mesh-group %s created", mg->mesh_group_name);
- }
+ if (pim_msdp_log_neighbor_events(pim))
+ zlog_info("MSDP mesh-group %s created", mg->mesh_group_name);
SLIST_INSERT_HEAD(&pim->msdp.mglist, mg, mg_entry);
@@ -1268,12 +1249,10 @@ void pim_msdp_mg_mbr_del(struct pim_msdp_mg *mg, struct pim_msdp_mg_mbr *mbr)
}
listnode_delete(mg->mbr_list, mbr);
- if (PIM_DEBUG_MSDP_EVENTS) {
- char ip_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<mbr?>", mbr->mbr_ip, ip_str, sizeof(ip_str));
- zlog_debug("MSDP mesh-group %s mbr %s deleted",
- mg->mesh_group_name, ip_str);
- }
+ if (pim_msdp_log_neighbor_events(mg->pim))
+ zlog_info("MSDP mesh-group %s neighbor %pI4 deleted", mg->mesh_group_name,
+ &mbr->mbr_ip);
+
pim_msdp_mg_mbr_free(mbr);
if (mg->mbr_cnt) {
--mg->mbr_cnt;
@@ -1290,10 +1269,9 @@ static void pim_msdp_src_del(struct pim_msdp_mg *mg)
if (mbr->mp)
pim_msdp_peer_del(&mbr->mp);
}
- if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP mesh-group %s src cleared",
- mg->mesh_group_name);
- }
+
+ if (pim_msdp_log_neighbor_events(mg->pim))
+ zlog_info("MSDP mesh-group %s source cleared", mg->mesh_group_name);
}
/*********************** MSDP feature APIs *********************************/
@@ -1305,6 +1283,22 @@ int pim_msdp_config_write(struct pim_instance *pim, struct vty *vty)
char src_str[INET_ADDRSTRLEN];
int count = 0;
+ if (pim->msdp.hold_time != PIM_MSDP_PEER_HOLD_TIME ||
+ pim->msdp.keep_alive != PIM_MSDP_PEER_KA_TIME ||
+ pim->msdp.connection_retry != PIM_MSDP_PEER_CONNECT_RETRY_TIME) {
+ vty_out(vty, " msdp timers %u %u", pim->msdp.hold_time, pim->msdp.keep_alive);
+ if (pim->msdp.connection_retry != PIM_MSDP_PEER_CONNECT_RETRY_TIME)
+ vty_out(vty, " %u", pim->msdp.connection_retry);
+ vty_out(vty, "\n");
+ }
+
+ if (pim_msdp_log_neighbor_events(pim))
+ vty_out(vty, " msdp log neighbor-events\n");
+ if (pim_msdp_log_sa_events(pim))
+ vty_out(vty, " msdp log sa-events\n");
+ if (pim->msdp.shutdown)
+ vty_out(vty, " msdp shutdown\n");
+
if (SLIST_EMPTY(&pim->msdp.mglist))
return count;
@@ -1353,9 +1347,18 @@ bool pim_msdp_peer_config_write(struct vty *vty, struct pim_instance *pim)
vty_out(vty, " msdp peer %pI4 sa-filter %s out\n",
&mp->peer, mp->acl_out);
+ if (mp->sa_limit)
+ vty_out(vty, " msdp peer %pI4 sa-limit %u\n", &mp->peer, mp->sa_limit);
+
written = true;
}
+ if (pim->msdp.originator_id.s_addr != INADDR_ANY)
+ vty_out(vty, " msdp originator-id %pI4\n", &pim->msdp.originator_id);
+
+ if (pim->msdp.shutdown)
+ vty_out(vty, " msdp shutdown\n");
+
return written;
}
@@ -1395,6 +1398,11 @@ void pim_msdp_init(struct pim_instance *pim, struct event_loop *master)
pim->msdp.sa_list = list_new();
pim->msdp.sa_list->del = (void (*)(void *))pim_msdp_sa_free;
pim->msdp.sa_list->cmp = (int (*)(void *, void *))pim_msdp_sa_comp;
+
+ /* MSDP global timer defaults. */
+ pim->msdp.hold_time = PIM_MSDP_PEER_HOLD_TIME;
+ pim->msdp.keep_alive = PIM_MSDP_PEER_KA_TIME;
+ pim->msdp.connection_retry = PIM_MSDP_PEER_CONNECT_RETRY_TIME;
}
/* counterpart to MSDP init; XXX: unused currently */
@@ -1439,9 +1447,8 @@ void pim_msdp_mg_src_add(struct pim_instance *pim, struct pim_msdp_mg *mg,
/* No new address, disable everyone. */
if (ai->s_addr == INADDR_ANY) {
- if (PIM_DEBUG_MSDP_EVENTS)
- zlog_debug("MSDP mesh-group %s src unset",
- mg->mesh_group_name);
+ if (pim_msdp_log_neighbor_events(pim))
+ zlog_info("MSDP mesh-group %s source unset", mg->mesh_group_name);
return;
}
@@ -1450,9 +1457,8 @@ void pim_msdp_mg_src_add(struct pim_instance *pim, struct pim_msdp_mg *mg,
mbr->mp = pim_msdp_peer_add(pim, &mbr->mbr_ip, &mg->src_ip,
mg->mesh_group_name);
- if (PIM_DEBUG_MSDP_EVENTS)
- zlog_debug("MSDP mesh-group %s src %pI4 set",
- mg->mesh_group_name, &mg->src_ip);
+ if (pim_msdp_log_neighbor_events(pim))
+ zlog_info("MSDP mesh-group %s source %pI4 set", mg->mesh_group_name, &mg->src_ip);
}
struct pim_msdp_mg_mbr *pim_msdp_mg_mbr_add(struct pim_instance *pim,
@@ -1470,11 +1476,69 @@ struct pim_msdp_mg_mbr *pim_msdp_mg_mbr_add(struct pim_instance *pim,
mbr->mp = pim_msdp_peer_add(pim, &mbr->mbr_ip, &mg->src_ip,
mg->mesh_group_name);
- if (PIM_DEBUG_MSDP_EVENTS)
- zlog_debug("MSDP mesh-group %s mbr %pI4 created",
- mg->mesh_group_name, &mbr->mbr_ip);
+ if (pim_msdp_log_neighbor_events(pim))
+ zlog_info("MSDP mesh-group %s neighbor %pI4 created", mg->mesh_group_name,
+ &mbr->mbr_ip);
++mg->mbr_cnt;
return mbr;
}
+
+/* MSDP on RP needs to know if a source is registerable to this RP */
+static void pim_upstream_msdp_reg_timer(struct event *t)
+{
+ struct pim_upstream *up = EVENT_ARG(t);
+ struct pim_instance *pim = up->channel_oil->pim;
+
+ /* source is no longer active - pull the SA from MSDP's cache */
+ pim_msdp_sa_local_del(pim, &up->sg);
+}
+
+void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up)
+{
+ EVENT_OFF(up->t_msdp_reg_timer);
+ event_add_timer(router->master, pim_upstream_msdp_reg_timer, up, PIM_MSDP_REG_RXED_PERIOD,
+ &up->t_msdp_reg_timer);
+
+ pim_msdp_sa_local_update(up);
+}
+
+void pim_msdp_shutdown(struct pim_instance *pim, bool state)
+{
+ struct pim_msdp_peer *peer;
+ struct listnode *node;
+
+ /* Same value, nothing to do. */
+ if (pim->msdp.shutdown == state)
+ return;
+
+ if (state) {
+ pim->msdp.shutdown = true;
+
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, node, peer)) {
+ /* Stop the tcp connection and shutdown all timers */
+ pim_msdp_peer_stop_tcp_conn(peer, true);
+
+ /* Stop listening socket if any. */
+ event_cancel(&peer->auth_listen_ev);
+ if (peer->auth_listen_sock != -1)
+ close(peer->auth_listen_sock);
+
+ /* Disable and remove listener flag. */
+ UNSET_FLAG(pim->msdp.flags, PIM_MSDPF_ENABLE | PIM_MSDPF_LISTENER);
+ }
+ } else {
+ pim->msdp.shutdown = false;
+
+ for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, node, peer)) {
+ /* Start connection again. */
+ if (PIM_MSDP_PEER_IS_LISTENER(peer))
+ pim_msdp_peer_listen(peer);
+ else
+ pim_msdp_peer_connect(peer);
+
+ SET_FLAG(pim->msdp.flags, PIM_MSDPF_ENABLE);
+ }
+ }
+}
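pim_msdp_shutdown() is idempotent and intended to be driven from configuration; a minimal usage sketch (its northbound hook, pim_msdp_shutdown_modify(), appears in pim_nb_config.c below):

	/* 'msdp shutdown': stop all peer TCP sessions and listeners */
	pim_msdp_shutdown(pim, true);

	/* 'no msdp shutdown': peers resume in LISTEN or CONNECTING per role */
	pim_msdp_shutdown(pim, false);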
diff --git a/pimd/pim_msdp.h b/pimd/pim_msdp.h
index f77b0e1a3a..4edb6e6166 100644
--- a/pimd/pim_msdp.h
+++ b/pimd/pim_msdp.h
@@ -114,9 +114,6 @@ struct pim_msdp_peer {
enum pim_msdp_peer_state state;
enum pim_msdp_peer_flags flags;
- /* TCP socket info */
- union sockunion su_local;
- union sockunion su_peer;
int fd;
/* protocol timers */
@@ -155,6 +152,9 @@ struct pim_msdp_peer {
char *acl_in;
/** SA output access list name. */
char *acl_out;
+
+ /** SA maximum amount. */
+ uint32_t sa_limit;
};
struct pim_msdp_mg_mbr {
@@ -168,6 +168,7 @@ struct pim_msdp_mg {
struct in_addr src_ip;
uint32_t mbr_cnt;
struct list *mbr_list;
+ struct pim_instance *pim;
/** Belongs to PIM instance list. */
SLIST_ENTRY(pim_msdp_mg) mg_entry;
@@ -218,6 +219,9 @@ struct pim_msdp {
uint32_t keep_alive;
/** MSDP global connection retry period. */
uint32_t connection_retry;
+
+ /** MSDP administrative shutdown state. */
+ bool shutdown;
};
#define PIM_MSDP_PEER_READ_ON(mp) \
@@ -231,15 +235,12 @@ struct pim_msdp {
#define PIM_MSDP_PEER_READ_OFF(mp) event_cancel(&mp->t_read)
#define PIM_MSDP_PEER_WRITE_OFF(mp) event_cancel(&mp->t_write)
-#if PIM_IPV != 6
-// struct pim_msdp *msdp;
struct pim_instance;
void pim_msdp_init(struct pim_instance *pim, struct event_loop *master);
void pim_msdp_exit(struct pim_instance *pim);
char *pim_msdp_state_dump(enum pim_msdp_peer_state state, char *buf,
int buf_size);
-struct pim_msdp_peer *pim_msdp_peer_find(struct pim_instance *pim,
- struct in_addr peer_addr);
+struct pim_msdp_peer *pim_msdp_peer_find(const struct pim_instance *pim, struct in_addr peer_addr);
void pim_msdp_peer_established(struct pim_msdp_peer *mp);
void pim_msdp_peer_pkt_rxed(struct pim_msdp_peer *mp);
void pim_msdp_peer_stop_tcp_conn(struct pim_msdp_peer *mp, bool chg_state);
@@ -260,6 +261,8 @@ void pim_msdp_up_del(struct pim_instance *pim, pim_sgaddr *sg);
enum pim_msdp_err pim_msdp_mg_del(struct pim_instance *pim,
const char *mesh_group_name);
+extern void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up);
+
/**
* Allocates a new mesh group data structure under PIM instance.
*/
@@ -330,49 +333,25 @@ void pim_msdp_peer_change_source(struct pim_msdp_peer *mp,
*/
void pim_msdp_peer_restart(struct pim_msdp_peer *mp);
-#else /* PIM_IPV == 6 */
-static inline void pim_msdp_init(struct pim_instance *pim,
- struct event_loop *master)
-{
-}
-
-static inline void pim_msdp_exit(struct pim_instance *pim)
-{
-}
-
-static inline void pim_msdp_i_am_rp_changed(struct pim_instance *pim)
-{
-}
-
-static inline void pim_msdp_up_join_state_changed(struct pim_instance *pim,
- struct pim_upstream *xg_up)
-{
-}
-
-static inline void pim_msdp_up_del(struct pim_instance *pim, pim_sgaddr *sg)
-{
-}
-
-static inline void pim_msdp_sa_local_update(struct pim_upstream *up)
-{
-}
-
-static inline void pim_msdp_sa_local_del(struct pim_instance *pim,
- pim_sgaddr *sg)
-{
-}
-
-static inline int pim_msdp_config_write(struct pim_instance *pim,
- struct vty *vty)
-{
- return 0;
-}
-
-static inline bool pim_msdp_peer_config_write(struct vty *vty,
- struct pim_instance *pim)
-{
- return false;
-}
-#endif /* PIM_IPV == 6 */
+/**
+ * Toggle MSDP functionality administrative state.
+ *
+ * \param pim PIM instance we want to shut down.
+ * \param state shutdown state.
+ */
+void pim_msdp_shutdown(struct pim_instance *pim, bool state);
+
+/**
+ * Get the originator ID for the SA RP field: the configured value if set, otherwise the RP address matching the group.
+ *
+ * \param[in] pim PIM instance that MSDP connection belongs to.
+ * \param[in] group Multicast group.
+ * \param[out] originator_id Originator output value.
+ */
+void pim_msdp_originator_id(struct pim_instance *pim, const struct prefix *group,
+ struct in_addr *originator_id);
+
+extern bool pim_msdp_log_neighbor_events(const struct pim_instance *pim);
+extern bool pim_msdp_log_sa_events(const struct pim_instance *pim);
#endif
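pim_msdp_originator_id() centralizes the RP-address selection that SA generation previously open-coded; a caller sketch matching pim_msdp_pkt_sa_gen() in pim_msdp_packet.c below:

	struct prefix group_all;
	struct in_addr rp;

	pim_get_all_mcast_group(&group_all);
	/* configured originator-id if set, otherwise the group's RP, else INADDR_ANY */
	pim_msdp_originator_id(pim, &group_all, &rp);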
diff --git a/pimd/pim_msdp_packet.c b/pimd/pim_msdp_packet.c
index 27f4966a1c..8c821cb5e5 100644
--- a/pimd/pim_msdp_packet.c
+++ b/pimd/pim_msdp_packet.c
@@ -367,53 +367,6 @@ static void pim_msdp_pkt_sa_fill_one(struct pim_msdp_sa *sa)
stream_put_ipv4(sa->pim->msdp.work_obuf, sa->sg.src.s_addr);
}
-static bool msdp_cisco_match(const struct filter *filter,
- const struct in_addr *source,
- const struct in_addr *group)
-{
- const struct filter_cisco *cfilter = &filter->u.cfilter;
- uint32_t source_addr;
- uint32_t group_addr;
-
- group_addr = group->s_addr & ~cfilter->mask_mask.s_addr;
-
- if (cfilter->extended) {
- source_addr = source->s_addr & ~cfilter->addr_mask.s_addr;
- if (group_addr == cfilter->mask.s_addr &&
- source_addr == cfilter->addr.s_addr)
- return true;
- } else if (group_addr == cfilter->addr.s_addr)
- return true;
-
- return false;
-}
-
-static enum filter_type msdp_access_list_apply(struct access_list *access,
- const struct in_addr *source,
- const struct in_addr *group)
-{
- struct filter *filter;
- struct prefix group_prefix;
-
- if (access == NULL)
- return FILTER_DENY;
-
- for (filter = access->head; filter; filter = filter->next) {
- if (filter->cisco) {
- if (msdp_cisco_match(filter, source, group))
- return filter->type;
- } else {
- group_prefix.family = AF_INET;
- group_prefix.prefixlen = IPV4_MAX_BITLEN;
- group_prefix.u.prefix4.s_addr = group->s_addr;
- if (access_list_apply(access, &group_prefix))
- return filter->type;
- }
- }
-
- return FILTER_DENY;
-}
-
bool msdp_peer_sa_filter(const struct pim_msdp_peer *mp,
const struct pim_msdp_sa *sa)
{
@@ -425,7 +378,7 @@ bool msdp_peer_sa_filter(const struct pim_msdp_peer *mp,
/* Find access list and test it. */
acl = access_list_lookup(AFI_IP, mp->acl_out);
- if (msdp_access_list_apply(acl, &sa->sg.src, &sa->sg.grp) == FILTER_DENY)
+ if (pim_access_list_apply(acl, &sa->sg.src, &sa->sg.grp) == FILTER_DENY)
return true;
return false;
@@ -456,7 +409,6 @@ static void pim_msdp_pkt_sa_gen(struct pim_instance *pim,
{
struct listnode *sanode;
struct pim_msdp_sa *sa;
- struct rp_info *rp_info;
struct prefix group_all;
struct in_addr rp;
int sa_count;
@@ -467,14 +419,8 @@ static void pim_msdp_pkt_sa_gen(struct pim_instance *pim,
zlog_debug(" sa gen %d", local_cnt);
}
- rp = pim->msdp.originator_id;
- if (pim_get_all_mcast_group(&group_all)) {
- rp_info = pim_rp_find_match_group(pim, &group_all);
- if (rp_info) {
- rp = rp_info->rp.rpf_addr;
- }
- }
-
+ pim_get_all_mcast_group(&group_all);
+ pim_msdp_originator_id(pim, &group_all, &rp);
local_cnt = pim_msdp_pkt_sa_fill_hdr(pim, local_cnt, rp);
for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
@@ -487,9 +433,8 @@ static void pim_msdp_pkt_sa_gen(struct pim_instance *pim,
}
if (msdp_peer_sa_filter(mp, sa)) {
- if (PIM_DEBUG_MSDP_EVENTS)
- zlog_debug("MSDP peer %pI4 filter SA out %s",
- &mp->peer, sa->sg_str);
+ if (pim_msdp_log_sa_events(pim))
+ zlog_info("MSDP peer %pI4 filter SA out %s", &mp->peer, sa->sg_str);
continue;
}
@@ -505,8 +450,7 @@ static void pim_msdp_pkt_sa_gen(struct pim_instance *pim,
zlog_debug(" sa gen for remainder %d",
local_cnt);
}
- local_cnt = pim_msdp_pkt_sa_fill_hdr(
- pim, local_cnt, rp);
+ local_cnt = pim_msdp_pkt_sa_fill_hdr(pim, local_cnt, rp);
}
}
@@ -551,9 +495,9 @@ void pim_msdp_pkt_sa_tx_one(struct pim_msdp_sa *sa)
pim_msdp_pkt_sa_fill_one(sa);
for (ALL_LIST_ELEMENTS_RO(sa->pim->msdp.peer_list, node, mp)) {
if (msdp_peer_sa_filter(mp, sa)) {
- if (PIM_DEBUG_MSDP_EVENTS)
- zlog_debug("MSDP peer %pI4 filter SA out %s",
- &mp->peer, sa->sg_str);
+ if (pim_msdp_log_sa_events(sa->pim))
+ zlog_info("MSDP peer %pI4 filter SA out %s", &mp->peer, sa->sg_str);
+
continue;
}
@@ -583,9 +527,10 @@ void pim_msdp_pkt_sa_tx_one_to_one_peer(struct pim_msdp_peer *mp,
/* Don't push it if filtered. */
if (msdp_peer_sa_filter(mp, &sa)) {
- if (PIM_DEBUG_MSDP_EVENTS)
- zlog_debug("MSDP peer %pI4 filter SA out (%pI4, %pI4)",
- &mp->peer, &sa.sg.src, &sa.sg.grp);
+ if (pim_msdp_log_sa_events(mp->pim))
+ zlog_info("MSDP peer %pI4 filter SA out (%pI4, %pI4)", &mp->peer,
+ &sa.sg.src, &sa.sg.grp);
+
return;
}
@@ -641,11 +586,10 @@ static void pim_msdp_pkt_sa_rx_one(struct pim_msdp_peer *mp, struct in_addr rp)
/* Filter incoming SA with configured access list. */
if (mp->acl_in) {
acl = access_list_lookup(AFI_IP, mp->acl_in);
- if (msdp_access_list_apply(acl, &sg.src, &sg.grp) ==
- FILTER_DENY) {
- if (PIM_DEBUG_MSDP_EVENTS)
- zlog_debug("MSDP peer %pI4 filter SA in (%pI4, %pI4)",
- &mp->peer, &sg.src, &sg.grp);
+ if (pim_access_list_apply(acl, &sg.src, &sg.grp) == FILTER_DENY) {
+ if (pim_msdp_log_sa_events(mp->pim))
+ zlog_info("MSDP peer %pI4 filter SA in (%pI4, %pI4)", &mp->peer,
+ &sg.src, &sg.grp);
return;
}
}
diff --git a/pimd/pim_msdp_socket.c b/pimd/pim_msdp_socket.c
index 2fb0bb87c7..b29993304d 100644
--- a/pimd/pim_msdp_socket.c
+++ b/pimd/pim_msdp_socket.c
@@ -49,6 +49,16 @@ static void pim_msdp_update_sock_send_buffer_size(int fd)
}
}
+static void pim_msdp_addr2su(union sockunion *su, struct in_addr addr)
+{
+ sockunion_init(su);
+ su->sin.sin_addr = addr;
+ su->sin.sin_family = AF_INET;
+#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
+ su->sin.sin_len = sizeof(struct sockaddr_in);
+#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
+}
+
/**
* Helper function to reduce code duplication.
*
@@ -64,7 +74,6 @@ static int _pim_msdp_sock_listen(const struct vrf *vrf,
int rv;
socklen_t socklen;
struct sockaddr_in sin = {};
- union sockunion su_peer = {};
sock = socket(AF_INET, SOCK_STREAM, 0);
if (sock == -1) {
@@ -117,7 +126,9 @@ static int _pim_msdp_sock_listen(const struct vrf *vrf,
/* Set MD5 authentication. */
if (mp && mp->auth_key) {
- su_peer = mp->su_peer;
+ union sockunion su_peer = {};
+
+ pim_msdp_addr2su(&su_peer, mp->peer);
frr_with_privs (&pimd_privs) {
sockopt_tcp_signature(sock, &su_peer, mp->auth_key);
}
@@ -349,6 +360,7 @@ int pim_msdp_sock_listen(struct pim_instance *pim)
int pim_msdp_sock_connect(struct pim_msdp_peer *mp)
{
int rc;
+ union sockunion su_peer = {}, su_local = {};
if (PIM_DEBUG_MSDP_INTERNAL) {
zlog_debug("MSDP peer %s attempt connect%s", mp->key_str,
@@ -366,8 +378,11 @@ int pim_msdp_sock_connect(struct pim_msdp_peer *mp)
pim_msdp_peer_stop_tcp_conn(mp, false /* chg_state */);
}
+ pim_msdp_addr2su(&su_peer, mp->peer);
+ pim_msdp_addr2su(&su_local, mp->local);
+
/* Make socket for the peer. */
- mp->fd = sockunion_socket(&mp->su_peer);
+ mp->fd = sockunion_socket(&su_peer);
if (mp->fd < 0) {
flog_err_sys(EC_LIB_SOCKET,
"pim_msdp_socket socket failure: %s",
@@ -402,7 +417,7 @@ int pim_msdp_sock_connect(struct pim_msdp_peer *mp)
sockopt_reuseport(mp->fd);
/* source bind */
- rc = sockunion_bind(mp->fd, &mp->su_local, 0, &mp->su_local);
+ rc = sockunion_bind(mp->fd, &su_local, 0, &su_local);
if (rc < 0) {
flog_err_sys(EC_LIB_SOCKET,
"pim_msdp_socket connect bind failure: %s",
@@ -421,12 +436,10 @@ int pim_msdp_sock_connect(struct pim_msdp_peer *mp)
/* Set authentication (if configured). */
if (mp->auth_key) {
frr_with_privs (&pimd_privs) {
- sockopt_tcp_signature(mp->fd, &mp->su_peer,
- mp->auth_key);
+ sockopt_tcp_signature(mp->fd, &su_peer, mp->auth_key);
}
}
/* Connect to the remote mp. */
- return (sockunion_connect(mp->fd, &mp->su_peer,
- htons(PIM_MSDP_TCP_PORT), 0));
+ return (sockunion_connect(mp->fd, &su_peer, htons(PIM_MSDP_TCP_PORT), 0));
}
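With su_local/su_peer removed from struct pim_msdp_peer, the sockunions are now built on the stack only where needed; the recurring pattern, as a sketch:

	union sockunion su_peer = {};

	pim_msdp_addr2su(&su_peer, mp->peer);
	frr_with_privs (&pimd_privs) {
		sockopt_tcp_signature(sock, &su_peer, mp->auth_key);
	}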
diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c
index 1dc66be82d..b55541b810 100644
--- a/pimd/pim_nb.c
+++ b/pimd/pim_nb.c
@@ -130,6 +130,31 @@ const struct frr_yang_module_info frr_pim_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/log-neighbor-events",
+ .cbs = {
+ .modify = pim_msdp_log_neighbor_events_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/log-sa-events",
+ .cbs = {
+ .modify = pim_msdp_log_sa_events_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/originator-id",
+ .cbs = {
+ .modify = pim_msdp_originator_id_modify,
+ .destroy = pim_msdp_originator_id_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/shutdown",
+ .cbs = {
+ .modify = pim_msdp_shutdown_modify,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups",
.cbs = {
.create = pim_msdp_mesh_group_create,
@@ -191,6 +216,13 @@ const struct frr_yang_module_info frr_pim_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer/sa-limit",
+ .cbs = {
+ .modify = pim_msdp_peer_sa_limit_modify,
+ .destroy = pim_msdp_peer_sa_limit_destroy,
+ }
+ },
+ {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag",
.cbs = {
.create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create,
@@ -232,6 +264,12 @@ const struct frr_yang_module_info frr_pim_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mcast-rpf-lookup",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify,
+ }
+ },
+ {
.xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family",
.cbs = {
.create = lib_interface_pim_address_family_create,
@@ -335,6 +373,13 @@ const struct frr_yang_module_info frr_pim_info = {
}
},
{
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/multicast-boundary-acl",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_multicast_boundary_acl_modify,
+ .destroy = lib_interface_pim_address_family_multicast_boundary_acl_destroy,
+ }
+ },
+ {
.xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/mroute",
.cbs = {
.create = lib_interface_pim_address_family_mroute_create,
@@ -448,6 +493,58 @@ const struct frr_yang_module_info frr_pim_rp_info = {
}
},
{
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/send-rp-discovery",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_send_rp_discovery_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-scope",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_scope_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-interval",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_interval_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-holdtime",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_holdtime_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/address",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/interface",
+ .cbs = {
+ .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_modify,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/if-loopback",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/if-any",
+ .cbs = {
+ .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_create,
+ .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy,
+ }
+ },
+ {
.xpath = NULL,
},
}
diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h
index b45af3d589..a5ef6ad60a 100644
--- a/pimd/pim_nb.h
+++ b/pimd/pim_nb.h
@@ -54,6 +54,11 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ss
int pim_msdp_hold_time_modify(struct nb_cb_modify_args *args);
int pim_msdp_keep_alive_modify(struct nb_cb_modify_args *args);
int pim_msdp_connection_retry_modify(struct nb_cb_modify_args *args);
+int pim_msdp_log_neighbor_events_modify(struct nb_cb_modify_args *args);
+int pim_msdp_log_sa_events_modify(struct nb_cb_modify_args *args);
+int pim_msdp_originator_id_modify(struct nb_cb_modify_args *args);
+int pim_msdp_originator_id_destroy(struct nb_cb_destroy_args *args);
+int pim_msdp_shutdown_modify(struct nb_cb_modify_args *args);
int pim_msdp_mesh_group_create(struct nb_cb_create_args *args);
int pim_msdp_mesh_group_destroy(struct nb_cb_destroy_args *args);
int pim_msdp_mesh_group_members_create(struct nb_cb_create_args *args);
@@ -73,6 +78,8 @@ int pim_msdp_peer_sa_filter_out_destroy(struct nb_cb_destroy_args *args);
int pim_msdp_peer_authentication_type_modify(struct nb_cb_modify_args *args);
int pim_msdp_peer_authentication_key_modify(struct nb_cb_modify_args *args);
int pim_msdp_peer_authentication_key_destroy(struct nb_cb_destroy_args *args);
+int pim_msdp_peer_sa_limit_modify(struct nb_cb_modify_args *args);
+int pim_msdp_peer_sa_limit_destroy(struct nb_cb_destroy_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create(
struct nb_cb_create_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_destroy(
@@ -95,6 +102,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_re
struct nb_cb_modify_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_register_accept_list_destroy(
struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify(
+ struct nb_cb_modify_args *args);
int lib_interface_pim_address_family_dr_priority_modify(
struct nb_cb_modify_args *args);
int lib_interface_pim_address_family_create(struct nb_cb_create_args *args);
@@ -137,6 +146,8 @@ int lib_interface_pim_address_family_multicast_boundary_oil_modify(
struct nb_cb_modify_args *args);
int lib_interface_pim_address_family_multicast_boundary_oil_destroy(
struct nb_cb_destroy_args *args);
+int lib_interface_pim_address_family_multicast_boundary_acl_modify(struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_multicast_boundary_acl_destroy(struct nb_cb_destroy_args *args);
int lib_interface_pim_address_family_mroute_create(
struct nb_cb_create_args *args);
int lib_interface_pim_address_family_mroute_destroy(
@@ -191,6 +202,20 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
struct nb_cb_modify_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_destroy(
struct nb_cb_destroy_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_send_rp_discovery_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_scope_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_interval_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_holdtime_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_create(
+ struct nb_cb_create_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_modify(
+ struct nb_cb_modify_args *args);
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy(
+ struct nb_cb_destroy_args *args);
/* frr-cand-bsr */
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_create(
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 87338f37c0..b55d08bab9 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -41,12 +41,24 @@ int funcname(struct argtype *args) \
} \
MACRO_REQUIRE_SEMICOLON()
+#define pim6_autorp_err(funcname, argtype) \
+ int funcname(struct argtype *args) \
+ { \
+ snprintf(args->errmsg, args->errmsg_len, \
+ "Trying to configure AutoRP in pim6d. " \
+ "AutoRP does not exist for IPv6."); \
+ return NB_ERR_VALIDATION; \
+ } \
+ MACRO_REQUIRE_SEMICOLON()
+
#define yang_dnode_get_pimaddr yang_dnode_get_ipv6
#else /* PIM_IPV != 6 */
#define pim6_msdp_err(funcname, argtype) \
MACRO_REQUIRE_SEMICOLON()
+#define pim6_autorp_err(funcname, argtype) MACRO_REQUIRE_SEMICOLON()
+
#define yang_dnode_get_pimaddr yang_dnode_get_ipv4
#endif /* PIM_IPV != 6 */
@@ -145,7 +157,7 @@ static int pim_cmd_interface_add(struct interface *ifp)
pim_ifp->pim_enable = true;
pim_if_addr_add_all(ifp);
- pim_upstream_nh_if_update(pim_ifp->pim, ifp);
+ pim_nht_upstream_if_update(pim_ifp->pim, ifp);
pim_if_membership_refresh(ifp);
pim_if_create_pimreg(pim_ifp->pim);
@@ -490,6 +502,26 @@ static void change_query_max_response_time(struct interface *ifp,
#endif /* PIM_IPV == 4 */
}
+static void yang_addrsel(struct cand_addrsel *addrsel, const struct lyd_node *node)
+{
+ memset(addrsel->cfg_ifname, 0, sizeof(addrsel->cfg_ifname));
+ addrsel->cfg_addr = PIMADDR_ANY;
+
+ if (yang_dnode_exists(node, "if-any")) {
+ addrsel->cfg_mode = CAND_ADDR_ANY;
+ } else if (yang_dnode_exists(node, "address")) {
+ addrsel->cfg_mode = CAND_ADDR_EXPLICIT;
+ yang_dnode_get_pimaddr(&addrsel->cfg_addr, node, "address");
+ } else if (yang_dnode_exists(node, "interface")) {
+ addrsel->cfg_mode = CAND_ADDR_IFACE;
+ strlcpy(addrsel->cfg_ifname, yang_dnode_get_string(node, "interface"),
+ sizeof(addrsel->cfg_ifname));
+ } else if (yang_dnode_exists(node, "if-loopback")) {
+ addrsel->cfg_mode = CAND_ADDR_LO;
+ }
+ addrsel->cfg_enable = true;
+}
+
int routing_control_plane_protocols_name_validate(
struct nb_cb_create_args *args)
{
@@ -976,6 +1008,40 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ss
return NB_OK;
}
+pim6_msdp_err(pim_msdp_hold_time_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_keep_alive_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_connection_retry_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_mesh_group_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_mesh_group_create, nb_cb_create_args);
+pim6_msdp_err(pim_msdp_mesh_group_source_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_mesh_group_source_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_mesh_group_members_create, nb_cb_create_args);
+pim6_msdp_err(pim_msdp_mesh_group_members_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_peer_sa_filter_in_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_peer_sa_filter_in_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_peer_sa_filter_out_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_peer_sa_filter_out_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_peer_sa_limit_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_peer_sa_limit_destroy, nb_cb_destroy_args);
+pim6_msdp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify,
+ nb_cb_modify_args);
+pim6_msdp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_destroy,
+ nb_cb_destroy_args);
+pim6_msdp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_create,
+ nb_cb_create_args);
+pim6_msdp_err(pim_msdp_peer_authentication_type_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_peer_authentication_key_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_peer_authentication_key_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_log_neighbor_events_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_log_sa_events_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_originator_id_modify, nb_cb_modify_args);
+pim6_msdp_err(pim_msdp_originator_id_destroy, nb_cb_destroy_args);
+pim6_msdp_err(pim_msdp_shutdown_modify, nb_cb_modify_args);
+
+#if PIM_IPV != 6
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/hold-time
@@ -1049,23 +1115,134 @@ int pim_msdp_connection_retry_modify(struct nb_cb_modify_args *args)
return NB_OK;
}
-pim6_msdp_err(pim_msdp_mesh_group_destroy, nb_cb_destroy_args);
-pim6_msdp_err(pim_msdp_mesh_group_create, nb_cb_create_args);
-pim6_msdp_err(pim_msdp_mesh_group_source_modify, nb_cb_modify_args);
-pim6_msdp_err(pim_msdp_mesh_group_source_destroy, nb_cb_destroy_args);
-pim6_msdp_err(pim_msdp_mesh_group_members_create, nb_cb_create_args);
-pim6_msdp_err(pim_msdp_mesh_group_members_destroy, nb_cb_destroy_args);
-pim6_msdp_err(routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify,
- nb_cb_modify_args);
-pim6_msdp_err(routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_destroy,
- nb_cb_destroy_args);
-pim6_msdp_err(routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_create,
- nb_cb_create_args);
-pim6_msdp_err(pim_msdp_peer_authentication_type_modify, nb_cb_modify_args);
-pim6_msdp_err(pim_msdp_peer_authentication_key_modify, nb_cb_modify_args);
-pim6_msdp_err(pim_msdp_peer_authentication_key_destroy, nb_cb_destroy_args);
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/log-neighbor-events
+ */
+int pim_msdp_log_neighbor_events_modify(struct nb_cb_modify_args *args)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ if (yang_dnode_get_bool(args->dnode, NULL))
+ SET_FLAG(pim->log_flags, PIM_MSDP_LOG_NEIGHBOR_EVENTS);
+ else
+ UNSET_FLAG(pim->log_flags, PIM_MSDP_LOG_NEIGHBOR_EVENTS);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/log-sa-events
+ */
+int pim_msdp_log_sa_events_modify(struct nb_cb_modify_args *args)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ if (yang_dnode_get_bool(args->dnode, NULL))
+ SET_FLAG(pim->log_flags, PIM_MSDP_LOG_SA_EVENTS);
+ else
+ UNSET_FLAG(pim->log_flags, PIM_MSDP_LOG_SA_EVENTS);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/originator-id
+ */
+int pim_msdp_originator_id_modify(struct nb_cb_modify_args *args)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ yang_dnode_get_ipv4(&pim->msdp.originator_id, args->dnode, NULL);
+ break;
+ }
+
+ return NB_OK;
+}
+
+int pim_msdp_originator_id_destroy(struct nb_cb_destroy_args *args)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ pim->msdp.originator_id.s_addr = INADDR_ANY;
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/shutdown
+ */
+int pim_msdp_shutdown_modify(struct nb_cb_modify_args *args)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ pim_msdp_shutdown(pim, yang_dnode_get_bool(args->dnode, NULL));
+ break;
+ }
+
+ return NB_OK;
+}
-#if PIM_IPV != 6
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups
@@ -1370,7 +1547,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ms
return NB_OK;
}
-#endif /* PIM_IPV != 6 */
/*
* XPath:
@@ -1461,6 +1637,49 @@ int pim_msdp_peer_sa_filter_out_destroy(struct nb_cb_destroy_args *args)
}
/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer/sa-limit
+ */
+int pim_msdp_peer_sa_limit_modify(struct nb_cb_modify_args *args)
+{
+ struct pim_msdp_peer *mp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ case NB_EV_APPLY:
+ mp = nb_running_get_entry(args->dnode, NULL, true);
+ mp->sa_limit = yang_dnode_get_uint32(args->dnode, NULL);
+ break;
+ }
+
+ return NB_OK;
+}
+
+int pim_msdp_peer_sa_limit_destroy(struct nb_cb_destroy_args *args)
+{
+ struct pim_msdp_peer *mp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ /* NOTHING */
+ break;
+ case NB_EV_APPLY:
+ mp = nb_running_get_entry(args->dnode, NULL, true);
+ mp->sa_limit = 0;
+ break;
+ }
+
+ return NB_OK;
+}
+#endif /* PIM_IPV != 6 */
+
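The two sa-limit callbacks above only store or clear the configured cap in mp->sa_limit; enforcement happens in the MSDP SA-processing path elsewhere in this series (pim_msdp_packet.c). A hedged sketch of the kind of check involved, where the sa_cnt counter name is an assumption:

    /* Illustrative only: accept a new SA entry from a peer unless it is
     * over its configured limit (sa_limit == 0 means unlimited).
     */
    static bool msdp_peer_sa_within_limit(const struct pim_msdp_peer *mp)
    {
    	if (mp->sa_limit == 0)
    		return true;
    	return mp->sa_cnt < mp->sa_limit;
    }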
+/*
* XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag
*/
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create(
@@ -1674,6 +1893,39 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_re
}
/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mcast-rpf-lookup
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mcast_rpf_lookup_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ enum pim_rpf_lookup_mode old_mode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ old_mode = pim->rpf_mode;
+ pim->rpf_mode = yang_dnode_get_enum(args->dnode, NULL);
+
+ if (pim->rpf_mode != old_mode &&
+ /* MCAST_MIX_MRIB_FIRST is the default if not configured */
+ (old_mode != MCAST_NO_CONFIG && pim->rpf_mode != MCAST_MIX_MRIB_FIRST)) {
+ pim_nht_mode_changed(pim);
+ }
+
+ break;
+ }
+
+ return NB_OK;
+}
+
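As written, the guard calls pim_nht_mode_changed(), a full re-resolve of tracked nexthops, only when both sides of the transition differ from the default behavior (MCAST_NO_CONFIG is equivalent to MCAST_MIX_MRIB_FIRST). A small truth table of the condition above:

    /* old_mode          new rpf_mode           re-resolve?
     * MCAST_NO_CONFIG   MCAST_MIX_MRIB_FIRST   no  (default -> default)
     * MCAST_NO_CONFIG   MCAST_MRIB_ONLY        no  (old == MCAST_NO_CONFIG)
     * MCAST_URIB_ONLY   MCAST_MIX_MRIB_FIRST   no  (new == MCAST_MIX_MRIB_FIRST)
     * MCAST_URIB_ONLY   MCAST_MRIB_ONLY        yes
     */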
+/*
* XPath: /frr-interface:lib/interface/frr-pim:pim/address-family
*/
int lib_interface_pim_address_family_create(struct nb_cb_create_args *args)
@@ -2271,7 +2523,6 @@ int lib_interface_pim_address_family_multicast_boundary_oil_modify(
{
struct interface *ifp;
struct pim_interface *pim_ifp;
- const char *plist;
const struct lyd_node *if_dnode;
switch (args->event) {
@@ -2279,7 +2530,12 @@ int lib_interface_pim_address_family_multicast_boundary_oil_modify(
if_dnode = yang_dnode_get_parent(args->dnode, "interface");
if (!is_pim_interface(if_dnode)) {
snprintf(args->errmsg, args->errmsg_len,
- "Pim not enabled on this interface");
+ "%% Enable PIM and/or IGMP on this interface first");
+ return NB_ERR_VALIDATION;
+ }
+ if (!prefix_list_lookup(AFI_IP, yang_dnode_get_string(args->dnode, NULL))) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Specified prefix-list not found");
return NB_ERR_VALIDATION;
}
break;
@@ -2289,13 +2545,8 @@ int lib_interface_pim_address_family_multicast_boundary_oil_modify(
case NB_EV_APPLY:
ifp = nb_running_get_entry(args->dnode, NULL, true);
pim_ifp = ifp->info;
- plist = yang_dnode_get_string(args->dnode, NULL);
-
- if (pim_ifp->boundary_oil_plist)
- XFREE(MTYPE_PIM_INTERFACE, pim_ifp->boundary_oil_plist);
-
pim_ifp->boundary_oil_plist =
- XSTRDUP(MTYPE_PIM_INTERFACE, plist);
+ prefix_list_lookup(AFI_IP, yang_dnode_get_string(args->dnode, NULL));
break;
}
@@ -2325,8 +2576,72 @@ int lib_interface_pim_address_family_multicast_boundary_oil_destroy(
case NB_EV_APPLY:
ifp = nb_running_get_entry(args->dnode, NULL, true);
pim_ifp = ifp->info;
- if (pim_ifp->boundary_oil_plist)
- XFREE(MTYPE_PIM_INTERFACE, pim_ifp->boundary_oil_plist);
+ pim_ifp->boundary_oil_plist = NULL;
+ break;
+ }
+
+ return NB_OK;
+}
+
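With this change boundary_oil_plist holds a struct prefix_list pointer resolved at commit time instead of a copied name string, which is why the destroy path no longer needs XFREE. A hedged sketch of how a boundary check can consume it, assuming lib/plist.h's prefix_list_apply() semantics; the helper name is hypothetical:

    /* Illustrative: a group is outside the boundary if the stored
     * prefix-list explicitly denies it.
     */
    static bool group_hits_oil_boundary(const struct pim_interface *pim_ifp,
    				    const struct prefix *grp)
    {
    	if (!pim_ifp->boundary_oil_plist)
    		return false;
    	return prefix_list_apply(pim_ifp->boundary_oil_plist, grp) == PREFIX_DENY;
    }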
+/*
+ * XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/multicast-boundary-acl
+ */
+int lib_interface_pim_address_family_multicast_boundary_acl_modify(struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Enable PIM and/or IGMP on this interface first");
+ return NB_ERR_VALIDATION;
+ }
+ if (!access_list_lookup(AFI_IP, yang_dnode_get_string(args->dnode, NULL))) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Specified access-list not found");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->boundary_acl =
+ access_list_lookup(AFI_IP, yang_dnode_get_string(args->dnode, NULL));
+ break;
+ }
+
+ return NB_OK;
+}
+
+int lib_interface_pim_address_family_multicast_boundary_acl_destroy(struct nb_cb_destroy_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ const struct lyd_node *if_dnode;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if_dnode = yang_dnode_get_parent(args->dnode, "interface");
+ if (!is_pim_interface(if_dnode)) {
+ snprintf(args->errmsg, args->errmsg_len,
+ "%% Enable PIM and/or IGMP on this interface first");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->boundary_acl = NULL;
break;
}
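The new boundary_acl follows the same resolve-at-commit pattern, this time with lib/filter.h's access_list_apply(). A sketch of a combined check; the helper and its exact semantics are assumptions:

    /* Illustrative: a group is blocked if either boundary filter denies it. */
    static bool group_is_bounded(const struct pim_interface *pim_ifp,
    			     const struct prefix *grp)
    {
    	if (pim_ifp->boundary_oil_plist &&
    	    prefix_list_apply(pim_ifp->boundary_oil_plist, grp) == PREFIX_DENY)
    		return true;
    	if (pim_ifp->boundary_acl &&
    	    access_list_apply(pim_ifp->boundary_acl, grp) == FILTER_DENY)
    		return true;
    	return false;
    }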
@@ -2430,9 +2745,8 @@ int lib_interface_pim_address_family_mroute_oif_modify(
#ifdef PIM_ENFORCE_LOOPFREE_MFC
iif = nb_running_get_entry(args->dnode, NULL, false);
- if (!iif) {
+ if (!iif)
return NB_OK;
- }
pim_iifp = iif->info;
pim = pim_iifp->pim;
@@ -2543,13 +2857,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
else if (yang_dnode_get(args->dnode, "prefix-list")) {
plist = yang_dnode_get_string(args->dnode,
"./prefix-list");
- if (!pim_get_all_mcast_group(&group)) {
- flog_err(
- EC_LIB_DEVELOPMENT,
- "Unable to convert 224.0.0.0/4 to prefix");
- return NB_ERR_INCONSISTENCY;
- }
-
+ pim_get_all_mcast_group(&group);
result = pim_no_rp_cmd_worker(pim, rp_addr, group,
plist, args->errmsg,
args->errmsg_len);
@@ -2641,11 +2949,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
pim = vrf->info;
plist = yang_dnode_get_string(args->dnode, NULL);
yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
- if (!pim_get_all_mcast_group(&group)) {
- flog_err(EC_LIB_DEVELOPMENT,
- "Unable to convert 224.0.0.0/4 to prefix");
- return NB_ERR_INCONSISTENCY;
- }
+ pim_get_all_mcast_group(&group);
return pim_rp_cmd_worker(pim, rp_addr, group, plist,
args->errmsg, args->errmsg_len);
}
@@ -2672,11 +2976,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
pim = vrf->info;
yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
plist = yang_dnode_get_string(args->dnode, NULL);
- if (!pim_get_all_mcast_group(&group)) {
- flog_err(EC_LIB_DEVELOPMENT,
- "Unable to convert 224.0.0.0/4 to prefix");
- return NB_ERR_INCONSISTENCY;
- }
+ pim_get_all_mcast_group(&group);
return pim_no_rp_cmd_worker(pim, rp_addr, group, plist,
args->errmsg, args->errmsg_len);
break;
@@ -2794,13 +3094,77 @@ int pim_embedded_rp_maximum_rps_modify(struct nb_cb_modify_args *args)
}
}
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_destroy,
+ nb_cb_destroy_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_scope_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_scope_destroy,
+ nb_cb_destroy_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_interval_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_interval_destroy,
+ nb_cb_destroy_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_holdtime_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_holdtime_destroy,
+ nb_cb_destroy_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_create,
+ nb_cb_create_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_destroy,
+ nb_cb_destroy_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_group_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_group_destroy,
+ nb_cb_destroy_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_destroy,
+ nb_cb_destroy_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_send_rp_discovery_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_scope_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_interval_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_holdtime_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_create,
+ nb_cb_create_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_modify,
+ nb_cb_modify_args);
+pim6_autorp_err(
+ routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy,
+ nb_cb_destroy_args);
+
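All of the AutoRP callbacks above are stubbed out for pim6d because AutoRP is an IPv4-only protocol: it rides on two well-known IPv4 multicast groups. For reference (the macro names here are illustrative; the addresses are the protocol's well-known groups):

    #define AUTORP_ANNOUNCEMENT_GRP "224.0.1.39" /* candidate RPs announce here */
    #define AUTORP_DISCOVERY_GRP    "224.0.1.40" /* mapping agent discovery messages */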
+#if PIM_IPV == 4
/*
* XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/discovery-enabled
*/
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
bool enabled;
@@ -2820,14 +3184,12 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
pim_autorp_stop_discovery(pim);
break;
}
-#endif
return NB_OK;
}
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_destroy(
struct nb_cb_destroy_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
bool enabled;
@@ -2846,7 +3208,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
pim_autorp_start_discovery(pim);
break;
}
-#endif
return NB_OK;
}
@@ -2857,7 +3218,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_scope_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
uint8_t scope;
@@ -2872,15 +3232,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
pim = vrf->info;
scope = yang_dnode_get_uint8(args->dnode, NULL);
pim_autorp_announce_scope(pim, scope);
+ break;
}
-#endif
return NB_OK;
}
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_scope_destroy(
struct nb_cb_destroy_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
@@ -2893,8 +3252,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
pim_autorp_announce_scope(pim, 0);
+ break;
}
-#endif
return NB_OK;
}
@@ -2905,7 +3264,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_interval_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
uint16_t interval;
@@ -2920,15 +3278,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
pim = vrf->info;
interval = yang_dnode_get_uint16(args->dnode, NULL);
pim_autorp_announce_interval(pim, interval);
+ break;
}
-#endif
return NB_OK;
}
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_interval_destroy(
struct nb_cb_destroy_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
@@ -2941,8 +3298,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
vrf = nb_running_get_entry(args->dnode, NULL, true);
pim = vrf->info;
pim_autorp_announce_interval(pim, 0);
+ break;
}
-#endif
return NB_OK;
}
@@ -2953,7 +3310,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_holdtime_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
uint16_t holdtime;
@@ -2968,15 +3324,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
pim = vrf->info;
holdtime = yang_dnode_get_uint16(args->dnode, NULL);
pim_autorp_announce_holdtime(pim, holdtime);
+ break;
}
-#endif
return NB_OK;
}
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_holdtime_destroy(
struct nb_cb_destroy_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
@@ -2990,8 +3345,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
pim = vrf->info;
/* 0 is a valid value, so -1 indicates deleting (go back to default) */
pim_autorp_announce_holdtime(pim, -1);
+ break;
}
-#endif
return NB_OK;
}
@@ -3002,7 +3357,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_create(
struct nb_cb_create_args *args)
{
-#if PIM_IPV == 4
switch (args->event) {
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
@@ -3010,14 +3364,12 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
case NB_EV_APPLY:
break;
}
-#endif
return NB_OK;
}
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_destroy(
struct nb_cb_destroy_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
pim_addr rp_addr;
@@ -3035,7 +3387,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
return NB_ERR_INCONSISTENCY;
break;
}
-#endif
return NB_OK;
}
@@ -3046,7 +3397,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_group_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
struct prefix group;
@@ -3064,15 +3414,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
yang_dnode_get_prefix(&group, args->dnode, NULL);
apply_mask(&group);
pim_autorp_add_candidate_rp_group(pim, rp_addr, group);
+ break;
}
-#endif
return NB_OK;
}
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_group_destroy(
struct nb_cb_destroy_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
struct prefix group;
@@ -3091,8 +3440,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
apply_mask(&group);
if (!pim_autorp_rm_candidate_rp_group(pim, rp_addr, group))
return NB_ERR_INCONSISTENCY;
+ break;
}
-#endif
return NB_OK;
}
@@ -3103,7 +3452,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
pim_addr rp_addr;
@@ -3120,15 +3468,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
plist = yang_dnode_get_string(args->dnode, NULL);
yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address");
pim_autorp_add_candidate_rp_plist(pim, rp_addr, plist);
+ break;
}
-#endif
return NB_OK;
}
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_destroy(
struct nb_cb_destroy_args *args)
{
-#if PIM_IPV == 4
struct vrf *vrf;
struct pim_instance *pim;
pim_addr rp_addr;
@@ -3148,31 +3495,211 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp
return NB_ERR_INCONSISTENCY;
break;
}
-#endif
return NB_OK;
}
-static void yang_addrsel(struct cand_addrsel *addrsel,
- const struct lyd_node *node)
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/send-rp-discovery
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_send_rp_discovery_modify(
+ struct nb_cb_modify_args *args)
{
- memset(addrsel->cfg_ifname, 0, sizeof(addrsel->cfg_ifname));
- addrsel->cfg_addr = PIMADDR_ANY;
+ struct vrf *vrf;
+ struct pim_instance *pim;
- if (yang_dnode_exists(node, "if-any")) {
- addrsel->cfg_mode = CAND_ADDR_ANY;
- } else if (yang_dnode_exists(node, "address")) {
- addrsel->cfg_mode = CAND_ADDR_EXPLICIT;
- yang_dnode_get_pimaddr(&addrsel->cfg_addr, node, "address");
- } else if (yang_dnode_exists(node, "interface")) {
- addrsel->cfg_mode = CAND_ADDR_IFACE;
- strlcpy(addrsel->cfg_ifname,
- yang_dnode_get_string(node, "interface"),
- sizeof(addrsel->cfg_ifname));
- } else if (yang_dnode_exists(node, "if-loopback")) {
- addrsel->cfg_mode = CAND_ADDR_LO;
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ if (pim && pim->autorp) {
+ pim->autorp->send_rp_discovery = yang_dnode_get_bool(args->dnode, NULL);
+ pim_autorp_send_discovery_apply(pim->autorp);
+ } else
+ return NB_ERR_INCONSISTENCY;
+ break;
}
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-scope
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_scope_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ if (pim && pim->autorp)
+ pim->autorp->discovery_scope = yang_dnode_get_uint8(args->dnode, NULL);
+ else
+ return NB_ERR_INCONSISTENCY;
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-interval
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_interval_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ if (pim && pim->autorp)
+ pim->autorp->discovery_interval = yang_dnode_get_uint16(args->dnode, NULL);
+ else
+ return NB_ERR_INCONSISTENCY;
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-holdtime
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_holdtime_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ if (pim && pim->autorp)
+ pim->autorp->discovery_holdtime = yang_dnode_get_uint16(args->dnode, NULL);
+ else
+ return NB_ERR_INCONSISTENCY;
+ break;
+ }
+
+ return NB_OK;
+}
+
+static int pim_autorp_mapping_agent_addrsel(struct pim_autorp *autorp,
+ const struct lyd_node *mapping_agent_node,
+ struct vrf *vrf)
+{
+ yang_addrsel(&autorp->mapping_agent_addrsel, mapping_agent_node);
+ if (cand_addrsel_update(&autorp->mapping_agent_addrsel, vrf))
+ pim_autorp_send_discovery_apply(autorp);
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/address
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/interface
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/if-loopback
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/if-any
+ */
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_create(
+ struct nb_cb_create_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ const struct lyd_node *mapping_agent_node;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ mapping_agent_node = yang_dnode_get_parent(args->dnode, "mapping-agent");
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ if (pim && pim->autorp)
+ return pim_autorp_mapping_agent_addrsel(pim->autorp, mapping_agent_node,
+ vrf);
+ else
+ return NB_ERR_INCONSISTENCY;
+ break;
+ }
+ return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+ const struct lyd_node *mapping_agent_node;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ mapping_agent_node = yang_dnode_get_parent(args->dnode, "mapping-agent");
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ if (pim && pim->autorp)
+ return pim_autorp_mapping_agent_addrsel(pim->autorp, mapping_agent_node,
+ vrf);
+ else
+ return NB_ERR_INCONSISTENCY;
+ break;
+ }
+ return NB_OK;
+}
+
+int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ struct vrf *vrf;
+ struct pim_instance *pim;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ if (pim && pim->autorp)
+ pim->autorp->mapping_agent_addrsel.cfg_enable = false;
+ else
+ return NB_ERR_INCONSISTENCY;
+ break;
+ }
+
+ return NB_OK;
}
+#endif /* PIM_IPV == 4 (for AutoRP) */
static int candidate_bsr_addrsel(struct bsm_scope *scope,
const struct lyd_node *cand_bsr_node)
diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c
index f2dbfa9765..00ab46b4cd 100644
--- a/pimd/pim_nht.c
+++ b/pimd/pim_nht.c
@@ -38,118 +38,267 @@
 * pim_sendmsg_zebra_rnh -- Format and send a nexthop register/unregister
* command to Zebra.
*/
-void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
- struct pim_nexthop_cache *pnc, int command)
+static void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient, pim_addr addr,
+ int command)
{
struct prefix p;
int ret;
- pim_addr_to_prefix(&p, pnc->rpf.rpf_addr);
- ret = zclient_send_rnh(zclient, command, &p, SAFI_UNICAST, false, false,
- pim->vrf->vrf_id);
+ pim_addr_to_prefix(&p, addr);
+
+ /* Register to track nexthops from the MRIB */
+ ret = zclient_send_rnh(zclient, command, &p, SAFI_MULTICAST, false, false, pim->vrf->vrf_id);
+ if (ret == ZCLIENT_SEND_FAILURE)
+ zlog_warn(
+ "sendmsg_nexthop: zclient_send_message() failed registering MRIB tracking");
+
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: MRIB NHT %sregistered addr %pFX(%s) with Zebra ret:%d ", __func__,
+ (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p, pim->vrf->name,
+ ret);
+
+ /* Also register to track nexthops from the URIB */
+ ret = zclient_send_rnh(zclient, command, &p, SAFI_UNICAST, false, false, pim->vrf->vrf_id);
if (ret == ZCLIENT_SEND_FAILURE)
- zlog_warn("sendmsg_nexthop: zclient_send_message() failed");
+ zlog_warn(
+ "sendmsg_nexthop: zclient_send_message() failed registering URIB tracking");
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: NHT %sregistered addr %pFX(%s) with Zebra ret:%d ",
- __func__,
- (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p,
- pim->vrf->name, ret);
+ zlog_debug("%s: URIB NHT %sregistered addr %pFX(%s) with Zebra ret:%d ", __func__,
+ (command == ZEBRA_NEXTHOP_REGISTER) ? " " : "de", &p, pim->vrf->name,
+ ret);
return;
}
-struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
- struct pim_rpf *rpf)
+static struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim, pim_addr addr)
{
struct pim_nexthop_cache *pnc = NULL;
struct pim_nexthop_cache lookup;
- lookup.rpf.rpf_addr = rpf->rpf_addr;
- pnc = hash_lookup(pim->rpf_hash, &lookup);
+ lookup.addr = addr;
+ pnc = hash_lookup(pim->nht_hash, &lookup);
return pnc;
}
-static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim,
- struct pim_rpf *rpf_addr)
+static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim, pim_addr addr)
{
struct pim_nexthop_cache *pnc;
char hash_name[64];
- pnc = XCALLOC(MTYPE_PIM_NEXTHOP_CACHE,
- sizeof(struct pim_nexthop_cache));
- pnc->rpf.rpf_addr = rpf_addr->rpf_addr;
+ /* This function is only ever called if we are unable to find an entry, so
+ * the hash_get should always add a new entry
+ */
+ pnc = XCALLOC(MTYPE_PIM_NEXTHOP_CACHE, sizeof(struct pim_nexthop_cache));
+ pnc->addr = addr;
- pnc = hash_get(pim->rpf_hash, pnc, hash_alloc_intern);
+ pnc = hash_get(pim->nht_hash, pnc, hash_alloc_intern);
pnc->rp_list = list_new();
pnc->rp_list->cmp = pim_rp_list_cmp;
- snprintfrr(hash_name, sizeof(hash_name), "PNC %pPA(%s) Upstream Hash",
- &pnc->rpf.rpf_addr, pim->vrf->name);
- pnc->upstream_hash = hash_create_size(8192, pim_upstream_hash_key,
- pim_upstream_equal, hash_name);
+ snprintfrr(hash_name, sizeof(hash_name), "PNC %pPA(%s) Upstream Hash", &pnc->addr,
+ pim->vrf->name);
+ pnc->upstream_hash = hash_create_size(32, pim_upstream_hash_key, pim_upstream_equal,
+ hash_name);
return pnc;
}
-static struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim,
- pim_addr addr)
+static bool pim_nht_pnc_has_answer(struct pim_instance *pim, struct pim_nexthop_cache *pnc)
+{
+ switch (pim->rpf_mode) {
+ case MCAST_MRIB_ONLY:
+ return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_ANSWER_RECEIVED);
+
+ case MCAST_URIB_ONLY:
+ return CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_ANSWER_RECEIVED);
+
+ case MCAST_MIX_MRIB_FIRST:
+ case MCAST_NO_CONFIG:
+ case MCAST_MIX_DISTANCE:
+ case MCAST_MIX_PFXLEN:
+ /* This check determines whether we've received an answer necessary to make an NH decision.
+ * For the mixed modes, where we may look up from either MRIB or URIB, require an answer
+ * for both tables.
+ */
+ return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_ANSWER_RECEIVED) &&
+ CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_ANSWER_RECEIVED);
+
+ default:
+ break;
+ }
+ return false;
+}
+
+static struct pim_nexthop_cache_rib *pim_pnc_get_rib(struct pim_instance *pim,
+ struct pim_nexthop_cache *pnc)
+{
+ struct pim_nexthop_cache_rib *pnc_rib = NULL;
+
+ if (pim->rpf_mode == MCAST_MRIB_ONLY)
+ pnc_rib = &pnc->mrib;
+ else if (pim->rpf_mode == MCAST_URIB_ONLY)
+ pnc_rib = &pnc->urib;
+ else if (pim->rpf_mode == MCAST_MIX_MRIB_FIRST || pim->rpf_mode == MCAST_NO_CONFIG) {
+ if (pnc->mrib.nexthop_num > 0)
+ pnc_rib = &pnc->mrib;
+ else
+ pnc_rib = &pnc->urib;
+ } else if (pim->rpf_mode == MCAST_MIX_DISTANCE) {
+ if (pnc->mrib.distance <= pnc->urib.distance)
+ pnc_rib = &pnc->mrib;
+ else
+ pnc_rib = &pnc->urib;
+ } else if (pim->rpf_mode == MCAST_MIX_PFXLEN) {
+ if (pnc->mrib.prefix_len >= pnc->urib.prefix_len)
+ pnc_rib = &pnc->mrib;
+ else
+ pnc_rib = &pnc->urib;
+ }
+
+ return pnc_rib;
+}
+
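A quick worked example of the selection above, with hypothetical PNC contents:

    /* Hypothetical state:
     *   pnc->mrib: distance 90,  prefix_len 16, nexthop_num 2
     *   pnc->urib: distance 110, prefix_len 24, nexthop_num 1
     *
     * MCAST_MIX_DISTANCE:   90 <= 110            -> &pnc->mrib
     * MCAST_MIX_PFXLEN:     16 >= 24 is false    -> &pnc->urib
     * MCAST_MIX_MRIB_FIRST: mrib.nexthop_num > 0 -> &pnc->mrib
     */
    struct pim_nexthop_cache_rib *rib = pim_pnc_get_rib(pim, pnc);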
+bool pim_nht_pnc_is_valid(struct pim_instance *pim, struct pim_nexthop_cache *pnc)
+{
+ switch (pim->rpf_mode) {
+ case MCAST_MRIB_ONLY:
+ return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_VALID);
+
+ case MCAST_URIB_ONLY:
+ return CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_VALID);
+
+ case MCAST_MIX_MRIB_FIRST:
+ case MCAST_NO_CONFIG:
+ case MCAST_MIX_DISTANCE:
+ case MCAST_MIX_PFXLEN:
+ /* The valid flag is only set if there are nexthops, so when doing a mixed lookup the MRIB
+ * might not have any nexthops; consider the entry valid if at least one RIB is valid.
+ */
+ return CHECK_FLAG(pnc->mrib.flags, PIM_NEXTHOP_VALID) ||
+ CHECK_FLAG(pnc->urib.flags, PIM_NEXTHOP_VALID);
+
+ default:
+ break;
+ }
+ return false;
+}
+
+struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim, pim_addr addr)
{
struct pim_nexthop_cache *pnc = NULL;
- struct pim_rpf rpf;
struct zclient *zclient = NULL;
zclient = pim_zebra_zclient_get();
- memset(&rpf, 0, sizeof(rpf));
- rpf.rpf_addr = addr;
+ pnc = pim_nexthop_cache_find(pim, addr);
- pnc = pim_nexthop_cache_find(pim, &rpf);
- if (!pnc) {
- pnc = pim_nexthop_cache_add(pim, &rpf);
- pim_sendmsg_zebra_rnh(pim, zclient, pnc,
- ZEBRA_NEXTHOP_REGISTER);
- if (PIM_DEBUG_PIM_NHT_DETAIL)
- zlog_debug(
- "%s: NHT cache and zebra notification added for %pPA(%s)",
- __func__, &addr, pim->vrf->name);
- }
+ if (pnc)
+ return pnc;
+
+ pnc = pim_nexthop_cache_add(pim, addr);
+ pim_sendmsg_zebra_rnh(pim, zclient, pnc->addr, ZEBRA_NEXTHOP_REGISTER);
+
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: NHT cache and zebra notification added for %pPA(%s)", __func__,
+ &addr, pim->vrf->name);
return pnc;
}
-/* TBD: this does several distinct things and should probably be split up.
- * (checking state vs. returning pnc vs. adding upstream vs. adding rp)
+void pim_nht_set_gateway(struct pim_instance *pim, struct pim_nexthop_cache *pnc, pim_addr addr,
+ struct interface *ifp)
+{
+ struct nexthop *nh_node = NULL;
+ struct interface *ifp1 = NULL;
+
+ for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) {
+ /* If the gateway is already set, then keep it */
+#if PIM_IPV == 4
+ if (!pim_addr_is_any(nh_node->gate.ipv4))
+ continue;
+#else
+ if (!pim_addr_is_any(nh_node->gate.ipv6))
+ continue;
+#endif
+
+ /* Only set gateway on the correct interface */
+ ifp1 = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+ if (ifp != ifp1)
+ continue;
+
+ /* Update the gateway address with the given address */
+#if PIM_IPV == 4
+ nh_node->gate.ipv4 = addr;
+#else
+ nh_node->gate.ipv6 = addr;
+#endif
+ if (PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("%s: addr %pPA new MRIB nexthop addr %pPAs interface %s",
+ __func__, &pnc->addr, &addr, ifp1->name);
+ }
+
+ /* Now do the same with URIB nexthop entries */
+ for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) {
+#if PIM_IPV == 4
+ if (!pim_addr_is_any(nh_node->gate.ipv4))
+ continue;
+#else
+ if (!pim_addr_is_any(nh_node->gate.ipv6))
+ continue;
+#endif
+
+ ifp1 = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+
+ if (ifp != ifp1)
+ continue;
+
+#if PIM_IPV == 4
+ nh_node->gate.ipv4 = addr;
+#else
+ nh_node->gate.ipv6 = addr;
+#endif
+ if (PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("%s: addr %pPA new URIB nexthop addr %pPAs interface %s",
+ __func__, &pnc->addr, &addr, ifp1->name);
+ }
+}
+
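Connected routes come back from zebra as interface-only nexthops with an empty gateway, so the RPF neighbor address is unknown until a PIM hello is heard on that interface. A hedged sketch of the expected caller shape; the walk-data struct and helper are hypothetical, only pim_nht_set_gateway() itself is from this patch:

    struct nht_nbr_walk_data {
    	struct pim_instance *pim;
    	struct interface *ifp;
    	pim_addr nbr_addr;
    };

    /* When a neighbor comes up on ifp, patch every tracked cache entry. */
    static int pim_nht_nbr_up_helper(struct hash_bucket *bucket, void *arg)
    {
    	struct pim_nexthop_cache *pnc = bucket->data;
    	struct nht_nbr_walk_data *d = arg;

    	pim_nht_set_gateway(d->pim, pnc, d->nbr_addr, d->ifp);
    	return HASHWALK_CONTINUE;
    }

    /* usage: hash_walk(pim->nht_hash, pim_nht_nbr_up_helper, &d); */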
+/* Finds the nexthop cache entry for the given address. If no cache, add it for tracking.
+ * up and/or rp may be given; they are added to the nexthop cache entry so that they get updates when the nexthop changes.
+ * If out_pnc is not null, then copy the nexthop cache entry to it.
+ * Return true if an entry was found and is valid.
*/
-int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
- struct pim_upstream *up, struct rp_info *rp,
- struct pim_nexthop_cache *out_pnc)
+bool pim_nht_find_or_track(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up,
+ struct rp_info *rp, struct pim_nexthop_cache *out_pnc)
{
struct pim_nexthop_cache *pnc;
struct listnode *ch_node = NULL;
+ /* This will find the entry and add it to tracking if not found */
pnc = pim_nht_get(pim, addr);
assertf(up || rp, "addr=%pPA", &addr);
+ /* Store the RP if provided and not currently in the list */
if (rp != NULL) {
ch_node = listnode_lookup(pnc->rp_list, rp);
if (ch_node == NULL)
listnode_add_sort(pnc->rp_list, rp);
}
+ /* Store the upstream if provided and not currently in the list */
if (up != NULL)
(void)hash_get(pnc->upstream_hash, up, hash_alloc_intern);
- if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID)) {
+ if (pim_nht_pnc_is_valid(pim, pnc)) {
if (out_pnc)
memcpy(out_pnc, pnc, sizeof(struct pim_nexthop_cache));
- return 1;
+ return true;
}
- return 0;
+ return false;
}
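A hedged usage sketch from an RP caller's point of view (the call-site shape is illustrative; the real callers live in pim_rp.c and the upstream code):

    struct pim_nexthop_cache pnc;

    /* Track the RP address and attach rp_info so it is re-resolved on change. */
    if (pim_nht_find_or_track(pim, rp_info->rp.rpf_addr, NULL, rp_info, &pnc)) {
    	/* a valid nexthop exists right now; pnc holds a copy of the entry */
    } else {
    	/* tracked, but no usable nexthop yet - wait for a zebra update */
    }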
void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr)
@@ -157,7 +306,6 @@ void pim_nht_bsr_add(struct pim_instance *pim, pim_addr addr)
struct pim_nexthop_cache *pnc;
pnc = pim_nht_get(pim, addr);
-
pnc->bsr_count++;
}
@@ -166,47 +314,47 @@ bool pim_nht_candrp_add(struct pim_instance *pim, pim_addr addr)
struct pim_nexthop_cache *pnc;
pnc = pim_nht_get(pim, addr);
-
pnc->candrp_count++;
- return CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID);
+ return pim_nht_pnc_is_valid(pim, pnc);
}
-static void pim_nht_drop_maybe(struct pim_instance *pim,
- struct pim_nexthop_cache *pnc)
+static void pim_nht_drop_maybe(struct pim_instance *pim, struct pim_nexthop_cache *pnc)
{
if (PIM_DEBUG_PIM_NHT)
zlog_debug("%s: NHT %pPA(%s) rp_list count:%d upstream count:%ld BSR count:%u Cand-RP count:%u",
- __func__, &pnc->rpf.rpf_addr, pim->vrf->name,
- pnc->rp_list->count, pnc->upstream_hash->count,
- pnc->bsr_count, pnc->candrp_count);
+ __func__, &pnc->addr, pim->vrf->name, pnc->rp_list->count,
+ pnc->upstream_hash->count, pnc->bsr_count, pnc->candrp_count);
- if (pnc->rp_list->count == 0 && pnc->upstream_hash->count == 0 &&
- pnc->bsr_count == 0 && pnc->candrp_count == 0) {
+ if (pnc->rp_list->count == 0 && pnc->upstream_hash->count == 0 && pnc->bsr_count == 0 &&
+ pnc->candrp_count == 0) {
struct zclient *zclient = pim_zebra_zclient_get();
- pim_sendmsg_zebra_rnh(pim, zclient, pnc,
- ZEBRA_NEXTHOP_UNREGISTER);
+ pim_sendmsg_zebra_rnh(pim, zclient, pnc->addr, ZEBRA_NEXTHOP_UNREGISTER);
list_delete(&pnc->rp_list);
+
hash_free(pnc->upstream_hash);
+ hash_release(pim->nht_hash, pnc);
+
+ if (pnc->urib.nexthop)
+ nexthops_free(pnc->urib.nexthop);
+ if (pnc->mrib.nexthop)
+ nexthops_free(pnc->mrib.nexthop);
- hash_release(pim->rpf_hash, pnc);
- if (pnc->nexthop)
- nexthops_free(pnc->nexthop);
XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
}
}
-void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr,
- struct pim_upstream *up, struct rp_info *rp)
+void pim_nht_delete_tracked(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up,
+ struct rp_info *rp)
{
struct pim_nexthop_cache *pnc = NULL;
struct pim_nexthop_cache lookup;
struct pim_upstream *upstream = NULL;
/* Remove from RPF hash if it is the last entry */
- lookup.rpf.rpf_addr = addr;
- pnc = hash_lookup(pim->rpf_hash, &lookup);
+ lookup.addr = addr;
+ pnc = hash_lookup(pim->nht_hash, &lookup);
if (!pnc) {
zlog_warn("attempting to delete nonexistent NHT entry %pPA",
&addr);
@@ -251,9 +399,9 @@ void pim_nht_bsr_del(struct pim_instance *pim, pim_addr addr)
if (pim_addr_is_any(addr))
return;
- lookup.rpf.rpf_addr = addr;
+ lookup.addr = addr;
- pnc = hash_lookup(pim->rpf_hash, &lookup);
+ pnc = hash_lookup(pim->nht_hash, &lookup);
if (!pnc) {
zlog_warn("attempting to delete nonexistent NHT BSR entry %pPA",
@@ -272,9 +420,9 @@ void pim_nht_candrp_del(struct pim_instance *pim, pim_addr addr)
struct pim_nexthop_cache *pnc = NULL;
struct pim_nexthop_cache lookup;
- lookup.rpf.rpf_addr = addr;
+ lookup.addr = addr;
- pnc = hash_lookup(pim->rpf_hash, &lookup);
+ pnc = hash_lookup(pim->nht_hash, &lookup);
if (!pnc) {
zlog_warn("attempting to delete nonexistent NHT C-RP entry %pPA",
@@ -297,10 +445,10 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
struct nexthop *nh;
struct interface *ifp;
- lookup.rpf.rpf_addr = bsr_addr;
+ lookup.addr = bsr_addr;
- pnc = hash_lookup(pim->rpf_hash, &lookup);
- if (!pnc || !CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED)) {
+ pnc = hash_lookup(pim->nht_hash, &lookup);
+ if (!pnc || !pim_nht_pnc_has_answer(pim, pnc)) {
/* BSM from a new freshly registered BSR - do a synchronous
* zebra query since otherwise we'd drop the first packet,
* leading to additional delay in picking up BSM data
@@ -359,91 +507,92 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
return false;
}
- if (!CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID))
- return false;
-
- /* if we accept BSMs from more than one ECMP nexthop, this will cause
- * BSM message "multiplication" for each ECMP hop. i.e. if you have
- * 4-way ECMP and 4 hops you end up with 256 copies of each BSM
- * message.
- *
- * so... only accept the first (IPv4) valid nexthop as source.
- */
+ if (pim_nht_pnc_is_valid(pim, pnc)) {
+ /* if we accept BSMs from more than one ECMP nexthop, this will cause
+ * BSM message "multiplication" for each ECMP hop. i.e. if you have
+ * 4-way ECMP and 4 hops you end up with 256 copies of each BSM
+ * message.
+ *
+ * so... only accept the first (IPv4) valid nexthop as source.
+ */
+ struct pim_nexthop_cache_rib *rib = pim_pnc_get_rib(pim, pnc);
- for (nh = pnc->nexthop; nh; nh = nh->next) {
- pim_addr nhaddr;
+ for (nh = rib->nexthop; nh; nh = nh->next) {
+ pim_addr nhaddr;
- switch (nh->type) {
+ switch (nh->type) {
#if PIM_IPV == 4
- case NEXTHOP_TYPE_IPV4:
- if (nh->ifindex == IFINDEX_INTERNAL)
- continue;
+ case NEXTHOP_TYPE_IPV4:
+ if (nh->ifindex == IFINDEX_INTERNAL)
+ continue;
- fallthrough;
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- nhaddr = nh->gate.ipv4;
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- continue;
-#else
- case NEXTHOP_TYPE_IPV6:
- if (nh->ifindex == IFINDEX_INTERNAL)
+ fallthrough;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ nhaddr = nh->gate.ipv4;
+ break;
+
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
continue;
+#else
+ case NEXTHOP_TYPE_IPV6:
+ if (nh->ifindex == IFINDEX_INTERNAL)
+ continue;
- fallthrough;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- nhaddr = nh->gate.ipv6;
- break;
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- continue;
+ fallthrough;
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ nhaddr = nh->gate.ipv6;
+ break;
+
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ continue;
#endif
- case NEXTHOP_TYPE_IFINDEX:
- nhaddr = bsr_addr;
- break;
+ case NEXTHOP_TYPE_IFINDEX:
+ nhaddr = bsr_addr;
+ break;
- case NEXTHOP_TYPE_BLACKHOLE:
- continue;
- }
+ case NEXTHOP_TYPE_BLACKHOLE:
+ continue;
+ }
- ifp = if_lookup_by_index(nh->ifindex, pim->vrf->vrf_id);
- if (!ifp || !ifp->info)
- continue;
+ ifp = if_lookup_by_index(nh->ifindex, pim->vrf->vrf_id);
+ if (!ifp || !ifp->info)
+ continue;
- if (if_is_loopback(ifp) && if_is_loopback(src_ifp))
- return true;
+ if (if_is_loopback(ifp) && if_is_loopback(src_ifp))
+ return true;
- /* MRIB (IGP) may be pointing at a router where PIM is down */
- nbr = pim_neighbor_find(ifp, nhaddr, true);
- if (!nbr)
- continue;
+ /* MRIB (IGP) may be pointing at a router where PIM is down */
+ nbr = pim_neighbor_find(ifp, nhaddr, true);
+ if (!nbr)
+ continue;
- /* Are we on the correct interface? */
- if (nh->ifindex == src_ifp->ifindex) {
- /* Do we have the correct NH ? */
- if (!pim_addr_cmp(nhaddr, src_ip))
- return true;
- /*
- * check If the packet came from the neighbor,
- * and the dst is a secondary address on the connected interface
- */
- return (!pim_addr_cmp(nbr->source_addr, src_ip) &&
- pim_if_connected_to_source(ifp, nhaddr));
+ /* Are we on the correct interface? */
+ if (nh->ifindex == src_ifp->ifindex) {
+ /* Do we have the correct NH ? */
+ if (!pim_addr_cmp(nhaddr, src_ip))
+ return true;
+ /*
+ * check If the packet came from the neighbor,
+ * and the dst is a secondary address on the connected interface
+ */
+ return (!pim_addr_cmp(nbr->source_addr, src_ip) &&
+ pim_if_connected_to_source(ifp, nhaddr));
+ }
+ return false;
}
- return false;
}
return false;
}
-void pim_rp_nexthop_del(struct rp_info *rp_info)
+void pim_nht_rp_del(struct rp_info *rp_info)
{
rp_info->rp.source_nexthop.interface = NULL;
rp_info->rp.source_nexthop.mrib_nexthop_addr = PIMADDR_ANY;
rp_info->rp.source_nexthop.mrib_metric_preference =
router->infinite_assert_metric.metric_preference;
- rp_info->rp.source_nexthop.mrib_route_metric =
- router->infinite_assert_metric.route_metric;
+ rp_info->rp.source_nexthop.mrib_route_metric = router->infinite_assert_metric.route_metric;
}
/* Update RP nexthop info based on Nexthop update received from Zebra.*/
@@ -461,10 +610,9 @@ static void pim_update_rp_nh(struct pim_instance *pim,
ifp = rp_info->rp.source_nexthop.interface;
// Compute PIM RPF using cached nexthop
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
- rp_info->rp.rpf_addr,
- &rp_info->group, 1))
- pim_rp_nexthop_del(rp_info);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, rp_info->rp.rpf_addr,
+ &rp_info->group, true))
+ pim_nht_rp_del(rp_info);
/*
* If we transition from no path to a path
@@ -544,33 +692,43 @@ static int pim_upstream_nh_if_update_helper(struct hash_bucket *bucket,
struct pim_instance *pim = pwd->pim;
struct interface *ifp = pwd->ifp;
struct nexthop *nh_node = NULL;
- ifindex_t first_ifindex;
- for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
- first_ifindex = nh_node->ifindex;
- if (ifp != if_lookup_by_index(first_ifindex, pim->vrf->vrf_id))
- continue;
+ /* This update happens when an interface is added to or removed from PIM.
+ * So go through both MRIB and URIB and update any upstreams for any
+ * matching nexthop.
+ */
+ for (nh_node = pnc->mrib.nexthop; nh_node; nh_node = nh_node->next) {
+ if (ifp->ifindex == nh_node->ifindex) {
+ if (pnc->upstream_hash->count) {
+ pim_update_upstream_nh(pim, pnc);
+ break;
+ }
+ }
+ }
- if (pnc->upstream_hash->count) {
- pim_update_upstream_nh(pim, pnc);
- break;
+ for (nh_node = pnc->urib.nexthop; nh_node; nh_node = nh_node->next) {
+ if (ifp->ifindex == nh_node->ifindex) {
+ if (pnc->upstream_hash->count) {
+ pim_update_upstream_nh(pim, pnc);
+ break;
+ }
}
}
return HASHWALK_CONTINUE;
}
-void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp)
+void pim_nht_upstream_if_update(struct pim_instance *pim, struct interface *ifp)
{
struct pnc_hash_walk_data pwd;
pwd.pim = pim;
pwd.ifp = ifp;
- hash_walk(pim->rpf_hash, pim_upstream_nh_if_update_helper, &pwd);
+ hash_walk(pim->nht_hash, pim_upstream_nh_if_update_helper, &pwd);
}
-uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
+static uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
{
uint32_t hash_val;
@@ -583,47 +741,42 @@ uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp)
return hash_val;
}
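For orientation, the hash feeds the ECMP slot choice further down in pim_ecmp_nexthop_search(): the candidate count may shrink to the number of PIM-usable neighbors when neighbor_needed is set, then the hash picks a slot. A small worked sketch with hypothetical values:

    /* Hypothetical: consider = 4 usable nexthops, hash_val = 0x2b67f00d.
     * mod_val = 0x2b67f00d % 4 = 1 -> the second eligible nexthop wins;
     * if that slot turns out unusable (no PIM neighbor), mod_val is
     * bumped so the walk tries the next slot instead.
     */
    uint32_t hash_val = pim_compute_ecmp_hash(&src_pfx, grp);
    uint32_t mod_val = hash_val % consider;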
-static int pim_ecmp_nexthop_search(struct pim_instance *pim,
- struct pim_nexthop_cache *pnc,
- struct pim_nexthop *nexthop, pim_addr src,
- struct prefix *grp, int neighbor_needed)
+static bool pim_ecmp_nexthop_search(struct pim_instance *pim, struct pim_nexthop_cache *pnc,
+ struct pim_nexthop *nexthop, pim_addr src, struct prefix *grp,
+ bool neighbor_needed)
{
- struct pim_neighbor *nbrs[router->multipath], *nbr = NULL;
- struct interface *ifps[router->multipath];
struct nexthop *nh_node = NULL;
- ifindex_t first_ifindex;
- struct interface *ifp = NULL;
- uint32_t hash_val = 0, mod_val = 0;
- uint16_t nh_iter = 0, found = 0;
- uint32_t i, num_nbrs = 0;
- struct pim_interface *pim_ifp;
-
- if (!pnc || !pnc->nexthop_num || !nexthop)
- return 0;
-
- pim_addr nh_addr = nexthop->mrib_nexthop_addr;
- pim_addr grp_addr = pim_addr_from_prefix(grp);
+ uint32_t hash_val = 0;
+ uint32_t mod_val = 0;
+ uint16_t nh_iter = 0;
+ bool found = false;
+ uint32_t num_nbrs = 0;
+ pim_addr nh_addr;
+ pim_addr grp_addr;
+ struct pim_nexthop_cache_rib *rib;
- memset(&nbrs, 0, sizeof(nbrs));
- memset(&ifps, 0, sizeof(ifps));
+ /* Early return if required parameters aren't provided */
+ if (!pim || !pnc || !pim_nht_pnc_is_valid(pim, pnc) || !nexthop || !grp)
+ return false;
+ nh_addr = nexthop->mrib_nexthop_addr;
+ grp_addr = pim_addr_from_prefix(grp);
+ rib = pim_pnc_get_rib(pim, pnc);
- // Current Nexthop is VALID, check to stay on the current path.
+ /* Current Nexthop is VALID, check to stay on the current path. */
if (nexthop->interface && nexthop->interface->info &&
(!pim_addr_is_any(nh_addr))) {
- /* User configured knob to explicitly switch
- to new path is disabled or current path
- metric is less than nexthop update.
+ /* The user-configured knob to explicitly switch to a new path is disabled, or
+ * the current path metric is lower than that of the nexthop update.
*/
+ if (!pim->ecmp_rebalance_enable) {
+ bool curr_route_valid = false;
- if (pim->ecmp_rebalance_enable == 0) {
- uint8_t curr_route_valid = 0;
- // Check if current nexthop is present in new updated
- // Nexthop list.
- // If the current nexthop is not valid, candidate to
- // choose new Nexthop.
- for (nh_node = pnc->nexthop; nh_node;
- nh_node = nh_node->next) {
+ /* Check if current nexthop is present in new updated Nexthop list.
+ * If the current nexthop is not valid, candidate to choose new
+ * Nexthop.
+ */
+ for (nh_node = rib->nexthop; nh_node; nh_node = nh_node->next) {
curr_route_valid = (nexthop->interface->ifindex
== nh_node->ifindex);
if (curr_route_valid)
@@ -633,9 +786,9 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
if (curr_route_valid &&
!pim_if_connected_to_source(nexthop->interface,
src)) {
- nbr = pim_neighbor_find(
- nexthop->interface,
- nexthop->mrib_nexthop_addr, true);
+ struct pim_neighbor *nbr =
+ pim_neighbor_find(nexthop->interface,
+ nexthop->mrib_nexthop_addr, true);
if (!nbr
&& !if_is_loopback(nexthop->interface)) {
if (PIM_DEBUG_PIM_NHT)
@@ -646,10 +799,8 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
/* update metric even if the upstream
* neighbor stays unchanged
*/
- nexthop->mrib_metric_preference =
- pnc->distance;
- nexthop->mrib_route_metric =
- pnc->metric;
+ nexthop->mrib_metric_preference = rib->distance;
+ nexthop->mrib_route_metric = rib->metric;
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s: (%pPA,%pPA)(%s) current nexthop %s is valid, skipping new path selection",
@@ -657,40 +808,39 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
&grp_addr,
pim->vrf->name,
nexthop->interface->name);
- return 1;
+ return true;
}
}
}
}
- /*
- * Look up all interfaces and neighbors,
- * store for later usage
- */
- for (nh_node = pnc->nexthop, i = 0; nh_node;
- nh_node = nh_node->next, i++) {
- ifps[i] =
- if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
- if (ifps[i]) {
+ /* Count the number of neighbors for ECMP */
+ for (nh_node = rib->nexthop; nh_node; nh_node = nh_node->next) {
+ struct pim_neighbor *nbr;
+ struct interface *ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+
+ if (!ifp)
+ continue;
+
#if PIM_IPV == 4
- pim_addr nhaddr = nh_node->gate.ipv4;
+ pim_addr nhaddr = nh_node->gate.ipv4;
#else
- pim_addr nhaddr = nh_node->gate.ipv6;
+ pim_addr nhaddr = nh_node->gate.ipv6;
#endif
- nbrs[i] = pim_neighbor_find(ifps[i], nhaddr, true);
- if (nbrs[i] || pim_if_connected_to_source(ifps[i], src))
- num_nbrs++;
- }
+ nbr = pim_neighbor_find(ifp, nhaddr, true);
+ if (nbr || pim_if_connected_to_source(ifp, src))
+ num_nbrs++;
}
+
if (pim->ecmp_enable) {
struct prefix src_pfx;
- uint32_t consider = pnc->nexthop_num;
+ uint32_t consider = rib->nexthop_num;
if (neighbor_needed && num_nbrs < consider)
consider = num_nbrs;
if (consider == 0)
- return 0;
+ return false;
// PIM ECMP flag is enable then choose ECMP path.
pim_addr_to_prefix(&src_pfx, src);
@@ -698,16 +848,16 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
mod_val = hash_val % consider;
}
- for (nh_node = pnc->nexthop; nh_node && (found == 0);
- nh_node = nh_node->next) {
- first_ifindex = nh_node->ifindex;
- ifp = ifps[nh_iter];
+ for (nh_node = rib->nexthop; nh_node && !found; nh_node = nh_node->next) {
+ struct pim_neighbor *nbr = NULL;
+ struct pim_interface *pim_ifp;
+ struct interface *ifp = if_lookup_by_index(nh_node->ifindex, pim->vrf->vrf_id);
+
if (!ifp) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s %s: could not find interface for ifindex %d (address %pPA(%s))",
- __FILE__, __func__, first_ifindex, &src,
- pim->vrf->name);
+ zlog_debug("%s %s: could not find interface for ifindex %d (address %pPA(%s))",
+ __FILE__, __func__, nh_node->ifindex, &src,
+ pim->vrf->name);
if (nh_iter == mod_val)
			mod_val++; // Select next path
nh_iter++;
@@ -718,10 +868,9 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
if (!pim_ifp || !pim_ifp->pim_enable) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
- __func__, ifp->name, pim->vrf->name,
- first_ifindex, &src);
+ zlog_debug("%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
+ __func__, ifp->name, pim->vrf->name, nh_node->ifindex,
+ &src);
if (nh_iter == mod_val)
			mod_val++; // Select next path
nh_iter++;
@@ -729,7 +878,12 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
}
if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) {
- nbr = nbrs[nh_iter];
+#if PIM_IPV == 4
+ nbr = pim_neighbor_find(ifp, nh_node->gate.ipv4, true);
+#else
+ nbr = pim_neighbor_find(ifp, nh_node->gate.ipv6, true);
+#endif
+
if (!nbr && !if_is_loopback(ifp)) {
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
@@ -750,12 +904,12 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
#else
nexthop->mrib_nexthop_addr = nh_node->gate.ipv6;
#endif
- nexthop->mrib_metric_preference = pnc->distance;
- nexthop->mrib_route_metric = pnc->metric;
+ nexthop->mrib_metric_preference = rib->distance;
+ nexthop->mrib_route_metric = rib->metric;
nexthop->last_lookup = src;
nexthop->last_lookup_time = pim_time_monotonic_usec();
nexthop->nbr = nbr;
- found = 1;
+ found = true;
if (PIM_DEBUG_PIM_NHT)
zlog_debug(
"%s: (%pPA,%pPA)(%s) selected nhop interface %s addr %pPAs mod_val %u iter %d ecmp %d",
@@ -766,260 +920,55 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim,
nh_iter++;
}
- if (found)
- return 1;
- else
- return 0;
+ return found;
}
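The "stay on the current path" branch near the top of this function is the effect of the rebalance knob. Summarized with the CLI names as documented for FRR's pimd (treat the exact strings as assumptions):

    /* ip pim ecmp            -> pim->ecmp_enable = true
     * ip pim ecmp rebalance   -> pim->ecmp_rebalance_enable = true
     *
     * With ECMP on but rebalance off, a still-valid current nexthop is
     * kept ("skipping new path selection"); with rebalance on, the hash
     * is re-evaluated and flows may move to a different ECMP path.
     */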
-/* This API is used to parse Registered address nexthop update coming from Zebra
- */
-void pim_nexthop_update(struct vrf *vrf, struct prefix *match,
- struct zapi_route *nhr)
-{
- struct nexthop *nexthop;
- struct nexthop *nhlist_head = NULL;
- struct nexthop *nhlist_tail = NULL;
- int i;
- struct pim_rpf rpf;
- struct pim_nexthop_cache *pnc = NULL;
- struct interface *ifp = NULL;
- struct pim_instance *pim;
-
- pim = vrf->info;
-
- rpf.rpf_addr = pim_addr_from_prefix(match);
- pnc = pim_nexthop_cache_find(pim, &rpf);
- if (!pnc) {
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: Skipping NHT update, addr %pPA is not in local cached DB.",
- __func__, &rpf.rpf_addr);
- return;
- }
-
- pnc->last_update = pim_time_monotonic_usec();
-
- if (nhr->nexthop_num) {
- pnc->nexthop_num = 0;
-
- for (i = 0; i < nhr->nexthop_num; i++) {
- nexthop = nexthop_from_zapi_nexthop(&nhr->nexthops[i]);
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IFINDEX:
- /*
- * Connected route (i.e. no nexthop), use
- * RPF address from nexthop cache (i.e.
- * destination) as PIM nexthop.
- */
-#if PIM_IPV == 4
- nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- nexthop->gate.ipv4 = pnc->rpf.rpf_addr;
-#else
- nexthop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
- nexthop->gate.ipv6 = pnc->rpf.rpf_addr;
-#endif
- break;
-#if PIM_IPV == 4
- /* RFC5549 IPv4-over-IPv6 nexthop handling:
- * if we get an IPv6 nexthop in IPv4 PIM, hunt down a
- * PIM neighbor and use that instead.
- */
- case NEXTHOP_TYPE_IPV6_IFINDEX: {
- struct interface *ifp1 = NULL;
- struct pim_neighbor *nbr = NULL;
-
- ifp1 = if_lookup_by_index(nexthop->ifindex,
- pim->vrf->vrf_id);
-
- if (!ifp1)
- nbr = NULL;
- else
- /* FIXME: should really use nbr's
- * secondary address list here
- */
- nbr = pim_neighbor_find_if(ifp1);
-
- /* Overwrite with Nbr address as NH addr */
- if (nbr)
- nexthop->gate.ipv4 = nbr->source_addr;
- else
- // Mark nexthop address to 0 until PIM
- // Nbr is resolved.
- nexthop->gate.ipv4 = PIMADDR_ANY;
-
- break;
- }
-#else
- case NEXTHOP_TYPE_IPV6_IFINDEX:
-#endif
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- case NEXTHOP_TYPE_BLACKHOLE:
- /* nothing to do for the other nexthop types */
- break;
- }
-
- ifp = if_lookup_by_index(nexthop->ifindex,
- pim->vrf->vrf_id);
- if (!ifp) {
- if (PIM_DEBUG_PIM_NHT) {
- char buf[NEXTHOP_STRLEN];
- zlog_debug(
- "%s: could not find interface for ifindex %d(%s) (addr %s)",
- __func__, nexthop->ifindex,
- pim->vrf->name,
- nexthop2str(nexthop, buf,
- sizeof(buf)));
- }
- nexthop_free(nexthop);
- continue;
- }
-
- if (PIM_DEBUG_PIM_NHT) {
-#if PIM_IPV == 4
- pim_addr nhaddr = nexthop->gate.ipv4;
-#else
- pim_addr nhaddr = nexthop->gate.ipv6;
-#endif
- zlog_debug("%s: NHT addr %pFX(%s) %d-nhop via %pPA(%s) type %d distance:%u metric:%u ",
- __func__, match, pim->vrf->name,
- i + 1, &nhaddr, ifp->name,
- nexthop->type, nhr->distance,
- nhr->metric);
- }
-
- if (!ifp->info) {
- /*
- * Though Multicast is not enabled on this
- * Interface store it in database otheriwse we
- * may miss this update and this will not cause
- * any issue, because while choosing the path we
- * are ommitting the Interfaces which are not
- * multicast enabled
- */
- if (PIM_DEBUG_PIM_NHT) {
- char buf[NEXTHOP_STRLEN];
-
- zlog_debug(
- "%s: multicast not enabled on input interface %s(%s) (ifindex=%d, addr %s)",
- __func__, ifp->name,
- pim->vrf->name,
- nexthop->ifindex,
- nexthop2str(nexthop, buf,
- sizeof(buf)));
- }
- }
-
- if (nhlist_tail) {
- nhlist_tail->next = nexthop;
- nhlist_tail = nexthop;
- } else {
- nhlist_tail = nexthop;
- nhlist_head = nexthop;
- }
-
- // Keep track of all nexthops, even PIM-disabled ones.
- pnc->nexthop_num++;
- }
- /* Reset existing pnc->nexthop before assigning new list */
- nexthops_free(pnc->nexthop);
- pnc->nexthop = nhlist_head;
- if (pnc->nexthop_num) {
- pnc->flags |= PIM_NEXTHOP_VALID;
- pnc->distance = nhr->distance;
- pnc->metric = nhr->metric;
- }
- } else {
- pnc->flags &= ~PIM_NEXTHOP_VALID;
- pnc->nexthop_num = nhr->nexthop_num;
- nexthops_free(pnc->nexthop);
- pnc->nexthop = NULL;
- }
- SET_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED);
-
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug("%s: NHT Update for %pFX(%s) num_nh %d num_pim_nh %d vrf:%u up %ld rp %d",
- __func__, match, pim->vrf->name, nhr->nexthop_num,
- pnc->nexthop_num, vrf->vrf_id,
- pnc->upstream_hash->count, listcount(pnc->rp_list));
-
- pim_rpf_set_refresh_time(pim);
-
- if (listcount(pnc->rp_list))
- pim_update_rp_nh(pim, pnc);
- if (pnc->upstream_hash->count)
- pim_update_upstream_nh(pim, pnc);
-
- if (pnc->candrp_count)
- pim_crp_nht_update(pim, pnc);
-}
-
-int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
- struct pim_nexthop *nexthop, pim_addr src,
- struct prefix *grp, int neighbor_needed)
+bool pim_nht_lookup_ecmp(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr src,
+ struct prefix *grp, bool neighbor_needed)
{
struct pim_nexthop_cache *pnc;
struct pim_zlookup_nexthop nexthop_tab[router->multipath];
- struct pim_neighbor *nbrs[router->multipath], *nbr = NULL;
- struct pim_rpf rpf;
int num_ifindex;
- struct interface *ifps[router->multipath], *ifp;
- int first_ifindex;
- int found = 0;
+ bool found = false;
uint16_t i = 0;
- uint32_t hash_val = 0, mod_val = 0;
+ uint32_t hash_val = 0;
+ uint32_t mod_val = 0;
uint32_t num_nbrs = 0;
- struct pim_interface *pim_ifp;
if (PIM_DEBUG_PIM_NHT_DETAIL)
- zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld",
- __func__, &src, pim->vrf->name,
- nexthop->last_lookup_time);
-
- rpf.rpf_addr = src;
+ zlog_debug("%s: Looking up: %pPA(%s), last lookup time: %lld", __func__, &src,
+ pim->vrf->name, nexthop->last_lookup_time);
- pnc = pim_nexthop_cache_find(pim, &rpf);
+ pnc = pim_nexthop_cache_find(pim, src);
if (pnc) {
- if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_ANSWER_RECEIVED))
- return pim_ecmp_nexthop_search(pim, pnc, nexthop, src, grp,
- neighbor_needed);
+ if (pim_nht_pnc_has_answer(pim, pnc))
+ return pim_ecmp_nexthop_search(pim, pnc, nexthop, src, grp, neighbor_needed);
}
- memset(nexthop_tab, 0,
- sizeof(struct pim_zlookup_nexthop) * router->multipath);
- num_ifindex =
- zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src,
- PIM_NEXTHOP_LOOKUP_MAX);
+ memset(nexthop_tab, 0, sizeof(struct pim_zlookup_nexthop) * router->multipath);
+ num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, src,
+ PIM_NEXTHOP_LOOKUP_MAX);
if (num_ifindex < 1) {
if (PIM_DEBUG_PIM_NHT)
- zlog_warn(
- "%s: could not find nexthop ifindex for address %pPA(%s)",
- __func__, &src, pim->vrf->name);
- return 0;
+ zlog_warn("%s: could not find nexthop ifindex for address %pPA(%s)",
+ __func__, &src, pim->vrf->name);
+ return false;
}
- memset(&nbrs, 0, sizeof(nbrs));
- memset(&ifps, 0, sizeof(ifps));
-
- /*
- * Look up all interfaces and neighbors,
- * store for later usage
- */
+ /* Count the number of neighbors for ECMP computation */
for (i = 0; i < num_ifindex; i++) {
- ifps[i] = if_lookup_by_index(nexthop_tab[i].ifindex,
- pim->vrf->vrf_id);
- if (ifps[i]) {
- nbrs[i] = pim_neighbor_find(
- ifps[i], nexthop_tab[i].nexthop_addr, true);
-
- if (nbrs[i] || pim_if_connected_to_source(ifps[i], src))
- num_nbrs++;
- }
+ struct pim_neighbor *nbr;
+ struct interface *ifp = if_lookup_by_index(nexthop_tab[i].ifindex, pim->vrf->vrf_id);
+
+ if (!ifp)
+ continue;
+
+ nbr = pim_neighbor_find(ifp, nexthop_tab[i].nexthop_addr, true);
+ if (nbr || pim_if_connected_to_source(ifp, src))
+ num_nbrs++;
}
- // If PIM ECMP enable then choose ECMP path.
+	/* If PIM ECMP is enabled, choose an ECMP path. */
if (pim->ecmp_enable) {
struct prefix src_pfx;
uint32_t consider = num_ifindex;
@@ -1028,30 +977,27 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
consider = num_nbrs;
if (consider == 0)
- return 0;
+ return false;
pim_addr_to_prefix(&src_pfx, src);
hash_val = pim_compute_ecmp_hash(&src_pfx, grp);
mod_val = hash_val % consider;
if (PIM_DEBUG_PIM_NHT_DETAIL)
- zlog_debug("%s: hash_val %u mod_val %u", __func__,
- hash_val, mod_val);
+ zlog_debug("%s: hash_val %u mod_val %u", __func__, hash_val, mod_val);
}
- i = 0;
- while (!found && (i < num_ifindex)) {
- first_ifindex = nexthop_tab[i].ifindex;
+ for (i = 0; i < num_ifindex && !found; i++) {
+ struct pim_neighbor *nbr = NULL;
+ struct pim_interface *pim_ifp;
+ struct interface *ifp = if_lookup_by_index(nexthop_tab[i].ifindex, pim->vrf->vrf_id);
- ifp = ifps[i];
if (!ifp) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s %s: could not find interface for ifindex %d (address %pPA(%s))",
- __FILE__, __func__, first_ifindex, &src,
- pim->vrf->name);
+ zlog_debug("%s %s: could not find interface for ifindex %d (address %pPA(%s))",
+ __FILE__, __func__, nexthop_tab[i].ifindex, &src,
+ pim->vrf->name);
if (i == mod_val)
mod_val++;
- i++;
continue;
}
@@ -1059,99 +1005,431 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
if (!pim_ifp || !pim_ifp->pim_enable) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
- __func__, ifp->name, pim->vrf->name,
- first_ifindex, &src);
+ zlog_debug("%s: pim not enabled on input interface %s(%s) (ifindex=%d, RPF for source %pPA)",
+ __func__, ifp->name, pim->vrf->name,
+ nexthop_tab[i].ifindex, &src);
if (i == mod_val)
mod_val++;
- i++;
continue;
}
+
if (neighbor_needed && !pim_if_connected_to_source(ifp, src)) {
- nbr = nbrs[i];
+ nbr = pim_neighbor_find(ifp, nexthop_tab[i].nexthop_addr, true);
if (PIM_DEBUG_PIM_NHT_DETAIL)
- zlog_debug("ifp name: %s(%s), pim nbr: %p",
- ifp->name, pim->vrf->name, nbr);
+ zlog_debug("ifp name: %s(%s), pim nbr: %p", ifp->name,
+ pim->vrf->name, nbr);
if (!nbr && !if_is_loopback(ifp)) {
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: NBR (%pPA) not found on input interface %s(%s) (RPF for source %pPA)",
+ __func__, &nexthop_tab[i].nexthop_addr,
+ ifp->name, pim->vrf->name, &src);
if (i == mod_val)
mod_val++;
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: NBR (%pPA) not found on input interface %s(%s) (RPF for source %pPA)",
- __func__,
- &nexthop_tab[i].nexthop_addr,
- ifp->name, pim->vrf->name,
- &src);
- i++;
continue;
}
}
if (i == mod_val) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: found nhop %pPA for addr %pPA interface %s(%s) metric %d dist %d",
- __func__, &nexthop_tab[i].nexthop_addr,
- &src, ifp->name, pim->vrf->name,
- nexthop_tab[i].route_metric,
- nexthop_tab[i].protocol_distance);
+ zlog_debug("%s: found nhop %pPA for addr %pPA interface %s(%s) metric %d dist %d",
+ __func__, &nexthop_tab[i].nexthop_addr, &src, ifp->name,
+ pim->vrf->name, nexthop_tab[i].route_metric,
+ nexthop_tab[i].protocol_distance);
/* update nexthop data */
nexthop->interface = ifp;
- nexthop->mrib_nexthop_addr =
- nexthop_tab[i].nexthop_addr;
- nexthop->mrib_metric_preference =
- nexthop_tab[i].protocol_distance;
- nexthop->mrib_route_metric =
- nexthop_tab[i].route_metric;
+ nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr;
+ nexthop->mrib_metric_preference = nexthop_tab[i].protocol_distance;
+ nexthop->mrib_route_metric = nexthop_tab[i].route_metric;
nexthop->last_lookup = src;
nexthop->last_lookup_time = pim_time_monotonic_usec();
nexthop->nbr = nbr;
- found = 1;
+ found = true;
}
- i++;
}
- if (found)
- return 1;
- else
- return 0;
+ return found;
}
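
The mod_val bookkeeping above is what makes the ECMP choice stable: a hash of (source, group) designates index mod_val among the candidate nexthops, and every candidate that turns out to be unusable bumps mod_val so the selection slides to the next viable path. A minimal standalone sketch of that selection (the names pick_ecmp_candidate/usable are illustrative, not FRR API):

    /* Pick the hash-designated candidate, skipping unusable entries the
     * same way the loops above bump mod_val. Returns -1 if nothing fits.
     */
    static int pick_ecmp_candidate(uint32_t hash_val, const bool usable[], int num)
    {
        uint32_t mod_val = hash_val % num;

        for (int i = 0; i < num; i++) {
            if (!usable[i]) {
                if (i == mod_val)
                    mod_val++; /* skip to the next candidate */
                continue;
            }
            if (i == mod_val)
                return i; /* selected nexthop index */
        }
        return -1; /* no usable path */
    }
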
-int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src,
- struct prefix *grp)
+bool pim_nht_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr addr,
+ int neighbor_needed)
+{
+ struct pim_zlookup_nexthop nexthop_tab[router->multipath];
+ struct pim_neighbor *nbr = NULL;
+ int num_ifindex;
+ struct interface *ifp = NULL;
+ ifindex_t first_ifindex = 0;
+ bool found = false;
+ int i = 0;
+ struct pim_interface *pim_ifp;
+
+#if PIM_IPV == 4
+ /*
+	 * We should not attempt to look up an
+	 * unspecified (0.0.0.0) address, since
+	 * that will never work
+ */
+ if (pim_addr_is_any(addr))
+ return false;
+#endif
+
+ if ((!pim_addr_cmp(nexthop->last_lookup, addr)) &&
+ (nexthop->last_lookup_time > pim->last_route_change_time)) {
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: Using last lookup for %pPAs at %lld, %" PRId64 " addr %pPAs",
+ __func__, &addr, nexthop->last_lookup_time,
+ pim->last_route_change_time, &nexthop->mrib_nexthop_addr);
+ pim->nexthop_lookups_avoided++;
+ return true;
+ }
+
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: Looking up: %pPAs, last lookup time: %lld, %" PRId64, __func__,
+ &addr, nexthop->last_lookup_time, pim->last_route_change_time);
+
+ memset(nexthop_tab, 0, sizeof(struct pim_zlookup_nexthop) * router->multipath);
+ num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, router->multipath, addr,
+ PIM_NEXTHOP_LOOKUP_MAX);
+ if (num_ifindex < 1) {
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: could not find nexthop ifindex for address %pPAs", __func__,
+ &addr);
+ return false;
+ }
+
+ while (!found && (i < num_ifindex)) {
+ first_ifindex = nexthop_tab[i].ifindex;
+
+ ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
+ if (!ifp) {
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: could not find interface for ifindex %d (address %pPAs)",
+ __func__, first_ifindex, &addr);
+ i++;
+ continue;
+ }
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp || !pim_ifp->pim_enable) {
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: pim not enabled on input interface %s (ifindex=%d, RPF for source %pPAs)",
+ __func__, ifp->name, first_ifindex, &addr);
+ i++;
+ } else if (neighbor_needed && !pim_if_connected_to_source(ifp, addr)) {
+ nbr = pim_neighbor_find(ifp, nexthop_tab[i].nexthop_addr, true);
+ if (PIM_DEBUG_PIM_TRACE_DETAIL)
+ zlog_debug("ifp name: %s, pim nbr: %p", ifp->name, nbr);
+ if (!nbr && !if_is_loopback(ifp))
+ i++;
+ else
+ found = true;
+ } else
+ found = true;
+ }
+
+ if (found) {
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: found nexthop %pPAs for address %pPAs: interface %s ifindex=%d metric=%d pref=%d",
+ __func__, &nexthop_tab[i].nexthop_addr, &addr, ifp->name,
+ first_ifindex, nexthop_tab[i].route_metric,
+ nexthop_tab[i].protocol_distance);
+
+ /* update nexthop data */
+ nexthop->interface = ifp;
+ nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr;
+ nexthop->mrib_metric_preference = nexthop_tab[i].protocol_distance;
+ nexthop->mrib_route_metric = nexthop_tab[i].route_metric;
+ nexthop->last_lookup = addr;
+ nexthop->last_lookup_time = pim_time_monotonic_usec();
+ nexthop->nbr = nbr;
+ return true;
+ } else
+ return false;
+}
+
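
pim_nht_lookup short-circuits through nexthop->last_lookup when nothing in the RIB changed since the previous call (pim->last_route_change_time), which is what nexthop_lookups_avoided counts. A hedged usage sketch, with pim and src assumed to come from the caller:

    struct pim_nexthop nh = {};

    if (pim_nht_lookup(pim, &nh, src, true /* neighbor_needed */))
        zlog_debug("RPF for %pPA via %s (pref %u metric %u)", &src,
                   nh.interface->name, nh.mrib_metric_preference,
                   nh.mrib_route_metric);
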
+int pim_nht_lookup_ecmp_if_vif_index(struct pim_instance *pim, pim_addr src, struct prefix *grp)
{
struct pim_nexthop nhop;
int vif_index;
ifindex_t ifindex;
memset(&nhop, 0, sizeof(nhop));
- if (!pim_ecmp_nexthop_lookup(pim, &nhop, src, grp, 1)) {
+ if (!pim_nht_lookup_ecmp(pim, &nhop, src, grp, true)) {
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: could not find nexthop ifindex for address %pPA(%s)",
- __func__, &src, pim->vrf->name);
+ zlog_debug("%s: could not find nexthop ifindex for address %pPA(%s)",
+ __func__, &src, pim->vrf->name);
return -1;
}
ifindex = nhop.interface->ifindex;
if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: found nexthop ifindex=%d (interface %s(%s)) for address %pPA",
- __func__, ifindex,
- ifindex2ifname(ifindex, pim->vrf->vrf_id),
- pim->vrf->name, &src);
+ zlog_debug("%s: found nexthop ifindex=%d (interface %s(%s)) for address %pPA",
+ __func__, ifindex, ifindex2ifname(ifindex, pim->vrf->vrf_id),
+ pim->vrf->name, &src);
vif_index = pim_if_find_vifindex_by_ifindex(pim, ifindex);
if (vif_index < 0) {
if (PIM_DEBUG_PIM_NHT) {
- zlog_debug(
- "%s: low vif_index=%d(%s) < 1 nexthop for address %pPA",
- __func__, vif_index, pim->vrf->name, &src);
+ zlog_debug("%s: low vif_index=%d(%s) < 1 nexthop for address %pPA",
+ __func__, vif_index, pim->vrf->name, &src);
}
return -2;
}
return vif_index;
}
+
+/* This API is used to parse Registered address nexthop update coming from Zebra
+ */
+void pim_nexthop_update(struct vrf *vrf, struct prefix *match, struct zapi_route *nhr)
+{
+ struct nexthop *nhlist_head = NULL;
+ struct nexthop *nhlist_tail = NULL;
+ struct pim_nexthop_cache *pnc = NULL;
+ struct pim_nexthop_cache_rib *pnc_rib = NULL;
+ struct interface *ifp = NULL;
+ struct pim_instance *pim;
+ pim_addr addr;
+
+ pim = vrf->info;
+ addr = pim_addr_from_prefix(match);
+ pnc = pim_nexthop_cache_find(pim, addr);
+ if (!pnc) {
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: Skipping NHT update, addr %pPA is not in local cached DB.",
+ __func__, &addr);
+ return;
+ }
+
+ if (nhr->safi == SAFI_UNICAST)
+ pnc_rib = &pnc->urib;
+ else if (nhr->safi == SAFI_MULTICAST)
+ pnc_rib = &pnc->mrib;
+ else
+ return;
+
+ pnc_rib->last_update = pim_time_monotonic_usec();
+ SET_FLAG(pnc_rib->flags, PIM_NEXTHOP_ANSWER_RECEIVED);
+ UNSET_FLAG(pnc_rib->flags, PIM_NEXTHOP_VALID);
+ pnc_rib->nexthop_num = 0;
+	/* Free the existing nexthop list; it is rebuilt below with any valid nexthops from the update */
+ nexthops_free(pnc_rib->nexthop);
+ pnc_rib->nexthop = NULL;
+
+ for (int i = 0; i < nhr->nexthop_num; i++) {
+ struct nexthop *nexthop = nexthop_from_zapi_nexthop(&nhr->nexthops[i]);
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IFINDEX:
+ /*
+ * Connected route (i.e. no nexthop), use
+ * RPF address from nexthop cache (i.e.
+ * destination) as PIM nexthop.
+ */
+#if PIM_IPV == 4
+ nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
+ nexthop->gate.ipv4 = pnc->addr;
+#else
+ nexthop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
+ nexthop->gate.ipv6 = pnc->addr;
+#endif
+ break;
+
+#if PIM_IPV == 4
+ /* RFC5549 IPv4-over-IPv6 nexthop handling:
+ * if we get an IPv6 nexthop in IPv4 PIM, hunt down a
+ * PIM neighbor and use that instead.
+ */
+ case NEXTHOP_TYPE_IPV6_IFINDEX: {
+ struct pim_neighbor *nbr = NULL;
+ struct interface *ifp1 = if_lookup_by_index(nexthop->ifindex,
+ pim->vrf->vrf_id);
+
+ if (ifp1)
+ /* FIXME: should really use nbr's
+ * secondary address list here
+ */
+ nbr = pim_neighbor_find_if(ifp1);
+
+ /* Overwrite with Nbr address as NH addr */
+ if (nbr)
+ nexthop->gate.ipv4 = nbr->source_addr;
+ else
+ /* Mark nexthop address to 0 until PIM Nbr is resolved. */
+ nexthop->gate.ipv4 = PIMADDR_ANY;
+
+ break;
+ }
+#else
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+#endif
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ case NEXTHOP_TYPE_BLACKHOLE:
+ /* nothing to do for the other nexthop types */
+ break;
+ }
+
+ ifp = if_lookup_by_index(nexthop->ifindex, pim->vrf->vrf_id);
+ if (!ifp) {
+ if (PIM_DEBUG_PIM_NHT) {
+ char buf[NEXTHOP_STRLEN];
+ zlog_debug("%s: could not find interface for ifindex %d(%s) (addr %s)",
+ __func__, nexthop->ifindex, pim->vrf->name,
+ nexthop2str(nexthop, buf, sizeof(buf)));
+ }
+ nexthop_free(nexthop);
+ continue;
+ }
+
+ if (PIM_DEBUG_PIM_NHT) {
+#if PIM_IPV == 4
+ pim_addr nhaddr = nexthop->gate.ipv4;
+#else
+ pim_addr nhaddr = nexthop->gate.ipv6;
+#endif
+ zlog_debug("%s: NHT addr %pFX(%s) %d-nhop via %pPA(%s) type %d distance:%u metric:%u ",
+ __func__, match, pim->vrf->name, i + 1, &nhaddr, ifp->name,
+ nexthop->type, nhr->distance, nhr->metric);
+ }
+
+ if (!ifp->info) {
+			/*
+			 * Even though multicast is not enabled on this
+			 * interface, store the nexthop in the database;
+			 * otherwise we may miss this update. This causes
+			 * no issue, because interfaces that are not
+			 * multicast-enabled are omitted when choosing
+			 * the path.
+			 */
+ if (PIM_DEBUG_PIM_NHT) {
+ char buf[NEXTHOP_STRLEN];
+
+ zlog_debug("%s: multicast not enabled on input interface %s(%s) (ifindex=%d, addr %s)",
+ __func__, ifp->name, pim->vrf->name, nexthop->ifindex,
+ nexthop2str(nexthop, buf, sizeof(buf)));
+ }
+ }
+
+ if (nhlist_tail) {
+ nhlist_tail->next = nexthop;
+ nhlist_tail = nexthop;
+ } else {
+ nhlist_tail = nexthop;
+ nhlist_head = nexthop;
+ }
+
+ /* Keep track of all nexthops, even PIM-disabled ones. */
+ pnc_rib->nexthop_num++;
+ } /* End for nexthops */
+
+ /* Assign the list if there are nexthops */
+ if (pnc_rib->nexthop_num) {
+ SET_FLAG(pnc_rib->flags, PIM_NEXTHOP_VALID);
+ pnc_rib->nexthop = nhlist_head;
+ pnc_rib->distance = nhr->distance;
+ pnc_rib->metric = nhr->metric;
+ pnc_rib->prefix_len = nhr->prefix.prefixlen;
+ }
+
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug("%s: NHT Update for %pFX(%s) num_nh %d num_pim_nh %d vrf:%u up %ld rp %d",
+ __func__, match, pim->vrf->name, nhr->nexthop_num, pnc_rib->nexthop_num,
+ vrf->vrf_id, pnc->upstream_hash->count, listcount(pnc->rp_list));
+
+ pim_rpf_set_refresh_time(pim);
+
+ if (listcount(pnc->rp_list))
+ pim_update_rp_nh(pim, pnc);
+ if (pnc->upstream_hash->count)
+ pim_update_upstream_nh(pim, pnc);
+
+ if (pnc->candrp_count)
+ pim_crp_nht_update(pim, pnc);
+}
+
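
pim_nexthop_update above files each zebra answer into pnc->urib or pnc->mrib by SAFI, with per-rib ANSWER_RECEIVED/VALID flags. A small sketch of how a consumer might test one rib (the helper name is hypothetical; pim_nht_pnc_is_valid declared in pim_nht.h is the real, mode-aware check):

    /* Hypothetical helper: is there a usable answer in the rib this SAFI
     * maps to? Mirrors the urib/mrib split done in pim_nexthop_update().
     */
    static bool pnc_rib_usable(const struct pim_nexthop_cache *pnc, safi_t safi)
    {
        const struct pim_nexthop_cache_rib *rib;

        if (safi == SAFI_UNICAST)
            rib = &pnc->urib;
        else if (safi == SAFI_MULTICAST)
            rib = &pnc->mrib;
        else
            return false;

        return CHECK_FLAG(rib->flags, PIM_NEXTHOP_ANSWER_RECEIVED) &&
               CHECK_FLAG(rib->flags, PIM_NEXTHOP_VALID);
    }
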
+static int pim_nht_hash_mode_update_helper(struct hash_bucket *bucket, void *arg)
+{
+ struct pim_nexthop_cache *pnc = bucket->data;
+ struct pnc_hash_walk_data *pwd = arg;
+ struct pim_instance *pim = pwd->pim;
+
+ if (listcount(pnc->rp_list))
+ pim_update_rp_nh(pim, pnc);
+
+ if (pnc->upstream_hash->count)
+ pim_update_upstream_nh(pim, pnc);
+
+ if (pnc->candrp_count)
+ pim_crp_nht_update(pim, pnc);
+
+ return HASHWALK_CONTINUE;
+}
+
+void pim_nht_mode_changed(struct pim_instance *pim)
+{
+ struct pnc_hash_walk_data pwd;
+
+ /* Update the refresh time to force new lookups if needed */
+ pim_rpf_set_refresh_time(pim);
+
+ /* Force update the registered RP and upstreams for all cache entries */
+ pwd.pim = pim;
+ hash_walk(pim->nht_hash, pim_nht_hash_mode_update_helper, &pwd);
+}
+
+/* Clean up each node's data in pim->nht_hash */
+static void pim_nht_hash_clean(void *data)
+{
+ struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;
+
+ list_delete(&pnc->rp_list);
+ hash_clean_and_free(&pnc->upstream_hash, NULL);
+
+ if (pnc->mrib.nexthop)
+ nexthops_free(pnc->mrib.nexthop);
+
+ if (pnc->urib.nexthop)
+ nexthops_free(pnc->urib.nexthop);
+
+ XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
+}
+
+static unsigned int pim_nht_hash_key(const void *arg)
+{
+ const struct pim_nexthop_cache *r = arg;
+
+#if PIM_IPV == 4
+ return jhash_1word(r->addr.s_addr, 0);
+#else
+ return jhash2(r->addr.s6_addr32, array_size(r->addr.s6_addr32), 0);
+#endif
+}
+
+static bool pim_nht_equal(const void *arg1, const void *arg2)
+{
+ const struct pim_nexthop_cache *r1 = arg1;
+ const struct pim_nexthop_cache *r2 = arg2;
+
+ return (!pim_addr_cmp(r1->addr, r2->addr));
+}
+
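
The key/equal pair above hashes the cache purely on the address, so a lookup only needs a stack key with addr filled in. A sketch of how a find-by-address is presumably built on the lib hash_lookup():

    /* Sketch: find a cache entry by address with a stack key; only
     * lookup.addr is consulted by pim_nht_hash_key()/pim_nht_equal().
     */
    struct pim_nexthop_cache lookup = {};
    struct pim_nexthop_cache *pnc;

    lookup.addr = addr;
    pnc = hash_lookup(pim->nht_hash, &lookup);
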
+void pim_nht_init(struct pim_instance *pim)
+{
+ char hash_name[64];
+
+ snprintf(hash_name, sizeof(hash_name), "PIM %s NHT Hash", pim->vrf->name);
+ pim->nht_hash = hash_create_size(256, pim_nht_hash_key, pim_nht_equal, hash_name);
+
+ pim->rpf_mode = MCAST_NO_CONFIG;
+
+ if (PIM_DEBUG_ZEBRA)
+ zlog_debug("%s: NHT hash init: %s ", __func__, hash_name);
+}
+
+void pim_nht_terminate(struct pim_instance *pim)
+{
+ /* Traverse and cleanup nht_hash */
+ hash_clean_and_free(&pim->nht_hash, (void *)pim_nht_hash_clean);
+}
diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h
index d064f714a5..144139f406 100644
--- a/pimd/pim_nht.h
+++ b/pimd/pim_nht.h
@@ -17,11 +17,12 @@
#include "pim_rpf.h"
/* PIM nexthop cache value structure. */
-struct pim_nexthop_cache {
- struct pim_rpf rpf;
+struct pim_nexthop_cache_rib {
/* IGP route's metric. */
uint32_t metric;
uint32_t distance;
+ uint16_t prefix_len;
+
/* Nexthop number and nexthop linked list. */
uint16_t nexthop_num;
struct nexthop *nexthop;
@@ -29,6 +30,13 @@ struct pim_nexthop_cache {
uint16_t flags;
#define PIM_NEXTHOP_VALID (1 << 0)
#define PIM_NEXTHOP_ANSWER_RECEIVED (1 << 1)
+};
+
+struct pim_nexthop_cache {
+ pim_addr addr;
+
+ struct pim_nexthop_cache_rib mrib;
+ struct pim_nexthop_cache_rib urib;
struct list *rp_list;
struct hash *upstream_hash;
@@ -46,36 +54,74 @@ struct pnc_hash_walk_data {
struct interface *ifp;
};
-void pim_nexthop_update(struct vrf *vrf, struct prefix *match,
- struct zapi_route *nhr);
-int pim_find_or_track_nexthop(struct pim_instance *pim, pim_addr addr,
- struct pim_upstream *up, struct rp_info *rp,
- struct pim_nexthop_cache *out_pnc);
-void pim_delete_tracked_nexthop(struct pim_instance *pim, pim_addr addr,
- struct pim_upstream *up, struct rp_info *rp);
-struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
- struct pim_rpf *rpf);
-uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp);
-int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
- struct pim_nexthop *nexthop, pim_addr src,
- struct prefix *grp, int neighbor_needed);
-void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
- struct pim_nexthop_cache *pnc, int command);
-int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim, pim_addr src,
- struct prefix *grp);
-void pim_rp_nexthop_del(struct rp_info *rp_info);
-
-/* for RPF check on BSM message receipt */
+/* Verify that we have nexthop information in the cache entry */
+bool pim_nht_pnc_is_valid(struct pim_instance *pim, struct pim_nexthop_cache *pnc);
+
+/* Get (or add) the NH cache entry for the given address */
+struct pim_nexthop_cache *pim_nht_get(struct pim_instance *pim, pim_addr addr);
+
+/* Set the gateway address for all nexthops in the given cache entry to the given address
+ * unless the gateway is already set, and only if the nexthop is through the given interface.
+ */
+void pim_nht_set_gateway(struct pim_instance *pim, struct pim_nexthop_cache *pnc, pim_addr addr,
+ struct interface *ifp);
+
+/* Track a new addr and register an upstream or RP for updates */
+bool pim_nht_find_or_track(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up,
+ struct rp_info *rp, struct pim_nexthop_cache *out_pnc);
+
+/* Track a new addr and increment the BSR count */
void pim_nht_bsr_add(struct pim_instance *pim, pim_addr bsr_addr);
-void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr);
-/* RPF(bsr_addr) == src_ip%src_ifp? */
-bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr,
- struct interface *src_ifp, pim_addr src_ip);
-void pim_upstream_nh_if_update(struct pim_instance *pim, struct interface *ifp);
-/* wrappers for usage with Candidate RPs in BSMs */
+/* Track a new addr and increment the Cand RP count */
bool pim_nht_candrp_add(struct pim_instance *pim, pim_addr addr);
+
+/* Delete a tracked addr for a registered upstream or RP; if no one else is interested, stop tracking */
+void pim_nht_delete_tracked(struct pim_instance *pim, pim_addr addr, struct pim_upstream *up,
+ struct rp_info *rp);
+
+/* Delete a tracked addr and decrement the BSR count; if no one else is interested, stop tracking */
+void pim_nht_bsr_del(struct pim_instance *pim, pim_addr bsr_addr);
+
+/* Delete a tracked addr and decrement the Cand RP count; if no one else is interested, stop tracking */
void pim_nht_candrp_del(struct pim_instance *pim, pim_addr addr);
-void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc);
+
+/* RPF(bsr_addr) == src_ip%src_ifp? */
+bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, struct interface *src_ifp,
+ pim_addr src_ip);
+
+/* Reset the rp.source_nexthop of the given RP */
+void pim_nht_rp_del(struct rp_info *rp_info);
+
+/* Walk the NH cache and update every nexthop that uses the given interface */
+void pim_nht_upstream_if_update(struct pim_instance *pim, struct interface *ifp);
+
+/* Lookup nexthop information for src, returned in nexthop when function returns true.
+ * Tries to find in cache first and does a synchronous lookup if not found in the cache.
+ * If neighbor_needed is true, then nexthop is only considered valid if it's to a pim
+ * neighbor.
+ * Providing the group only affects the ECMP decision, if enabled.
+ */
+bool pim_nht_lookup_ecmp(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr src,
+ struct prefix *grp, bool neighbor_needed);
+
+/* Very similar to pim_nht_lookup_ecmp, but does not check the nht cache and only does
+ * a synchronous lookup. No ECMP decision is made.
+ */
+bool pim_nht_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop, pim_addr addr,
+ int neighbor_needed);
+
+/* Performs a pim_nht_lookup_ecmp and returns the mroute VIF index of the nexthop interface */
+int pim_nht_lookup_ecmp_if_vif_index(struct pim_instance *pim, pim_addr src, struct prefix *grp);
+
+/* Tracked nexthop update from zebra */
+void pim_nexthop_update(struct vrf *vrf, struct prefix *match, struct zapi_route *nhr);
+
+/* RPF lookup mode changed via configuration */
+void pim_nht_mode_changed(struct pim_instance *pim);
+
+/* NHT init and finish functions */
+void pim_nht_init(struct pim_instance *pim);
+void pim_nht_terminate(struct pim_instance *pim);
#endif
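
The renamed API keeps the old track → resolve → untrack lifecycle; every pim_rp.c hunk below follows the same shape. A condensed sketch (pim and rp_info assumed to come from the caller, error handling elided):

    /* Register the RP address so zebra pushes NHT updates, then resolve
     * it once synchronously; stop tracking when the RP goes away.
     */
    pim_nht_find_or_track(pim, rp_info->rp.rpf_addr, NULL, rp_info, NULL);

    if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop,
                             rp_info->rp.rpf_addr, &rp_info->group, true))
        zlog_debug("no path to RP %pPA yet", &rp_info->rp.rpf_addr);

    /* ... later, on deletion ... */
    pim_nht_delete_tracked(pim, rp_info->rp.rpf_addr, NULL, rp_info);
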
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
index b149b5a2a9..f776a59b7f 100644
--- a/pimd/pim_register.c
+++ b/pimd/pim_register.c
@@ -709,7 +709,10 @@ int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
// inherited_olist(S,G,rpt)
// This is taken care of by the kernel for us
}
+
+#if PIM_IPV == 4
pim_upstream_msdp_reg_timer_start(upstream);
+#endif /* PIM_IPV == 4 */
} else {
if (PIM_DEBUG_PIM_REG) {
if (!i_am_rp)
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
index e6de991a14..4fd19b5dbe 100644
--- a/pimd/pim_rp.c
+++ b/pimd/pim_rp.c
@@ -40,20 +40,6 @@
#include "pim_ssm.h"
#include "termtable.h"
-/* Cleanup pim->rpf_hash each node data */
-void pim_rp_list_hash_clean(void *data)
-{
- struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;
-
- list_delete(&pnc->rp_list);
-
- hash_clean_and_free(&pnc->upstream_hash, NULL);
- if (pnc->nexthop)
- nexthops_free(pnc->nexthop);
-
- XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
-}
-
static void pim_rp_info_free(struct rp_info *rp_info)
{
XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
@@ -97,14 +83,7 @@ void pim_rp_init(struct pim_instance *pim)
rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
- if (!pim_get_all_mcast_group(&rp_info->group)) {
- flog_err(EC_LIB_DEVELOPMENT,
- "Unable to convert all-multicast prefix");
- list_delete(&pim->rp_list);
- route_table_finish(pim->rp_table);
- XFREE(MTYPE_PIM_RP, rp_info);
- return;
- }
+ pim_get_all_mcast_group(&rp_info->group);
rp_info->rp.rpf_addr = PIMADDR_ANY;
listnode_add(pim->rp_list, rp_info);
@@ -343,7 +322,9 @@ struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
*/
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
+#if PIM_IPV == 4
pim_msdp_i_am_rp_changed(pim);
+#endif /* PIM_IPV == 4 */
pim_upstream_reeval_use_rpt(pim);
}
@@ -437,7 +418,7 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
zlog_debug(
"%s: Deregister upstream %s addr %pPA with Zebra NHT",
__func__, up->sg_str, &old_upstream_addr);
- pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
+ pim_nht_delete_tracked(pim, old_upstream_addr, up, NULL);
}
/* Update the upstream address */
@@ -522,11 +503,7 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
} else {
-
- if (!pim_get_all_mcast_group(&group_all)) {
- XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_GROUP_BAD_ADDRESS;
- }
+ pim_get_all_mcast_group(&group_all);
rp_all = pim_rp_find_match_group(pim, &group_all);
/*
@@ -592,12 +569,10 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
zlog_debug("new RP %pPA for %pFX is ourselves",
&rp_all->rp.rpf_addr, &rp_all->group);
pim_rp_refresh_group_to_rp_mapping(pim);
- pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
- NULL);
+ pim_nht_find_or_track(pim, nht_p, NULL, rp_all, NULL);
- if (!pim_ecmp_nexthop_lookup(pim,
- &rp_all->rp.source_nexthop,
- nht_p, &rp_all->group, 1))
+ if (!pim_nht_lookup_ecmp(pim, &rp_all->rp.source_nexthop, nht_p,
+ &rp_all->group, true))
return PIM_RP_NO_PATH;
return PIM_SUCCESS;
}
@@ -692,9 +667,8 @@ int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
__func__, &nht_p, &rp_info->group);
- pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
- &rp_info->group, 1))
+ pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group, true))
return PIM_RP_NO_PATH;
return PIM_SUCCESS;
@@ -706,9 +680,10 @@ void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
struct prefix group;
int result;
- if (group_range == NULL)
- result = pim_get_all_mcast_group(&group);
- else
+ if (group_range == NULL) {
+		result = 1;
+ pim_get_all_mcast_group(&group);
+ } else
result = str2prefix(group_range, &group);
if (!result) {
@@ -785,11 +760,9 @@ int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
&nht_p);
- pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
-
- if (!pim_get_all_mcast_group(&g_all))
- return PIM_RP_BAD_ADDRESS;
+ pim_nht_delete_tracked(pim, nht_p, NULL, rp_info);
+ pim_get_all_mcast_group(&g_all);
rp_all = pim_rp_find_match_group(pim, &g_all);
if (rp_all == rp_info) {
@@ -919,10 +892,10 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
__func__, &nht_p);
- pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
+ pim_nht_delete_tracked(pim, nht_p, NULL, rp_info);
}
- pim_rp_nexthop_del(rp_info);
+ pim_nht_rp_del(rp_info);
listnode_delete(pim->rp_list, rp_info);
/* Update the new RP address*/
@@ -956,9 +929,8 @@ int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
__func__, &nht_p, &rp_info->group);
- pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
- &rp_info->group, 1)) {
+ pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group, true)) {
route_unlock_node(rn);
return PIM_RP_NO_PATH;
}
@@ -984,13 +956,14 @@ void pim_rp_setup(struct pim_instance *pim)
nht_p = rp_info->rp.rpf_addr;
- pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
- nht_p, &rp_info->group, 1)) {
+ pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group,
+ true)) {
if (PIM_DEBUG_PIM_NHT_RP)
- zlog_debug(
- "Unable to lookup nexthop for rp specified");
- pim_rp_nexthop_del(rp_info);
+ zlog_debug("%s: unable to lookup nexthop for rp %pPA", __func__,
+ &rp_info->rp.rpf_addr);
+
+ pim_nht_rp_del(rp_info);
}
}
}
@@ -1030,7 +1003,9 @@ void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
}
if (i_am_rp_changed) {
+#if PIM_IPV == 4
pim_msdp_i_am_rp_changed(pim);
+#endif /* PIM_IPV == 4 */
pim_upstream_reeval_use_rpt(pim);
}
}
@@ -1072,7 +1047,9 @@ void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
}
if (i_am_rp_changed) {
+#if PIM_IPV == 4
pim_msdp_i_am_rp_changed(pim);
+#endif /* PIM_IPV == 4 */
pim_upstream_reeval_use_rpt(pim);
}
}
@@ -1129,10 +1106,14 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
zlog_debug(
"%s: NHT Register RP addr %pPA grp %pFX with Zebra",
__func__, &nht_p, &rp_info->group);
- pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
+ pim_nht_find_or_track(pim, nht_p, NULL, rp_info, NULL);
pim_rpf_set_refresh_time(pim);
- (void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
- nht_p, &rp_info->group, 1);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, nht_p, &rp_info->group,
+ true))
+ if (PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("%s: unable to lookup nexthop for rp %pPA", __func__,
+ &rp_info->rp.rpf_addr);
+
return (&rp_info->rp);
}
@@ -1337,7 +1318,6 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
{
struct listnode *node = NULL;
struct rp_info *rp_info = NULL;
- struct nexthop *nh_node = NULL;
pim_addr nht_p;
struct pim_nexthop_cache pnc;
@@ -1347,35 +1327,11 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
nht_p = rp_info->rp.rpf_addr;
memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
- if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc))
- continue;
- for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
-#if PIM_IPV == 4
- if (!pim_addr_is_any(nh_node->gate.ipv4))
- continue;
-#else
- if (!pim_addr_is_any(nh_node->gate.ipv6))
- continue;
-#endif
-
- struct interface *ifp1 = if_lookup_by_index(
- nh_node->ifindex, pim->vrf->vrf_id);
-
- if (nbr->interface != ifp1)
- continue;
+ if (!pim_nht_find_or_track(pim, nht_p, NULL, rp_info, &pnc))
+ continue;
-#if PIM_IPV == 4
- nh_node->gate.ipv4 = nbr->source_addr;
-#else
- nh_node->gate.ipv6 = nbr->source_addr;
-#endif
- if (PIM_DEBUG_PIM_NHT_RP)
- zlog_debug(
- "%s: addr %pPA new nexthop addr %pPAs interface %s",
- __func__, &nht_p, &nbr->source_addr,
- ifp1->name);
- }
+ pim_nht_set_gateway(pim, &pnc, nbr->source_addr, nbr->interface);
}
}
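
pim_nht_set_gateway factors out the loop that used to be open-coded here. A sketch of the equivalent logic, under the assumption that the new implementation walks both ribs of the cache entry:

    /* Adopt the neighbor's address as gateway on nexthops that egress its
     * interface and still have an unset (PIMADDR_ANY) gateway.
     */
    static void set_gateway_on_list(struct nexthop *nh_head, pim_addr addr,
                                    struct interface *ifp)
    {
        for (struct nexthop *nh = nh_head; nh; nh = nh->next) {
            if (nh->ifindex != ifp->ifindex)
                continue;
    #if PIM_IPV == 4
            if (pim_addr_is_any(nh->gate.ipv4))
                nh->gate.ipv4 = addr;
    #else
            if (pim_addr_is_any(nh->gate.ipv6))
                nh->gate.ipv6 = addr;
    #endif
        }
    }
    /* called for both pnc->mrib.nexthop and pnc->urib.nexthop */
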
@@ -1540,9 +1496,9 @@ void pim_embedded_rp_new(struct pim_instance *pim, const pim_addr *group, const
zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra", __func__,
&rp_info->rp.rpf_addr, &rp_info->group);
- pim_find_or_track_nexthop(pim, rp_info->rp.rpf_addr, NULL, rp_info, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, rp_info->rp.rpf_addr,
- &rp_info->group, 1)) {
+ pim_nht_find_or_track(pim, rp_info->rp.rpf_addr, NULL, rp_info, NULL);
+ if (!pim_nht_lookup_ecmp(pim, &rp_info->rp.source_nexthop, rp_info->rp.rpf_addr,
+				 &rp_info->group, true)) {
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug("%s: Embedded RP %pPA learned but no next hop", __func__,
&rp_info->rp.rpf_addr);
@@ -1582,7 +1538,7 @@ void pim_embedded_rp_free(struct pim_instance *pim, struct rp_info *rp_info)
if (PIM_DEBUG_TRACE)
zlog_debug("delete embedded RP %pPA", &rp_info->rp.rpf_addr);
- pim_delete_tracked_nexthop(pim, rp_info->rp.rpf_addr, NULL, rp_info);
+ pim_nht_delete_tracked(pim, rp_info->rp.rpf_addr, NULL, rp_info);
listnode_delete(pim->rp_list, rp_info);
XFREE(MTYPE_PIM_EMBEDDED_RP_ENTRY, rp_info);
}
diff --git a/pimd/pim_rp.h b/pimd/pim_rp.h
index 9da059f8be..a7818a9d39 100644
--- a/pimd/pim_rp.h
+++ b/pimd/pim_rp.h
@@ -42,8 +42,6 @@ struct rp_info {
void pim_rp_init(struct pim_instance *pim);
void pim_rp_free(struct pim_instance *pim);
-void pim_rp_list_hash_clean(void *data);
-
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
const char *plist, enum rp_source rp_src_flag);
void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c
index d18ec4943a..75e9213825 100644
--- a/pimd/pim_rpf.c
+++ b/pimd/pim_rpf.c
@@ -38,120 +38,6 @@ void pim_rpf_set_refresh_time(struct pim_instance *pim)
pim->last_route_change_time);
}
-bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
- pim_addr addr, int neighbor_needed)
-{
- struct pim_zlookup_nexthop nexthop_tab[router->multipath];
- struct pim_neighbor *nbr = NULL;
- int num_ifindex;
- struct interface *ifp = NULL;
- ifindex_t first_ifindex = 0;
- int found = 0;
- int i = 0;
- struct pim_interface *pim_ifp;
-
-#if PIM_IPV == 4
- /*
- * We should not attempt to lookup a
- * 255.255.255.255 address, since
- * it will never work
- */
- if (pim_addr_is_any(addr))
- return false;
-#endif
-
- if ((!pim_addr_cmp(nexthop->last_lookup, addr)) &&
- (nexthop->last_lookup_time > pim->last_route_change_time)) {
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: Using last lookup for %pPAs at %lld, %" PRId64
- " addr %pPAs",
- __func__, &addr, nexthop->last_lookup_time,
- pim->last_route_change_time,
- &nexthop->mrib_nexthop_addr);
- pim->nexthop_lookups_avoided++;
- return true;
- } else {
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s: Looking up: %pPAs, last lookup time: %lld, %" PRId64,
- __func__, &addr, nexthop->last_lookup_time,
- pim->last_route_change_time);
- }
-
- memset(nexthop_tab, 0,
- sizeof(struct pim_zlookup_nexthop) * router->multipath);
- num_ifindex =
- zclient_lookup_nexthop(pim, nexthop_tab, router->multipath,
- addr, PIM_NEXTHOP_LOOKUP_MAX);
- if (num_ifindex < 1) {
- if (PIM_DEBUG_PIM_NHT)
- zlog_debug(
- "%s %s: could not find nexthop ifindex for address %pPAs",
- __FILE__, __func__, &addr);
- return false;
- }
-
- while (!found && (i < num_ifindex)) {
- first_ifindex = nexthop_tab[i].ifindex;
-
- ifp = if_lookup_by_index(first_ifindex, pim->vrf->vrf_id);
- if (!ifp) {
- if (PIM_DEBUG_ZEBRA)
- zlog_debug(
- "%s %s: could not find interface for ifindex %d (address %pPAs)",
- __FILE__, __func__, first_ifindex,
- &addr);
- i++;
- continue;
- }
-
- pim_ifp = ifp->info;
- if (!pim_ifp || !pim_ifp->pim_enable) {
- if (PIM_DEBUG_ZEBRA)
- zlog_debug(
- "%s: pim not enabled on input interface %s (ifindex=%d, RPF for source %pPAs)",
- __func__, ifp->name, first_ifindex,
- &addr);
- i++;
- } else if (neighbor_needed &&
- !pim_if_connected_to_source(ifp, addr)) {
- nbr = pim_neighbor_find(
- ifp, nexthop_tab[i].nexthop_addr, true);
- if (PIM_DEBUG_PIM_TRACE_DETAIL)
- zlog_debug("ifp name: %s, pim nbr: %p",
- ifp->name, nbr);
- if (!nbr && !if_is_loopback(ifp))
- i++;
- else
- found = 1;
- } else
- found = 1;
- }
-
- if (found) {
- if (PIM_DEBUG_ZEBRA)
- zlog_debug(
- "%s %s: found nexthop %pPAs for address %pPAs: interface %s ifindex=%d metric=%d pref=%d",
- __FILE__, __func__,
- &nexthop_tab[i].nexthop_addr, &addr, ifp->name,
- first_ifindex, nexthop_tab[i].route_metric,
- nexthop_tab[i].protocol_distance);
-
- /* update nexthop data */
- nexthop->interface = ifp;
- nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr;
- nexthop->mrib_metric_preference =
- nexthop_tab[i].protocol_distance;
- nexthop->mrib_route_metric = nexthop_tab[i].route_metric;
- nexthop->last_lookup = addr;
- nexthop->last_lookup_time = pim_time_monotonic_usec();
- nexthop->nbr = nbr;
- return true;
- } else
- return false;
-}
-
static int nexthop_mismatch(const struct pim_nexthop *nh1,
const struct pim_nexthop *nh2)
{
@@ -221,9 +107,9 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
if ((pim_addr_is_any(up->sg.src) && I_am_RP(pim, up->sg.grp)) ||
PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
neigh_needed = false;
- pim_find_or_track_nexthop(pim, up->upstream_addr, up, NULL, NULL);
- if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, src, &grp,
- neigh_needed)) {
+
+ pim_nht_find_or_track(pim, up->upstream_addr, up, NULL, NULL);
+ if (!pim_nht_lookup_ecmp(pim, &rpf->source_nexthop, src, &grp, neigh_needed)) {
/* Route is Deleted in Zebra, reset the stored NH data */
pim_upstream_rpf_clear(pim, up);
pim_rpf_cost_change(pim, up, saved_mrib_route_metric);
@@ -371,25 +257,3 @@ int pim_rpf_is_same(struct pim_rpf *rpf1, struct pim_rpf *rpf2)
return 0;
}
-
-unsigned int pim_rpf_hash_key(const void *arg)
-{
- const struct pim_nexthop_cache *r = arg;
-
-#if PIM_IPV == 4
- return jhash_1word(r->rpf.rpf_addr.s_addr, 0);
-#else
- return jhash2(r->rpf.rpf_addr.s6_addr32,
- array_size(r->rpf.rpf_addr.s6_addr32), 0);
-#endif
-}
-
-bool pim_rpf_equal(const void *arg1, const void *arg2)
-{
- const struct pim_nexthop_cache *r1 =
- (const struct pim_nexthop_cache *)arg1;
- const struct pim_nexthop_cache *r2 =
- (const struct pim_nexthop_cache *)arg2;
-
- return (!pim_addr_cmp(r1->rpf.rpf_addr, r2->rpf.rpf_addr));
-}
diff --git a/pimd/pim_rpf.h b/pimd/pim_rpf.h
index 7dae53f8fc..84d6b7f6c2 100644
--- a/pimd/pim_rpf.h
+++ b/pimd/pim_rpf.h
@@ -11,6 +11,7 @@
#include "pim_str.h"
struct pim_instance;
+struct pim_upstream;
/*
RFC 4601:
@@ -41,13 +42,17 @@ struct pim_rpf {
enum pim_rpf_result { PIM_RPF_OK = 0, PIM_RPF_CHANGED, PIM_RPF_FAILURE };
-struct pim_upstream;
-
-unsigned int pim_rpf_hash_key(const void *arg);
-bool pim_rpf_equal(const void *arg1, const void *arg2);
+/* RPF lookup behaviour */
+enum pim_rpf_lookup_mode {
+	MCAST_NO_CONFIG = 0,  /* behaves as MIX_MRIB_FIRST, but is not shown in config write */
+ MCAST_MRIB_ONLY, /* MRIB only */
+ MCAST_URIB_ONLY, /* URIB only */
+ MCAST_MIX_MRIB_FIRST, /* MRIB, if nothing at all then URIB */
+ MCAST_MIX_DISTANCE, /* MRIB & URIB, lower distance wins */
+ MCAST_MIX_PFXLEN, /* MRIB & URIB, longer prefix wins */
+	/* on equal values, MRIB wins for the last two modes */
+};
-bool pim_nexthop_lookup(struct pim_instance *pim, struct pim_nexthop *nexthop,
- pim_addr addr, int neighbor_needed);
enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
struct pim_upstream *up,
struct pim_rpf *old, const char *caller);
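
For reference, the mixed modes are resolved in the pim_zlookup.c changes further down: the single-table modes query one SAFI, MRIB-first falls back to the URIB only when the MRIB has no result, and the two comparison modes look at the first entry of each table. A sketch of the tie-breaking, with MRIB winning ties per the enum comment (function name is illustrative):

    static bool urib_wins(enum pim_rpf_lookup_mode mode,
                          const struct pim_zlookup_nexthop *mrib,
                          const struct pim_zlookup_nexthop *urib)
    {
        switch (mode) {
        case MCAST_MIX_DISTANCE:
            return mrib->protocol_distance > urib->protocol_distance;
        case MCAST_MIX_PFXLEN:
            return mrib->prefix_len < urib->prefix_len;
        default:
            return false; /* MRIB-first and single-table modes */
        }
    }
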
diff --git a/pimd/pim_tib.c b/pimd/pim_tib.c
index e21793b8ca..d067abf45a 100644
--- a/pimd/pim_tib.c
+++ b/pimd/pim_tib.c
@@ -34,16 +34,17 @@ tib_sg_oil_setup(struct pim_instance *pim, pim_sgaddr sg, struct interface *oif)
up = pim_upstream_find(pim, &sg);
if (up) {
- memcpy(&nexthop, &up->rpf.source_nexthop,
- sizeof(struct pim_nexthop));
- (void)pim_ecmp_nexthop_lookup(pim, &nexthop, vif_source, &grp,
- 0);
+ memcpy(&nexthop, &up->rpf.source_nexthop, sizeof(struct pim_nexthop));
+ if (!pim_nht_lookup_ecmp(pim, &nexthop, vif_source, &grp, false))
+ if (PIM_DEBUG_PIM_NHT_RP)
+ zlog_debug("%s: Nexthop Lookup failed vif_src:%pPA, sg.src:%pPA, sg.grp:%pPA",
+ __func__, &vif_source, &sg.src, &sg.grp);
+
if (nexthop.interface)
input_iface_vif_index = pim_if_find_vifindex_by_ifindex(
pim, nexthop.interface->ifindex);
} else
- input_iface_vif_index =
- pim_ecmp_fib_lookup_if_vif_index(pim, vif_source, &grp);
+ input_iface_vif_index = pim_nht_lookup_ecmp_if_vif_index(pim, vif_source, &grp);
if (PIM_DEBUG_ZEBRA)
zlog_debug("%s: NHT %pSG vif_source %pPAs vif_index:%d",
@@ -115,13 +116,8 @@ bool tib_sg_gm_join(struct pim_instance *pim, pim_sgaddr sg,
return false;
}
- if (!*oilp) {
+ if (!*oilp)
*oilp = tib_sg_oil_setup(pim, sg, oif);
-#if PIM_IPV == 6
- if (pim_embedded_rp_is_embedded(&sg.grp))
- (*oilp)->oil_ref_count--;
-#endif /* PIM_IPV == 6 */
- }
if (!*oilp)
return false;
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
index 7417f31137..c52119e43a 100644
--- a/pimd/pim_upstream.c
+++ b/pimd/pim_upstream.c
@@ -178,7 +178,9 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
{
struct listnode *node, *nnode;
struct pim_ifchannel *ch;
+#if PIM_IPV == 4
bool notify_msdp = false;
+#endif /* PIM_IPV == 4 */
if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
@@ -206,12 +208,14 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
if (up->join_state == PIM_UPSTREAM_JOINED) {
pim_jp_agg_single_upstream_send(&up->rpf, up, 0);
+#if PIM_IPV == 4
if (pim_addr_is_any(up->sg.src)) {
/* if a (*, G) entry in the joined state is being
* deleted we
* need to notify MSDP */
notify_msdp = true;
}
+#endif /* PIM_IPV == 4 */
}
join_timer_stop(up);
@@ -221,7 +225,9 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
if (!pim_addr_is_any(up->sg.src)) {
if (pim->upstream_sg_wheel)
wheel_remove_item(pim->upstream_sg_wheel, up);
+#if PIM_IPV == 4
notify_msdp = true;
+#endif /* PIM_IPV == 4 */
}
pim_mroute_del(up->channel_oil, __func__);
@@ -241,9 +247,11 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
rb_pim_upstream_del(&pim->upstream_head, up);
+#if PIM_IPV == 4
if (notify_msdp) {
pim_msdp_up_del(pim, &up->sg);
}
+#endif /* PIM_IPV == 4 */
/* When RP gets deleted, pim_rp_del() deregister addr with Zebra NHT
* and assign up->upstream_addr as INADDR_ANY.
@@ -257,7 +265,7 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
zlog_debug(
"%s: Deregister upstream %s addr %pPA with Zebra NHT",
__func__, up->sg_str, &up->upstream_addr);
- pim_delete_tracked_nexthop(pim, up->upstream_addr, up, NULL);
+ pim_nht_delete_tracked(pim, up->upstream_addr, up, NULL);
}
XFREE(MTYPE_PIM_UPSTREAM, up);
@@ -723,7 +731,9 @@ void pim_upstream_switch(struct pim_instance *pim, struct pim_upstream *up,
if (old_state != PIM_UPSTREAM_JOINED) {
int old_fhr = PIM_UPSTREAM_FLAG_TEST_FHR(up->flags);
+#if PIM_IPV == 4
pim_msdp_up_join_state_changed(pim, up);
+#endif /* PIM_IPV == 4 */
if (pim_upstream_could_register(up)) {
PIM_UPSTREAM_FLAG_SET_FHR(up->flags);
if (!old_fhr
@@ -753,8 +763,10 @@ void pim_upstream_switch(struct pim_instance *pim, struct pim_upstream *up,
if (!pim_addr_is_any(up->sg.src))
up->sptbit = PIM_UPSTREAM_SPTBIT_FALSE;
+#if PIM_IPV == 4
if (old_state == PIM_UPSTREAM_JOINED)
pim_msdp_up_join_state_changed(pim, up);
+#endif /* PIM_IPV == 4 */
if (old_state != new_state) {
old_use_rpt =
@@ -1424,8 +1436,10 @@ struct pim_upstream *pim_upstream_keep_alive_timer_proc(
*/
}
+#if PIM_IPV == 4
/* source is no longer active - pull the SA from MSDP's cache */
pim_msdp_sa_local_del(pim, &up->sg);
+#endif /* PIM_IPV == 4 */
/* JoinDesired can change when KAT is started or stopped */
pim_upstream_update_join_desired(pim, up);
@@ -1493,32 +1507,15 @@ void pim_upstream_keep_alive_timer_start(struct pim_upstream *up, uint32_t time)
event_add_timer(router->master, pim_upstream_keep_alive_timer, up, time,
&up->t_ka_timer);
+#if PIM_IPV == 4
/* any time keepalive is started against a SG we will have to
* re-evaluate our active source database */
pim_msdp_sa_local_update(up);
+#endif /* PIM_IPV == 4 */
/* JoinDesired can change when KAT is started or stopped */
pim_upstream_update_join_desired(up->pim, up);
}
-/* MSDP on RP needs to know if a source is registerable to this RP */
-static void pim_upstream_msdp_reg_timer(struct event *t)
-{
- struct pim_upstream *up = EVENT_ARG(t);
- struct pim_instance *pim = up->channel_oil->pim;
-
- /* source is no longer active - pull the SA from MSDP's cache */
- pim_msdp_sa_local_del(pim, &up->sg);
-}
-
-void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up)
-{
- EVENT_OFF(up->t_msdp_reg_timer);
- event_add_timer(router->master, pim_upstream_msdp_reg_timer, up,
- PIM_MSDP_REG_RXED_PERIOD, &up->t_msdp_reg_timer);
-
- pim_msdp_sa_local_update(up);
-}
-
/*
* 4.2.1 Last-Hop Switchover to the SPT
*
diff --git a/pimd/pim_upstream.h b/pimd/pim_upstream.h
index 8b4a35be39..1d4b2128a8 100644
--- a/pimd/pim_upstream.h
+++ b/pimd/pim_upstream.h
@@ -350,7 +350,6 @@ int pim_upstream_inherited_olist(struct pim_instance *pim,
int pim_upstream_empty_inherited_olist(struct pim_upstream *up);
void pim_upstream_find_new_rpf(struct pim_instance *pim);
-void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up);
void pim_upstream_init(struct pim_instance *pim);
void pim_upstream_terminate(struct pim_instance *pim);
diff --git a/pimd/pim_util.c b/pimd/pim_util.c
index 657e84ae50..0aea240587 100644
--- a/pimd/pim_util.c
+++ b/pimd/pim_util.c
@@ -9,7 +9,10 @@
#include "log.h"
#include "prefix.h"
#include "plist.h"
+#include "plist_int.h"
+#include "pimd.h"
+#include "pim_instance.h"
#include "pim_util.h"
/*
@@ -126,34 +129,105 @@ int pim_is_group_224_4(struct in_addr group_addr)
return prefix_match(&group_all, &group);
}
-bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp)
+static bool pim_cisco_match(const struct filter *filter, const struct in_addr *source,
+ const struct in_addr *group)
{
- struct prefix grp_pfx;
- struct prefix_list *pl;
+ const struct filter_cisco *cfilter = &filter->u.cfilter;
+ uint32_t source_addr;
+ uint32_t group_addr;
- if (!pim_ifp->boundary_oil_plist)
+ group_addr = group->s_addr & ~cfilter->mask_mask.s_addr;
+
+ if (cfilter->extended) {
+ source_addr = source->s_addr & ~cfilter->addr_mask.s_addr;
+ if (group_addr == cfilter->mask.s_addr && source_addr == cfilter->addr.s_addr)
+ return true;
+ } else if (group_addr == cfilter->addr.s_addr)
+ return true;
+
+ return false;
+}
+
+enum filter_type pim_access_list_apply(struct access_list *access, const struct in_addr *source,
+ const struct in_addr *group)
+{
+ struct filter *filter;
+ struct prefix group_prefix = {};
+
+ if (access == NULL)
+ return FILTER_DENY;
+
+ for (filter = access->head; filter; filter = filter->next) {
+ if (filter->cisco) {
+ if (pim_cisco_match(filter, source, group))
+ return filter->type;
+ }
+ }
+
+ group_prefix.family = AF_INET;
+ group_prefix.prefixlen = IPV4_MAX_BITLEN;
+ group_prefix.u.prefix4.s_addr = group->s_addr;
+ return access_list_apply(access, &group_prefix);
+}
+
+bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp, pim_addr *src)
+{
+ bool is_filtered = false;
+#if PIM_IPV == 4
+ struct prefix grp_pfx = {};
+ pim_addr any_src = PIMADDR_ANY;
+
+ if (!pim_ifp->boundary_oil_plist && !pim_ifp->boundary_acl)
return false;
pim_addr_to_prefix(&grp_pfx, *grp);
- pl = prefix_list_lookup(PIM_AFI, pim_ifp->boundary_oil_plist);
- return pl ? prefix_list_apply_ext(pl, NULL, &grp_pfx, true) ==
- PREFIX_DENY
- : false;
+	/* Filter if either the group or the (S,G) pair is denied */
+ if (pim_ifp->boundary_oil_plist) {
+ is_filtered = prefix_list_apply_ext(pim_ifp->boundary_oil_plist, NULL, &grp_pfx,
+ true) == PREFIX_DENY;
+ if (is_filtered && PIM_DEBUG_EVENTS) {
+ zlog_debug("Filtering group %pI4 per prefix-list %s", grp,
+ pim_ifp->boundary_oil_plist->name);
+ }
+ }
+ if (!is_filtered && pim_ifp->boundary_acl) {
+		/* If src was not provided, treat it as "any" (*) */
+ if (!src)
+ src = &any_src;
+ /* S,G filtering using extended access-list syntax */
+ is_filtered = pim_access_list_apply(pim_ifp->boundary_acl, src, grp) == FILTER_DENY;
+ if (is_filtered && PIM_DEBUG_EVENTS) {
+ if (pim_addr_is_any(*src)) {
+ zlog_debug("Filtering (S,G)=(*, %pI4) per access-list %s", grp,
+ pim_ifp->boundary_acl->name);
+ } else {
+ zlog_debug("Filtering (S,G)=(%pI4, %pI4) per access-list %s", src,
+ grp, pim_ifp->boundary_acl->name);
+ }
+ }
+ }
+#endif
+ return is_filtered;
}
/* This function returns all multicast group */
-int pim_get_all_mcast_group(struct prefix *prefix)
+void pim_get_all_mcast_group(struct prefix *prefix)
{
+ memset(prefix, 0, sizeof(*prefix));
+
#if PIM_IPV == 4
- if (!str2prefix("224.0.0.0/4", prefix))
- return 0;
+ /* Precomputed version of: `str2prefix("224.0.0.0/4", prefix);` */
+ prefix->family = AF_INET;
+ prefix->prefixlen = 4;
+ prefix->u.prefix4.s_addr = htonl(0xe0000000);
#else
- if (!str2prefix("FF00::0/8", prefix))
- return 0;
+ /* Precomputed version of: `str2prefix("FF00::0/8", prefix)` */
+ prefix->family = AF_INET6;
+ prefix->prefixlen = 8;
+ prefix->u.prefix6.s6_addr[0] = 0xff;
#endif
- return 1;
}
bool pim_addr_is_multicast(pim_addr addr)
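
The cisco-style match above works on wildcard masks: bits set in mask_mask/addr_mask are "don't care". A worked instance (values illustrative), for an extended entry equivalent to "permit ip host 10.1.1.1 232.0.0.0 0.255.255.255":

    struct in_addr g = { .s_addr = htonl(0xE8010203) };         /* group 232.1.2.3 */
    struct in_addr mask_mask = { .s_addr = htonl(0x00FFFFFF) }; /* wildcard 0.255.255.255 */
    uint32_t group_addr = g.s_addr & ~mask_mask.s_addr;
    /* group_addr == htonl(0xE8000000), i.e. 232.0.0.0: it equals cfilter->mask,
     * and with addr_mask 0.0.0.0 the source must equal cfilter->addr exactly,
     * so (10.1.1.1, 232.1.2.3) is permitted by pim_cisco_match().
     */
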
diff --git a/pimd/pim_util.h b/pimd/pim_util.h
index c882fe4878..a3d944b82b 100644
--- a/pimd/pim_util.h
+++ b/pimd/pim_util.h
@@ -10,6 +10,7 @@
#include <stdint.h>
#include <zebra.h>
+#include "lib/filter.h"
#include "checksum.h"
#include "pimd.h"
@@ -22,7 +23,9 @@ void pim_pkt_dump(const char *label, const uint8_t *buf, int size);
int pim_is_group_224_0_0_0_24(struct in_addr group_addr);
int pim_is_group_224_4(struct in_addr group_addr);
-bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp);
-int pim_get_all_mcast_group(struct prefix *prefix);
+enum filter_type pim_access_list_apply(struct access_list *access, const struct in_addr *source,
+ const struct in_addr *group);
+bool pim_is_group_filtered(struct pim_interface *pim_ifp, pim_addr *grp, pim_addr *src);
+void pim_get_all_mcast_group(struct prefix *prefix);
bool pim_addr_is_multicast(pim_addr addr);
#endif /* PIM_UTIL_H */
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
index 4d83593c17..974cf30cf1 100644
--- a/pimd/pim_vty.c
+++ b/pimd/pim_vty.c
@@ -12,6 +12,8 @@
#include "vty.h"
#include "vrf.h"
#include "plist.h"
+#include "plist_int.h"
+#include "filter.h"
#include "pimd.h"
#include "pim_vty.h"
@@ -178,8 +180,10 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)
int writes = 0;
struct pim_ssm *ssm = pim->ssm_info;
+#if PIM_IPV == 4
writes += pim_msdp_peer_config_write(vty, pim);
writes += pim_msdp_config_write(pim, vty);
+#endif /* PIM_IPV == 4 */
if (!pim->send_v6_secondary) {
vty_out(vty, " no send-v6-secondary\n");
@@ -271,15 +275,14 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)
}
}
- if (pim->msdp.hold_time != PIM_MSDP_PEER_HOLD_TIME
- || pim->msdp.keep_alive != PIM_MSDP_PEER_KA_TIME
- || pim->msdp.connection_retry != PIM_MSDP_PEER_CONNECT_RETRY_TIME) {
- vty_out(vty, " msdp timers %u %u", pim->msdp.hold_time,
- pim->msdp.keep_alive);
- if (pim->msdp.connection_retry
- != PIM_MSDP_PEER_CONNECT_RETRY_TIME)
- vty_out(vty, " %u", pim->msdp.connection_retry);
- vty_out(vty, "\n");
+ if (pim->rpf_mode != MCAST_NO_CONFIG) {
+ ++writes;
+ vty_out(vty, " rpf-lookup-mode %s\n",
+ pim->rpf_mode == MCAST_URIB_ONLY ? "urib-only"
+ : pim->rpf_mode == MCAST_MRIB_ONLY ? "mrib-only"
+ : pim->rpf_mode == MCAST_MIX_MRIB_FIRST ? "mrib-then-urib"
+ : pim->rpf_mode == MCAST_MIX_DISTANCE ? "lower-distance"
+ : "longer-prefix");
}
return writes;
@@ -342,6 +345,9 @@ static int gm_config_write(struct vty *vty, int writes,
struct listnode *node;
struct gm_join *ij;
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_join_list, node, ij)) {
+ if (ij->join_type == GM_JOIN_PROXY)
+ continue;
+
if (pim_addr_is_any(ij->source_addr))
vty_out(vty, " ip igmp join-group %pPAs\n",
&ij->group_addr);
@@ -412,6 +418,9 @@ static int gm_config_write(struct vty *vty, int writes,
struct gm_join *ij;
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_join_list, node, ij)) {
+ if (ij->join_type == GM_JOIN_PROXY)
+ continue;
+
if (pim_addr_is_any(ij->source_addr))
vty_out(vty, " ipv6 mld join-group %pPAs\n",
&ij->group_addr);
@@ -486,7 +495,13 @@ int pim_config_write(struct vty *vty, int writes, struct interface *ifp,
/* boundary */
if (pim_ifp->boundary_oil_plist) {
vty_out(vty, " " PIM_AF_NAME " multicast boundary oil %s\n",
- pim_ifp->boundary_oil_plist);
+ pim_ifp->boundary_oil_plist->name);
+ ++writes;
+ }
+
+ if (pim_ifp->boundary_acl) {
+ vty_out(vty, " " PIM_AF_NAME " multicast boundary %s\n",
+ pim_ifp->boundary_acl->name);
++writes;
}
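
With both hunks applied, the written config gains lines like the following for IPv4 (illustrative; the list names are assumptions):

    rpf-lookup-mode mrib-then-urib              (global PIM config)
    ip multicast boundary oil BOUNDARY-PLIST    (per-interface, prefix-list)
    ip multicast boundary BOUNDARY-ACL          (per-interface, access-list)
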
diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c
index f1f315cc98..511d35bf76 100644
--- a/pimd/pim_vxlan.c
+++ b/pimd/pim_vxlan.c
@@ -411,10 +411,9 @@ static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
* we must dereg the old nexthop and force to new "static"
* iif
*/
- if (!PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags)) {
- pim_delete_tracked_nexthop(vxlan_sg->pim,
- up->upstream_addr, up, NULL);
- }
+ if (!PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags))
+ pim_nht_delete_tracked(vxlan_sg->pim, up->upstream_addr, up, NULL);
+
/* We are acting FHR; clear out use_rpt setting if any */
pim_upstream_update_use_rpt(up, false /*update_mroute*/);
pim_upstream_ref(up, flags, __func__);
diff --git a/pimd/pim_zebra.c b/pimd/pim_zebra.c
index ce4d85a2c8..f0ec3c6b6e 100644
--- a/pimd/pim_zebra.c
+++ b/pimd/pim_zebra.c
@@ -426,7 +426,6 @@ static void pim_zebra_connected(struct zclient *zclient)
static void pim_zebra_capabilities(struct zclient_capabilities *cap)
{
- router->mlag_role = cap->role;
router->multipath = cap->ecmp;
}
diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c
index 5d344f1f66..febc595ad4 100644
--- a/pimd/pim_zlookup.c
+++ b/pimd/pim_zlookup.c
@@ -153,6 +153,7 @@ static int zclient_read_nexthop(struct pim_instance *pim,
struct ipaddr raddr;
uint8_t distance;
uint32_t metric;
+ uint16_t prefix_len;
int nexthop_num;
int i, err;
@@ -162,7 +163,7 @@ static int zclient_read_nexthop(struct pim_instance *pim,
s = zlookup->ibuf;
- while (command != ZEBRA_NEXTHOP_LOOKUP_MRIB) {
+ while (command != ZEBRA_NEXTHOP_LOOKUP) {
stream_reset(s);
err = zclient_read_header(s, zlookup->sock, &length, &marker,
&version, &vrf_id, &command);
@@ -193,8 +194,14 @@ static int zclient_read_nexthop(struct pim_instance *pim,
distance = stream_getc(s);
metric = stream_getl(s);
+ prefix_len = stream_getw(s);
nexthop_num = stream_getw(s);
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), distance=%d, metric=%d, prefix_len=%d, nexthop_num=%d",
+ __func__, &addr, pim->vrf->name, distance, metric, prefix_len,
+ nexthop_num);
+
if (nexthop_num < 1 || nexthop_num > router->multipath) {
if (PIM_DEBUG_PIM_NHT_DETAIL)
zlog_debug("%s: socket %d bad nexthop_num=%d", __func__,
@@ -220,6 +227,7 @@ static int zclient_read_nexthop(struct pim_instance *pim,
}
nexthop_tab[num_ifindex].protocol_distance = distance;
nexthop_tab[num_ifindex].route_metric = metric;
+ nexthop_tab[num_ifindex].prefix_len = prefix_len;
nexthop_tab[num_ifindex].vrf_id = nexthop_vrf_id;
switch (nexthop_type) {
case NEXTHOP_TYPE_IFINDEX:
@@ -301,20 +309,23 @@ static int zclient_read_nexthop(struct pim_instance *pim,
}
}
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), num_ifindex=%d", __func__, &addr, pim->vrf->name,
+ num_ifindex);
+
return num_ifindex;
}
-static int zclient_lookup_nexthop_once(struct pim_instance *pim,
- struct pim_zlookup_nexthop nexthop_tab[],
- const int tab_size, pim_addr addr)
+static int zclient_rib_lookup(struct pim_instance *pim, struct pim_zlookup_nexthop nexthop_tab[],
+ const int tab_size, pim_addr addr, safi_t safi)
{
struct stream *s;
int ret;
struct ipaddr ipaddr;
if (PIM_DEBUG_PIM_NHT_DETAIL)
- zlog_debug("%s: addr=%pPAs(%s)", __func__, &addr,
- pim->vrf->name);
+ zlog_debug("%s: addr=%pPAs(%s), %sRIB", __func__, &addr, pim->vrf->name,
+ (safi == SAFI_MULTICAST ? "M" : "U"));
/* Check socket. */
if (zlookup->sock < 0) {
@@ -337,8 +348,9 @@ static int zclient_lookup_nexthop_once(struct pim_instance *pim,
s = zlookup->obuf;
stream_reset(s);
- zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP_MRIB, pim->vrf->vrf_id);
+ zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP, pim->vrf->vrf_id);
stream_put_ipaddr(s, &ipaddr);
+ stream_putc(s, safi);
stream_putw_at(s, 0, stream_get_endp(s));
ret = writen(zlookup->sock, s->data, stream_get_endp(s));
@@ -361,6 +373,79 @@ static int zclient_lookup_nexthop_once(struct pim_instance *pim,
return zclient_read_nexthop(pim, zlookup, nexthop_tab, tab_size, addr);
}
+static int zclient_lookup_nexthop_once(struct pim_instance *pim,
+ struct pim_zlookup_nexthop nexthop_tab[], const int tab_size,
+ pim_addr addr)
+{
+ if (pim->rpf_mode == MCAST_MRIB_ONLY)
+ return zclient_rib_lookup(pim, nexthop_tab, tab_size, addr, SAFI_MULTICAST);
+
+ if (pim->rpf_mode == MCAST_URIB_ONLY)
+ return zclient_rib_lookup(pim, nexthop_tab, tab_size, addr, SAFI_UNICAST);
+
+ /* All other modes require looking up both tables and making a choice */
+ struct pim_zlookup_nexthop mrib_tab[tab_size];
+ struct pim_zlookup_nexthop urib_tab[tab_size];
+ int mrib_num;
+ int urib_num;
+
+ memset(mrib_tab, 0, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ memset(urib_tab, 0, sizeof(struct pim_zlookup_nexthop) * tab_size);
+
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), looking up both MRIB and URIB", __func__, &addr,
+ pim->vrf->name);
+
+ mrib_num = zclient_rib_lookup(pim, mrib_tab, tab_size, addr, SAFI_MULTICAST);
+ urib_num = zclient_rib_lookup(pim, urib_tab, tab_size, addr, SAFI_UNICAST);
+
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), MRIB nexthops=%d, URIB nexthops=%d", __func__,
+ &addr, pim->vrf->name, mrib_num, urib_num);
+
+ /* If only one table has results, use that always */
+ if (mrib_num < 1) {
+ if (urib_num > 0)
+ memcpy(nexthop_tab, urib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ return urib_num;
+ }
+
+ if (urib_num < 1) {
+ if (mrib_num > 0)
+ memcpy(nexthop_tab, mrib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ return mrib_num;
+ }
+
+ /* Both tables have results, so choose which to use based on the
+  * configured lookup mode.  Distance and prefix length are the same for
+  * all nexthops, so only compare the first entry in each list.
+  */
+ if (pim->rpf_mode == MCAST_MIX_DISTANCE &&
+ mrib_tab[0].protocol_distance > urib_tab[0].protocol_distance) {
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), URIB has lower distance", __func__,
+ &addr, pim->vrf->name);
+ memcpy(nexthop_tab, urib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ return urib_num;
+ } else if (pim->rpf_mode == MCAST_MIX_PFXLEN &&
+ mrib_tab[0].prefix_len < urib_tab[0].prefix_len) {
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), URIB has longer prefix length", __func__,
+ &addr, pim->vrf->name);
+ memcpy(nexthop_tab, urib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ return urib_num;
+ }
+
+ /* All other modes use the MRIB: for MCAST_MIX_MRIB_FIRST (and by
+  * extension, MCAST_NO_CONFIG), always return the MRIB result when both
+  * tables have one.
+  */
+ if (PIM_DEBUG_PIM_NHT_DETAIL)
+ zlog_debug("%s: addr=%pPAs(%s), MRIB has nexthops", __func__, &addr, pim->vrf->name);
+ memcpy(nexthop_tab, mrib_tab, sizeof(struct pim_zlookup_nexthop) * tab_size);
+ return mrib_num;
+}
+
void zclient_lookup_read_pipe(struct event *thread)
{
struct zclient *zlookup = EVENT_ARG(thread);
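The new zclient_lookup_nexthop_once() above reduces RPF table selection to a
single tiebreak once both the MRIB and the URIB return nexthops: the
lower-distance and longer-prefix modes can hand the decision to the URIB,
while everything else (mrib-then-urib, and the unconfigured default) sticks
with the MRIB. A standalone sketch of that decision rule; the enum values
mirror the rpf_mode constants used above, and the function itself is
illustrative, not FRR code:

    #include <stdbool.h>
    #include <stdint.h>

    enum { MCAST_MIX_MRIB_FIRST, MCAST_MIX_DISTANCE, MCAST_MIX_PFXLEN };

    /* Return true when the URIB result should win the tiebreak. */
    static bool urib_wins(int rpf_mode, uint8_t mrib_dist, uint8_t urib_dist,
                          uint16_t mrib_plen, uint16_t urib_plen)
    {
            if (rpf_mode == MCAST_MIX_DISTANCE && mrib_dist > urib_dist)
                    return true; /* URIB route has the lower distance */
            if (rpf_mode == MCAST_MIX_PFXLEN && mrib_plen < urib_plen)
                    return true; /* URIB route is more specific */
            return false; /* mrib-first and all remaining ties use MRIB */
    }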
diff --git a/pimd/pim_zlookup.h b/pimd/pim_zlookup.h
index ee2dd20113..c9461eb7e3 100644
--- a/pimd/pim_zlookup.h
+++ b/pimd/pim_zlookup.h
@@ -21,6 +21,7 @@ struct pim_zlookup_nexthop {
ifindex_t ifindex;
uint32_t route_metric;
uint8_t protocol_distance;
+ uint16_t prefix_len;
};
void zclient_lookup_new(void);
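The prefix_len member added to struct pim_zlookup_nexthop is what makes the
longer-prefix comparison possible: zebra now reports the matched route's
prefix length in every lookup reply. The struct below only illustrates the
per-address reply layout as inferred from the stream_get*()/stream_put*()
calls in this diff; zebra does not actually define such a struct:

    #include <stdint.h>

    /* Fields of a ZEBRA_NEXTHOP_LOOKUP reply, in stream order. */
    struct nexthop_lookup_reply {
            uint8_t distance;     /* stream_getc() */
            uint32_t metric;      /* stream_getl() */
            uint16_t prefix_len;  /* stream_getw(), new in this change */
            uint16_t nexthop_num; /* stream_getw() */
            /* nexthop_num nexthop entries follow */
    };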
diff --git a/pimd/pimd.c b/pimd/pimd.c
index db61974800..c1de58550a 100644
--- a/pimd/pimd.c
+++ b/pimd/pimd.c
@@ -17,6 +17,7 @@
#include "vrf.h"
#include "lib_errors.h"
#include "bfd.h"
+#include "filter.h"
#include "pimd.h"
#if PIM_IPV == 4
@@ -35,6 +36,7 @@
#include "pim_zlookup.h"
#include "pim_zebra.h"
#include "pim_mlag.h"
+#include "pim_autorp.h"
#if MAXVIFS > 256
CPP_NOTICE("Work needs to be done to make this work properly via the pim mroute socket\n");
@@ -70,6 +72,9 @@ void pim_prefix_list_update(struct prefix_list *plist)
pim_rp_prefix_list_update(pim, plist);
pim_ssm_prefix_list_update(pim, plist);
pim_upstream_spt_prefix_list_update(pim, plist);
+#if PIM_IPV == 4
+ pim_autorp_prefix_list_update(pim, plist);
+#endif
}
}
@@ -143,6 +148,7 @@ void pim_terminate(void)
prefix_list_add_hook(NULL);
prefix_list_delete_hook(NULL);
prefix_list_reset();
+ access_list_reset();
pim_vxlan_terminate();
pim_vrf_terminate();
diff --git a/ripd/ripd.c b/ripd/ripd.c
index 8768819fe2..2d038507ab 100644
--- a/ripd/ripd.c
+++ b/ripd/ripd.c
@@ -1263,9 +1263,13 @@ static void rip_response_process(struct rip_packet *packet, int size,
rip->vrf->vrf_id)) {
struct route_node *rn;
struct rip_info *rinfo;
+ struct prefix p = { 0 };
- rn = route_node_match_ipv4(rip->table,
- &rte->nexthop);
+ p.family = AF_INET;
+ p.prefixlen = IPV4_MAX_BITLEN;
+ p.u.prefix4 = rte->nexthop;
+
+ rn = route_node_match(rip->table, &p);
if (rn) {
rinfo = rn->info;
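The ripd hunk above swaps the IPv4-only route_node_match_ipv4() helper for the
generic route_node_match() on an explicitly built /32 host prefix; the
longest-prefix match then answers "which RIP route covers this nexthop". A
minimal sketch of the same construction, assuming libfrr's struct prefix and
route table API:

    /* Wrap an IPv4 address in a /32 host prefix and run a longest-prefix
     * match against the table. */
    static struct route_node *match_host_route(struct route_table *table,
                                               struct in_addr addr)
    {
            struct prefix p = { 0 };

            p.family = AF_INET;
            p.prefixlen = IPV4_MAX_BITLEN; /* 32 */
            p.u.prefix4 = addr;

            return route_node_match(table, &p);
    }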
diff --git a/staticd/static_nht.c b/staticd/static_nht.c
index 6be598434d..06d27c6f59 100644
--- a/staticd/static_nht.c
+++ b/staticd/static_nht.c
@@ -21,6 +21,7 @@ static void static_nht_update_path(struct static_path *pn, struct prefix *nhp,
uint32_t nh_num, vrf_id_t nh_vrf_id)
{
struct static_nexthop *nh;
+ bool route_changed = false;
frr_each(static_nexthop_list, &pn->nexthop_list, nh) {
if (nh->nh_vrf_id != nh_vrf_id)
@@ -42,8 +43,10 @@ static void static_nht_update_path(struct static_path *pn, struct prefix *nhp,
nh->nh_valid = !!nh_num;
if (nh->state == STATIC_START)
- static_zebra_route_add(pn, true);
+ route_changed = true;
}
+ if (route_changed)
+ static_zebra_route_add(pn, true);
}
static void static_nht_update_safi(struct prefix *sp, struct prefix *nhp,
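The staticd hunk above hoists static_zebra_route_add() out of the nexthop
loop: instead of re-installing the route once per matching nexthop, the loop
only records that something changed and the route is installed a single time
afterwards. The pattern, sketched with nexthop_needs_update() as a
hypothetical stand-in for the per-nexthop checks in the hunk:

    bool route_changed = false;
    struct static_nexthop *nh;

    frr_each (static_nexthop_list, &pn->nexthop_list, nh) {
            if (!nexthop_needs_update(nh)) /* hypothetical predicate */
                    continue;
            nh->nh_valid = true;
            route_changed = true; /* defer the install */
    }
    if (route_changed)
            static_zebra_route_add(pn, true); /* install once */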
diff --git a/tests/helpers/python/frrtest.py b/tests/helpers/python/frrtest.py
index 3faa2a6f13..4682bd8786 100644
--- a/tests/helpers/python/frrtest.py
+++ b/tests/helpers/python/frrtest.py
@@ -163,8 +163,8 @@ class TestRefMismatch(Exception):
difflib.unified_diff(
self.reftext.splitlines(),
self.outtext.splitlines(),
- "outtext",
"reftext",
+ "outtext",
lineterm="",
)
)
diff --git a/tests/topotests/Dockerfile b/tests/topotests/Dockerfile
index 1503e67d31..d55827fe6c 100644
--- a/tests/topotests/Dockerfile
+++ b/tests/topotests/Dockerfile
@@ -1,60 +1,98 @@
-FROM ubuntu:18.04
+FROM ubuntu:22.04
-RUN export DEBIAN_FRONTEND=noninteractive \
- && apt-get update \
- && apt-get install -y \
- autoconf \
- binutils \
- bison \
- ca-certificates \
- flex \
+ARG DEBIAN_FRONTEND=noninteractive
+ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
+
+RUN apt update -y && apt upgrade -y && \
+ # Basic build requirements from documentation
+ apt-get install -y \
+ autoconf \
+ automake \
+ bison \
+ build-essential \
+ flex \
+ git \
+ install-info \
+ libc-ares-dev \
+ libcap-dev \
+ libelf-dev \
+ libjson-c-dev \
+ libpam0g-dev \
+ libreadline-dev \
+ libsnmp-dev \
+ libsqlite3-dev \
+ lsb-release \
+ libtool \
+ lcov \
+ make \
+ perl \
+ pkg-config \
+ python3-dev \
+ python3-sphinx \
+ screen \
+ texinfo \
+ tmux \
+ && \
+ # Protobuf build requirements
+ apt-get install -y \
+ libprotobuf-c-dev \
+ protobuf-c-compiler \
+ && \
+ # Libyang2 extra build requirements
+ apt-get install -y \
+ cmake \
+ libpcre2-dev \
+ && \
+ # GRPC extra build requirements
+ apt-get install -y \
+ libgrpc-dev \
+ libgrpc++-dev \
+ protobuf-compiler-grpc \
+ && \
+ # Runtime/triage/testing requirements
+ apt-get install -y \
+ rsync \
+ curl \
gdb \
- git \
- gpg \
- install-info \
- iputils-ping \
+ kmod \
iproute2 \
- less \
- libtool \
- libjson-c-dev \
- libpcre3-dev \
- libpython-dev \
- libpython3-dev \
- libreadline-dev \
- libc-ares-dev \
- libcap-dev \
- libelf-dev \
- man \
- mininet \
- pkg-config \
- python-pip \
+ iputils-ping \
+ liblua5.3-dev \
+ libssl-dev \
+ lua5.3 \
+ net-tools \
python3 \
- python3-dev \
- python3-sphinx \
- python3-pytest \
- rsync \
+ python3-pip \
+ snmp \
+ snmp-mibs-downloader \
+ snmpd \
+ sudo \
+ time \
+ tshark \
+ valgrind \
+ yodl \
strace \
tcpdump \
- texinfo \
- tmux \
- valgrind \
- vim \
- wget \
- x11-xserver-utils \
- xterm \
- && pip install \
- exabgp==3.4.17 \
- "scapy>=2.4.2" \
- ipaddr \
- pytest \
- && rm -rf /var/lib/apt/lists/*
+ && \
+ download-mibs && \
+ wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/iana/IANA-IPPM-METRICS-REGISTRY-MIB -O /usr/share/snmp/mibs/iana/IANA-IPPM-METRICS-REGISTRY-MIB && \
+ wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/ietf/SNMPv2-PDU -O /usr/share/snmp/mibs/ietf/SNMPv2-PDU && \
+ wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/ietf/IPATM-IPMC-MIB -O /usr/share/snmp/mibs/ietf/IPATM-IPMC-MIB && \
+ python3 -m pip install wheel && \
+ python3 -m pip install 'protobuf<4' grpcio grpcio-tools && \
+ python3 -m pip install 'pytest>=6.2.4' 'pytest-xdist>=2.3.0' && \
+ python3 -m pip install 'scapy>=2.4.5' && \
+ python3 -m pip install xmltodict && \
+ python3 -m pip install git+https://github.com/Exa-Networks/exabgp@0659057837cd6c6351579e9f0fa47e9fb7de7311
+
+# Install FRR built packages
+RUN mkdir -p /etc/apt/keyrings && \
+ curl -s -o /etc/apt/keyrings/frrouting.gpg https://deb.frrouting.org/frr/keys.gpg && \
+ echo deb '[signed-by=/etc/apt/keyrings/frrouting.gpg]' https://deb.frrouting.org/frr \
+ $(lsb_release -s -c) "frr-stable" > /etc/apt/sources.list.d/frr.list && \
+ apt-get update && apt-get install -y librtr-dev libyang2-dev libyang2-tools
-RUN export DEBIAN_FRONTEND=noninteractive \
- && wget -qO- https://deb.frrouting.org/frr/keys.asc | apt-key add - \
- && echo "deb https://deb.frrouting.org/frr bionic frr-stable" > /etc/apt/sources.list.d/frr.list \
- && apt-get update \
- && apt-get install -y libyang-dev \
- && rm -rf /var/lib/apt/lists/*
+RUN apt install -y openvswitch-switch
RUN groupadd -r -g 92 frr \
&& groupadd -r -g 85 frrvty \
diff --git a/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref b/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref
index 33c44780b4..a188ad92fc 100644
--- a/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref
+++ b/tests/topotests/all_protocol_startup/r1/ipv4_routes.ref
@@ -1,3 +1,4 @@
+IPv4 unicast VRF default:
C>* 192.168.0.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX
C>* 192.168.1.0/26 is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* 192.168.2.0/26 is directly connected, r1-eth2, weight 1, XX:XX:XX
diff --git a/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref b/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref
index f5c1d6d7d2..4cb8692f90 100644
--- a/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref
+++ b/tests/topotests/all_protocol_startup/r1/ipv6_routes.ref
@@ -1,3 +1,4 @@
+IPv6 unicast VRF default:
C>* fc00:0:0:1::/64 is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* fc00:0:0:2::/64 is directly connected, r1-eth2, weight 1, XX:XX:XX
C>* fc00:0:0:3::/64 is directly connected, r1-eth3, weight 1, XX:XX:XX
diff --git a/tests/topotests/all_protocol_startup/test_all_protocol_startup.py b/tests/topotests/all_protocol_startup/test_all_protocol_startup.py
index 0ffd762734..06a350c8e9 100644
--- a/tests/topotests/all_protocol_startup/test_all_protocol_startup.py
+++ b/tests/topotests/all_protocol_startup/test_all_protocol_startup.py
@@ -20,6 +20,7 @@ import sys
import pytest
import glob
from time import sleep
+from lib.topolog import logger
pytestmark = [
pytest.mark.babeld,
@@ -1715,6 +1716,77 @@ def test_resilient_nexthop_group():
net["r1"].cmd('vtysh -c "conf" -c "no nexthop-group resilience"')
+def test_interface_stuff():
+ global fatal_error
+ net = get_topogen().net
+
+ # Skip if previous fatal error condition is raised
+ if fatal_error != "":
+ pytest.skip(fatal_error)
+
+ print("\n\n** Verifying some interface code")
+ print("************************************\n")
+
+ net["r1"].cmd('vtysh -c "conf" -c "interface r1-eth0" -c "multicast enable"')
+
+ def _test_interface_multicast_on():
+ output = json.loads(net["r1"].cmd('vtysh -c "show int r1-eth0 json"'))
+ expected = {
+ "r1-eth0": {
+ "flags": "<UP,LOWER_UP,BROADCAST,RUNNING,MULTICAST>",
+ "multicastConfig": "Enabled by CLI",
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_test_interface_multicast_on)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Multicast bit was not set on r1-eth0"
+
+ net["r1"].cmd('vtysh -c "conf" -c "interface r1-eth0" -c "multicast disable"')
+
+ def _test_interface_multicast_off():
+ output = json.loads(
+ net["r1"].cmd('vtysh -c "show int r1-eth0 vrf default json"')
+ )
+ expected = {
+ "r1-eth0": {
+ "flags": "<UP,LOWER_UP,BROADCAST,RUNNING>",
+ "multicastConfig": "Disabled by CLI",
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_test_interface_multicast_off)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Multicast bit was not turned off on r1-eth0"
+
+ net["r1"].cmd('vtysh -c "conf" -c "interface r1-eth0" -c "no multicast disable"')
+
+ def _test_interface_multicast_disable():
+ output = json.loads(net["r1"].cmd('vtysh -c "show int r1-eth0 json"'))
+ expected = {
+ "r1-eth0": {
+ "flags": "<UP,LOWER_UP,BROADCAST,RUNNING>",
+ "multicastConfig": "Not specified by CLI",
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_test_interface_multicast_disable)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Multicast bit was set on r1-eth0"
+
+ logger.info("Ensure that these commands are still nominally working")
+ rc, o, e = net["r1"].cmd_status('vtysh -c "show interface description vrf all"')
+ logger.info(o)
+ assert rc == 0
+
+ rc, o, e = net["r1"].cmd_status('vtysh -c "show interface description vrf default"')
+ logger.info(o)
+ assert rc == 0
+
+
def test_shutdown_check_stderr():
global fatal_error
net = get_topogen().net
diff --git a/tests/topotests/bfd_profiles_topo1/r2/bgpd.conf b/tests/topotests/bfd_profiles_topo1/r2/bgpd.conf
index 1aab1d1372..0fe6f1c1c4 100644
--- a/tests/topotests/bfd_profiles_topo1/r2/bgpd.conf
+++ b/tests/topotests/bfd_profiles_topo1/r2/bgpd.conf
@@ -5,9 +5,11 @@ router bgp 100
no bgp ebgp-requires-policy
neighbor 172.16.1.1 remote-as 100
neighbor 172.16.1.1 timers 3 10
+ neighbor 172.16.1.1 timers connect 1
neighbor 172.16.1.1 bfd profile fasttx
neighbor 2001:db8:2::2 remote-as 200
neighbor 2001:db8:2::2 timers 3 10
+ neighbor 2001:db8:2::2 timers connect 1
neighbor 2001:db8:2::2 ebgp-multihop 2
neighbor 2001:db8:2::2 bfd profile slowtx
address-family ipv4 unicast
diff --git a/tests/topotests/bfd_profiles_topo1/r3/bgpd.conf b/tests/topotests/bfd_profiles_topo1/r3/bgpd.conf
index 65647b39e5..d1168d93bc 100644
--- a/tests/topotests/bfd_profiles_topo1/r3/bgpd.conf
+++ b/tests/topotests/bfd_profiles_topo1/r3/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 100
bgp router-id 10.254.254.3
neighbor 172.16.1.2 remote-as 100
neighbor 172.16.1.2 timers 3 10
+ neighbor 172.16.1.2 timers connect 1
neighbor 172.16.1.2 bfd profile DOES_NOT_EXIST
address-family ipv4 unicast
redistribute connected
diff --git a/tests/topotests/bfd_profiles_topo1/r4/bgpd.conf b/tests/topotests/bfd_profiles_topo1/r4/bgpd.conf
index 12d68270f8..1a8e6bb94d 100644
--- a/tests/topotests/bfd_profiles_topo1/r4/bgpd.conf
+++ b/tests/topotests/bfd_profiles_topo1/r4/bgpd.conf
@@ -5,6 +5,7 @@ router bgp 200
no bgp ebgp-requires-policy
neighbor 2001:db8:1::2 remote-as 100
neighbor 2001:db8:1::2 timers 3 10
+ neighbor 2001:db8:1::2 timers connect 1
neighbor 2001:db8:1::2 ebgp-multihop 2
neighbor 2001:db8:1::2 bfd profile DOES_NOT_EXIST
address-family ipv4 unicast
diff --git a/tests/topotests/bgp_addpath_best_selected/test_bgp_addpath_best_selected.py b/tests/topotests/bgp_addpath_best_selected/test_bgp_addpath_best_selected.py
index 3d17a2b709..e58b53728b 100644
--- a/tests/topotests/bgp_addpath_best_selected/test_bgp_addpath_best_selected.py
+++ b/tests/topotests/bgp_addpath_best_selected/test_bgp_addpath_best_selected.py
@@ -73,7 +73,9 @@ def test_bgp_addpath_best_selected():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ r1 = tgen.gears["r1"]
r2 = tgen.gears["r2"]
+ r7 = tgen.gears["r7"]
def _bgp_converge():
output = json.loads(r2.vtysh_cmd("show bgp ipv4 unicast 172.16.16.254/32 json"))
@@ -111,78 +113,67 @@ def test_bgp_addpath_best_selected():
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert result is None, "Can't converge initially"
- def check_bgp_advertised_routes_to_r1():
+ def r1_check_bgp_received_routes_from_r2():
output = json.loads(
- r2.vtysh_cmd(
- "show bgp ipv4 neighbors 192.168.1.1 advertised-routes detail json"
- )
+ r1.vtysh_cmd("show bgp ipv4 neighbors 192.168.1.2 routes json")
)
expected = {
- "advertisedRoutes": {
- "172.16.16.254/32": {
- "paths": [
- {
- "aspath": {
- "string": "65005",
- }
- },
- {
- "aspath": {
- "string": "65006",
- }
- },
- ]
- }
+ "routes": {
+ "172.16.16.254/32": [
+ {
+ "valid": True,
+ "path": "65002 65005",
+ },
+ {
+ "valid": True,
+ "path": "65002 65006",
+ },
+ ]
},
- "totalPrefixCounter": 2,
+ "totalRoutes": 1,
+ "totalPaths": 2,
}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(check_bgp_advertised_routes_to_r1)
+ test_func = functools.partial(r1_check_bgp_received_routes_from_r2)
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert (
result is None
- ), "Received more/less Add-Path best paths, but should be only 1+1 (real best path)"
+ ), "Received more/less Add-Path best paths, but should be ONLY 1+1 (real best path)"
- def check_bgp_advertised_routes_to_r7():
+ def r7_check_bgp_received_routes_from_r2():
output = json.loads(
- r2.vtysh_cmd(
- "show bgp ipv4 neighbors 192.168.7.7 advertised-routes detail json"
- )
+ r7.vtysh_cmd("show bgp ipv4 neighbors 192.168.7.2 routes json")
)
expected = {
- "advertisedRoutes": {
- "172.16.16.254/32": {
- "paths": [
- {
- "aspath": {
- "string": "65004",
- }
- },
- {
- "aspath": {
- "string": "65005",
- }
- },
- {
- "aspath": {
- "string": "65006",
- }
- },
- ]
- }
+ "routes": {
+ "172.16.16.254/32": [
+ {
+ "valid": True,
+ "path": "65002 65004",
+ },
+ {
+ "valid": True,
+ "path": "65002 65005",
+ },
+ {
+ "valid": True,
+ "path": "65002 65006",
+ },
+ ]
},
- "totalPrefixCounter": 3,
+ "totalRoutes": 1,
+ "totalPaths": 3,
}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(check_bgp_advertised_routes_to_r7)
+ test_func = functools.partial(r7_check_bgp_received_routes_from_r2)
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert (
result is None
- ), "Received more/less Add-Path best paths, but should be only 2+1 (real best path)"
+ ), "Received more/less Add-Path best paths, but should be ONLY 2+1 (real best path)"
if __name__ == "__main__":
diff --git a/tests/topotests/bgp_aggregate_address_topo1/r1/bgpd.conf b/tests/topotests/bgp_aggregate_address_topo1/r1/bgpd.conf
index c7cf4a527f..69be4b541d 100644
--- a/tests/topotests/bgp_aggregate_address_topo1/r1/bgpd.conf
+++ b/tests/topotests/bgp_aggregate_address_topo1/r1/bgpd.conf
@@ -19,8 +19,10 @@ router bgp 65000
no bgp ebgp-requires-policy
neighbor 10.0.0.2 remote-as 65001
neighbor 10.0.0.2 timers 3 10
+ neighbor 10.0.0.2 timers connect 1
neighbor 10.0.1.2 remote-as internal
neighbor 10.0.1.2 timers 3 10
+ neighbor 10.0.1.2 timers connect 1
address-family ipv4 unicast
redistribute connected
aggregate-address 192.168.0.0/24 matching-MED-only
diff --git a/tests/topotests/bgp_aggregate_address_topo1/r2/bgpd.conf b/tests/topotests/bgp_aggregate_address_topo1/r2/bgpd.conf
index acacd86526..418624aed4 100644
--- a/tests/topotests/bgp_aggregate_address_topo1/r2/bgpd.conf
+++ b/tests/topotests/bgp_aggregate_address_topo1/r2/bgpd.conf
@@ -1,6 +1,7 @@
router bgp 65000
neighbor 10.0.1.1 remote-as internal
neighbor 10.0.1.1 timers 3 10
+ neighbor 10.0.1.1 timers connect 1
address-family ipv4 unicast
redistribute connected
exit-address-family
diff --git a/tests/topotests/bgp_aggregator_zero/r1/bgpd.conf b/tests/topotests/bgp_aggregator_zero/r1/bgpd.conf
index 002a5c78c0..a6e24b221b 100644
--- a/tests/topotests/bgp_aggregator_zero/r1/bgpd.conf
+++ b/tests/topotests/bgp_aggregator_zero/r1/bgpd.conf
@@ -3,4 +3,5 @@ router bgp 65534
no bgp ebgp-requires-policy
neighbor 10.0.0.2 remote-as external
neighbor 10.0.0.2 timers 3 10
+ neighbor 10.0.0.2 timers connect 1
!
diff --git a/tests/topotests/bgp_aspath_zero/r1/bgpd.conf b/tests/topotests/bgp_aspath_zero/r1/bgpd.conf
index 002a5c78c0..a6e24b221b 100644
--- a/tests/topotests/bgp_aspath_zero/r1/bgpd.conf
+++ b/tests/topotests/bgp_aspath_zero/r1/bgpd.conf
@@ -3,4 +3,5 @@ router bgp 65534
no bgp ebgp-requires-policy
neighbor 10.0.0.2 remote-as external
neighbor 10.0.0.2 timers 3 10
+ neighbor 10.0.0.2 timers connect 1
!
diff --git a/tests/topotests/bgp_bmp/bgpbmp.py b/tests/topotests/bgp_bmp/bgpbmp.py
new file mode 100644
index 0000000000..eac78a63f7
--- /dev/null
+++ b/tests/topotests/bgp_bmp/bgpbmp.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Copyright 2023, 6wind
+import json
+import os
+
+from lib import topotest
+from lib.topogen import get_topogen
+from lib.topolog import logger
+
+# remember the last sequence number of the logging messages
+SEQ = 0
+
+
+def bmp_reset_seq():
+ global SEQ
+ SEQ = 0
+
+
+def get_bmp_messages(bmp_collector, bmp_log_file):
+ """
+ Read the BMP logging messages.
+ """
+ messages = []
+ text_output = bmp_collector.run(f"cat {bmp_log_file}")
+
+ for m in text_output.splitlines():
+ # some output in the bash can break the message decoding
+ try:
+ messages.append(json.loads(m))
+ except Exception as e:
+ logger.warning(str(e) + " message: {}".format(str(m)))
+ continue
+
+ if not messages:
+ logger.error("Bad BMP log format, check your BMP server")
+
+ return messages
+
+
+def bmp_update_seq(bmp_collector, bmp_log_file):
+ global SEQ
+
+ messages = get_bmp_messages(bmp_collector, bmp_log_file)
+
+ if len(messages):
+ SEQ = messages[-1]["seq"]
+
+
+def bmp_update_expected_files(
+ bmp_actual,
+ expected_prefixes,
+ bmp_log_type,
+ policy,
+ step,
+ bmp_client,
+ bmp_log_folder,
+):
+ tgen = get_topogen()
+
+ with open(
+ f"{bmp_log_folder}/tmp/bmp-{bmp_log_type}-{policy}-step{step}.json", "w"
+ ) as json_file:
+ json.dump(bmp_actual, json_file, indent=4)
+
+ out = bmp_client.vtysh_cmd("show bgp vrf vrf1 ipv4 json", isjson=True)
+ filtered_out = {
+ "routes": {
+ prefix: route_info
+ for prefix, route_info in out["routes"].items()
+ if prefix in expected_prefixes
+ }
+ }
+ if bmp_log_type == "withdraw":
+ for pfx in expected_prefixes:
+ if "::" in pfx:
+ continue
+ filtered_out["routes"][pfx] = None
+
+ # ls {bmp_log_folder}/tmp/show*json | while read file; do egrep -v 'prefix|network|metric|ocPrf|version|weight|peerId|vrf|Version|valid|Reason|fe80' $file >$(basename $file); echo >> $(basename $file); done
+ with open(
+ f"{bmp_log_folder}/tmp/show-bgp-ipv4-{bmp_log_type}-step{step}.json", "w"
+ ) as json_file:
+ json.dump(filtered_out, json_file, indent=4)
+
+ out = tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 ipv6 json", isjson=True)
+ filtered_out = {
+ "routes": {
+ prefix: route_info
+ for prefix, route_info in out["routes"].items()
+ if prefix in expected_prefixes
+ }
+ }
+ if bmp_log_type == "withdraw":
+ for pfx in expected_prefixes:
+ if "::" not in pfx:
+ continue
+ filtered_out["routes"][pfx] = None
+
+ with open(
+ f"{bmp_log_folder}/tmp/show-bgp-ipv6-{bmp_log_type}-step{step}.json", "w"
+ ) as json_file:
+ json.dump(filtered_out, json_file, indent=4)
+
+
+def bmp_check_for_prefixes(
+ expected_prefixes,
+ bmp_log_type,
+ policy,
+ step,
+ bmp_collector,
+ bmp_log_folder,
+ bmp_client,
+ expected_json_path,
+ update_expected_json,
+ loc_rib,
+):
+ """
+ Check for the presence of the given prefixes in the BMP server logs with
+ the given message type and the set policy.
+
+ """
+ global SEQ
+
+ bmp_log_file = f"{bmp_log_folder}/bmp.log"
+ # we care only about the new messages
+ messages = [
+ m
+ for m in sorted(
+ get_bmp_messages(bmp_collector, bmp_log_file), key=lambda d: d["seq"]
+ )
+ if m["seq"] > SEQ
+ ]
+
+ # create empty initial files
+ # for step in $(seq 1); do
+ # for i in "update" "withdraw"; do
+ # for j in "pre-policy" "post-policy" "loc-rib"; do
+ # echo '{"null": {}}'> bmp-$i-$j-step$step.json
+ # done
+ # done
+ # done
+
+ ref_file = f"{expected_json_path}/bmp-{bmp_log_type}-{policy}-step{step}.json"
+ expected = json.loads(open(ref_file).read())
+
+ # Build actual json from logs
+ actual = {}
+ for m in messages:
+ if (
+ "bmp_log_type" in m.keys()
+ and "ip_prefix" in m.keys()
+ and m["ip_prefix"] in expected_prefixes
+ and m["bmp_log_type"] == bmp_log_type
+ and m["policy"] == policy
+ ):
+ policy_dict = actual.setdefault(m["policy"], {})
+ bmp_log_type_dict = policy_dict.setdefault(m["bmp_log_type"], {})
+
+ # Add or update the ip_prefix dictionary with filtered key-value pairs
+ bmp_log_type_dict[m["ip_prefix"]] = {
+ k: v
+ for k, v in sorted(m.items())
+ # filter out variable keys
+ if k not in ["timestamp", "seq", "nxhp_link-local"]
+ }
+
+ # build expected JSON files
+ if (
+ update_expected_json
+ and actual
+ and set(actual.get(policy, {}).get(bmp_log_type, {}).keys())
+ == set(expected_prefixes)
+ ):
+ bmp_update_expected_files(
+ actual,
+ expected_prefixes,
+ bmp_log_type,
+ policy,
+ step,
+ bmp_client,
+ bmp_log_folder,
+ )
+
+ return topotest.json_cmp(actual, expected, exact=True)
+
+
+def bmp_check_for_peer_message(
+ expected_peers, bmp_log_type, bmp_collector, bmp_log_file, is_rd_instance=False
+):
+ """
+ Check for the presence of a peer up message for the peer
+ """
+ global SEQ
+
+ # we care only about the new messages
+ messages = [
+ m
+ for m in sorted(
+ get_bmp_messages(bmp_collector, bmp_log_file), key=lambda d: d["seq"]
+ )
+ if m["seq"] > SEQ
+ ]
+
+ # get the list of pairs (prefix, policy, seq) for the given message type
+ peers = []
+ for m in messages:
+ if is_rd_instance and m["peer_distinguisher"] == "0:0":
+ continue
+ if (
+ "peer_ip" in m.keys()
+ and m["peer_ip"] != "0.0.0.0"
+ and m["bmp_log_type"] == bmp_log_type
+ ):
+ if is_rd_instance and m["peer_type"] != "route distinguisher instance":
+ continue
+ peers.append(m["peer_ip"])
+ elif m["policy"] == "loc-rib" and m["bmp_log_type"] == bmp_log_type:
+ peers.append("0.0.0.0")
+
+ # check for prefixes
+ for ep in expected_peers:
+ if ep not in peers:
+ msg = "The peer {} is not present in the {} log messages."
+ logger.debug(msg.format(ep, bmp_log_type))
+ return False
+
+ SEQ = messages[-1]["seq"]
+ return True
diff --git a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-update-loc-rib-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-loc-rib-step1.json
index ba31bf1d5d..d6c87dd4fd 100644
--- a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-update-loc-rib-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-loc-rib-step1.json
@@ -10,6 +10,7 @@
"origin": "IGP",
"peer_asn": 65501,
"peer_bgp_id": "192.168.0.1",
+ "peer_distinguisher": "444:1",
"peer_type": "loc-rib instance",
"policy": "loc-rib"
},
@@ -23,6 +24,7 @@
"origin": "IGP",
"peer_asn": 65501,
"peer_bgp_id": "192.168.0.1",
+ "peer_distinguisher": "555:1",
"peer_type": "loc-rib instance",
"policy": "loc-rib",
"safi": 1
diff --git a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-update-post-policy-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-post-policy-step1.json
index d5d9d65182..04e01623df 100644
--- a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-update-post-policy-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-post-policy-step1.json
@@ -10,9 +10,9 @@
"origin": "IGP",
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "444:1",
"peer_ip": "192.168.0.2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "post-policy"
},
"2111::1111/128": {
@@ -25,9 +25,9 @@
"origin": "IGP",
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "555:1",
"peer_ip": "192:168::2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "post-policy",
"safi": 1
}
diff --git a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-update-pre-policy-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-pre-policy-step1.json
index e11badc040..760ee0409a 100644
--- a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-update-pre-policy-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-update-pre-policy-step1.json
@@ -10,9 +10,9 @@
"origin": "IGP",
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "444:1",
"peer_ip": "192.168.0.2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "pre-policy"
},
"2111::1111/128": {
@@ -25,9 +25,9 @@
"origin": "IGP",
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "555:1",
"peer_ip": "192:168::2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "pre-policy",
"safi": 1
}
diff --git a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-withdraw-loc-rib-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-loc-rib-step1.json
index 37ddc09ff8..6a82f7af1a 100644
--- a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-withdraw-loc-rib-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-loc-rib-step1.json
@@ -7,6 +7,7 @@
"is_filtered": false,
"peer_asn": 65501,
"peer_bgp_id": "192.168.0.1",
+ "peer_distinguisher": "444:1",
"peer_type": "loc-rib instance",
"policy": "loc-rib"
},
@@ -17,6 +18,7 @@
"is_filtered": false,
"peer_asn": 65501,
"peer_bgp_id": "192.168.0.1",
+ "peer_distinguisher": "555:1",
"peer_type": "loc-rib instance",
"policy": "loc-rib",
"safi": 1
diff --git a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-withdraw-post-policy-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-post-policy-step1.json
index de84307a4e..f57b1a51ce 100644
--- a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-withdraw-post-policy-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-post-policy-step1.json
@@ -7,9 +7,9 @@
"ipv6": false,
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "444:1",
"peer_ip": "192.168.0.2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "post-policy"
},
"2111::1111/128": {
@@ -19,9 +19,9 @@
"ipv6": true,
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "555:1",
"peer_ip": "192:168::2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "post-policy",
"safi": 1
}
diff --git a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-withdraw-pre-policy-step1.json b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-pre-policy-step1.json
index 1c34498b7a..a52308c789 100644
--- a/tests/topotests/bgp_bmp_vrf/bmp1/bmp-withdraw-pre-policy-step1.json
+++ b/tests/topotests/bgp_bmp/bmp1vrf/bmp-withdraw-pre-policy-step1.json
@@ -7,9 +7,9 @@
"ipv6": false,
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "444:1",
"peer_ip": "192.168.0.2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "pre-policy"
},
"2111::1111/128": {
@@ -19,9 +19,9 @@
"ipv6": true,
"peer_asn": 65502,
"peer_bgp_id": "192.168.0.2",
- "peer_distinguisher": "0:0",
+ "peer_distinguisher": "555:1",
"peer_ip": "192:168::2",
- "peer_type": "global instance",
+ "peer_type": "route distinguisher instance",
"policy": "pre-policy",
"safi": 1
}
diff --git a/tests/topotests/bgp_bmp/r1/bgpd.conf b/tests/topotests/bgp_bmp/r1/frr.conf
index 485c217096..f7cb669b3d 100644
--- a/tests/topotests/bgp_bmp/r1/bgpd.conf
+++ b/tests/topotests/bgp_bmp/r1/frr.conf
@@ -1,3 +1,10 @@
+interface r1-eth0
+ ip address 192.0.2.1/24
+!
+interface r1-eth1
+ ip address 192.168.0.1/24
+ ipv6 address 192:168::1/64
+!
router bgp 65501
bgp router-id 192.168.0.1
bgp log-neighbor-changes
@@ -41,7 +48,7 @@ router bgp 65501
exit-address-family
!
router bgp 65501 vrf vrf1
- bgp router_id 192.168.0.1
+ bgp router-id 192.168.0.1
bgp log-neighbor-changes
address-family ipv4 unicast
label vpn export 101
diff --git a/tests/topotests/bgp_bmp/r1/zebra.conf b/tests/topotests/bgp_bmp/r1/zebra.conf
deleted file mode 100644
index 0b523c9e18..0000000000
--- a/tests/topotests/bgp_bmp/r1/zebra.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-interface r1-eth0
- ip address 192.0.2.1/24
-!
-interface r1-eth1
- ip address 192.168.0.1/24
- ipv6 address 192:168::1/64
-!
diff --git a/tests/topotests/bgp_bmp_vrf/r1/bgpd.conf b/tests/topotests/bgp_bmp/r1vrf/frr.conf
index 961e20498b..8706693458 100644
--- a/tests/topotests/bgp_bmp_vrf/r1/bgpd.conf
+++ b/tests/topotests/bgp_bmp/r1vrf/frr.conf
@@ -1,3 +1,10 @@
+interface r1vrf-eth0
+ ip address 192.0.2.1/24
+!
+interface r1vrf-eth1
+ ip address 192.168.0.1/24
+ ipv6 address 192:168::1/64
+!
router bgp 65501 vrf vrf1
bgp router-id 192.168.0.1
bgp log-neighbor-changes
@@ -15,14 +22,15 @@ router bgp 65501 vrf vrf1
bmp monitor ipv6 unicast loc-rib
exit
!
-
address-family ipv4 unicast
+ rd vpn export 444:1
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 soft-reconfiguration inbound
no neighbor 192:168::2 activate
exit-address-family
!
address-family ipv6 unicast
+ rd vpn export 555:1
neighbor 192:168::2 activate
neighbor 192:168::2 soft-reconfiguration inbound
exit-address-family
diff --git a/tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv4-update-step1.json b/tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv4-update-step1.json
index 038c87ca9d..dc0228db61 100644
--- a/tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv4-update-step1.json
+++ b/tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv4-update-step1.json
@@ -9,7 +9,6 @@
"nexthops": [
{
"ip": "192.168.0.2",
- "hostname": "r2",
"afi": "ipv4",
"used": true
}
diff --git a/tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv4-withdraw-step1.json b/tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv4-withdraw-step1.json
index 6a77813776..6a77813776 100644
--- a/tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv4-withdraw-step1.json
+++ b/tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv4-withdraw-step1.json
diff --git a/tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv6-update-step1.json b/tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv6-update-step1.json
index db34220149..64c8622ab5 100644
--- a/tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv6-update-step1.json
+++ b/tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv6-update-step1.json
@@ -9,12 +9,10 @@
"nexthops": [
{
"ip": "192:168::2",
- "hostname": "r2",
"afi": "ipv6",
"scope": "global"
},
{
- "hostname": "r2",
"afi": "ipv6",
"scope": "link-local",
"used": true
diff --git a/tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv6-withdraw-step1.json b/tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv6-withdraw-step1.json
index 93f4a75e8c..93f4a75e8c 100644
--- a/tests/topotests/bgp_bmp_vrf/r1/show-bgp-ipv6-withdraw-step1.json
+++ b/tests/topotests/bgp_bmp/r1vrf/show-bgp-ipv6-withdraw-step1.json
diff --git a/tests/topotests/bgp_bmp/r2/bgpd.conf b/tests/topotests/bgp_bmp/r2/frr.conf
index 40e2cd5bbc..250071f484 100644
--- a/tests/topotests/bgp_bmp/r2/bgpd.conf
+++ b/tests/topotests/bgp_bmp/r2/frr.conf
@@ -1,3 +1,11 @@
+interface r2-eth0
+ ip address 192.168.0.2/24
+ ipv6 address 192:168::2/64
+!
+interface r2-eth1
+ ip address 172.31.0.2/24
+ ipv6 address 172:31::2/64
+!
router bgp 65502
bgp router-id 192.168.0.2
bgp log-neighbor-changes
diff --git a/tests/topotests/bgp_bmp/r2/zebra.conf b/tests/topotests/bgp_bmp/r2/zebra.conf
deleted file mode 100644
index 9d82bfe2df..0000000000
--- a/tests/topotests/bgp_bmp/r2/zebra.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-interface r2-eth0
- ip address 192.168.0.2/24
- ipv6 address 192:168::2/64
-!
-interface r2-eth1
- ip address 172.31.0.2/24
- ipv6 address 172:31::2/64
-!
diff --git a/tests/topotests/bgp_bmp_vrf/r2/bgpd.conf b/tests/topotests/bgp_bmp/r2vrf/frr.conf
index 7c8255a175..5268190dec 100644
--- a/tests/topotests/bgp_bmp_vrf/r2/bgpd.conf
+++ b/tests/topotests/bgp_bmp/r2vrf/frr.conf
@@ -1,3 +1,11 @@
+interface r2vrf-eth0
+ ip address 192.168.0.2/24
+ ipv6 address 192:168::2/64
+!
+interface r2vrf-eth1
+ ip address 172.31.0.2/24
+ ipv6 address 172:31::2/64
+!
router bgp 65502
bgp router-id 192.168.0.2
bgp log-neighbor-changes
diff --git a/tests/topotests/bgp_bmp/test_bgp_bmp.py b/tests/topotests/bgp_bmp/test_bgp_bmp.py
deleted file mode 100644
index 658ad2b99a..0000000000
--- a/tests/topotests/bgp_bmp/test_bgp_bmp.py
+++ /dev/null
@@ -1,476 +0,0 @@
-#!/usr/bin/env python
-# SPDX-License-Identifier: ISC
-
-# Copyright 2023 6WIND S.A.
-# Authored by Farid Mihoub <farid.mihoub@6wind.com>
-#
-
-"""
-test_bgp_bmp.py: Test BGP BMP functionalities
-
- +------+ +------+ +------+
- | | | | | |
- | BMP1 |------------| R1 |---------------| R2 |
- | | | | | |
- +------+ +------+ +------+
-
-Setup two routers R1 and R2 with one link configured with IPv4 and
-IPv6 addresses.
-Configure BGP in R1 and R2 to exchange prefixes from
-the latter to the first router.
-Setup a link between R1 and the BMP server, activate the BMP feature in R1
-and ensure the monitored BGP sessions logs are well present on the BMP server.
-"""
-
-from functools import partial
-from ipaddress import ip_network
-import json
-import os
-import pytest
-import sys
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join("../"))
-sys.path.append(os.path.join("../lib/"))
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib import topotest
-from lib.bgp import verify_bgp_convergence_from_running_config
-from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-
-pytestmark = [pytest.mark.bgpd]
-
-# remember the last sequence number of the logging messages
-SEQ = 0
-
-PRE_POLICY = "pre-policy"
-POST_POLICY = "post-policy"
-LOC_RIB = "loc-rib"
-
-UPDATE_EXPECTED_JSON = False
-DEBUG_PCAP = False
-
-
-def build_topo(tgen):
- tgen.add_router("r1")
- tgen.add_router("r2")
- tgen.add_bmp_server("bmp1", ip="192.0.2.10", defaultRoute="via 192.0.2.1")
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["bmp1"])
-
- tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "r1-eth1", "r2-eth0")
-
-
-def setup_module(mod):
- tgen = Topogen(build_topo, mod.__name__)
- tgen.start_topology()
-
- if DEBUG_PCAP:
- tgen.gears["r1"].run("rm /tmp/bmp.pcap")
- tgen.gears["r1"].run(
- "tcpdump -nni r1-eth0 -s 0 -w /tmp/bmp.pcap &", stdout=None
- )
-
- for rname, router in tgen.routers().items():
- router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
- )
- router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, "{}/bgpd.conf".format(rname)),
- "-M bmp",
- )
-
- tgen.start_router()
-
- logger.info("starting BMP servers")
- for bmp_name, server in tgen.get_bmp_servers().items():
- server.start(log_file=os.path.join(tgen.logdir, bmp_name, "bmp.log"))
-
-
-def teardown_module(_mod):
- tgen = get_topogen()
- tgen.stop_topology()
-
-
-def test_bgp_convergence():
- tgen = get_topogen()
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
- result = verify_bgp_convergence_from_running_config(tgen, dut="r1")
- assert result is True, "BGP is not converging"
-
-
-def get_bmp_messages():
- """
- Read the BMP logging messages.
- """
- messages = []
- tgen = get_topogen()
- text_output = tgen.gears["bmp1"].run(
- "cat {}".format(os.path.join(tgen.logdir, "bmp1", "bmp.log"))
- )
-
- for m in text_output.splitlines():
- # some output in the bash can break the message decoding
- try:
- messages.append(json.loads(m))
- except Exception as e:
- logger.warning(str(e) + " message: {}".format(str(m)))
- continue
-
- if not messages:
- logger.error("Bad BMP log format, check your BMP server")
-
- return messages
-
-
-def update_seq():
- global SEQ
-
- messages = get_bmp_messages()
-
- if len(messages):
- SEQ = messages[-1]["seq"]
-
-
-def update_expected_files(bmp_actual, expected_prefixes, bmp_log_type, policy, step):
- tgen = get_topogen()
-
- with open(f"/tmp/bmp-{bmp_log_type}-{policy}-step{step}.json", "w") as json_file:
- json.dump(bmp_actual, json_file, indent=4)
-
- if step == 2: # vpn
- rd = "444:2"
- out = tgen.gears["r1"].vtysh_cmd("show bgp ipv4 vpn json", isjson=True)
- filtered_out = {
- "routes": {
- "routeDistinguishers": {
- rd: {
- prefix: route_info
- for prefix, route_info in out["routes"]
- .get("routeDistinguishers", {})
- .get(rd, {})
- .items()
- if prefix in expected_prefixes
- }
- }
- }
- }
- if bmp_log_type == "withdraw":
- for pfx in expected_prefixes:
- if "::" in pfx:
- continue
- filtered_out["routes"]["routeDistinguishers"][rd][pfx] = None
-
- # ls /tmp/show*json | while read file; do egrep -v 'prefix|network|metric|ocPrf|version|weight|peerId|vrf|Version|valid|Reason|fe80' $file >$(basename $file); echo >> $(basename $file); done
- with open(
- f"/tmp/show-bgp-ipv4-{bmp_log_type}-step{step}.json", "w"
- ) as json_file:
- json.dump(filtered_out, json_file, indent=4)
-
- rd = "555:2"
- out = tgen.gears["r1"].vtysh_cmd("show bgp ipv6 vpn json", isjson=True)
- filtered_out = {
- "routes": {
- "routeDistinguishers": {
- rd: {
- prefix: route_info
- for prefix, route_info in out["routes"]
- .get("routeDistinguishers", {})
- .get(rd, {})
- .items()
- if prefix in expected_prefixes
- }
- }
- }
- }
- if bmp_log_type == "withdraw":
- for pfx in expected_prefixes:
- if "::" not in pfx:
- continue
- filtered_out["routes"]["routeDistinguishers"][rd][pfx] = None
- with open(
- f"/tmp/show-bgp-ipv6-{bmp_log_type}-step{step}.json", "w"
- ) as json_file:
- json.dump(filtered_out, json_file, indent=4)
-
- return
-
- out = tgen.gears["r1"].vtysh_cmd("show bgp ipv4 json", isjson=True)
- filtered_out = {
- "routes": {
- prefix: route_info
- for prefix, route_info in out["routes"].items()
- if prefix in expected_prefixes
- }
- }
- if bmp_log_type == "withdraw":
- for pfx in expected_prefixes:
- if "::" in pfx:
- continue
- filtered_out["routes"][pfx] = None
-
- # ls /tmp/show*json | while read file; do egrep -v 'prefix|network|metric|ocPrf|version|weight|peerId|vrf|Version|valid|Reason|fe80' $file >$(basename $file); echo >> $(basename $file); done
- with open(f"/tmp/show-bgp-ipv4-{bmp_log_type}-step{step}.json", "w") as json_file:
- json.dump(filtered_out, json_file, indent=4)
-
- out = tgen.gears["r1"].vtysh_cmd("show bgp ipv6 json", isjson=True)
- filtered_out = {
- "routes": {
- prefix: route_info
- for prefix, route_info in out["routes"].items()
- if prefix in expected_prefixes
- }
- }
- if bmp_log_type == "withdraw":
- for pfx in expected_prefixes:
- if "::" not in pfx:
- continue
- filtered_out["routes"][pfx] = None
- with open(f"/tmp/show-bgp-ipv6-{bmp_log_type}-step{step}.json", "w") as json_file:
- json.dump(filtered_out, json_file, indent=4)
-
-
-def check_for_prefixes(expected_prefixes, bmp_log_type, policy, step):
- """
- Check for the presence of the given prefixes in the BMP server logs with
- the given message type and the set policy.
-
- """
- global SEQ
-
- # we care only about the new messages
- messages = [
- m for m in sorted(get_bmp_messages(), key=lambda d: d["seq"]) if m["seq"] > SEQ
- ]
-
- # create empty initial files
- # for step in $(seq 2); do
- # for i in "update" "withdraw"; do
- # for j in "pre-policy" "post-policy" "loc-rib"; do
- # echo '{"null": {}}'> bmp-$i-$j-step$step.json
- # done
- # done
- # done
-
- ref_file = f"{CWD}/bmp1/bmp-{bmp_log_type}-{policy}-step{step}.json"
- expected = json.loads(open(ref_file).read())
-
- # Build actual json from logs
- actual = {}
- for m in messages:
- if (
- "bmp_log_type" in m.keys()
- and "ip_prefix" in m.keys()
- and m["ip_prefix"] in expected_prefixes
- and m["bmp_log_type"] == bmp_log_type
- and m["policy"] == policy
- ):
- policy_dict = actual.setdefault(m["policy"], {})
- bmp_log_type_dict = policy_dict.setdefault(m["bmp_log_type"], {})
-
- # Add or update the ip_prefix dictionary with filtered key-value pairs
- bmp_log_type_dict[m["ip_prefix"]] = {
- k: v
- for k, v in sorted(m.items())
- # filter out variable keys
- if k not in ["timestamp", "seq", "nxhp_link-local"]
- and (
- # When policy is loc-rib, the peer-distinguisher is 0:0
- # for the default VRF or the RD if any or the 0:<vrf_id>.
- # 0:<vrf_id> is used to distinguished. RFC7854 says: "If the
- # peer is a "Local Instance Peer", it is set to a unique,
- # locally defined value." The value is not tested because it
- # is variable.
- k != "peer_distinguisher"
- or policy != LOC_RIB
- or v == "0:0"
- or not v.startswith("0:")
- )
- }
-
- # build expected JSON files
- if (
- UPDATE_EXPECTED_JSON
- and actual
- and set(actual.get(policy, {}).get(bmp_log_type, {}).keys())
- == set(expected_prefixes)
- ):
- update_expected_files(actual, expected_prefixes, bmp_log_type, policy, step)
-
- return topotest.json_cmp(actual, expected, exact=True)
-
-
-def check_for_peer_message(expected_peers, bmp_log_type):
- """
- Check for the presence of a peer up message for the peer
- """
- global SEQ
- # we care only about the new messages
- messages = [
- m for m in sorted(get_bmp_messages(), key=lambda d: d["seq"]) if m["seq"] > SEQ
- ]
-
- # get the list of pairs (prefix, policy, seq) for the given message type
- peers = [
- m["peer_ip"]
- for m in messages
- if "peer_ip" in m.keys() and m["bmp_log_type"] == bmp_log_type
- ]
-
- # check for prefixes
- for ep in expected_peers:
- if ep not in peers:
- msg = "The peer {} is not present in the {} log messages."
- logger.debug(msg.format(ep, bmp_log_type))
- return False
-
- SEQ = messages[-1]["seq"]
- return True
-
-
-def configure_prefixes(tgen, node, asn, safi, prefixes, vrf=None, update=True):
- """
- Configure the bgp prefixes.
- """
- withdraw = "no " if not update else ""
- vrf = " vrf {}".format(vrf) if vrf else ""
- for p in prefixes:
- ip = ip_network(p)
- cmd = [
- "conf t\n",
- "router bgp {}{}\n".format(asn, vrf),
- "address-family ipv{} {}\n".format(ip.version, safi),
- "{}network {}\n".format(withdraw, ip),
- "exit-address-family\n",
- ]
- logger.debug("setting prefix: ipv{} {} {}".format(ip.version, safi, ip))
- tgen.gears[node].vtysh_cmd("".join(cmd))
-
-
-def _test_prefixes(policy, vrf=None, step=0):
- """
- Setup the BMP monitor policy, Add and withdraw ipv4/v6 prefixes.
- Check if the previous actions are logged in the BMP server with the right
- message type and the right policy.
- """
- tgen = get_topogen()
-
- safi = "vpn" if vrf else "unicast"
-
- prefixes = ["172.31.0.15/32", "2001::1111/128"]
-
- for type in ("update", "withdraw"):
- update_seq()
-
- configure_prefixes(
- tgen, "r2", 65502, "unicast", prefixes, vrf=vrf, update=(type == "update")
- )
-
- logger.info(f"checking for prefixes {type}")
-
- for ipver in [4, 6]:
- if UPDATE_EXPECTED_JSON:
- continue
- ref_file = "{}/r1/show-bgp-ipv{}-{}-step{}.json".format(
- CWD, ipver, type, step
- )
- expected = json.loads(open(ref_file).read())
-
- test_func = partial(
- topotest.router_json_cmp,
- tgen.gears["r1"],
- f"show bgp ipv{ipver} {safi} json",
- expected,
- )
- _, res = topotest.run_and_expect(test_func, None, count=30, wait=1)
- assertmsg = f"r1: BGP IPv{ipver} convergence failed"
- assert res is None, assertmsg
-
- # check
- test_func = partial(check_for_prefixes, prefixes, type, policy, step)
- success, res = topotest.run_and_expect(test_func, None, count=30, wait=1)
- assert success, "Checking the updated prefixes has failed ! %s" % res
-
-
-def test_bmp_server_logging():
- """
- Assert the logging of the bmp server.
- """
-
- def check_for_log_file():
- tgen = get_topogen()
- output = tgen.gears["bmp1"].run(
- "ls {}".format(os.path.join(tgen.logdir, "bmp1"))
- )
- if "bmp.log" not in output:
- return False
- return True
-
- success, _ = topotest.run_and_expect(check_for_log_file, True, count=30, wait=1)
- assert success, "The BMP server is not logging"
-
-
-def test_peer_up():
- """
- Checking for BMP peers up messages
- """
-
- peers = ["192.168.0.2", "192:168::2"]
-
- logger.info("checking for BMP peers up messages")
-
- test_func = partial(check_for_peer_message, peers, "peer up")
- success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1)
- assert success, "Checking the updated prefixes has been failed !."
-
-
-def test_bmp_bgp_unicast():
- """
- Add/withdraw bgp unicast prefixes and check the bmp logs.
- """
- logger.info("*** Unicast prefixes pre-policy logging ***")
- _test_prefixes(PRE_POLICY, step=1)
- logger.info("*** Unicast prefixes post-policy logging ***")
- _test_prefixes(POST_POLICY, step=1)
- logger.info("*** Unicast prefixes loc-rib logging ***")
- _test_prefixes(LOC_RIB, step=1)
-
-
-def test_bmp_bgp_vpn():
- # check for the prefixes in the BMP server logging file
- logger.info("***** VPN prefixes pre-policy logging *****")
- _test_prefixes(PRE_POLICY, vrf="vrf1", step=2)
- logger.info("***** VPN prefixes post-policy logging *****")
- _test_prefixes(POST_POLICY, vrf="vrf1", step=2)
- logger.info("***** VPN prefixes loc-rib logging *****")
- _test_prefixes(LOC_RIB, vrf="vrf1", step=2)
-
-
-def test_peer_down():
- """
- Checking for BMP peers down messages
- """
- tgen = get_topogen()
-
- tgen.gears["r2"].vtysh_cmd("clear bgp *")
-
- peers = ["192.168.0.2", "192:168::2"]
-
- logger.info("checking for BMP peers down messages")
-
- test_func = partial(check_for_peer_message, peers, "peer down")
- success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1)
- assert success, "Checking the updated prefixes has been failed !."
-
-
-if __name__ == "__main__":
- args = ["-s"] + sys.argv[1:]
- sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_bmp/test_bgp_bmp_1.py b/tests/topotests/bgp_bmp/test_bgp_bmp_1.py
new file mode 100644
index 0000000000..be3e07929a
--- /dev/null
+++ b/tests/topotests/bgp_bmp/test_bgp_bmp_1.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+# Copyright 2023 6WIND S.A.
+# Authored by Farid Mihoub <farid.mihoub@6wind.com>
+#
+
+"""
+test_bgp_bmp_1.py: Test BGP BMP functionalities
+
+ +------+ +------+ +------+
+ | | | | | |
+ | BMP1 |------------| R1 |---------------| R2 |
+ | | | | | |
+ +------+ +------+ +------+
+
+Setup two routers R1 and R2 with one link configured with IPv4 and
+IPv6 addresses.
+Configure BGP in R1 and R2 to exchange prefixes from
+the latter to the first router.
+Setup a link between R1 and the BMP server, activate the BMP feature in R1
+and ensure the monitored BGP sessions logs are well present on the BMP server.
+"""
+
+from functools import partial
+import json
+import os
+import pytest
+import sys
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join("../"))
+sys.path.append(os.path.join("../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.bgp import verify_bgp_convergence_from_running_config
+from lib.bgp import bgp_configure_prefixes
+from .bgpbmp import (
+ bmp_check_for_prefixes,
+ bmp_check_for_peer_message,
+ bmp_update_seq,
+)
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+pytestmark = [pytest.mark.bgpd]
+
+PRE_POLICY = "pre-policy"
+POST_POLICY = "post-policy"
+LOC_RIB = "loc-rib"
+
+UPDATE_EXPECTED_JSON = False
+DEBUG_PCAP = False
+
+
+def build_topo(tgen):
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_bmp_server("bmp1", ip="192.0.2.10", defaultRoute="via 192.0.2.1")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["bmp1"])
+
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "r1-eth1", "r2-eth0")
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ if DEBUG_PCAP:
+ pcap_file = os.path.join(tgen.logdir, "r1/bmp.pcap")
+ tgen.gears["r1"].run(
+ "tcpdump -nni r1-eth0 -s 0 -w {} &".format(pcap_file), stdout=None
+ )
+
+ for rname, router in tgen.routers().items():
+ logger.info("Loading router %s" % rname)
+ router.load_frr_config(
+ os.path.join(CWD, "{}/frr.conf".format(rname)),
+ [(TopoRouter.RD_ZEBRA, None), (TopoRouter.RD_BGP, "-M bmp")],
+ )
+
+ tgen.start_router()
+
+ logger.info("starting BMP servers")
+ for bmp_name, server in tgen.get_bmp_servers().items():
+ server.start(log_file=os.path.join(tgen.logdir, bmp_name, "bmp.log"))
+
+
+def teardown_module(_mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_convergence():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ result = verify_bgp_convergence_from_running_config(tgen, dut="r1")
+ assert result is True, "BGP is not converging"
+
+
+def _test_prefixes(policy, vrf=None, step=0):
+ """
+ Setup the BMP monitor policy, Add and withdraw ipv4/v6 prefixes.
+ Check if the previous actions are logged in the BMP server with the right
+ message type and the right policy.
+ """
+ tgen = get_topogen()
+
+ safi = "vpn" if vrf else "unicast"
+
+ prefixes = ["172.31.0.15/32", "2001::1111/128"]
+
+ for type in ("update", "withdraw"):
+ bmp_update_seq(tgen.gears["bmp1"], os.path.join(tgen.logdir, "bmp1", "bmp.log"))
+
+ bgp_configure_prefixes(
+ tgen.gears["r2"],
+ 65502,
+ "unicast",
+ prefixes,
+ vrf=vrf,
+ update=(type == "update"),
+ )
+
+ logger.info(f"checking for prefixes {type}")
+
+ for ipver in [4, 6]:
+ if UPDATE_EXPECTED_JSON:
+ continue
+ ref_file = "{}/r1/show-bgp-ipv{}-{}-step{}.json".format(
+ CWD, ipver, type, step
+ )
+ expected = json.loads(open(ref_file).read())
+
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears["r1"],
+ f"show bgp ipv{ipver} {safi} json",
+ expected,
+ )
+ _, res = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assertmsg = f"r1: BGP IPv{ipver} convergence failed"
+ assert res is None, assertmsg
+
+ # check
+ test_func = partial(
+ bmp_check_for_prefixes,
+ prefixes,
+ type,
+ policy,
+ step,
+ tgen.gears["bmp1"],
+ os.path.join(tgen.logdir, "bmp1"),
+ tgen.gears["r1"],
+ f"{CWD}/bmp1",
+ UPDATE_EXPECTED_JSON,
+ LOC_RIB,
+ )
+ success, res = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert success, "Checking the updated prefixes has failed ! %s" % res
+
+
+def test_bmp_server_logging():
+ """
+ Assert the logging of the bmp server.
+ """
+
+ def check_for_log_file():
+ tgen = get_topogen()
+ output = tgen.gears["bmp1"].run(
+ "ls {}".format(os.path.join(tgen.logdir, "bmp1"))
+ )
+ if "bmp.log" not in output:
+ return False
+ return True
+
+ success, _ = topotest.run_and_expect(check_for_log_file, True, count=30, wait=1)
+ assert success, "The BMP server is not logging"
+
+
+def test_peer_up():
+ """
+ Checking for BMP peers up messages
+ """
+
+ tgen = get_topogen()
+ peers = ["192.168.0.2", "192:168::2", "0.0.0.0"]
+
+ logger.info("checking for BMP peers up messages")
+
+ test_func = partial(
+ bmp_check_for_peer_message,
+ peers,
+ "peer up",
+ tgen.gears["bmp1"],
+ os.path.join(tgen.logdir, "bmp1", "bmp.log"),
+ )
+ success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1)
+ assert success, "Checking the updated prefixes has been failed !."
+
+
+def test_bmp_bgp_unicast():
+ """
+ Add/withdraw bgp unicast prefixes and check the bmp logs.
+ """
+ logger.info("*** Unicast prefixes pre-policy logging ***")
+ _test_prefixes(PRE_POLICY, step=1)
+ logger.info("*** Unicast prefixes post-policy logging ***")
+ _test_prefixes(POST_POLICY, step=1)
+ logger.info("*** Unicast prefixes loc-rib logging ***")
+ _test_prefixes(LOC_RIB, step=1)
+
+
+def test_bmp_bgp_vpn():
+ # check for the prefixes in the BMP server logging file
+ logger.info("***** VPN prefixes pre-policy logging *****")
+ _test_prefixes(PRE_POLICY, vrf="vrf1", step=2)
+ logger.info("***** VPN prefixes post-policy logging *****")
+ _test_prefixes(POST_POLICY, vrf="vrf1", step=2)
+ logger.info("***** VPN prefixes loc-rib logging *****")
+ _test_prefixes(LOC_RIB, vrf="vrf1", step=2)
+
+
+def test_peer_down():
+ """
+ Check for BMP peer down messages.
+ """
+ tgen = get_topogen()
+
+ tgen.gears["r2"].vtysh_cmd("clear bgp *")
+
+ peers = ["192.168.0.2", "192:168::2"]
+
+ logger.info("checking for BMP peers down messages")
+
+ test_func = partial(
+ bmp_check_for_peer_message,
+ peers,
+ "peer down",
+ tgen.gears["bmp1"],
+ os.path.join(tgen.logdir, "bmp1", "bmp.log"),
+ )
+ success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1)
+ assert success, "Checking for BMP peer down messages failed"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_bmp/test_bgp_bmp_2.py b/tests/topotests/bgp_bmp/test_bgp_bmp_2.py
new file mode 100644
index 0000000000..f16ff2b445
--- /dev/null
+++ b/tests/topotests/bgp_bmp/test_bgp_bmp_2.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+# Copyright 2023 6WIND S.A.
+# Authored by Farid Mihoub <farid.mihoub@6wind.com>
+#
+
+"""
+test_bgp_bmp_2.py: Test BGP BMP functionalities in a VRF
+
+ +------+ +------+ +------+
+ | | | | | |
+ | BMP1 |------------| R1 |---------------| R2 |
+ | | | | | |
+ +------+ +------+ +------+
+
+Set up two routers R1 and R2 with one link configured with IPv4 and
+IPv6 addresses, the R1 side of which is placed in a VRF. Configure BGP
+on R1 and R2 so that R2 advertises prefixes to R1. Set up a link
+between R1 and the BMP server, activate the BMP feature in R1, and
+ensure the logs of the monitored BGP sessions are present on the BMP
+server.
+"""
+
+from functools import partial
+import json
+import os
+import platform
+import pytest
+import sys
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join("../"))
+sys.path.append(os.path.join("../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.bgp import verify_bgp_convergence_from_running_config
+from lib.bgp import bgp_configure_prefixes
+from .bgpbmp import (
+ bmp_check_for_prefixes,
+ bmp_check_for_peer_message,
+ bmp_update_seq,
+ bmp_reset_seq,
+)
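+# The bgpbmp helpers factor out the former per-test logic: bmp_reset_seq and
+# bmp_update_seq track the last "seq" field seen in bmp.log so each check
+# only looks at new messages, while bmp_check_for_prefixes and
+# bmp_check_for_peer_message compare the decoded log entries against the
+# expected JSON references.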
+
+
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+pytestmark = [pytest.mark.bgpd]
+
+PRE_POLICY = "pre-policy"
+POST_POLICY = "post-policy"
+LOC_RIB = "loc-rib"
+
+UPDATE_EXPECTED_JSON = False
+DEBUG_PCAP = False
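+# Debug knobs: UPDATE_EXPECTED_JSON regenerates the expected JSON reference
+# files instead of comparing against them; DEBUG_PCAP captures the BMP
+# session with tcpdump into the test log directory.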
+
+
+def build_topo(tgen):
+ tgen.add_router("r1vrf")
+ tgen.add_router("r2vrf")
+ tgen.add_bmp_server("bmp1vrf", ip="192.0.2.10", defaultRoute="via 192.0.2.1")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1vrf"])
+ switch.add_link(tgen.gears["bmp1vrf"])
+
+ tgen.add_link(tgen.gears["r1vrf"], tgen.gears["r2vrf"], "r1vrf-eth1", "r2vrf-eth0")
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ tgen.net["r1vrf"].cmd(
+ """
+ip link add vrf1 type vrf table 10
+ip link set vrf1 up
+ip link set r1vrf-eth1 master vrf1
+"""
+ )
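+ # r1vrf-eth1 is enslaved to the "vrf1" VRF (table 10), so the BGP session
+ # towards r2vrf runs in a non-default VRF while the BMP collector remains
+ # reachable through the default VRF.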
+ bmp_reset_seq()
+ if DEBUG_PCAP:
+ pcap_file = os.path.join(tgen.logdir, "r1vrf/bmp.pcap")
+ tgen.gears["r1vrf"].run(
+ "tcpdump -nni r1vrf-eth0 -s 0 -w {} &".format(pcap_file), stdout=None
+ )
+
+ for rname, router in tgen.routers().items():
+ logger.info("Loading router %s" % rname)
+ router.load_frr_config(
+ os.path.join(CWD, "{}/frr.conf".format(rname)),
+ [(TopoRouter.RD_ZEBRA, None), (TopoRouter.RD_BGP, "-M bmp")],
+ )
+
+ tgen.start_router()
+
+ logger.info("starting BMP servers")
+ for bmp_name, server in tgen.get_bmp_servers().items():
+ server.start(log_file=os.path.join(tgen.logdir, bmp_name, "bmp.log"))
+
+
+def teardown_module(_mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_convergence():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ result = verify_bgp_convergence_from_running_config(tgen, dut="r1vrf")
+ assert result is True, "BGP is not converging"
+
+
+def _test_prefixes(policy, step=1):
+ """
+ Set up the BMP monitor policy, add and withdraw IPv4/IPv6 prefixes, and
+ check that these actions are logged by the BMP server with the correct
+ message type and policy.
+ """
+ tgen = get_topogen()
+
+ prefixes = ["172.31.0.15/32", "2111::1111/128"]
+
+ for type in ("update", "withdraw"):
+ bmp_update_seq(
+ tgen.gears["bmp1vrf"], os.path.join(tgen.logdir, "bmp1vrf", "bmp.log")
+ )
+
+ # add or withdraw prefixes
+ bgp_configure_prefixes(
+ tgen.gears["r2vrf"], 65502, "unicast", prefixes, update=(type == "update")
+ )
+
+ logger.info(f"checking for prefixes {type}")
+
+ for ipver in [4, 6]:
+ if UPDATE_EXPECTED_JSON:
+ continue
+ ref_file = "{}/r1vrf/show-bgp-ipv{}-{}-step{}.json".format(
+ CWD, ipver, type, step
+ )
+ expected = json.loads(open(ref_file).read())
+
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears["r1vrf"],
+ f"show bgp vrf vrf1 ipv{ipver} json",
+ expected,
+ )
+ _, res = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assertmsg = f"r1vrf: BGP IPv{ipver} convergence failed"
+ assert res is None, assertmsg
+
+ # check that the BMP server logged the prefixes with the right policy
+ test_func = partial(
+ bmp_check_for_prefixes,
+ prefixes,
+ type,
+ policy,
+ step,
+ tgen.gears["bmp1vrf"],
+ os.path.join(tgen.logdir, "bmp1vrf"),
+ tgen.gears["r1vrf"],
+ f"{CWD}/bmp1vrf",
+ UPDATE_EXPECTED_JSON,
+ LOC_RIB,
+ )
+ success, res = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert success, "Checking the updated prefixes has failed ! %s" % res
+
+
+def test_bmp_server_logging():
+ """
+ Assert that the BMP server is logging.
+ """
+
+ def check_for_log_file():
+ tgen = get_topogen()
+ output = tgen.gears["bmp1vrf"].run(
+ "ls {}".format(os.path.join(tgen.logdir, "bmp1vrf"))
+ )
+ if "bmp.log" not in output:
+ return False
+ return True
+
+ success, _ = topotest.run_and_expect(check_for_log_file, True, count=30, wait=1)
+ assert success, "The BMP server is not logging"
+
+
+def test_peer_up():
+ """
+ Check for BMP peer up messages.
+ """
+
+ tgen = get_topogen()
+ peers = ["192.168.0.2", "192:168::2", "0.0.0.0"]
+
+ logger.info("checking for BMP peers up messages")
+
+ test_func = partial(
+ bmp_check_for_peer_message,
+ peers,
+ "peer up",
+ tgen.gears["bmp1vrf"],
+ os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"),
+ is_rd_instance=True,
+ )
+ success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1)
+ assert success, "Checking for BMP peer up messages failed"
+
+
+def test_bmp_bgp_unicast():
+ """
+ Add/withdraw BGP unicast prefixes and check the BMP logs.
+ """
+ logger.info("*** Unicast prefixes pre-policy logging ***")
+ _test_prefixes(PRE_POLICY)
+ logger.info("*** Unicast prefixes post-policy logging ***")
+ _test_prefixes(POST_POLICY)
+ logger.info("*** Unicast prefixes loc-rib logging ***")
+ _test_prefixes(LOC_RIB)
+
+
+def test_peer_down():
+ """
+ Check for BMP peer down messages.
+ """
+ tgen = get_topogen()
+
+ tgen.gears["r2vrf"].vtysh_cmd("clear bgp *")
+
+ peers = ["192.168.0.2", "192:168::2"]
+
+ logger.info("checking for BMP peers down messages")
+
+ test_func = partial(
+ bmp_check_for_peer_message,
+ peers,
+ "peer down",
+ tgen.gears["bmp1vrf"],
+ os.path.join(tgen.logdir, "bmp1vrf", "bmp.log"),
+ is_rd_instance=True,
+ )
+ success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1)
+ assert success, "Checking for BMP peer down messages failed"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_bmp_vrf/r1/zebra.conf b/tests/topotests/bgp_bmp_vrf/r1/zebra.conf
deleted file mode 100644
index 0b523c9e18..0000000000
--- a/tests/topotests/bgp_bmp_vrf/r1/zebra.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-interface r1-eth0
- ip address 192.0.2.1/24
-!
-interface r1-eth1
- ip address 192.168.0.1/24
- ipv6 address 192:168::1/64
-!
diff --git a/tests/topotests/bgp_bmp_vrf/r2/zebra.conf b/tests/topotests/bgp_bmp_vrf/r2/zebra.conf
deleted file mode 100644
index 9d82bfe2df..0000000000
--- a/tests/topotests/bgp_bmp_vrf/r2/zebra.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-interface r2-eth0
- ip address 192.168.0.2/24
- ipv6 address 192:168::2/64
-!
-interface r2-eth1
- ip address 172.31.0.2/24
- ipv6 address 172:31::2/64
-!
diff --git a/tests/topotests/bgp_bmp_vrf/test_bgp_bmp_vrf.py b/tests/topotests/bgp_bmp_vrf/test_bgp_bmp_vrf.py
deleted file mode 100644
index d31328bdb6..0000000000
--- a/tests/topotests/bgp_bmp_vrf/test_bgp_bmp_vrf.py
+++ /dev/null
@@ -1,418 +0,0 @@
-#!/usr/bin/env python
-# SPDX-License-Identifier: ISC
-
-# Copyright 2023 6WIND S.A.
-# Authored by Farid Mihoub <farid.mihoub@6wind.com>
-#
-
-"""
-test_bgp_bmp.py: Test BGP BMP functionalities
-
- +------+ +------+ +------+
- | | | | | |
- | BMP1 |------------| R1 |---------------| R2 |
- | | | | | |
- +------+ +------+ +------+
-
-Setup two routers R1 and R2 with one link configured with IPv4 and
-IPv6 addresses.
-Configure BGP in R1 and R2 to exchange prefixes from
-the latter to the first router.
-Setup a link between R1 and the BMP server, activate the BMP feature in R1
-and ensure the monitored BGP sessions logs are well present on the BMP server.
-"""
-
-from functools import partial
-from ipaddress import ip_network
-import json
-import os
-import platform
-import pytest
-import sys
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join("../"))
-sys.path.append(os.path.join("../lib/"))
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib import topotest
-from lib.bgp import verify_bgp_convergence_from_running_config
-from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-
-pytestmark = [pytest.mark.bgpd]
-
-# remember the last sequence number of the logging messages
-SEQ = 0
-
-PRE_POLICY = "pre-policy"
-POST_POLICY = "post-policy"
-LOC_RIB = "loc-rib"
-
-UPDATE_EXPECTED_JSON = False
-DEBUG_PCAP = False
-
-
-def build_topo(tgen):
- tgen.add_router("r1")
- tgen.add_router("r2")
- tgen.add_bmp_server("bmp1", ip="192.0.2.10", defaultRoute="via 192.0.2.1")
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["bmp1"])
-
- tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "r1-eth1", "r2-eth0")
-
-
-def setup_module(mod):
- tgen = Topogen(build_topo, mod.__name__)
- tgen.start_topology()
-
- tgen.net["r1"].cmd(
- """
-ip link add vrf1 type vrf table 10
-ip link set vrf1 up
-ip link set r1-eth1 master vrf1
-"""
- )
-
- if DEBUG_PCAP:
- tgen.gears["r1"].run("rm /tmp/bmp_vrf.pcap")
- tgen.gears["r1"].run(
- "tcpdump -nni r1-eth0 -s 0 -w /tmp/bmp_vrf.pcap &", stdout=None
- )
-
- for rname, router in tgen.routers().items():
- router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
- )
- router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, "{}/bgpd.conf".format(rname)),
- "-M bmp",
- )
-
- tgen.start_router()
-
- logger.info("starting BMP servers")
- for bmp_name, server in tgen.get_bmp_servers().items():
- server.start(log_file=os.path.join(tgen.logdir, bmp_name, "bmp.log"))
-
-
-def teardown_module(_mod):
- tgen = get_topogen()
- tgen.stop_topology()
-
-
-def test_bgp_convergence():
- tgen = get_topogen()
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
- result = verify_bgp_convergence_from_running_config(tgen, dut="r1")
- assert result is True, "BGP is not converging"
-
-
-def get_bmp_messages():
- """
- Read the BMP logging messages.
- """
- messages = []
- tgen = get_topogen()
- text_output = tgen.gears["bmp1"].run(
- "cat {}".format(os.path.join(tgen.logdir, "bmp1", "bmp.log"))
- )
-
- for m in text_output.splitlines():
- # some output in the bash can break the message decoding
- try:
- messages.append(json.loads(m))
- except Exception as e:
- logger.warning(str(e) + " message: {}".format(str(m)))
- continue
-
- if not messages:
- logger.error("Bad BMP log format, check your BMP server")
-
- return messages
-
-
-def update_seq():
- global SEQ
-
- messages = get_bmp_messages()
-
- if len(messages):
- SEQ = messages[-1]["seq"]
-
-
-def update_expected_files(bmp_actual, expected_prefixes, bmp_log_type, policy, step):
- tgen = get_topogen()
-
- with open(f"/tmp/bmp-{bmp_log_type}-{policy}-step{step}.json", "w") as json_file:
- json.dump(bmp_actual, json_file, indent=4)
-
- out = tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 ipv4 json", isjson=True)
- filtered_out = {
- "routes": {
- prefix: route_info
- for prefix, route_info in out["routes"].items()
- if prefix in expected_prefixes
- }
- }
- if bmp_log_type == "withdraw":
- for pfx in expected_prefixes:
- if "::" in pfx:
- continue
- filtered_out["routes"][pfx] = None
-
- # ls /tmp/show*json | while read file; do egrep -v 'prefix|network|metric|ocPrf|version|weight|peerId|vrf|Version|valid|Reason|fe80' $file >$(basename $file); echo >> $(basename $file); done
- with open(f"/tmp/show-bgp-ipv4-{bmp_log_type}-step{step}.json", "w") as json_file:
- json.dump(filtered_out, json_file, indent=4)
-
- out = tgen.gears["r1"].vtysh_cmd("show bgp vrf vrf1 ipv6 json", isjson=True)
- filtered_out = {
- "routes": {
- prefix: route_info
- for prefix, route_info in out["routes"].items()
- if prefix in expected_prefixes
- }
- }
- if bmp_log_type == "withdraw":
- for pfx in expected_prefixes:
- if "::" not in pfx:
- continue
- filtered_out["routes"][pfx] = None
-
- with open(f"/tmp/show-bgp-ipv6-{bmp_log_type}-step{step}.json", "w") as json_file:
- json.dump(filtered_out, json_file, indent=4)
-
-
-def check_for_prefixes(expected_prefixes, bmp_log_type, policy, step):
- """
- Check for the presence of the given prefixes in the BMP server logs with
- the given message type and the set policy.
-
- """
- global SEQ
-
- # we care only about the new messages
- messages = [
- m for m in sorted(get_bmp_messages(), key=lambda d: d["seq"]) if m["seq"] > SEQ
- ]
-
- # create empty initial files
- # for step in $(seq 1); do
- # for i in "update" "withdraw"; do
- # for j in "pre-policy" "post-policy" "loc-rib"; do
- # echo '{"null": {}}'> bmp-$i-$j-step$step.json
- # done
- # done
- # done
-
- ref_file = f"{CWD}/bmp1/bmp-{bmp_log_type}-{policy}-step{step}.json"
- expected = json.loads(open(ref_file).read())
-
- # Build actual json from logs
- actual = {}
- for m in messages:
- if (
- "bmp_log_type" in m.keys()
- and "ip_prefix" in m.keys()
- and m["ip_prefix"] in expected_prefixes
- and m["bmp_log_type"] == bmp_log_type
- and m["policy"] == policy
- ):
- policy_dict = actual.setdefault(m["policy"], {})
- bmp_log_type_dict = policy_dict.setdefault(m["bmp_log_type"], {})
-
- # Add or update the ip_prefix dictionary with filtered key-value pairs
- bmp_log_type_dict[m["ip_prefix"]] = {
- k: v
- for k, v in sorted(m.items())
- # filter out variable keys
- if k not in ["timestamp", "seq", "nxhp_link-local"]
- and (
- # When policy is loc-rib, the peer-distinguisher is 0:0
- # for the default VRF or the RD if any or the 0:<vrf_id>.
- # 0:<vrf_id> is used to distinguished. RFC7854 says: "If the
- # peer is a "Local Instance Peer", it is set to a unique,
- # locally defined value." The value is not tested because it
- # is variable.
- k != "peer_distinguisher"
- or policy != LOC_RIB
- or v == "0:0"
- or not v.startswith("0:")
- )
- }
-
- # build expected JSON files
- if (
- UPDATE_EXPECTED_JSON
- and actual
- and set(actual.get(policy, {}).get(bmp_log_type, {}).keys())
- == set(expected_prefixes)
- ):
- update_expected_files(actual, expected_prefixes, bmp_log_type, policy, step)
-
- return topotest.json_cmp(actual, expected, exact=True)
-
-
-def check_for_peer_message(expected_peers, bmp_log_type):
- """
- Check for the presence of a peer up message for the peer
- """
- global SEQ
- # we care only about the new messages
- messages = [
- m for m in sorted(get_bmp_messages(), key=lambda d: d["seq"]) if m["seq"] > SEQ
- ]
-
- # get the list of pairs (prefix, policy, seq) for the given message type
- peers = [
- m["peer_ip"]
- for m in messages
- if "peer_ip" in m.keys() and m["bmp_log_type"] == bmp_log_type
- ]
-
- # check for prefixes
- for ep in expected_peers:
- if ep not in peers:
- msg = "The peer {} is not present in the {} log messages."
- logger.debug(msg.format(ep, bmp_log_type))
- return False
-
- SEQ = messages[-1]["seq"]
- return True
-
-
-def configure_prefixes(tgen, node, asn, safi, prefixes, vrf=None, update=True):
- """
- Configure the bgp prefixes.
- """
- withdraw = "no " if not update else ""
- vrf = " vrf {}".format(vrf) if vrf else ""
- for p in prefixes:
- ip = ip_network(p)
- cmd = [
- "conf t\n",
- "router bgp {}{}\n".format(asn, vrf),
- "address-family ipv{} {}\n".format(ip.version, safi),
- "{}network {}\n".format(withdraw, ip),
- "exit-address-family\n",
- ]
- logger.debug("setting prefix: ipv{} {} {}".format(ip.version, safi, ip))
- tgen.gears[node].vtysh_cmd("".join(cmd))
-
-
-def _test_prefixes(policy, step=1):
- """
- Setup the BMP monitor policy, Add and withdraw ipv4/v6 prefixes.
- Check if the previous actions are logged in the BMP server with the right
- message type and the right policy.
- """
- tgen = get_topogen()
-
- prefixes = ["172.31.0.15/32", "2111::1111/128"]
-
- for type in ("update", "withdraw"):
- update_seq()
-
- # add prefixes
- configure_prefixes(
- tgen, "r2", 65502, "unicast", prefixes, update=(type == "update")
- )
-
- logger.info(f"checking for prefixes {type}")
-
- for ipver in [4, 6]:
- if UPDATE_EXPECTED_JSON:
- continue
- ref_file = "{}/r1/show-bgp-ipv{}-{}-step{}.json".format(
- CWD, ipver, type, step
- )
- expected = json.loads(open(ref_file).read())
-
- test_func = partial(
- topotest.router_json_cmp,
- tgen.gears["r1"],
- f"show bgp vrf vrf1 ipv{ipver} json",
- expected,
- )
- _, res = topotest.run_and_expect(test_func, None, count=30, wait=1)
- assertmsg = f"r1: BGP IPv{ipver} convergence failed"
- assert res is None, assertmsg
-
- # check
- test_func = partial(check_for_prefixes, prefixes, type, policy, step)
- success, res = topotest.run_and_expect(test_func, None, count=30, wait=1)
- assert success, "Checking the updated prefixes has been failed ! %s" % res
-
-
-def test_bmp_server_logging():
- """
- Assert the logging of the bmp server.
- """
-
- def check_for_log_file():
- tgen = get_topogen()
- output = tgen.gears["bmp1"].run(
- "ls {}".format(os.path.join(tgen.logdir, "bmp1"))
- )
- if "bmp.log" not in output:
- return False
- return True
-
- success, _ = topotest.run_and_expect(check_for_log_file, True, count=30, wait=1)
- assert success, "The BMP server is not logging"
-
-
-def test_peer_up():
- """
- Checking for BMP peers up messages
- """
-
- peers = ["192.168.0.2", "192:168::2"]
-
- logger.info("checking for BMP peers up messages")
-
- test_func = partial(check_for_peer_message, peers, "peer up")
- success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1)
- assert success, "Checking the updated prefixes has been failed !."
-
-
-def test_bmp_bgp_unicast():
- """
- Add/withdraw bgp unicast prefixes and check the bmp logs.
- """
- logger.info("*** Unicast prefixes pre-policy logging ***")
- _test_prefixes(PRE_POLICY)
- logger.info("*** Unicast prefixes post-policy logging ***")
- _test_prefixes(POST_POLICY)
- logger.info("*** Unicast prefixes loc-rib logging ***")
- _test_prefixes(LOC_RIB)
-
-
-def test_peer_down():
- """
- Checking for BMP peers down messages
- """
- tgen = get_topogen()
-
- tgen.gears["r2"].vtysh_cmd("clear bgp *")
-
- peers = ["192.168.0.2", "192:168::2"]
-
- logger.info("checking for BMP peers down messages")
-
- test_func = partial(check_for_peer_message, peers, "peer down")
- success, _ = topotest.run_and_expect(test_func, True, count=30, wait=1)
- assert success, "Checking the updated prefixes has been failed !."
-
-
-if __name__ == "__main__":
- args = ["-s"] + sys.argv[1:]
- sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_ecmp_topo1/r1/bgpd.conf b/tests/topotests/bgp_ecmp_topo1/r1/bgpd.conf
index 49981ac589..09c65321c2 100644
--- a/tests/topotests/bgp_ecmp_topo1/r1/bgpd.conf
+++ b/tests/topotests/bgp_ecmp_topo1/r1/bgpd.conf
@@ -8,44 +8,64 @@ router bgp 100
no bgp ebgp-requires-policy
neighbor 10.0.1.101 remote-as 99
neighbor 10.0.1.101 timers 3 10
+ neighbor 10.0.1.101 timers connect 1
neighbor 10.0.1.102 remote-as 99
neighbor 10.0.1.102 timers 3 10
+ neighbor 10.0.1.102 timers connect 1
neighbor 10.0.1.103 remote-as 99
neighbor 10.0.1.103 timers 3 10
+ neighbor 10.0.1.103 timers connect 1
neighbor 10.0.1.104 remote-as 99
neighbor 10.0.1.104 timers 3 10
+ neighbor 10.0.1.104 timers connect 1
neighbor 10.0.1.105 remote-as 99
neighbor 10.0.1.105 timers 3 10
+ neighbor 10.0.1.105 timers connect 1
neighbor 10.0.2.106 remote-as 99
neighbor 10.0.2.106 timers 3 10
+ neighbor 10.0.2.106 timers connect 1
neighbor 10.0.2.107 remote-as 99
neighbor 10.0.2.107 timers 3 10
+ neighbor 10.0.2.107 timers connect 1
neighbor 10.0.2.108 remote-as 99
neighbor 10.0.2.108 timers 3 10
+ neighbor 10.0.2.108 timers connect 1
neighbor 10.0.2.109 remote-as 99
neighbor 10.0.2.109 timers 3 10
+ neighbor 10.0.2.109 timers connect 1
neighbor 10.0.2.110 remote-as 99
neighbor 10.0.2.110 timers 3 10
+ neighbor 10.0.2.110 timers connect 1
neighbor 10.0.3.111 remote-as 111
neighbor 10.0.3.111 timers 3 10
+ neighbor 10.0.3.111 timers connect 1
neighbor 10.0.3.112 remote-as 112
neighbor 10.0.3.112 timers 3 10
+ neighbor 10.0.3.112 timers connect 1
neighbor 10.0.3.113 remote-as 113
neighbor 10.0.3.113 timers 3 10
+ neighbor 10.0.3.113 timers connect 1
neighbor 10.0.3.114 remote-as 114
neighbor 10.0.3.114 timers 3 10
+ neighbor 10.0.3.114 timers connect 1
neighbor 10.0.3.115 remote-as 115
neighbor 10.0.3.115 timers 3 10
+ neighbor 10.0.3.115 timers connect 1
neighbor 10.0.4.116 remote-as 116
neighbor 10.0.4.116 timers 3 10
+ neighbor 10.0.4.116 timers connect 1
neighbor 10.0.4.117 remote-as 117
neighbor 10.0.4.117 timers 3 10
+ neighbor 10.0.4.117 timers connect 1
neighbor 10.0.4.118 remote-as 118
neighbor 10.0.4.118 timers 3 10
+ neighbor 10.0.4.118 timers connect 1
neighbor 10.0.4.119 remote-as 119
neighbor 10.0.4.119 timers 3 10
+ neighbor 10.0.4.119 timers connect 1
neighbor 10.0.4.120 remote-as 120
neighbor 10.0.4.120 timers 3 10
+ neighbor 10.0.4.120 timers connect 1
!
!
diff --git a/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json
new file mode 100644
index 0000000000..7532ce9331
--- /dev/null
+++ b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json
@@ -0,0 +1,131 @@
+{
+ "bgpLocalRouterId":"192.168.100.21",
+ "defaultLocPrf":100,
+ "localAS":65000,
+ "192.168.101.41:2":{
+ "rd":"192.168.101.41:2",
+ "[5]:[0]:[32]:[192.168.101.41]":{
+ "prefix":"[5]:[0]:[32]:[192.168.101.41]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":32,
+ "ip":"192.168.101.41",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"192.168.100.41",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.41",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "[5]:[0]:[128]:[fd00::2]":{
+ "prefix":"[5]:[0]:[128]:[fd00::2]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":128,
+ "ip":"fd00::2",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"192.168.100.41",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.41",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "192.168.102.21:2":{
+ "rd":"192.168.102.21:2",
+ "[5]:[0]:[32]:[192.168.102.21]":{
+ "prefix":"[5]:[0]:[32]:[192.168.102.21]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":32,
+ "ip":"192.168.102.21",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.21",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "[5]:[0]:[128]:[fd00::1]":{
+ "prefix":"[5]:[0]:[128]:[fd00::1]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":128,
+ "ip":"fd00::1",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.21",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "numPrefix":4,
+ "totalPrefix":4
+}
diff --git a/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json
new file mode 100644
index 0000000000..a14ba1291e
--- /dev/null
+++ b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json
@@ -0,0 +1,191 @@
+{
+ "bgpLocalRouterId":"192.168.100.21",
+ "defaultLocPrf":100,
+ "localAS":65000,
+ "192.168.101.41:2":{
+ "rd":"192.168.101.41:2",
+ "[5]:[0]:[32]:[192.168.101.41]":{
+ "prefix":"[5]:[0]:[32]:[192.168.101.41]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":32,
+ "ip":"192.168.101.41",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"192.168.100.41",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.41",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "[5]:[0]:[32]:[192.168.102.41]":{
+ "prefix":"[5]:[0]:[32]:[192.168.102.41]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":32,
+ "ip":"192.168.102.41",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"192.168.100.41",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.41",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "[5]:[0]:[128]:[fd00::2]":{
+ "prefix":"[5]:[0]:[128]:[fd00::2]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":128,
+ "ip":"fd00::2",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"192.168.100.41",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.41",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "[5]:[0]:[128]:[fd00::3]":{
+ "prefix":"[5]:[0]:[128]:[fd00::3]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":128,
+ "ip":"fd00::3",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"192.168.100.41",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.41",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "192.168.102.21:2":{
+ "rd":"192.168.102.21:2",
+ "[5]:[0]:[32]:[192.168.102.21]":{
+ "prefix":"[5]:[0]:[32]:[192.168.102.21]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":32,
+ "ip":"192.168.102.21",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.21",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "[5]:[0]:[128]:[fd00::1]":{
+ "prefix":"[5]:[0]:[128]:[fd00::1]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":128,
+ "ip":"fd00::1",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.21",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "numPrefix":6,
+ "totalPrefix":6
+}
diff --git a/tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json b/tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json
new file mode 100644
index 0000000000..597bca5fd3
--- /dev/null
+++ b/tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json
@@ -0,0 +1,131 @@
+{
+ "bgpLocalRouterId":"192.168.100.41",
+ "defaultLocPrf":100,
+ "localAS":65000,
+ "192.168.101.41:2":{
+ "rd":"192.168.101.41:2",
+ "[5]:[0]:[32]:[192.168.101.41]":{
+ "prefix":"[5]:[0]:[32]:[192.168.101.41]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":32,
+ "ip":"192.168.101.41",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.41",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "[5]:[0]:[128]:[fd00::2]":{
+ "prefix":"[5]:[0]:[128]:[fd00::2]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":128,
+ "ip":"fd00::2",
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.41",
+ "hostname":"r2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "192.168.102.21:2":{
+ "rd":"192.168.102.21:2",
+ "[5]:[0]:[32]:[192.168.102.21]":{
+ "prefix":"[5]:[0]:[32]:[192.168.102.21]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":32,
+ "ip":"192.168.102.21",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"192.168.100.21",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.21",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "[5]:[0]:[128]:[fd00::1]":{
+ "prefix":"[5]:[0]:[128]:[fd00::1]",
+ "prefixLen":352,
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "routeType":5,
+ "ethTag":0,
+ "ipLen":128,
+ "ip":"fd00::1",
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"192.168.100.21",
+ "path":"",
+ "origin":"IGP",
+ "nexthops":[
+ {
+ "ip":"192.168.100.21",
+ "hostname":"r1",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "numPrefix":4,
+ "totalPrefix":4
+}
diff --git a/tests/topotests/bgp_evpn_rt5/r2/bgpd.conf b/tests/topotests/bgp_evpn_rt5/r2/bgpd.conf
index de5a0efc44..4f1d8e4a37 100644
--- a/tests/topotests/bgp_evpn_rt5/r2/bgpd.conf
+++ b/tests/topotests/bgp_evpn_rt5/r2/bgpd.conf
@@ -20,12 +20,30 @@ router bgp 65000 vrf r2-vrf-101
no bgp network import-check
address-family ipv4 unicast
network 192.168.101.41/32
+ network 192.168.102.41/32
exit-address-family
address-family ipv6 unicast
network fd00::2/128
+ network fd00::3/128
exit-address-family
address-family l2vpn evpn
- advertise ipv4 unicast
- advertise ipv6 unicast
+ advertise ipv4 unicast route-map rmap4
+ advertise ipv6 unicast route-map rmap6
exit-address-family
!
+access-list acl4_1 seq 10 permit 192.168.101.41/32
+access-list acl4_2 seq 10 permit 192.168.102.41/32
+ipv6 access-list acl6_1 seq 10 permit fd00::2/128
+ipv6 access-list acl6_2 seq 10 permit fd00::3/128
+route-map rmap4 permit 1
+ match ip address acl4_1
+exit
+route-map rmap4 deny 2
+ match ip address acl4_2
+exit
+route-map rmap6 permit 1
+ match ipv6 address acl6_1
+exit
+route-map rmap6 deny 2
+ match ipv6 address acl6_2
+exit
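+! The route-maps restrict EVPN advertisement to the first prefix of each
+! address family (192.168.101.41/32 and fd00::2/128); the prefixes matched
+! by acl4_2/acl6_2 stay filtered until the route-maps are removed.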
diff --git a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py
index 9dfb7fc4d9..a9636a92f4 100644
--- a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py
+++ b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py
@@ -13,6 +13,8 @@
with route advertisements on a separate netns.
"""
+import json
+from functools import partial
import os
import sys
import pytest
@@ -160,6 +162,36 @@ def teardown_module(_mod):
tgen.stop_topology()
+def _test_evpn_ping_router(pingrouter, ipv4_only=False):
+ """
+ internal function to check ping between r1 and r2
+ """
+ # Check IPv4 and IPv6 connectivity between r1 and r2 (routing via VXLAN EVPN)
+ logger.info(
+ "Check Ping IPv4 from R1(r1-vrf-101) to R2(r2-vrf-101 = 192.168.101.41)"
+ )
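+ # flood ping (-f) with a fixed packet count (-c 1000); the check below
+ # requires a lossless run, i.e. "1000 packets transmitted, 1000 received"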
+ output = pingrouter.run("ip netns exec r1-vrf-101 ping 192.168.101.41 -f -c 1000")
+ logger.info(output)
+ if "1000 packets transmitted, 1000 received" not in output:
+ assertmsg = "ping IPv4 from R1 (r1-vrf-101) to R2 (192.168.101.41) failed"
+ assert 0, assertmsg
+ else:
+ logger.info("Check Ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) OK")
+
+ if ipv4_only:
+ return
+
+ logger.info("Check Ping IPv6 from R1(r1-vrf-101) to R2(r2-vrf-101 = fd00::2)")
+ output = pingrouter.run("ip netns exec r1-vrf-101 ping fd00::2 -f -c 1000")
+ logger.info(output)
+ if "1000 packets transmitted, 1000 received" not in output:
+ assert 0, "expected ping IPv6 from R1(r1-vrf-101) to R2(fd00::2) should be ok"
+ else:
+ logger.info("Check Ping IPv6 from R1(r1-vrf-101) to R2(fd00::2) OK")
+
+
def test_protocols_convergence():
"""
Assert that all protocols have converged
@@ -168,7 +200,34 @@ def test_protocols_convergence():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- topotest.sleep(4, "waiting 4 seconds for bgp convergence")
+ # Check BGP L2VPN EVPN routing tables on r1 and r2
+ logger.info("Checking BGP L2VPN EVPN routes for convergence on r1 and r2")
+
+ for rname in ("r1", "r2"):
+ router = tgen.gears[rname]
+ json_file = "{}/{}/bgp_l2vpn_evpn_routes.json".format(CWD, router.name)
+ if not os.path.isfile(json_file):
+ assert 0, "bgp_l2vpn_evpn_routes.json file not found"
+
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp l2vpn evpn json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+
+def test_protocols_dump_info():
+ """
+ Dump EVPN information
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
# Check IPv4/IPv6 routing tables.
output = tgen.gears["r1"].vtysh_cmd("show bgp l2vpn evpn", isjson=False)
logger.info("==== result from show bgp l2vpn evpn")
@@ -203,6 +262,15 @@ def test_protocols_convergence():
logger.info("==== result from show evpn rmac vni all")
logger.info(output)
+
+def test_router_check_ip():
+ """
+ Check routes are correctly installed
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
expected = {
"fd00::2/128": [
{
@@ -221,56 +289,112 @@ def test_protocols_convergence():
)
assert result is None, "ipv6 route check failed"
- expected = {
- "101": {
- "numNextHops": 2,
- "192.168.100.41": {
- "nexthopIp": "192.168.100.41",
- },
- "::ffff:192.168.100.41": {
- "nexthopIp": "::ffff:192.168.100.41",
- },
+
+def _test_router_check_evpn_contexts(router, ipv4_only=False):
+ """
+ Check EVPN nexthops and RMAC number are correctly configured
+ """
+ if ipv4_only:
+ expected = {
+ "101": {
+ "numNextHops": 1,
+ "192.168.100.41": {
+ "nexthopIp": "192.168.100.41",
+ },
+ }
+ }
+ else:
+ expected = {
+ "101": {
+ "numNextHops": 2,
+ "192.168.100.41": {
+ "nexthopIp": "192.168.100.41",
+ },
+ "::ffff:192.168.100.41": {
+ "nexthopIp": "::ffff:192.168.100.41",
+ },
+ }
}
- }
result = topotest.router_json_cmp(
- tgen.gears["r1"], "show evpn next-hops vni all json", expected
+ router, "show evpn next-hops vni all json", expected
)
assert result is None, "evpn next-hops check failed"
expected = {"101": {"numRmacs": 1}}
- result = topotest.router_json_cmp(
- tgen.gears["r1"], "show evpn rmac vni all json", expected
- )
+ result = topotest.router_json_cmp(router, "show evpn rmac vni all json", expected)
assert result is None, "evpn rmac number check failed"
- # Check IPv4 and IPv6 connectivity between r1 and r2 ( routing vxlan evpn)
- pingrouter = tgen.gears["r1"]
- logger.info(
- "Check Ping IPv4 from R1(r1-vrf-101) to R2(r2-vrf-101 = 192.168.101.41)"
+
+def test_router_check_evpn_contexts():
+ """
+ Check EVPN nexthops and RMAC number are correctly configured
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ _test_router_check_evpn_contexts(tgen.gears["r1"])
+
+
+def test_evpn_ping():
+ """
+ Check ping between R1 and R2 is ok
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ _test_evpn_ping_router(tgen.gears["r1"])
+
+
+def test_evpn_disable_routemap():
+ """
+ Remove the route-maps on R2 and check that more EVPN prefixes are received.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["r2"].vtysh_cmd(
+ """
+ configure terminal\n
+ router bgp 65000 vrf r2-vrf-101\n
+ address-family l2vpn evpn\n
+ advertise ipv4 unicast\n
+ advertise ipv6 unicast\n
+ """
)
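+ # with the route-maps gone, r2 also advertises 192.168.102.41/32 and
+ # fd00::3/128, so r1 should now see six EVPN prefixes
+ # (bgp_l2vpn_evpn_routes_all.json)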
- output = pingrouter.run("ip netns exec r1-vrf-101 ping 192.168.101.41 -f -c 1000")
- logger.info(output)
- if "1000 packets transmitted, 1000 received" not in output:
- assertmsg = (
- "expected ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) should be ok"
- )
- assert 0, assertmsg
- else:
- logger.info("Check Ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) OK")
+ router = tgen.gears["r1"]
+ json_file = "{}/{}/bgp_l2vpn_evpn_routes_all.json".format(CWD, router.name)
+ if not os.path.isfile(json_file):
+ assert 0, "bgp_l2vpn_evpn_routes.json file not found"
+
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp l2vpn evpn json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
- logger.info("Check Ping IPv6 from R1(r1-vrf-101) to R2(r2-vrf-101 = fd00::2)")
- output = pingrouter.run("ip netns exec r1-vrf-101 ping fd00::2 -f -c 1000")
- logger.info(output)
- if "1000 packets transmitted, 1000 received" not in output:
- assert 0, "expected ping IPv6 from R1(r1-vrf-101) to R2(fd00::2) should be ok"
- else:
- logger.info("Check Ping IPv6 from R1(r1-vrf-101) to R2(fd00::2) OK")
+
+def test_evpn_remove_ip():
+ """
+ Check the removal of an EVPN route is correctly handled
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
config_no_ipv6 = {
"r2": {
"raw_config": [
"router bgp 65000 vrf r2-vrf-101",
"address-family ipv6 unicast",
+ "no network fd00::3/128",
"no network fd00::2/128",
]
}
@@ -293,6 +417,7 @@ def test_protocols_convergence():
}
result = verify_bgp_rib(tgen, "ipv6", "r1", ipv6_routes, expected=False)
assert result is not True, "expect IPv6 route fd00::2/128 withdrawn"
+
output = tgen.gears["r1"].vtysh_cmd("show evpn next-hops vni all", isjson=False)
logger.info("==== result from show evpn next-hops vni all")
logger.info(output)
@@ -300,37 +425,27 @@ def test_protocols_convergence():
logger.info("==== result from show evpn next-hops vni all")
logger.info(output)
- expected = {
- "101": {
- "numNextHops": 1,
- "192.168.100.41": {
- "nexthopIp": "192.168.100.41",
- },
- }
- }
- result = topotest.router_json_cmp(
- tgen.gears["r1"], "show evpn next-hops vni all json", expected
- )
- assert result is None, "evpn next-hops check failed"
- expected = {"101": {"numRmacs": 1}}
- result = topotest.router_json_cmp(
- tgen.gears["r1"], "show evpn rmac vni all json", expected
- )
- assert result is None, "evpn rmac number check failed"
+def test_router_check_evpn_contexts_again():
+ """
+ Check EVPN nexthops and RMAC number are correctly configured
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
- logger.info(
- "Check Ping IPv4 from R1(r1-vrf-101) to R2(r2-vrf-101 = 192.168.101.41)"
- )
- output = pingrouter.run("ip netns exec r1-vrf-101 ping 192.168.101.41 -f -c 1000")
- logger.info(output)
- if "1000 packets transmitted, 1000 received" not in output:
- assertmsg = (
- "expected ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) should be ok"
- )
- assert 0, assertmsg
- else:
- logger.info("Check Ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) OK")
+ _test_router_check_evpn_contexts(tgen.gears["r1"], ipv4_only=True)
+
+
+def test_evpn_ping_again():
+ """
+ Check ping between R1 and R2 is ok
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ _test_evpn_ping_router(tgen.gears["r1"], ipv4_only=True)
def test_memory_leak():
diff --git a/tests/topotests/bgp_flowspec/r1/bgpd.conf b/tests/topotests/bgp_flowspec/r1/bgpd.conf
index 4b7a20f958..288aeaf4dd 100644
--- a/tests/topotests/bgp_flowspec/r1/bgpd.conf
+++ b/tests/topotests/bgp_flowspec/r1/bgpd.conf
@@ -6,6 +6,7 @@ router bgp 100
bgp router-id 10.0.1.1
neighbor 10.0.1.101 remote-as 100
neighbor 10.0.1.101 timers 3 10
+ neighbor 10.0.1.101 timers connect 1
neighbor 10.0.1.101 update-source 10.0.1.1
address-family ipv6 flowspec
local-install r1-eth0
diff --git a/tests/topotests/bgp_gr_notification/test_bgp_gr_notification.py b/tests/topotests/bgp_gr_notification/test_bgp_gr_notification.py
index 5d8338d6eb..7e39b83d8f 100644
--- a/tests/topotests/bgp_gr_notification/test_bgp_gr_notification.py
+++ b/tests/topotests/bgp_gr_notification/test_bgp_gr_notification.py
@@ -187,6 +187,16 @@ def test_bgp_administrative_reset_gr():
"""
)
+ def _bgp_verify_show_bgp_router_json():
+ output = json.loads(r1.vtysh_cmd("show bgp router json"))
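+ # "*" is the topotest.json_cmp wildcard: bgpStartedAt must be present,
+ # but its value is not checked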
+ expected = {
+ "bgpStartedAt": "*",
+ "bgpStartedGracefully": False,
+ "bgpInMaintenanceMode": False,
+ "bgpInstanceCount": 1,
+ }
+ return topotest.json_cmp(output, expected)
+
step("Initial BGP converge")
test_func = functools.partial(_bgp_converge)
_, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
@@ -205,6 +215,11 @@ def test_bgp_administrative_reset_gr():
_, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, "Failed to send Administrative Reset notification from R2"
+ step("Check show bgp router json")
+ test_func = functools.partial(_bgp_verify_show_bgp_router_json)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Invalid BGP router details"
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
diff --git a/tests/topotests/bgp_invalid_nexthop/r1/frr.conf b/tests/topotests/bgp_invalid_nexthop/r1/frr.conf
index 05e1a6c825..f96aeb4366 100644
--- a/tests/topotests/bgp_invalid_nexthop/r1/frr.conf
+++ b/tests/topotests/bgp_invalid_nexthop/r1/frr.conf
@@ -8,6 +8,7 @@ router bgp 65001
no bgp ebgp-requires-policy
neighbor fc00::2 remote-as external
neighbor fc00::2 timers 3 10
+ neighbor fc00::2 timers connect 1
address-family ipv6
neighbor fc00::2 activate
exit-address-family
diff --git a/tests/topotests/bgp_lu_topo1/R3/bgpd.conf b/tests/topotests/bgp_lu_topo1/R3/bgpd.conf
index 31d26ea1ed..9ba059aeec 100644
--- a/tests/topotests/bgp_lu_topo1/R3/bgpd.conf
+++ b/tests/topotests/bgp_lu_topo1/R3/bgpd.conf
@@ -1,4 +1,3 @@
-log file /tmp/bgpd.log
!
! debug bgp updates
!
diff --git a/tests/topotests/bgp_lu_topo1/R3/zebra.conf b/tests/topotests/bgp_lu_topo1/R3/zebra.conf
index ea4a1482dd..fedcd6bc3a 100644
--- a/tests/topotests/bgp_lu_topo1/R3/zebra.conf
+++ b/tests/topotests/bgp_lu_topo1/R3/zebra.conf
@@ -1,4 +1,3 @@
-log file /tmp/zebra.log
!
! debug zebra events
! debug zebra packet detail
diff --git a/tests/topotests/bgp_lu_topo2/R3/bgpd.conf b/tests/topotests/bgp_lu_topo2/R3/bgpd.conf
index 6443445b80..a0dd0fe009 100644
--- a/tests/topotests/bgp_lu_topo2/R3/bgpd.conf
+++ b/tests/topotests/bgp_lu_topo2/R3/bgpd.conf
@@ -1,4 +1,3 @@
-log file /tmp/bgpd.log
no log unique-id
!
!
diff --git a/tests/topotests/bgp_lu_topo2/R3/staticd.conf b/tests/topotests/bgp_lu_topo2/R3/staticd.conf
index 867fc5a837..c0eee461ea 100644
--- a/tests/topotests/bgp_lu_topo2/R3/staticd.conf
+++ b/tests/topotests/bgp_lu_topo2/R3/staticd.conf
@@ -1,4 +1,3 @@
-log file /tmp/staticd.log
no log unique-id
!
!
diff --git a/tests/topotests/bgp_lu_topo2/R3/zebra.conf b/tests/topotests/bgp_lu_topo2/R3/zebra.conf
index dd24deb214..fd29ed54dc 100644
--- a/tests/topotests/bgp_lu_topo2/R3/zebra.conf
+++ b/tests/topotests/bgp_lu_topo2/R3/zebra.conf
@@ -1,4 +1,3 @@
-log file /tmp/zebra.log
no log unique-id
!
!
diff --git a/tests/topotests/bgp_max_med_on_startup/r1/bgpd.conf b/tests/topotests/bgp_max_med_on_startup/r1/bgpd.conf
index 41bf96344a..14f90b859d 100644
--- a/tests/topotests/bgp_max_med_on_startup/r1/bgpd.conf
+++ b/tests/topotests/bgp_max_med_on_startup/r1/bgpd.conf
@@ -1,6 +1,6 @@
!
router bgp 65001
- bgp max-med on-startup 5 777
+ bgp max-med on-startup 30 777
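+ ! a 30-second window gives the checks in the test time to observe the
+ ! max-med metric before the timer expires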
no bgp ebgp-requires-policy
neighbor 192.168.255.2 remote-as 65001
neighbor 192.168.255.2 timers 3 10
diff --git a/tests/topotests/bgp_max_med_on_startup/test_bgp_max_med_on_startup.py b/tests/topotests/bgp_max_med_on_startup/test_bgp_max_med_on_startup.py
index 545d7bd245..12ec88249a 100644
--- a/tests/topotests/bgp_max_med_on_startup/test_bgp_max_med_on_startup.py
+++ b/tests/topotests/bgp_max_med_on_startup/test_bgp_max_med_on_startup.py
@@ -82,12 +82,12 @@ def test_bgp_max_med_on_startup():
# Check session is established
test_func = functools.partial(_bgp_converge, router2)
- _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1.0)
assert result is None, "Failed bgp convergence on r2"
# Check metric has value of max-med
test_func = functools.partial(_bgp_has_routes, router2, 777)
- _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1.0)
assert result is None, "r2 does not receive routes with metric 777"
# Check that when the max-med timer expires, metric is updated
diff --git a/tests/topotests/bgp_minimum_holdtime/test_bgp_minimum_holdtime.py b/tests/topotests/bgp_minimum_holdtime/test_bgp_minimum_holdtime.py
index c9ff2ffc7e..d2d6a40ae8 100755
--- a/tests/topotests/bgp_minimum_holdtime/test_bgp_minimum_holdtime.py
+++ b/tests/topotests/bgp_minimum_holdtime/test_bgp_minimum_holdtime.py
@@ -76,7 +76,7 @@ def test_bgp_minimum_holdtime():
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_neighbor_check_if_notification_sent)
- _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert result is None, "Failed to send notification message\n"
diff --git a/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf b/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf
index cd7f44ac66..ced5cb5e4d 100644
--- a/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf
+++ b/tests/topotests/bgp_multiview_topo1/r1/bgpd.conf
@@ -19,10 +19,13 @@ router bgp 100 view 1
timers bgp 60 180
neighbor 172.16.1.1 remote-as 65001
neighbor 172.16.1.1 timers 3 10
+ neighbor 172.16.1.1 timers connect 1
neighbor 172.16.1.2 remote-as 65002
neighbor 172.16.1.2 timers 3 10
+ neighbor 172.16.1.2 timers connect 1
neighbor 172.16.1.5 remote-as 65005
neighbor 172.16.1.5 timers 3 10
+ neighbor 172.16.1.5 timers connect 1
!
router bgp 100 view 2
bgp router-id 172.30.1.1
@@ -32,8 +35,10 @@ router bgp 100 view 2
timers bgp 60 180
neighbor 172.16.1.3 remote-as 65003
neighbor 172.16.1.3 timers 3 10
+ neighbor 172.16.1.3 timers connect 1
neighbor 172.16.1.4 remote-as 65004
neighbor 172.16.1.4 timers 3 10
+ neighbor 172.16.1.4 timers connect 1
!
router bgp 100 view 3
bgp router-id 172.30.1.1
@@ -43,10 +48,13 @@ router bgp 100 view 3
timers bgp 60 180
neighbor 172.16.1.6 remote-as 65006
neighbor 172.16.1.6 timers 3 10
+ neighbor 172.16.1.6 timers connect 1
neighbor 172.16.1.7 remote-as 65007
neighbor 172.16.1.7 timers 3 10
+ neighbor 172.16.1.7 timers connect 1
neighbor 172.16.1.8 remote-as 65008
neighbor 172.16.1.8 timers 3 10
+ neighbor 172.16.1.8 timers connect 1
!
route-map local1 permit 10
set community 100:9999 additive
diff --git a/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf
index 7efa1b79fa..06ac666ce6 100644
--- a/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 65000
no bgp ebgp-requires-policy
neighbor fd00:0:2::9 remote-as internal
neighbor fd00:0:2::9 timers 3 10
+ neighbor fd00:0:2::9 timers connect 1
address-family ipv4 unicast
redistribute connected route-map RMAP4
!
diff --git a/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf
index 4d4ae44e28..4b696b51b3 100644
--- a/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 65000
no bgp ebgp-requires-policy
neighbor fd00:0:2::9 remote-as internal
neighbor fd00:0:2::9 timers 3 10
+ neighbor fd00:0:2::9 timers connect 1
address-family ipv4 unicast
redistribute connected route-map RMAP4
!
diff --git a/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf
index b14c9bace4..081909bbb3 100644
--- a/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 65001
no bgp ebgp-requires-policy
neighbor fd00:0:2::9 remote-as external
neighbor fd00:0:2::9 timers 3 10
+ neighbor fd00:0:2::9 timers connect 1
address-family ipv4 unicast
redistribute connected route-map RMAP4
!
diff --git a/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf
index becea2bbe6..b8f9078f51 100644
--- a/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 65002
no bgp ebgp-requires-policy
neighbor fd00:0:3::9 remote-as external
neighbor fd00:0:3::9 timers 3 10
+ neighbor fd00:0:3::9 timers connect 1
address-family ipv4 unicast
redistribute connected route-map RMAP4
!
diff --git a/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf
index 801736ab98..19c6bbc819 100644
--- a/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf
@@ -2,6 +2,7 @@ router bgp 65000
no bgp ebgp-requires-policy
neighbor fd00:0:4::9 remote-as internal
neighbor fd00:0:4::9 timers 3 10
+ neighbor fd00:0:4::9 timers connect 1
address-family ipv4 unicast
redistribute connected route-map RMAP4
!
diff --git a/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf
index 705ae78b8e..1c8f2fa49e 100644
--- a/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf
+++ b/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf
@@ -2,16 +2,22 @@ router bgp 65000
no bgp ebgp-requires-policy
neighbor fd00:0:2::1 remote-as internal
neighbor fd00:0:2::1 timers 3 10
+ neighbor fd00:0:2::1 timers connect 1
neighbor fd00:0:2::2 remote-as internal
neighbor fd00:0:2::2 timers 3 10
+ neighbor fd00:0:2::2 timers connect 1
neighbor fd00:0:2::3 remote-as internal
neighbor fd00:0:2::3 timers 3 10
+ neighbor fd00:0:2::3 timers connect 1
neighbor fd00:0:2::4 remote-as external
neighbor fd00:0:2::4 timers 3 10
+ neighbor fd00:0:2::4 timers connect 1
neighbor fd00:0:3::5 remote-as external
neighbor fd00:0:3::5 timers 3 10
+ neighbor fd00:0:3::5 timers connect 1
neighbor fd00:0:4::6 remote-as internal
neighbor fd00:0:4::6 timers 3 10
+ neighbor fd00:0:4::6 timers connect 1
address-family ipv4 unicast
neighbor fd00:0:2::1 route-reflector-client
neighbor fd00:0:2::2 route-reflector-client
diff --git a/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py b/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py
index e478139eb1..58daee32c3 100644
--- a/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py
+++ b/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py
@@ -36,7 +36,7 @@ pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
- """
+ r"""
All peers are FRR BGP peers except r3 that is a exabgp peer.
rr is a route-reflector for AS 65000 iBGP peers.
Exabgp does not send any IPv6 Link-Local nexthop
@@ -222,7 +222,6 @@ def test_bgp_ipv6_table_step1():
link_local_cache = {}
router_list = tgen.routers().values()
for router in router_list:
- # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name)
ref_file = "{}/{}/show_bgp_ipv6_step1.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
replace_link_local(expected, link_local_cache)
@@ -275,7 +274,6 @@ router bgp 65000
router_list = tgen.routers().values()
for router in router_list:
- # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name)
ref_file = "{}/{}/show_bgp_ipv6_step2.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
replace_link_local(expected, link_local_cache)
@@ -327,7 +325,6 @@ router bgp 65000
router_list = tgen.routers().values()
for router in router_list:
- # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name)
ref_file = "{}/{}/show_bgp_ipv6_step1.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
replace_link_local(expected, link_local_cache)
diff --git a/tests/topotests/bgp_path_attribute_discard/r1/frr.conf b/tests/topotests/bgp_path_attribute_discard/r1/frr.conf
index ae7fbdd9a9..ae47862963 100644
--- a/tests/topotests/bgp_path_attribute_discard/r1/frr.conf
+++ b/tests/topotests/bgp_path_attribute_discard/r1/frr.conf
@@ -6,4 +6,5 @@ router bgp 65001
no bgp ebgp-requires-policy
neighbor 10.0.0.254 remote-as external
neighbor 10.0.0.254 timers 3 10
+ neighbor 10.0.0.254 timers connect 1
!
diff --git a/tests/topotests/bgp_path_attribute_discard/r2/frr.conf b/tests/topotests/bgp_path_attribute_discard/r2/frr.conf
index 1dafbdd8e1..30ffdefff3 100644
--- a/tests/topotests/bgp_path_attribute_discard/r2/frr.conf
+++ b/tests/topotests/bgp_path_attribute_discard/r2/frr.conf
@@ -6,5 +6,6 @@ router bgp 65254
no bgp ebgp-requires-policy
neighbor 10.0.0.254 remote-as internal
neighbor 10.0.0.254 timers 3 10
+ neighbor 10.0.0.254 timers connect 1
neighbor 10.0.0.254 path-attribute discard 26
!
diff --git a/tests/topotests/bgp_path_attribute_discard/test_bgp_path_attribute_discard.py b/tests/topotests/bgp_path_attribute_discard/test_bgp_path_attribute_discard.py
index adc92f59fe..c6f1b6193b 100644
--- a/tests/topotests/bgp_path_attribute_discard/test_bgp_path_attribute_discard.py
+++ b/tests/topotests/bgp_path_attribute_discard/test_bgp_path_attribute_discard.py
@@ -142,6 +142,27 @@ def test_bgp_path_attribute_discard():
result is None
), "Failed to discard path attributes (atomic-aggregate, community)"
+ def _bgp_check_attributes_discarded_stats():
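+ # prefixStats.attributesDiscarded counts updates whose path attributes
+ # were discarded; the earlier steps of this test are expected to have
+ # produced exactly three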
+ output = json.loads(r1.vtysh_cmd("show bgp neighbor json"))
+ expected = {
+ "10.0.0.254": {
+ "prefixStats": {
+ "inboundFiltered": 0,
+ "aspathLoop": 0,
+ "originatorLoop": 0,
+ "clusterLoop": 0,
+ "invalidNextHop": 0,
+ "withdrawn": 0,
+ "attributesDiscarded": 3,
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_attributes_discarded_stats)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Discarded path attributes count is not as expected"
+
def _bgp_check_if_aigp_invalid_attribute_discarded():
output = json.loads(r2.vtysh_cmd("show bgp ipv4 unicast json detail"))
expected = {
diff --git a/tests/topotests/bgp_path_attribute_treat_as_withdraw/test_bgp_path_attribute_treat_as_withdraw.py b/tests/topotests/bgp_path_attribute_treat_as_withdraw/test_bgp_path_attribute_treat_as_withdraw.py
index a9d678a42d..4f6472f3c5 100644
--- a/tests/topotests/bgp_path_attribute_treat_as_withdraw/test_bgp_path_attribute_treat_as_withdraw.py
+++ b/tests/topotests/bgp_path_attribute_treat_as_withdraw/test_bgp_path_attribute_treat_as_withdraw.py
@@ -134,6 +134,27 @@ def test_bgp_path_attribute_treat_as_withdraw():
_, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, "Failed to withdraw prefixes with atomic-aggregate attribute"
+ def _bgp_check_attributes_withdrawn_stats():
+ output = json.loads(r2.vtysh_cmd("show bgp neighbor json"))
+ expected = {
+ "10.0.0.1": {
+ "prefixStats": {
+ "inboundFiltered": 0,
+ "aspathLoop": 0,
+ "originatorLoop": 0,
+ "clusterLoop": 0,
+ "invalidNextHop": 0,
+ "withdrawn": 1,
+ "attributesDiscarded": 0,
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_attributes_withdrawn_stats)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Withdrawn prefix count is not as expected"
+
def test_memory_leak():
"Run the memory leak test and report results."
diff --git a/tests/topotests/bgp_peer_group/r1/bgpd.conf b/tests/topotests/bgp_peer_group/r1/bgpd.conf
deleted file mode 100644
index 68d8e61a59..0000000000
--- a/tests/topotests/bgp_peer_group/r1/bgpd.conf
+++ /dev/null
@@ -1,12 +0,0 @@
-!
-router bgp 65001
- neighbor PG peer-group
- neighbor PG remote-as external
- neighbor PG timers 3 10
- neighbor 192.168.255.3 peer-group PG
- neighbor r1-eth0 interface peer-group PG
- neighbor PG1 peer-group
- neighbor PG1 remote-as external
- neighbor PG1 timers 3 20
- neighbor 192.168.251.2 peer-group PG1
-!
diff --git a/tests/topotests/bgp_peer_group/r1/frr.conf b/tests/topotests/bgp_peer_group/r1/frr.conf
new file mode 100644
index 0000000000..cf9d16c918
--- /dev/null
+++ b/tests/topotests/bgp_peer_group/r1/frr.conf
@@ -0,0 +1,28 @@
+!
+interface r1-eth0
+ ip address 192.168.255.1/24
+!
+interface r1-eth1
+ ip address 192.168.251.1/30
+!
+interface r1-eth2
+ ip address 192.168.252.1/30
+!
+ip forwarding
+!
+router bgp 65001
+ neighbor PG peer-group
+ neighbor PG remote-as external
+ neighbor PG timers 3 10
+ neighbor 192.168.255.3 peer-group PG
+ neighbor r1-eth0 interface peer-group PG
+ neighbor PG1 peer-group
+ neighbor PG1 remote-as external
+ neighbor PG1 timers 3 20
+ neighbor PG1 graceful-restart-disable
+ neighbor PG2 peer-group
+ neighbor PG2 local-as 65554 no-prepend replace-as
+ neighbor 192.168.251.2 peer-group PG1
+ neighbor 192.168.252.2 remote-as 65004
+ neighbor 192.168.252.2 peer-group PG2
+!
diff --git a/tests/topotests/bgp_peer_group/r1/zebra.conf b/tests/topotests/bgp_peer_group/r1/zebra.conf
deleted file mode 100644
index 16fd8c538c..0000000000
--- a/tests/topotests/bgp_peer_group/r1/zebra.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-!
-interface r1-eth0
- ip address 192.168.255.1/24
-!
-interface r1-eth1
- ip address 192.168.251.1/30
-!
-ip forwarding
-!
diff --git a/tests/topotests/bgp_peer_group/r2/bgpd.conf b/tests/topotests/bgp_peer_group/r2/bgpd.conf
deleted file mode 100644
index d0e8f017d1..0000000000
--- a/tests/topotests/bgp_peer_group/r2/bgpd.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-!
-router bgp 65002
- neighbor PG peer-group
- neighbor PG remote-as external
- neighbor PG timers 3 10
- neighbor r2-eth0 interface peer-group PG
- neighbor PG1 peer-group
- neighbor PG1 remote-as external
- neighbor PG1 timers 3 20
- neighbor 192.168.251.1 peer-group PG1
-!
diff --git a/tests/topotests/bgp_peer_group/r2/frr.conf b/tests/topotests/bgp_peer_group/r2/frr.conf
new file mode 100644
index 0000000000..4713789f15
--- /dev/null
+++ b/tests/topotests/bgp_peer_group/r2/frr.conf
@@ -0,0 +1,19 @@
+!
+interface r2-eth0
+ ip address 192.168.255.2/24
+!
+interface r2-eth1
+ ip address 192.168.251.2/30
+!
+ip forwarding
+!
+router bgp 65002
+ neighbor PG peer-group
+ neighbor PG remote-as external
+ neighbor PG timers 3 10
+ neighbor r2-eth0 interface peer-group PG
+ neighbor PG1 peer-group
+ neighbor PG1 remote-as external
+ neighbor PG1 timers 3 20
+ neighbor 192.168.251.1 peer-group PG1
+!
diff --git a/tests/topotests/bgp_peer_group/r2/zebra.conf b/tests/topotests/bgp_peer_group/r2/zebra.conf
deleted file mode 100644
index c2ad956c9c..0000000000
--- a/tests/topotests/bgp_peer_group/r2/zebra.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-!
-interface r2-eth0
- ip address 192.168.255.2/24
-!
-interface r2-eth1
- ip address 192.168.251.2/30
-!
-ip forwarding
-!
diff --git a/tests/topotests/bgp_peer_group/r3/bgpd.conf b/tests/topotests/bgp_peer_group/r3/frr.conf
index 5a1340fb0b..e8bffaab51 100644
--- a/tests/topotests/bgp_peer_group/r3/bgpd.conf
+++ b/tests/topotests/bgp_peer_group/r3/frr.conf
@@ -1,4 +1,9 @@
!
+interface r3-eth0
+ ip address 192.168.255.3/24
+!
+ip forwarding
+!
router bgp 65003
no bgp ebgp-requires-policy
neighbor PG peer-group
diff --git a/tests/topotests/bgp_peer_group/r3/zebra.conf b/tests/topotests/bgp_peer_group/r3/zebra.conf
deleted file mode 100644
index e9fdfb70c5..0000000000
--- a/tests/topotests/bgp_peer_group/r3/zebra.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-!
-interface r3-eth0
- ip address 192.168.255.3/24
-!
-ip forwarding
-!
diff --git a/tests/topotests/bgp_peer_group/r4/frr.conf b/tests/topotests/bgp_peer_group/r4/frr.conf
new file mode 100644
index 0000000000..b1da90f064
--- /dev/null
+++ b/tests/topotests/bgp_peer_group/r4/frr.conf
@@ -0,0 +1,7 @@
+!
+interface r4-eth0
+ ip address 192.168.252.2/30
+!
+router bgp 65004
+ neighbor 192.168.252.1 remote-as external
+!
diff --git a/tests/topotests/bgp_peer_group/test_bgp_peer-group.py b/tests/topotests/bgp_peer_group/test_bgp_peer-group.py
index 7d476b0538..e98d5f8b3b 100644
--- a/tests/topotests/bgp_peer_group/test_bgp_peer-group.py
+++ b/tests/topotests/bgp_peer_group/test_bgp_peer-group.py
@@ -2,12 +2,14 @@
# SPDX-License-Identifier: ISC
#
-# Copyright (c) 2021 by
+# Copyright (c) 2021-2024 by
# Donatas Abraitis <donatas.abraitis@gmail.com>
+# Donatas Abraitis <donatas@opensourcerouting.org>
#
"""
-Test if peer-group works for numbered and unnumbered configurations.
+Test if various peer-group settings work for
+numbered and unnumbered configurations.
"""
import os
@@ -21,14 +23,14 @@ sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topogen import Topogen, get_topogen
from lib.topolog import logger
pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
- for routern in range(1, 4):
+ for routern in range(1, 5):
tgen.add_router("r{}".format(routern))
switch = tgen.add_switch("s1")
@@ -40,6 +42,10 @@ def build_topo(tgen):
switch.add_link(tgen.gears["r1"])
switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r4"])
+
def setup_module(mod):
tgen = Topogen(build_topo, mod.__name__)
@@ -48,12 +54,7 @@ def setup_module(mod):
router_list = tgen.routers()
for _, (rname, router) in enumerate(router_list.items(), 1):
- router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
- )
- router.load_config(
- TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
- )
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
tgen.start_router()
@@ -72,14 +73,31 @@ def test_bgp_peer_group():
def _bgp_peer_group_configured():
output = json.loads(tgen.gears["r1"].vtysh_cmd("show ip bgp neighbor json"))
expected = {
- "r1-eth0": {"peerGroup": "PG", "bgpState": "Established"},
- "192.168.255.3": {"peerGroup": "PG", "bgpState": "Established"},
- "192.168.251.2": {"peerGroup": "PG1", "bgpState": "Established"},
+ "r1-eth0": {
+ "peerGroup": "PG",
+ "bgpState": "Established",
+ "neighborCapabilities": {"gracefulRestart": "advertisedAndReceived"},
+ },
+ "192.168.255.3": {
+ "peerGroup": "PG",
+ "bgpState": "Established",
+ "neighborCapabilities": {"gracefulRestart": "advertisedAndReceived"},
+ },
+ "192.168.251.2": {
+ "peerGroup": "PG1",
+ "bgpState": "Established",
+ "neighborCapabilities": {"gracefulRestart": "received"},
+ },
+ "192.168.252.2": {
+ "peerGroup": "PG2",
+ "bgpState": "Established",
+ "neighborCapabilities": {"gracefulRestart": "advertisedAndReceived"},
+ },
}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_peer_group_configured)
- _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert result is None, "Failed bgp convergence in r1"
def _bgp_peer_group_check_advertised_routes():
@@ -97,10 +115,28 @@ def test_bgp_peer_group():
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_peer_group_check_advertised_routes)
- _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert result is None, "Failed checking advertised routes from r3"
+def test_show_running_remote_as_peer_group():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ output = (
+ tgen.gears["r1"]
+ .cmd(
+ 'vtysh -c "show running bgpd" | grep "^ neighbor 192.168.252.2 remote-as 65004"'
+ )
+ .rstrip()
+ )
+ assert (
+ output == " neighbor 192.168.252.2 remote-as 65004"
+ ), "192.168.252.2 remote-as was flushed from the running configuration"
+
+
def test_bgp_peer_group_remote_as_del_readd():
tgen = get_topogen()
@@ -122,7 +158,7 @@ def test_bgp_peer_group_remote_as_del_readd():
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_peer_group_remoteas_del)
- _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert result is None, "Failed bgp convergence in r1"
logger.info("Re-add bgp peer-group PG1 remote-as neighbor should be established")
@@ -139,7 +175,7 @@ def test_bgp_peer_group_remote_as_del_readd():
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_peer_group_remoteas_add)
- _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert result is None, "Failed bgp convergence in r1"
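The new test_show_running_remote_as_peer_group() guards against a regression where binding 192.168.252.2 to peer-group PG2 flushed its explicitly configured remote-as from the running configuration. The same assertion could be expressed in-process instead of shelling out to grep; a hedged alternative sketch, assuming the usual Topogen fixtures:

    def check_remote_as_survives(tgen):
        # The explicit remote-as must survive the peer-group assignment;
        # note the leading space, matching grep's "^ neighbor" anchor.
        running = tgen.gears["r1"].vtysh_cmd("show running-config bgpd")
        assert " neighbor 192.168.252.2 remote-as 65004" in running
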
diff --git a/tests/topotests/bgp_peer_type_multipath_relax/r1/bgpd.conf b/tests/topotests/bgp_peer_type_multipath_relax/r1/bgpd.conf
index 038f108aa8..e743010922 100644
--- a/tests/topotests/bgp_peer_type_multipath_relax/r1/bgpd.conf
+++ b/tests/topotests/bgp_peer_type_multipath_relax/r1/bgpd.conf
@@ -8,9 +8,17 @@ router bgp 64510
bgp bestpath compare-routerid
bgp bestpath peer-type multipath-relax
neighbor 10.0.1.2 remote-as 64510
+ neighbor 10.0.1.2 timers 3 10
+ neighbor 10.0.1.2 timers connect 1
neighbor 10.0.3.2 remote-as 64502
+ neighbor 10.0.3.2 timers 3 10
+ neighbor 10.0.3.2 timers connect 1
neighbor 10.0.4.2 remote-as 64503
+ neighbor 10.0.4.2 timers 3 10
+ neighbor 10.0.4.2 timers connect 1
neighbor 10.0.5.2 remote-as 64511
+ neighbor 10.0.5.2 timers 3 10
+ neighbor 10.0.5.2 timers connect 1
!
line vty
!
diff --git a/tests/topotests/bgp_peer_type_multipath_relax/r2/bgpd.conf b/tests/topotests/bgp_peer_type_multipath_relax/r2/bgpd.conf
index 2362a19f26..1da7173bba 100644
--- a/tests/topotests/bgp_peer_type_multipath_relax/r2/bgpd.conf
+++ b/tests/topotests/bgp_peer_type_multipath_relax/r2/bgpd.conf
@@ -7,7 +7,11 @@ router bgp 64511
bgp router-id 10.0.5.2
no bgp ebgp-requires-policy
neighbor 10.0.2.2 remote-as 64511
+ neighbor 10.0.2.2 timers 3 10
+ neighbor 10.0.2.2 timers connect 1
neighbor 10.0.5.1 remote-as 64510
+ neighbor 10.0.5.1 timers 3 10
+ neighbor 10.0.5.1 timers connect 1
!
address-family ipv4 unicast
neighbor 10.0.5.1 route-map dropall in
diff --git a/tests/topotests/bgp_prefix_sid/r1/bgpd.conf b/tests/topotests/bgp_prefix_sid/r1/bgpd.conf
index e02226f2fd..3fd5e5e9c3 100644
--- a/tests/topotests/bgp_prefix_sid/r1/bgpd.conf
+++ b/tests/topotests/bgp_prefix_sid/r1/bgpd.conf
@@ -7,8 +7,10 @@ router bgp 1
no bgp ebgp-requires-policy
neighbor 10.0.0.101 remote-as 2
neighbor 10.0.0.101 timers 3 10
+ neighbor 10.0.0.101 timers connect 1
neighbor 10.0.0.102 remote-as 3
neighbor 10.0.0.102 timers 3 10
+ neighbor 10.0.0.102 timers connect 1
!
address-family ipv4 labeled-unicast
neighbor 10.0.0.101 activate
diff --git a/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf b/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf
index b3ca0e114d..946103c30f 100644
--- a/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf
+++ b/tests/topotests/bgp_prefix_sid2/r1/bgpd.conf
@@ -18,6 +18,7 @@ router bgp 1
no bgp ebgp-requires-policy
neighbor 10.0.0.101 remote-as 2
neighbor 10.0.0.101 timers 3 10
+ neighbor 10.0.0.101 timers connect 1
!
address-family ipv6 vpn
neighbor 10.0.0.101 activate
diff --git a/tests/topotests/bgp_route_server_client/r1/bgpd.conf b/tests/topotests/bgp_route_server_client/r1/bgpd.conf
index e464e6c50b..5cbb7956be 100644
--- a/tests/topotests/bgp_route_server_client/r1/bgpd.conf
+++ b/tests/topotests/bgp_route_server_client/r1/bgpd.conf
@@ -2,10 +2,12 @@
router bgp 65001
bgp router-id 10.10.10.1
no bgp ebgp-requires-policy
- no bgp enforce-first-as
- neighbor 2001:db8:1::1 remote-as external
- neighbor 2001:db8:1::1 timers 3 10
- neighbor 2001:db8:1::1 timers connect 5
+ neighbor pg peer-group
+ neighbor pg remote-as external
+ neighbor pg timers 1 3
+ neighbor pg timers connect 1
+ no neighbor pg enforce-first-as
+ neighbor 2001:db8:1::1 peer-group pg
address-family ipv6 unicast
redistribute connected
neighbor 2001:db8:1::1 activate
diff --git a/tests/topotests/bgp_route_server_client/r2/bgpd.conf b/tests/topotests/bgp_route_server_client/r2/bgpd.conf
index 19607660f9..7fda2b0a05 100644
--- a/tests/topotests/bgp_route_server_client/r2/bgpd.conf
+++ b/tests/topotests/bgp_route_server_client/r2/bgpd.conf
@@ -3,16 +3,16 @@ router bgp 65000 view RS
no bgp ebgp-requires-policy
neighbor 2001:db8:1::2 remote-as external
neighbor 2001:db8:1::2 timers 3 10
- neighbor 2001:db8:1::2 timers connect 5
+ neighbor 2001:db8:1::2 timers connect 1
neighbor 2001:db8:1::3 remote-as external
neighbor 2001:db8:1::3 timers 3 10
- neighbor 2001:db8:1::3 timers connect 5
+ neighbor 2001:db8:1::3 timers connect 1
neighbor 2001:db8:1::4 remote-as external
neighbor 2001:db8:1::4 timers 3 10
- neighbor 2001:db8:1::4 timers connect 5
+ neighbor 2001:db8:1::4 timers connect 1
neighbor 2001:db8:3::2 remote-as external
neighbor 2001:db8:3::2 timers 3 10
- neighbor 2001:db8:3::2 timers connect 5
+ neighbor 2001:db8:3::2 timers connect 1
address-family ipv6 unicast
redistribute connected
neighbor 2001:db8:1::2 activate
diff --git a/tests/topotests/bgp_route_server_client/r3/bgpd.conf b/tests/topotests/bgp_route_server_client/r3/bgpd.conf
index f7daba87fa..2f20b91334 100644
--- a/tests/topotests/bgp_route_server_client/r3/bgpd.conf
+++ b/tests/topotests/bgp_route_server_client/r3/bgpd.conf
@@ -5,7 +5,7 @@ router bgp 65003
no bgp enforce-first-as
neighbor 2001:db8:3::1 remote-as external
neighbor 2001:db8:3::1 timers 3 10
- neighbor 2001:db8:3::1 timers connect 5
+ neighbor 2001:db8:3::1 timers connect 1
address-family ipv6 unicast
redistribute connected
neighbor 2001:db8:3::1 activate
diff --git a/tests/topotests/bgp_route_server_client/r4/bgpd.conf b/tests/topotests/bgp_route_server_client/r4/bgpd.conf
index c907d7284e..66a1573018 100644
--- a/tests/topotests/bgp_route_server_client/r4/bgpd.conf
+++ b/tests/topotests/bgp_route_server_client/r4/bgpd.conf
@@ -5,7 +5,7 @@ router bgp 65004
no bgp enforce-first-as
neighbor 2001:db8:1::1 remote-as external
neighbor 2001:db8:1::1 timers 3 10
- neighbor 2001:db8:1::1 timers connect 5
+ neighbor 2001:db8:1::1 timers connect 1
address-family ipv6 unicast
redistribute connected
neighbor 2001:db8:1::1 activate
diff --git a/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py b/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py
index a6334918df..9dfeec6de0 100644
--- a/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py
+++ b/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py
@@ -180,7 +180,6 @@ def test_bgp_route_server_client_step1():
else:
cmd = "show bgp ipv6 unicast json"
- # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name)
ref_file = "{}/{}/show_bgp_ipv6_step1.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
replace_link_local(expected, link_local_cache)
@@ -230,7 +229,6 @@ router bgp 65000 view RS
else:
cmd = "show bgp ipv6 unicast json"
- # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name)
ref_file = "{}/{}/show_bgp_ipv6_step2.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
replace_link_local(expected, link_local_cache)
@@ -286,7 +284,6 @@ router bgp 65000 view RS
else:
cmd = "show bgp ipv6 unicast json"
- # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name)
ref_file = "{}/{}/show_bgp_ipv6_step1.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
replace_link_local(expected, link_local_cache)
diff --git a/tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json b/tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json
new file mode 100644
index 0000000000..016c019d10
--- /dev/null
+++ b/tests/topotests/bgp_rpki_topo1/r2/bgp_rpki_valid.json
@@ -0,0 +1,70 @@
+{
+ "vrfId": 0,
+ "vrfName": "default",
+ "tableVersion": 3,
+ "routerId": "192.0.2.2",
+ "defaultLocPrf": 100,
+ "localAS": 65002,
+ "routes": {
+ "198.51.100.0/24": [
+ {
+ "origin": "IGP",
+ "metric": 0,
+ "valid": true,
+ "version": 2,
+ "rpkiValidationState": "valid",
+ "bestpath": {
+ "overall": true,
+ "selectionReason": "First path received"
+ },
+ "nexthops": [
+ {
+ "ip": "192.0.2.1",
+ "hostname": "r1",
+ "afi": "ipv4",
+ "metric": 0,
+ "accessible": true,
+ "used": true
+ }
+ ],
+ "peer": {
+ "peerId": "192.0.2.1",
+ "routerId": "192.0.2.1",
+ "hostname": "r1",
+ "type": "external"
+ }
+ }
+ ],
+ "203.0.113.0/24": [
+ {
+ "origin": "IGP",
+ "metric": 0,
+ "valid": true,
+ "version": 3,
+ "rpkiValidationState": "valid",
+ "bestpath": {
+ "overall": true,
+ "selectionReason": "First path received"
+ },
+ "nexthops": [
+ {
+ "ip": "192.0.2.1",
+ "hostname": "r1",
+ "afi": "ipv4",
+ "metric": 0,
+ "accessible": true,
+ "used": true
+ }
+ ],
+ "peer": {
+ "peerId": "192.0.2.1",
+ "routerId": "192.0.2.1",
+ "hostname": "r1",
+ "type": "external"
+ }
+ }
+ ]
+ },
+ "totalRoutes": 3,
+ "totalPaths": 3
+} \ No newline at end of file
diff --git a/tests/topotests/bgp_rpki_topo1/r2/bgpd.conf b/tests/topotests/bgp_rpki_topo1/r2/bgpd.conf
index 4de177dc25..e5dc7f65f9 100644
--- a/tests/topotests/bgp_rpki_topo1/r2/bgpd.conf
+++ b/tests/topotests/bgp_rpki_topo1/r2/bgpd.conf
@@ -9,6 +9,7 @@ router bgp 65002
neighbor 192.168.4.4 timers connect 1
address-family ipv4 unicast
neighbor 192.168.4.4 next-hop-self
+ neighbor 192.168.4.4 send-community extended rpki
exit-address-family
!
router bgp 65002 vrf vrf10
diff --git a/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py b/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py
index 7b40bbdae8..5b775aa6cb 100644
--- a/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py
+++ b/tests/topotests/bgp_rpki_topo1/test_bgp_rpki_topo1.py
@@ -101,6 +101,16 @@ def show_rpki_prefixes(rname, expected, vrf=None):
return topotest.json_cmp(output, expected)
+def show_rpki_valid(rname, expected, vrf=None):
+ tgen = get_topogen()
+
+ cmd = "show bgp ipv4 detail json"
+
+ output = json.loads(tgen.gears[rname].vtysh_cmd(cmd))
+
+ return topotest.json_cmp(output, expected)
+
+
def show_bgp_ipv4_table_rpki(rname, rpki_state, expected, vrf=None):
tgen = get_topogen()
@@ -123,6 +133,25 @@ def show_bgp_ipv4_table_rpki(rname, rpki_state, expected, vrf=None):
return topotest.json_cmp(output, expected)
+def test_show_bgp_rpki_prefixes_valid():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["r1", "r3"]:
+ logger.info("{}: checking if rtrd is running".format(rname))
+ if rtrd_process[rname].poll() is not None:
+ pytest.skip(tgen.errors)
+
+ rname = "r2"
+ expected = open(os.path.join(CWD, "{}/bgp_rpki_valid.json".format(rname))).read()
+ expected_json = json.loads(expected)
+ test_func = functools.partial(show_rpki_valid, rname, expected_json)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to see RPKI on {}".format(rname)
+
+
def test_show_bgp_rpki_prefixes():
tgen = get_topogen()
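The r2 bgpd.conf change enables `neighbor 192.168.4.4 send-community extended rpki`, which attaches the prefix origin validation state to advertised routes as an extended community, and the new show_rpki_valid() helper then polls `show bgp ipv4 detail json` until rpkiValidationState reads "valid". Assuming the standard RFC 8097 encoding (non-transitive opaque extended community, type 0x43, subtype 0x00, state in the last octet), the community on the wire looks like this sketch:

    def encode_origin_validation_state(state):
        # RFC 8097: 0 = valid, 1 = not found, 2 = invalid.
        assert state in (0, 1, 2)
        return bytes([0x43, 0x00, 0, 0, 0, 0, 0, state])

    assert encode_origin_validation_state(0)[-1] == 0  # "valid", as r2 expects
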
diff --git a/tests/topotests/bgp_bmp_vrf/__init__.py b/tests/topotests/bgp_show_advertised_routes_detail/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/tests/topotests/bgp_bmp_vrf/__init__.py
+++ b/tests/topotests/bgp_show_advertised_routes_detail/__init__.py
diff --git a/tests/topotests/bgp_show_advertised_routes_detail/r1/frr.conf b/tests/topotests/bgp_show_advertised_routes_detail/r1/frr.conf
new file mode 100644
index 0000000000..c9710eb5e8
--- /dev/null
+++ b/tests/topotests/bgp_show_advertised_routes_detail/r1/frr.conf
@@ -0,0 +1,13 @@
+!
+int r1-eth0
+ ip address 192.168.1.1/24
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.1.2 remote-as auto
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ network 10.10.10.1/32
+ !
+!
diff --git a/tests/topotests/bgp_show_advertised_routes_detail/r2/frr.conf b/tests/topotests/bgp_show_advertised_routes_detail/r2/frr.conf
new file mode 100644
index 0000000000..30b4ba539f
--- /dev/null
+++ b/tests/topotests/bgp_show_advertised_routes_detail/r2/frr.conf
@@ -0,0 +1,29 @@
+!
+int r2-eth0
+ ip address 192.168.1.2/24
+!
+int r2-eth1
+ ip address 192.168.2.2/24
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as auto
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+ neighbor 192.168.2.3 remote-as auto
+ neighbor 192.168.2.3 timers 1 3
+ neighbor 192.168.2.3 timers connect 1
+ address-family ipv4 unicast
+ neighbor 192.168.2.3 route-map r3 out
+ exit-address-family
+ !
+!
+ip prefix-list p1 permit 10.10.10.1/32
+!
+route-map r3 permit 10
+ match ip address prefix-list p1
+ set large-community 65001:65002:65003
+ set community 65001:65002
+ set extcommunity bandwidth 100
+exit
+!
diff --git a/tests/topotests/bgp_show_advertised_routes_detail/r3/frr.conf b/tests/topotests/bgp_show_advertised_routes_detail/r3/frr.conf
new file mode 100644
index 0000000000..11333d481f
--- /dev/null
+++ b/tests/topotests/bgp_show_advertised_routes_detail/r3/frr.conf
@@ -0,0 +1,11 @@
+!
+int r3-eth0
+ ip address 192.168.2.3/24
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.2 remote-as auto
+ neighbor 192.168.2.2 timers 1 3
+ neighbor 192.168.2.2 timers connect 1
+ !
+!
diff --git a/tests/topotests/bgp_show_advertised_routes_detail/test_bgp_show_advertised_routes_detail.py b/tests/topotests/bgp_show_advertised_routes_detail/test_bgp_show_advertised_routes_detail.py
new file mode 100644
index 0000000000..fda7ec601d
--- /dev/null
+++ b/tests/topotests/bgp_show_advertised_routes_detail/test_bgp_show_advertised_routes_detail.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+# Copyright (c) 2024 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+pytestmark = [pytest.mark.bgpd]
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, get_topogen
+
+
+def setup_module(mod):
+ topodef = {"s1": ("r1", "r2"), "s2": ("r2", "r3")}
+ tgen = Topogen(topodef, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for _, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_show_advertised_routes_detail():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r2 = tgen.gears["r2"]
+
+ def _bgp_converge():
+ output = json.loads(
+ r2.vtysh_cmd(
+ "show bgp ipv4 unicast neighbor 192.168.2.3 advertised-routes detail json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {
+ "10.10.10.1/32": {
+ "paths": [
+ {
+ "community": {
+ "string": "65001:65002",
+ },
+ "extendedCommunity": {
+ "string": "LB:65002:12500000 (100.000 Mbps)"
+ },
+ "largeCommunity": {
+ "string": "65001:65002:65003",
+ },
+ }
+ ],
+ }
+ },
+ "totalPrefixCounter": 1,
+ "filteredPrefixCounter": 0,
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(
+ _bgp_converge,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Can't converge"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
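The expected string "LB:65002:12500000 (100.000 Mbps)" follows from how the link-bandwidth extended community is displayed: `set extcommunity bandwidth 100` configures 100 Mbit/s, and the raw value is rendered in bytes per second, so 100 * 10^6 / 8 = 12,500,000. A quick sanity check of that conversion:

    def mbps_to_bytes_per_sec(mbps):
        # Link-bandwidth values are carried as bytes per second.
        return mbps * 1_000_000 // 8

    assert mbps_to_bytes_per_sec(100) == 12_500_000  # "LB:65002:12500000"
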
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/ce1/bgpd.conf b/tests/topotests/bgp_snmp_mplsl3vpn/ce1/bgpd.conf
index b598666dfb..75d8c9e553 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/ce1/bgpd.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/ce1/bgpd.conf
@@ -1,4 +1,3 @@
-log file /tmp/bgpd.log debugging
!
router bgp 65001
timers bgp 3 9
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/ce1/zebra.conf b/tests/topotests/bgp_snmp_mplsl3vpn/ce1/zebra.conf
index 4a8579845c..8e6743c5fd 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/ce1/zebra.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/ce1/zebra.conf
@@ -1,5 +1,3 @@
-log file /tmp/zebra.log
-log stdout
!
! debug zebra events
! debug zebra dplane
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/ce2/bgpd.conf b/tests/topotests/bgp_snmp_mplsl3vpn/ce2/bgpd.conf
index e388ccba8a..37d40651c5 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/ce2/bgpd.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/ce2/bgpd.conf
@@ -1,4 +1,3 @@
-log file /tmp/bgpd.log debugging
!
router bgp 65001
bgp router-id 192.168.200.10
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/ce2/zebra.conf b/tests/topotests/bgp_snmp_mplsl3vpn/ce2/zebra.conf
index 5e0aa5d3f0..02afea1148 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/ce2/zebra.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/ce2/zebra.conf
@@ -1,5 +1,3 @@
-log file /tmp/zebra.log
-log stdout
!
! debug zebra events
! debug zebra dplane
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/ce3/bgpd.conf b/tests/topotests/bgp_snmp_mplsl3vpn/ce3/bgpd.conf
index e388ccba8a..37d40651c5 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/ce3/bgpd.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/ce3/bgpd.conf
@@ -1,4 +1,3 @@
-log file /tmp/bgpd.log debugging
!
router bgp 65001
bgp router-id 192.168.200.10
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/ce3/zebra.conf b/tests/topotests/bgp_snmp_mplsl3vpn/ce3/zebra.conf
index fabc11e84d..714f1ec356 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/ce3/zebra.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/ce3/zebra.conf
@@ -1,5 +1,3 @@
-log file /tmp/zebra.log
-log stdout
!
! debug zebra events
! debug zebra dplane
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/ce4/bgpd.conf b/tests/topotests/bgp_snmp_mplsl3vpn/ce4/bgpd.conf
index e388ccba8a..37d40651c5 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/ce4/bgpd.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/ce4/bgpd.conf
@@ -1,4 +1,3 @@
-log file /tmp/bgpd.log debugging
!
router bgp 65001
bgp router-id 192.168.200.10
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/ce4/zebra.conf b/tests/topotests/bgp_snmp_mplsl3vpn/ce4/zebra.conf
index e369f41b39..d5efab4bf6 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/ce4/zebra.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/ce4/zebra.conf
@@ -1,5 +1,3 @@
-log file /tmp/zebra.log
-log stdout
!
! debug zebra events
! debug zebra dplane
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/r1/bgpd.conf b/tests/topotests/bgp_snmp_mplsl3vpn/r1/bgpd.conf
index 098e55d0ed..b80a90ac7d 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/r1/bgpd.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/r1/bgpd.conf
@@ -1,4 +1,3 @@
-log file /tmp/bgpd.log debugging
!
router bgp 65000
bgp router-id 10.1.1.1
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/r2/zebra.conf b/tests/topotests/bgp_snmp_mplsl3vpn/r2/zebra.conf
index 4fec8af3db..87cffebd84 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/r2/zebra.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/r2/zebra.conf
@@ -1,5 +1,3 @@
-log file /tmp/zebra.log
-log stdout
!
! debug zebra events
! debug zebra dplane
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/r3/zebra.conf b/tests/topotests/bgp_snmp_mplsl3vpn/r3/zebra.conf
index e433995593..162f5bbccc 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/r3/zebra.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/r3/zebra.conf
@@ -1,5 +1,3 @@
-log file /tmp/zebra.log
-log stdout
!
! debug zebra events
! debug zebra dplane
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/r4/bgpd.conf b/tests/topotests/bgp_snmp_mplsl3vpn/r4/bgpd.conf
index 2a834c799e..1f44feb0da 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/r4/bgpd.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/r4/bgpd.conf
@@ -1,4 +1,3 @@
-log file /tmp/bgpd.log debugging
!
router bgp 65000
bgp router-id 10.4.4.4
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/r4/zebra.conf b/tests/topotests/bgp_snmp_mplsl3vpn/r4/zebra.conf
index 14580e5b3a..7b0719d266 100644
--- a/tests/topotests/bgp_snmp_mplsl3vpn/r4/zebra.conf
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/r4/zebra.conf
@@ -1,5 +1,3 @@
-log file /tmp/zebra.log
-log stdout
!
! debug zebra events
! debug zebra dplane
diff --git a/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/__init__.py b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/__init__.py
diff --git a/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r1/frr.conf b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r1/frr.conf
new file mode 100644
index 0000000000..7daf335aab
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r1/frr.conf
@@ -0,0 +1,117 @@
+interface r1-eth1 vrf vrf1
+ ip address 173.31.1.1/32
+!
+interface r1-eth2 vrf vrf2
+ ip address 173.31.1.2/32
+!
+interface r1-eth3 vrf vrf3
+ ip address 173.31.1.3/32
+!
+interface r1-eth4 vrf vrf4
+ ip address 173.31.1.4/32
+!
+interface r1-eth5 vrf vrf5
+ ip address 173.31.1.5/32
+!
+
+interface r1-eth0
+ ip address 192.168.0.1/24
+!
+
+interface r1-eth6
+ ip address 193.170.0.1/24
+
+interface lo
+ ip address 11.11.11.11/32
+!
+router ospf
+ ospf router-id 11.11.11.11
+ network 193.170.0.0/24 area 0.0.0.0
+ network 11.11.11.11/32 area 0.0.0.0
+ redistribute connected
+exit
+!
+mpls ldp
+ router-id 11.11.11.11
+ !
+ address-family ipv4
+ discovery transport-address 11.11.11.11
+ !
+ interface r1-eth6
+ exit
+ !
+ exit-address-family
+ !
+exit
+!
+bgp route-map delay-timer 1
+router bgp 65500
+ bgp router-id 192.0.2.1
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.2 remote-as 65501
+ address-family ipv4 unicast
+ no neighbor 192.168.0.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.168.0.2 activate
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:1
+ rt vpn both 53:1
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf2
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:2
+ rt vpn both 53:2
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf3
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:3
+ rt vpn both 53:3
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf4
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:4
+ rt vpn both 53:4
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf5
+ bgp router-id 192.0.2.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:5
+ rt vpn both 53:5
+ export vpn
+ import vpn
+ exit-address-family
+!
+
+interface r1-eth0
+ mpls bgp forwarding
+! \ No newline at end of file
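Each VRF above uses `label vpn export auto`, so instead of a statically pinned MPLS label bgpd allocates one per VRF from its shared label pool; this is what test_bgp_vpnv4_vpn_auto.py below inspects through `show bgp labelpool summary json`. A minimal check sketch, using the same JSON keys the test asserts on (with `router` being a Topogen gear):

    # Five VRFs with "label vpn export auto" => five labels in use.
    summary = json.loads(router.vtysh_cmd("show bgp labelpool summary json"))
    assert summary["inUse"] == 5
    assert summary["labelChunks"] == 1
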
diff --git a/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r2/frr.conf b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r2/frr.conf
new file mode 100644
index 0000000000..6facebe40e
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r2/frr.conf
@@ -0,0 +1,88 @@
+interface r2-eth1 vrf vrf1
+ ip address 173.31.0.1/32
+!
+interface r2-eth2 vrf vrf2
+ ip address 173.31.0.2/32
+!
+interface r2-eth3 vrf vrf3
+ ip address 173.31.0.3/32
+!
+interface r2-eth4 vrf vrf4
+ ip address 173.31.0.4/32
+!
+interface r2-eth5 vrf vrf5
+ ip address 173.31.0.5/32
+!
+interface r2-eth0
+ ip address 192.168.0.2/24
+!
+router bgp 65501
+ bgp router-id 192.0.2.2
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 192.168.0.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 192.168.0.1 activate
+ exit-address-family
+!
+router bgp 65501 vrf vrf1
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:1
+ rt vpn both 53:1
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65501 vrf vrf2
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:2
+ rt vpn both 53:2
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65501 vrf vrf3
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:3
+ rt vpn both 53:3
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65501 vrf vrf4
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:4
+ rt vpn both 53:4
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65501 vrf vrf5
+ bgp router-id 192.0.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export auto
+ rd vpn export 445:5
+ rt vpn both 53:5
+ export vpn
+ import vpn
+ exit-address-family
+!
+
+interface r2-eth0
+ mpls bgp forwarding
+!
diff --git a/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r3/frr.conf b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r3/frr.conf
new file mode 100644
index 0000000000..8f49cdfe0c
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/r3/frr.conf
@@ -0,0 +1,32 @@
+interface r3-eth0
+ ip address 193.170.0.2/24
+!
+interface lo
+ ip address 33.33.33.33/32
+!
+interface r3-eth1
+ ip address 180.170.0.2/32
+!
+interface r3-eth2
+ ip address 180.170.0.3/32
+!
+router ospf
+ ospf router-id 33.33.33.33
+ network 193.170.0.0/24 area 0.0.0.0
+ network 33.33.33.33/32 area 0.0.0.0
+ redistribute connected
+exit
+!
+mpls ldp
+ router-id 33.33.33.33
+ !
+ address-family ipv4
+ discovery transport-address 33.33.33.33
+ !
+ interface r3-eth0
+ exit
+ !
+ exit-address-family
+ !
+exit
+! \ No newline at end of file
diff --git a/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/test_bgp_vpnv4_vpn_auto.py b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/test_bgp_vpnv4_vpn_auto.py
new file mode 100644
index 0000000000..ed3cdca2f9
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_ebgp_vpn_auto/test_bgp_vpnv4_vpn_auto.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_bgp_vpnv4_vpn_auto.py
+#
+# Copyright (c) 2024 by Varun Hegde
+#
+
+"""
+ test_bgp_vpnv4_vpn_auto.py: Test the FRR BGP daemon with a BGP VPN session using 'label vpn export auto'
+"""
+
+import os
+import sys
+import json
+import functools
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.bgpcheck import (
+ check_show_bgp_vpn_prefix_found,
+ check_show_bgp_vpn_prefix_not_found,
+)
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 3 routers.
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r3")
+
+
+ for i in range(6):
+ switch = tgen.add_switch("s{0}".format(i))
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ # create a single link between r1 and r3
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
+ for i in range(7, 9):
+ switch = tgen.add_switch("s{0}".format(i))
+ switch.add_link(tgen.gears["r3"])
+
+
+
+def _populate_iface():
+ tgen = get_topogen()
+ cmds_list = [
+ "ip link add vrf{} type vrf table {}",
+ "ip link set dev vrf{} up",
+ "ip link set dev r1-eth{} master vrf{}",
+ "echo 1 > /proc/sys/net/mpls/conf/r1-eth{}/input",
+ ]
+ cmds_list2 = [
+ "ip link add vrf{} type vrf table {}",
+ "ip link set dev vrf{} up",
+ "ip link set dev r2-eth{} master vrf{}",
+ "echo 1 > /proc/sys/net/mpls/conf/r2-eth{}/input",
+ ]
+
+ for i in range(1, 6):
+ for cmd in cmds_list:
+ cmd_fmt = cmd.format(i, i)
+ logger.info("input: " + cmd_fmt)
+ output = tgen.net["r1"].cmd(cmd_fmt)
+ logger.info("output: " + output)
+
+ for cmd in cmds_list2:
+ cmd_fmt = cmd.format(i, i)
+ logger.info("input: " + cmd_fmt)
+ output = tgen.net["r2"].cmd(cmd_fmt)
+ logger.info("output: " + output)
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ _populate_iface()
+
+ for rname, router in router_list.items():
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ tgen.stop_topology()
+
+
+def test_labelpool_release():
+ """
+ Check that once a BGP VPN session is removed,
+ the label pool structures (allocated_map) are released properly
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Just waiting for BGP VPN sessions to converge
+ logger.info("Waiting for BGP VPN sessions to converge and label pools to get initialised")
+ router = tgen.gears["r1"]
+
+ def _bgp_converge():
+ output = json.loads(
+ router.vtysh_cmd("show bgp labelpool summary json")
+ )
+ expected = {"ledger": 5, "inUse": 5, "requests": 0, "labelChunks": 1, "pending": 0, "reconnects": 1}
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Failed to see BGP Labelpool initialised"
+
+
+ # checking the initial label pool chunk's free labels
+ logger.info("checking the initial label pool chunk's free labels")
+ expected = [{"first": 80, "last": 207, "size": 128, "numberFree": 123}]
+ test_func = functools.partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp label chunks json",
+ expected,
+ )
+
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+
+ # Test case: check whether the labels got released
+ logger.info(
+ "Remove multiple VPN sessions and check whether the labels got released"
+ )
+ router.vtysh_cmd(
+ """
+ configure terminal
+ no router bgp 65500 vrf vrf1
+ no router bgp 65500 vrf vrf2
+ """
+ )
+ expected = [{"first": 80, "last": 207, "size": 128, "numberFree": 125}]
+ test_func = functools.partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp label chunks json",
+ expected,
+ )
+
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
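The free-label counts asserted in test_labelpool_release() follow directly from the chunk geometry: the chunk spans labels 80..207, so it holds 207 - 80 + 1 = 128 labels; five VRFs each hold one auto label, leaving 128 - 5 = 123 free, and removing vrf1 and vrf2 returns two labels, giving 128 - 3 = 125. The same bookkeeping, spelled out:

    first, last = 80, 207
    size = last - first + 1  # 128 labels in the chunk
    assert size == 128
    assert size - 5 == 123   # all five VRFs hold one auto label each
    assert size - 3 == 125   # after removing the vrf1 and vrf2 instances
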
diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in/__init__.py b/tests/topotests/bgp_vpnv4_import_allowas_in/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_import_allowas_in/__init__.py
diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in/r1/frr.conf b/tests/topotests/bgp_vpnv4_import_allowas_in/r1/frr.conf
new file mode 100644
index 0000000000..30d11627f5
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_import_allowas_in/r1/frr.conf
@@ -0,0 +1,30 @@
+!
+interface r1-eth0
+ ip address 192.168.179.4/24
+exit
+!
+router bgp 65001
+ bgp router-id 192.168.179.4
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.179.5 remote-as auto
+!
+ address-family ipv4 vpn
+ neighbor 192.168.179.5 activate
+ neighbor 192.168.179.5 next-hop-self
+ neighbor 192.168.179.5 allowas-in 1
+ exit-address-family
+!
+router bgp 65001 vrf CUSTOMER-A
+ bgp router-id 192.168.0.1
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+!
+ address-family ipv4 unicast
+ label vpn export auto
+ rd vpn export 100:1
+ rt vpn both 100:1
+ export vpn
+ import vpn
+ exit-address-family
+
diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in/r2/frr.conf b/tests/topotests/bgp_vpnv4_import_allowas_in/r2/frr.conf
new file mode 100644
index 0000000000..bbfd2c22f4
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_import_allowas_in/r2/frr.conf
@@ -0,0 +1,40 @@
+!
+interface lo
+ ip address 10.10.10.10/32
+!
+interface r2-eth0
+ ip address 192.168.179.5/24
+exit
+!
+interface r2-eth1
+ ip address 192.168.2.2/24
+exit
+!
+router bgp 65002
+ bgp router-id 192.168.179.5
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.179.4 remote-as auto
+!
+ address-family ipv4 vpn
+ neighbor 192.168.179.4 activate
+ neighbor 192.168.179.4 next-hop-self
+ exit-address-family
+!
+router bgp 65002 vrf CUSTOMER-A
+ bgp router-id 192.168.0.2
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+!
+ address-family ipv4 unicast
+ redistribute connected
+ network 10.10.10.10/32 route-map r1
+ label vpn export auto
+ rd vpn export 100:1
+ rt vpn both 100:1
+ export vpn
+ import vpn
+ exit-address-family
+!
+route-map r1 permit 10
+ set as-path prepend 65001
diff --git a/tests/topotests/bgp_vpnv4_import_allowas_in/test_bgp_vpnv4_import_allowas_in.py b/tests/topotests/bgp_vpnv4_import_allowas_in/test_bgp_vpnv4_import_allowas_in.py
new file mode 100644
index 0000000000..f3d016cb17
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_import_allowas_in/test_bgp_vpnv4_import_allowas_in.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2024 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+
+ r1.run("ip link add CUSTOMER-A type vrf table 1001")
+ r1.run("ip link set up dev CUSTOMER-A")
+ r1.run("ip link set r1-eth1 master CUSTOMER-A")
+
+ r2.run("ip link add CUSTOMER-A type vrf table 1001")
+ r2.run("ip link set up dev CUSTOMER-A")
+ r2.run("ip link set r2-eth1 master CUSTOMER-A")
+
+ router_list = tgen.routers()
+
+ for _, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_issue_12502():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ def _bgp_converge():
+ output = json.loads(
+ r1.vtysh_cmd("show bgp vrf CUSTOMER-A ipv4 unicast 10.10.10.10/32 json")
+ )
+ expected = {
+ "paths": [
+ {
+ "importedFrom": "100:1",
+ "aspath": {
+ "string": "65002 65001",
+ },
+ "valid": True,
+ }
+ ]
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Failed to see 10.10.10.10/32 with a valid next-hop"
+
+ def _vrf_route_imported_to_zebra():
+ output = json.loads(
+ r1.vtysh_cmd("show ip route vrf CUSTOMER-A 10.10.10.10/32 json")
+ )
+ expected = {
+ "10.10.10.10/32": [
+ {
+ "protocol": "bgp",
+ "vrfName": "CUSTOMER-A",
+ "selected": True,
+ "installed": True,
+ "table": 1001,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "fib": True,
+ "ip": "192.168.179.5",
+ "afi": "ipv4",
+ "interfaceName": "r1-eth0",
+ "vrf": "default",
+ "active": True,
+ }
+ ],
+ }
+ ]
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_vrf_route_imported_to_zebra)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert (
+ result is None
+ ), "Failed to see 10.10.10.10/32 to be imported into default VRF (Zebra)"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
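The imported path in this test carries AS_PATH "65002 65001", which contains r1's own ASN 65001; plain eBGP loop prevention would reject it on receipt. `neighbor 192.168.179.5 allowas-in 1` relaxes that check to tolerate one occurrence of the local AS. The acceptance rule, as a hypothetical sketch (not FRR's actual implementation):

    def aspath_accepted(aspath, local_as, allowas_in=0):
        # Reject only if the local AS appears more often than allowed.
        return aspath.count(local_as) <= allowas_in

    assert not aspath_accepted([65002, 65001], 65001)             # default: loop
    assert aspath_accepted([65002, 65001], 65001, allowas_in=1)   # allowas-in 1
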
diff --git a/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
index 6237decfc3..ee84e375fb 100644
--- a/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
+++ b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
@@ -140,6 +140,10 @@ def router_json_cmp_exact_filter(router, cmd, expected):
# filter out tableVersion, version and nhVrfID
json_output.pop("tableVersion")
+ if "totalRoutes" in json_output:
+ json_output.pop("totalRoutes")
+ if "totalPaths" in json_output:
+ json_output.pop("totalPaths")
for rd, data in json_output["routes"]["routeDistinguishers"].items():
for _, attrs in data.items():
for attr in attrs:
@@ -163,12 +167,18 @@ def router_vrf_json_cmp_exact_filter(router, cmd, expected):
json_output = json.loads(output)
+ print(json_output)
+
# filter out tableVersion, version, nhVrfId and vrfId
for vrf, data in json_output.items():
if "vrfId" in data:
data.pop("vrfId")
if "tableVersion" in data:
data.pop("tableVersion")
+ if "totalRoutes" in data:
+ data.pop("totalRoutes")
+ if "totalPaths" in data:
+ data.pop("totalPaths")
if "routes" not in data:
continue
for _, attrs in data["routes"].items():
@@ -203,7 +213,7 @@ def check_show_bgp_ipv4_vpn(rname, json_file):
"show bgp ipv4 vpn json",
expected,
)
- _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
@@ -224,7 +234,7 @@ def check_show_bgp_vrf_ipv4(rname, json_file):
"show bgp vrf all ipv4 unicast json",
expected,
)
- _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
@@ -248,7 +258,7 @@ def test_protocols_convergence_step0():
"show bgp ipv4 vpn summary json",
expected,
)
- _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
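Because these noretain checks compare JSON output exactly rather than with json_cmp()'s subset matching, any counter that may drift between runs must be stripped first; the hunks above extend the existing filters to also drop the new totalRoutes and totalPaths fields. The general pattern, as a sketch:

    def strip_volatile(json_output, keys=("tableVersion", "totalRoutes", "totalPaths")):
        # Drop counters that legitimately vary before an exact comparison.
        for key in keys:
            json_output.pop(key, None)
        return json_output
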
diff --git a/tests/topotests/bgp_vpnv4_route_leak_basic/r1/frr.conf b/tests/topotests/bgp_vpnv4_route_leak_basic/r1/frr.conf
new file mode 100644
index 0000000000..d3ababde3a
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_route_leak_basic/r1/frr.conf
@@ -0,0 +1,75 @@
+int dummy0
+ ip address 10.0.4.1/24
+ no shut
+!
+int dummy1
+ ip address 10.0.0.1/24
+ no shut
+!
+int dummy2
+ ip address 10.0.1.1/24
+ no shut
+!
+int dummy3
+ ip address 10.0.2.1/24
+ no shut
+!
+int dummy4
+ ip address 10.0.3.1/24
+ no shut
+!
+int EVA
+ no shut
+!
+int DONNA
+ no shut
+!
+ip router-id 10.0.4.1
+!
+router bgp 99
+ no bgp ebgp-requires-policy
+ address-family ipv4 unicast
+ redistribute connected
+ rd vpn export 10.0.4.1:1
+ rt vpn export 10.0.4.1:1
+ rt vpn import 10.0.4.1:101
+ export vpn
+ import vpn
+ !
+!
+router bgp 99 vrf DONNA
+ no bgp ebgp-requires-policy
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 101
+ rd vpn export 10.0.4.1:1
+ rt vpn export 10.0.4.1:101
+ rt vpn import 10.0.4.1:1 10.0.4.1:102 10.0.4.1:103
+ export vpn
+ import vpn
+ !
+!
+router bgp 99 vrf EVA
+ no bgp ebgp-requires-policy
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 10.0.4.1:1
+ rt vpn export 10.0.4.1:102
+ rt vpn import 10.0.4.1:101 10.0.4.1:103
+ export vpn
+ import vpn
+ !
+!
+router bgp 99 vrf ZITA
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ address-family ipv4 unicast
+ network 172.16.101.0/24
+ label vpn export 103
+ rd vpn export 10.0.4.1:1
+ rt vpn export 10.0.4.1:103
+ export vpn
+ import vpn
+ !
+!
diff --git a/tests/topotests/bgp_vpnv4_route_leak_basic/setup_vrfs b/tests/topotests/bgp_vpnv4_route_leak_basic/setup_vrfs
new file mode 100644
index 0000000000..f62c5cd211
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_route_leak_basic/setup_vrfs
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+ip link add DONNA type vrf table 1001
+ip link add EVA type vrf table 1002
+
+ip link add dummy0 type dummy # vrf default
+ip link add dummy1 type dummy
+ip link add dummy2 type dummy
+ip link add dummy3 type dummy
+ip link add dummy4 type dummy
+
+ip link set dummy1 master DONNA
+ip link set dummy2 master EVA
+ip link set dummy3 master DONNA
+ip link set dummy4 master EVA
+
+
diff --git a/tests/topotests/bgp_vpnv4_route_leak_basic/test_bgp_vpnv4_route_leak_basic.py b/tests/topotests/bgp_vpnv4_route_leak_basic/test_bgp_vpnv4_route_leak_basic.py
new file mode 100644
index 0000000000..a44f07b560
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_route_leak_basic/test_bgp_vpnv4_route_leak_basic.py
@@ -0,0 +1,517 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_bgp_vpnv4_route_leak_basic.py
+#
+# Copyright (c) 2018 Cumulus Networks, Inc.
+# Donald Sharp
+# Copyright (c) 2024 6WIND SAS
+#
+
+"""
+Test basic VPNv4 route leaking
+"""
+
+import os
+import sys
+from functools import partial
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.checkping import check_ping
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ for routern in range(1, 2):
+ tgen.add_router("r{}".format(routern))
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ # For all registered routers, load the unified configuration file
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}/setup_vrfs".format(CWD))
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ # After loading the configurations, this function loads configured daemons.
+ tgen.start_router()
+ # tgen.mininet_cli()
+
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+def test_vrf_route_leak_donna():
+ logger.info("Ensure that routes are leaked back and forth")
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ # Test DONNA VRF.
+ expect = {
+ "10.0.0.0/24": [
+ {
+ "protocol": "connected",
+ }
+ ],
+ "10.0.1.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "interfaceName": "EVA",
+ "vrf": "EVA",
+ "active": True,
+ },
+ ],
+ },
+ ],
+ "10.0.2.0/24": [{"protocol": "connected"}],
+ "10.0.3.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "interfaceName": "EVA",
+ "vrf": "EVA",
+ "active": True,
+ },
+ ],
+ },
+ ],
+ "10.0.4.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "interfaceName": "dummy0",
+ "vrf": "default",
+ "active": True,
+ },
+ ],
+ },
+ ],
+ "172.16.101.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": None,
+ "nexthops": [
+ {
+ "fib": None,
+ "interfaceName": "unknown",
+ "vrf": "Unknown",
+ "active": None,
+ },
+ ],
+ },
+ ],
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect
+ )
+ result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result, "BGP VRF DONNA check failed:\n{}".format(diff)
+
+
+def test_vrf_route_leak_eva():
+ logger.info("Ensure that routes are leaked back and forth")
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ # Test EVA VRF.
+ expect = {
+ "10.0.0.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "interfaceName": "DONNA",
+ "vrf": "DONNA",
+ "active": True,
+ },
+ ],
+ },
+ ],
+ "10.0.1.0/24": [
+ {
+ "protocol": "connected",
+ }
+ ],
+ "10.0.2.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "interfaceName": "DONNA",
+ "vrf": "DONNA",
+ "active": True,
+ },
+ ],
+ },
+ ],
+ "10.0.3.0/24": [
+ {
+ "protocol": "connected",
+ }
+ ],
+ "172.16.101.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": None,
+ "nexthops": [
+ {
+ "fib": None,
+ "interfaceName": "unknown",
+ "vrf": "Unknown",
+ "active": None,
+ },
+ ],
+ },
+ ],
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route vrf EVA json", expect
+ )
+ result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result, "BGP VRF EVA check failed:\n{}".format(diff)
+
+
+def test_vrf_route_leak_default():
+ logger.info("Ensure that routes are leaked back and forth")
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ # Test default VRF.
+ expect = {
+ "10.0.0.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "interfaceName": "DONNA",
+ "vrf": "DONNA",
+ "active": True,
+ },
+ ],
+ },
+ ],
+ "10.0.2.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "interfaceName": "DONNA",
+ "vrf": "DONNA",
+ "active": True,
+ },
+ ],
+ },
+ ],
+ "10.0.4.0/24": [
+ {
+ "protocol": "connected",
+ }
+ ],
+ }
+
+ test_func = partial(topotest.router_json_cmp, r1, "show ip route json", expect)
+ result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result, "BGP VRF default check failed:\n{}".format(diff)
+
+
+def test_ping():
+ "Simple ping tests"
+
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ logger.info("Ping from default to DONNA")
+ check_ping("r1", "10.0.0.1", True, 10, 0.5, source_addr="10.0.4.1")
+
+
+def test_vrf_route_leak_donna_after_eva_down():
+ logger.info("Ensure that route states change after EVA interface goes down")
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r1.vtysh_cmd(
+ """
+configure
+interface EVA
+ shutdown
+"""
+ )
+
+ # Test DONNA VRF.
+ expect = {
+ "10.0.1.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": None,
+ "nexthops": [
+ {
+ "fib": None,
+ "interfaceName": "EVA",
+ "vrf": "EVA",
+ "active": None,
+ },
+ ],
+ },
+ ],
+ "10.0.3.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": None,
+ "nexthops": [
+ {
+ "fib": None,
+ "interfaceName": "EVA",
+ "vrf": "EVA",
+ "active": None,
+ },
+ ],
+ },
+ ],
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect
+ )
+ result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result, "BGP VRF DONNA check failed:\n{}".format(diff)
+
+ """
+ Check that "show ip route vrf DONNA json" and the JSON at key "DONNA" of
+ "show ip route vrf all json" gives the same result.
+ """
+
+ def check_vrf_table(router, vrf, expect):
+ output = router.vtysh_cmd("show ip route vrf all json", isjson=True)
+ vrf_table = output.get(vrf, {})
+
+ return topotest.json_cmp(vrf_table, expect)
+
+ test_func = partial(check_vrf_table, r1, "DONNA", expect)
+ result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result, "BGP VRF DONNA check failed:\n{}".format(diff)
+
+
+def test_vrf_route_leak_donna_after_eva_up():
+ logger.info("Ensure that route states change after EVA interface goes up")
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r1.vtysh_cmd(
+ """
+configure
+interface EVA
+ no shutdown
+"""
+ )
+
+ # Test DONNA VRF.
+ expect = {
+ "10.0.1.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "interfaceName": "EVA",
+ "vrf": "EVA",
+ "active": True,
+ },
+ ],
+ },
+ ],
+ "10.0.3.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "interfaceName": "EVA",
+ "vrf": "EVA",
+ "active": True,
+ },
+ ],
+ },
+ ],
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect
+ )
+ result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result, "BGP VRF DONNA check failed:\n{}".format(diff)
+
+
+def test_vrf_route_leak_donna_add_vrf_zita():
+ logger.info("Add VRF ZITA and ensure that the route from VRF ZITA is updated")
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r1.cmd("ip link add ZITA type vrf table 1003")
+
+ # Test DONNA VRF.
+ expect = {
+ "172.16.101.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": None,
+ "nexthops": [
+ {
+ "fib": None,
+ "interfaceName": "ZITA",
+ "vrf": "ZITA",
+ "active": None,
+ },
+ ],
+ },
+ ],
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect
+ )
+ result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result, "BGP VRF DONNA check failed:\n{}".format(diff)
+
+
+def test_vrf_route_leak_donna_set_zita_up():
+ logger.info("Set VRF ZITA up and ensure that the route from VRF ZITA is updated")
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r1.vtysh_cmd(
+ """
+configure
+interface ZITA
+ no shutdown
+"""
+ )
+
+ # Test DONNA VRF.
+ expect = {
+ "172.16.101.0/24": [
+ {
+ "protocol": "bgp",
+ "selected": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "interfaceName": "ZITA",
+ "vrf": "ZITA",
+ "active": True,
+ },
+ ],
+ },
+ ],
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect
+ )
+ result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result, "BGP VRF DONNA check failed:\n{}".format(diff)
+
+
+def test_vrf_route_leak_donna_delete_vrf_zita():
+ logger.info("Delete VRF ZITA and ensure that the route from VRF ZITA is deleted")
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r1.cmd("ip link delete ZITA")
+
+ # Test DONNA VRF.
+ expect = {
+ "172.16.101.0/24": None,
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect
+ )
+ result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result, "BGP VRF DONNA check failed:\n{}".format(diff)
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_vrf_netns/r1/bgpd.conf b/tests/topotests/bgp_vrf_netns/r1/bgpd.conf
index 572dce7455..2853a7a5ca 100644
--- a/tests/topotests/bgp_vrf_netns/r1/bgpd.conf
+++ b/tests/topotests/bgp_vrf_netns/r1/bgpd.conf
@@ -5,6 +5,7 @@ router bgp 100 vrf r1-bgp-cust1
no bgp ebgp-requires-policy
neighbor 10.0.1.101 remote-as 99
neighbor 10.0.1.101 timers 3 10
+ neighbor 10.0.1.101 timers connect 1
!
!
diff --git a/tests/topotests/docker/README.md b/tests/topotests/docker/README.md
index 2b40994cf6..2bd58a15b8 100644
--- a/tests/topotests/docker/README.md
+++ b/tests/topotests/docker/README.md
@@ -68,5 +68,5 @@ without pulling from the registry using the following commands:
```console
make topotests-build
-TOPOTEST_PULL=0 make topotests
+make topotests
```
diff --git a/tests/topotests/docker/build.sh b/tests/topotests/docker/build.sh
index aec20587ba..20d08e4979 100755
--- a/tests/topotests/docker/build.sh
+++ b/tests/topotests/docker/build.sh
@@ -1,11 +1,11 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: MIT
#
# Copyright 2018 Network Device Education Foundation, Inc. ("NetDEF")
cd "$(dirname "$0")"/..
-exec docker build --pull \
+exec $(command -v docker || command -v podman) build --pull \
--compress \
-t frrouting/topotests:latest \
.
diff --git a/tests/topotests/docker/frr-topotests.sh b/tests/topotests/docker/frr-topotests.sh
index ce373d9bd0..8de8e7b1f6 100755
--- a/tests/topotests/docker/frr-topotests.sh
+++ b/tests/topotests/docker/frr-topotests.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: MIT
#
# Copyright 2018 Network Device Education Foundation, Inc. ("NetDEF")
@@ -45,9 +45,6 @@ if [[ "$1" = "-h" ]] || [[ "$1" = "--help" ]]; then
TOPOTEST_OPTIONS These options are appended to the docker-run
command for starting the tests.
- TOPOTEST_PULL If set to 0, don't try to pull the most recent
- version of the docker image from dockerhub.
-
TOPOTEST_SANITIZER Controls whether to use the address sanitizer.
Enabled by default, set to 0 to disable.
@@ -116,14 +113,12 @@ if [ -z "$TOPOTEST_FRR" ]; then
git -C "$TOPOTEST_FRR" ls-files -z > "${TOPOTEST_LOGS}/git-ls-files"
fi
+cmd="$(command -v docker || command -v podman)"
+
if [ -z "$TOPOTEST_BUILDCACHE" ]; then
TOPOTEST_BUILDCACHE=topotest-buildcache
- docker volume inspect "${TOPOTEST_BUILDCACHE}" &> /dev/null \
- || docker volume create "${TOPOTEST_BUILDCACHE}"
-fi
-
-if [ "${TOPOTEST_PULL:-1}" = "1" ]; then
- docker pull frrouting/topotests:latest
+ "${cmd}" volume inspect "${TOPOTEST_BUILDCACHE}" &> /dev/null \
+ || "${cmd}" volume create "${TOPOTEST_BUILDCACHE}"
fi
if [[ -n "$TMUX" ]]; then
@@ -152,4 +147,4 @@ if [ -t 0 ]; then
set -- -t "$@"
fi
-exec docker run "$@"
+exec "${cmd}" run "$@"
diff --git a/tests/topotests/docker/inner/compile_frr.sh b/tests/topotests/docker/inner/compile_frr.sh
index 4a88dc677f..e943c385c7 100755
--- a/tests/topotests/docker/inner/compile_frr.sh
+++ b/tests/topotests/docker/inner/compile_frr.sh
@@ -58,9 +58,6 @@ if [ ! -e Makefile ]; then
fi
bash configure >&3 \
- --enable-static-bin \
- --enable-static \
- --enable-shared \
--enable-dev-build \
--with-moduledir=/usr/lib/frr/modules \
--prefix=/usr \
@@ -69,6 +66,8 @@ if [ ! -e Makefile ]; then
--sbindir=/usr/lib/frr \
--enable-multipath=0 \
--enable-fpm \
+ --enable-grpc \
+ --enable-scripting \
--enable-sharpd \
$EXTRA_CONFIGURE \
--with-pkg-extra-version=-topotests \
diff --git a/tests/topotests/docker/inner/entrypoint.sh b/tests/topotests/docker/inner/entrypoint.sh
index 44e16db4b9..b92217440b 100755
--- a/tests/topotests/docker/inner/entrypoint.sh
+++ b/tests/topotests/docker/inner/entrypoint.sh
@@ -20,6 +20,11 @@ cd "${FRR_BUILD_DIR}/tests/topotests"
log_info "Setting permissions on /tmp so we can generate logs"
chmod 1777 /tmp
+# This is a MUST: newer kernels default to 32 bits of mmap address
+# randomization, which is more entropy than the address sanitizer can
+# handle, and the daemons crash at startup with:
+#   AddressSanitizer:DEADLYSIGNAL
+#   Segmentation fault
+sysctl -w vm.mmap_rnd_bits=28
+
if [ $# -eq 0 ] || ([[ "$1" != /* ]] && [[ "$1" != ./* ]]); then
export TOPOTESTS_CHECK_MEMLEAK=/tmp/memleak_
export TOPOTESTS_CHECK_STDERR=Yes
diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py
index 45868663a8..cb3104a522 100644
--- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py
+++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_chaos_topo1.py
@@ -21,6 +21,8 @@ import sys
import time
import pytest
import platform
+import functools
+from lib import topotest
from copy import deepcopy
@@ -539,6 +541,16 @@ def test_RT_verification_auto_p0(request):
result = create_vrf_cfg(tgen, topo, input_dict=input_dict_vni)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
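+ # Wait until BGP reports no L3 VNIs before tearing down the BGP config below.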
+ expected = {"numL3Vnis": 0}
+ test_func = functools.partial(
+ topotest.router_json_cmp,
+ tgen.gears["e1"],
+ "show bgp l2vpn evpn vni json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=5, wait=3)
+ assert result is None, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
input_dict_2 = {}
for dut in ["e1"]:
temp = {dut: {"bgp": []}}
diff --git a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
index beb4de432e..52181a75dc 100644
--- a/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
+++ b/tests/topotests/evpn_type5_test_topo1/test_evpn_type5_topo1.py
@@ -25,6 +25,8 @@ import sys
import time
import pytest
import platform
+import functools
+from lib import topotest
from copy import deepcopy
@@ -1124,7 +1126,6 @@ def test_active_standby_evpn_implementation_p1(request):
)
for addr_type in ADDR_TYPES:
-
logger.info("Verifying only ipv4 routes")
if addr_type != "ipv4":
continue
@@ -2050,6 +2051,18 @@ def test_bgp_attributes_for_evpn_address_family_p1(request, attribute):
tc_name, result
)
+ expected = {"numL3Vnis": 0}
+ test_func = functools.partial(
+ topotest.router_json_cmp,
+ tgen.gears["d1"],
+ "show bgp l2vpn evpn vni json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=5, wait=3)
+ assert result is None, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
input_dict_2 = {}
for dut in ["d1"]:
temp = {dut: {"bgp": []}}
diff --git a/tests/topotests/isis_topo1/test_isis_topo1.py b/tests/topotests/isis_topo1/test_isis_topo1.py
index fe3d865565..e02fb07daf 100644
--- a/tests/topotests/isis_topo1/test_isis_topo1.py
+++ b/tests/topotests/isis_topo1/test_isis_topo1.py
@@ -12,6 +12,7 @@
"""
test_isis_topo1.py: Test ISIS topology.
"""
+import time
import datetime
import functools
import json
@@ -314,6 +315,107 @@ def test_isis_neighbor_json():
), assertmsg
+def test_isis_neighbor_state():
+ "Check that the neighbor states remain normal when the ISIS type is switched."
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Checking 'show isis neighbor state on a p2p link'")
+
+ # Establish a P2P link.
+ # With the IS-IS type of r3 set to level-1-2 and r5 set to level-1, all
+ # neighbors are expected to exist and be in the Up state.
+ r3 = tgen.gears["r3"]
+ r3.vtysh_cmd(
+ """
+ configure
+ router isis 1
+ no redistribute ipv4 connected level-1
+ no redistribute ipv4 connected level-2
+ no redistribute ipv6 connected level-1
+ no redistribute ipv6 connected level-2
+ interface r3-eth1
+ no isis circuit-type
+ isis network point-to-point
+ end
+ """
+ )
+ r5 = tgen.gears["r5"]
+ r5.vtysh_cmd(
+ """
+ configure
+ router isis 1
+ no redistribute ipv4 connected level-1
+ no redistribute ipv6 connected level-1
+ no redistribute ipv4 table 20 level-1
+ interface r5-eth0
+ no isis circuit-type
+ isis network point-to-point
+ end
+ """
+ )
+ result = _check_isis_neighbor_json("r3", "r5", True, "Up")
+ assert result is True, result
+ result = _check_isis_neighbor_json("r5", "r3", True, "Up")
+ assert result is True, result
+
+ # With the interfering redistribute configuration removed above, switch the
+ # IS-IS type of r3 from level-1-2 to level-2-only while keeping r5 at level-1.
+ # With mismatched levels, some neighbors are expected to be missing or stuck
+ # in the Initializing state.
+ r3.vtysh_cmd(
+ """
+ configure
+ router isis 1
+ is-type level-2-only
+ end
+ """
+ )
+ result = _check_isis_neighbor_json("r3", "r5", False, "Initializing")
+ assert result is True, result
+ result = _check_isis_neighbor_json("r5", "r3", False, "Initializing")
+ assert result is True, result
+
+ # Restore to initial configuration
+ logger.info("Checking 'restore to initial configuration'")
+ r3.vtysh_cmd(
+ """
+ configure
+ interface r3-eth1
+ isis circuit-type level-1
+ no isis network point-to-point
+ router isis 1
+ no is-type
+ redistribute ipv4 connected level-1
+ redistribute ipv4 connected level-2
+ redistribute ipv6 connected level-1
+ redistribute ipv6 connected level-2
+ end
+ """
+ )
+ r5.vtysh_cmd(
+ """
+ configure
+ interface r5-eth0
+ isis circuit-type level-1
+ no isis network point-to-point
+ router isis 1
+ redistribute ipv4 connected level-1
+ redistribute ipv6 connected level-1
+ redistribute ipv4 table 20 level-1
+ end
+ """
+ )
+ result = _check_isis_neighbor_json("r3", "r5", True, "Up")
+ assert result is True, result
+ result = _check_isis_neighbor_json("r5", "r3", True, "Up")
+ assert result is True, result
+
+
def test_isis_database_json():
"Check json struct in show isis database json"
@@ -623,6 +725,65 @@ def test_isis_hello_padding_during_adjacency_formation():
assert result is True, result
+def _check_isis_neighbor_json(
+ self, neighbor, neighbor_expected, neighbor_state_expected
+):
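+ """Check that the neighbor exists (or not) and is in the expected state; "self" is the router name."""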
+ tgen = get_topogen()
+ router = tgen.gears[self]
+ logger.info(
+ f"check_isis_neighbor_json {router} {neighbor} {neighbor_expected} {neighbor_state_expected}"
+ )
+
+ result = _check_isis_neighbor_exist(self, neighbor)
+ if result is True:
+ return _check_isis_neighbor_state(self, neighbor, neighbor_state_expected)
+ elif neighbor_expected:
+ return "{} with expected neighbor {} got none".format(router.name, neighbor)
+ else:
+ return True
+
+
+@retry(retry_timeout=60)
+def _check_isis_neighbor_exist(self, neighbor):
+ tgen = get_topogen()
+ router = tgen.gears[self]
+ logger.info(f"check_isis_neighbor_exist {router} {neighbor}")
+ neighbor_json = router.vtysh_cmd("show isis neighbor json", isjson=True)
+
+ circuits = neighbor_json.get("areas", [{}])[0].get("circuits", [])
+ for circuit in circuits:
+ if "adj" in circuit and circuit["adj"] == neighbor:
+ return True
+
+ return "The neighbor {} of router {} has not been learned yet ".format(
+ neighbor, router.name
+ )
+
+
+@retry(retry_timeout=5)
+def _check_isis_neighbor_state(self, neighbor, neighbor_state_expected):
+ tgen = get_topogen()
+ router = tgen.gears[self]
+ logger.info(
+ f"check_isis_neighbor_state {router} {neighbor} {neighbor_state_expected}"
+ )
+ neighbor_json = router.vtysh_cmd(
+ "show isis neighbor {} json".format(neighbor), isjson=True
+ )
+
+ neighbor_state = None
+ circuits = neighbor_json.get("areas", [{}])[0].get("circuits", [])
+ for circuit in circuits:
+ interface = circuit.get("interface", {})
+ if "state" in interface:
+ neighbor_state = interface["state"]
+ if neighbor_state == neighbor_state_expected:
+ return True
+
+ return "{} peer with expected neighbor_state {} got {} ".format(
+ router.name, neighbor_state_expected, neighbor_state
+ )
+
+
@retry(retry_timeout=10)
def check_last_iih_packet_for_padding(router, expect_padding):
logfilename = "{}/{}".format(router.gearlogdir, "isisd.log")
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index bcd1c74812..329c2b54f5 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -5638,3 +5638,22 @@ def configure_bgp_soft_configuration(tgen, dut, neighbor_dict, direction):
)
)
return True
+
+
+def bgp_configure_prefixes(router, asn, safi, prefixes, vrf=None, update=True):
+ """
+ Configure (update=True) or withdraw (update=False) the given BGP prefixes.
+ """
+ withdraw = "no " if not update else ""
+ vrf = " vrf {}".format(vrf) if vrf else ""
+ for p in prefixes:
+ ip = ipaddress.ip_network(p)
+ cmd = [
+ "conf t\n",
+ f"router bgp {asn}{vrf}\n",
+ f"address-family ipv{ip.version} {safi}\n",
+ f"{withdraw}network {ip}\n",
+ "exit-address-family\n",
+ ]
+ logger.debug(f"setting prefix: ipv{ip.version} {safi} {ip}")
+ router.vtysh_cmd("".join(cmd))
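+
+
+# Usage sketch for bgp_configure_prefixes (hypothetical ASN/prefix values,
+# assuming a topogen gear "r1"):
+#   bgp_configure_prefixes(r1, 65000, "unicast", ["10.10.10.0/24", "2001:db8::/64"])
+#   bgp_configure_prefixes(r1, 65000, "unicast", ["10.10.10.0/24"], update=False)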
diff --git a/tests/topotests/lib/bmp_collector/bgp/update/path_attributes.py b/tests/topotests/lib/bmp_collector/bgp/update/path_attributes.py
index 3694cb4fe3..ca49c405d1 100644
--- a/tests/topotests/lib/bmp_collector/bgp/update/path_attributes.py
+++ b/tests/topotests/lib/bmp_collector/bgp/update/path_attributes.py
@@ -72,6 +72,12 @@ class PathAttribute:
if path_attr_cls == cls.UNKNOWN_ATTR:
return data[offset + attr_len :], None
+ # RFC1771, 4.3 UPDATE Message Format
+ # The path segment length is a 1-octet long field containing
+ # the number of ASs in the path segment value field.
+ if type_code == PATH_ATTR_TYPE_AS_PATH and attr_len == 0:
+ return data[offset:], path_attr_cls.dissect(data[offset : offset + 2])
+
return data[offset + attr_len :], path_attr_cls.dissect(
data[offset : offset + attr_len]
)
diff --git a/tests/topotests/lib/bmp_collector/bmpserver b/tests/topotests/lib/bmp_collector/bmpserver.py
index 56d85fc74b..c42c387563 100755
--- a/tests/topotests/lib/bmp_collector/bmpserver
+++ b/tests/topotests/lib/bmp_collector/bmpserver.py
@@ -5,8 +5,11 @@
# Authored by Farid Mihoub <farid.mihoub@6wind.com>
#
import argparse
+import errno
+import logging
# XXX: something more reliable should be used "Twisted" a great choice.
+import os
import signal
import socket
import sys
@@ -20,11 +23,11 @@ BGP_MAX_SIZE = 4096
# Global variable to track shutdown signal
shutdown = False
-
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--address", type=str, default="0.0.0.0")
parser.add_argument("-p", "--port", type=int, default=1789)
parser.add_argument("-l", "--logfile", type=str, default="/var/log/bmp.log")
+parser.add_argument("-r", "--pidfile", type=str, default="/var/run/bmp.pid")
def handle_signal(signum, frame):
@@ -40,6 +43,74 @@ def timestamp_print(message, file=sys.stderr):
print(f"[{current_time}] {message}", file=file)
+def check_pid(pid):
+ if pid < 0: # user input error
+ return False
+ if pid == 0: # all processes
+ return False
+ try:
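+ # Signal 0 performs error checking only: it tests for the PID's existence without delivering a signal.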
+ os.kill(pid, 0)
+ return True
+ except OSError as err:
+ if err.errno == errno.EPERM: # a process we were denied access to
+ return True
+ if err.errno == errno.ESRCH: # No such process
+ return False
+ # should never happen
+ return False
+
+
+def savepid():
+ ownid = os.getpid()
+
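+ # Open with O_EXCL so a PID file left by a live process is detected atomically.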
+ flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
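+ # os.R_OK (4) and os.W_OK (2) double here as permission bits, composing mode 0o644 (rw-r--r--).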
+ mode = ((os.R_OK | os.W_OK) << 6) | (os.R_OK << 3) | os.R_OK
+
+ try:
+ fd = os.open(pid_file, flags, mode)
+ except OSError:
+ try:
+ pid = open(pid_file, "r").readline().strip()
+ if check_pid(int(pid)):
+ timestamp_print(
+ "PID file %s already exists and the program is still running\n" % pid_file
+ )
+ return False
+ else:
+ # If pid is not running, reopen file without O_EXCL
+ fd = os.open(pid_file, flags ^ os.O_EXCL, mode)
+ except (OSError, IOError, ValueError):
+ timestamp_print(
+ "issue accessing PID file %s (most likely permission or ownership)\n"
+ % pid_file
+ )
+ return False
+
+ try:
+ f = os.fdopen(fd, "w")
+ line = "%d\n" % ownid
+ f.write(line)
+ f.close()
+ except IOError:
+ timestamp_print("Can not create PID file %s\n" % pid_file)
+ return False
+ timestamp_print("Created PID file %s with value %d\n" % (pid_file, ownid))
+ return True
+
+
+def removepid():
+ try:
+ os.remove(pid_file)
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ pass
+ else:
+ timestamp_print("Can not remove PID file %s\n" % pid_file)
+ return
+ timestamp_print("Removed PID file %s\n" % pid_file)
+
+
def main():
global shutdown
@@ -51,8 +122,13 @@ def main():
ADDRESS, PORT = args.address, args.port
LOG_FILE = args.logfile
+ global pid_file
+ pid_file = args.pidfile
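+ # savepid()/removepid() read this module-level path.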
+
timestamp_print(f"Starting bmpserver on {args.address}:{args.port}")
+ savepid()
+
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
@@ -80,9 +156,7 @@ def main():
while len(data) > BMPMsg.MIN_LEN:
data = BMPMsg.dissect(data, log_file=LOG_FILE)
- timestamp_print(
- f"Finished dissecting data from {client_address}"
- )
+ timestamp_print(f"Finished dissecting data from {client_address}")
except Exception as e:
timestamp_print(f"{e}")
@@ -99,6 +173,7 @@ def main():
timestamp_print(f"{e}")
finally:
timestamp_print(f"Server shutting down on {ADDRESS}:{PORT}")
+ removepid()
if __name__ == "__main__":
@@ -106,4 +181,5 @@ if __name__ == "__main__":
sys.exit(main())
except KeyboardInterrupt:
logging.info("BMP server was interrupted and is shutting down.")
+ removepid()
sys.exit(0)
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index 369a794ebc..349b82aab4 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -1720,26 +1720,26 @@ def verify_pim_rp_info(
)
return errormsg
- if not iamRP:
- if rp_json["iAmRP"] == False:
- logger.info(
- "[DUT %s]: Verifying group "
- "and iAmNotRP [PASSED]!!"
- " Found Expected: (%s, %s:%s)",
- dut,
- grp_addr,
- "iAmRP",
- rp_json["iAmRP"],
- )
- else:
- errormsg = (
- "[DUT %s]: Verifying group"
- "%s and iAmRP [FAILED]!! "
- "Expected: (iAmRP: %s),"
- " Found: (iAmRP: %s)"
- % (dut, grp_addr, "false", rp_json["iAmRP"])
- )
- return errormsg
+ if not iamRP:
+ if rp_json["iAmRP"] == False:
+ logger.info(
+ "[DUT %s]: Verifying group "
+ "and iAmNotRP [PASSED]!!"
+ " Found Expected: (%s, %s:%s)",
+ dut,
+ grp_addr,
+ "iAmRP",
+ rp_json["iAmRP"],
+ )
+ else:
+ errormsg = (
+ "[DUT %s]: Verifying group"
+ "%s and iAmRP [FAILED]!! "
+ "Expected: (iAmRP: %s),"
+ " Found: (iAmRP: %s)"
+ % (dut, grp_addr, "false", rp_json["iAmRP"])
+ )
+ return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
diff --git a/tests/topotests/lib/snmptest.py b/tests/topotests/lib/snmptest.py
index 8e2e76d154..6d586cee50 100644
--- a/tests/topotests/lib/snmptest.py
+++ b/tests/topotests/lib/snmptest.py
@@ -104,12 +104,16 @@ class SnmpTester(object):
return None
return self._get_snmp_value(result)
- def walk(self, oid):
+ def walk(self, oid, raw=False):
cmd = "snmpwalk {0} {1} 2>&1 | grep -v SNMPv2-PDU".format(
self._snmp_config(), oid
)
result = self.router.cmd(cmd)
+
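+ # raw=True returns the unparsed snmpwalk output so callers can match error text directly.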
+ if raw:
+ return result
+
return self._parse_multiline(result)
def parse_notif_ipv4(self, notif):
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index 4d7c56423e..0a9a84a4bb 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -1293,18 +1293,19 @@ class TopoBMPCollector(TopoHost):
log_err = os.path.join(log_dir, "bmpserver.log")
log_arg = "-l {}".format(log_file) if log_file else ""
+ self.pid_file = os.path.join(log_dir, "bmpserver.pid")
with open(log_err, "w") as err:
self.run(
- "{}/bmp_collector/bmpserver -a {} -p {} {}&".format(
- CWD, self.ip, self.port, log_arg
+ "{}/bmp_collector/bmpserver.py -a {} -p {} -r {} {}&".format(
+ CWD, self.ip, self.port, self.pid_file, log_arg
),
stdout=None,
stderr=err,
)
def stop(self):
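+ # Kill only the PID recorded at start(); a broad pkill could hit other collectors.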
- self.run("pkill -f bmpserver")
+ self.run(f"kill $(cat {self.pid_file}")
return ""
diff --git a/tests/topotests/mgmt_config/r1/early-end-zebra.conf b/tests/topotests/mgmt_config/r1/early-end-zebra.conf
index 44a2f96825..926540f9bc 100644
--- a/tests/topotests/mgmt_config/r1/early-end-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/early-end-zebra.conf
@@ -1,6 +1,6 @@
allow-external-route-update
end
-ip multicast rpf-lookup-mode urib-only
+router-id 1.2.3.4
end
ip table range 2 3
end \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-end2-zebra.conf b/tests/topotests/mgmt_config/r1/early-end2-zebra.conf
index 37619d52ac..b8514f324f 100644
--- a/tests/topotests/mgmt_config/r1/early-end2-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/early-end2-zebra.conf
@@ -1,7 +1,7 @@
conf t
allow-external-route-update
end
-ip multicast rpf-lookup-mode urib-only
+router-id 1.2.3.4
end
ip table range 2 3
end \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-exit-zebra.conf b/tests/topotests/mgmt_config/r1/early-exit-zebra.conf
index 44f202dbcb..990351685b 100644
--- a/tests/topotests/mgmt_config/r1/early-exit-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/early-exit-zebra.conf
@@ -1,6 +1,6 @@
allow-external-route-update
exit
-ip multicast rpf-lookup-mode urib-only
+router-id 1.2.3.4
exit
ip table range 2 3
exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf b/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf
index c7109bfd39..5a783f4492 100644
--- a/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/early-exit2-zebra.conf
@@ -1,7 +1,7 @@
conf t
allow-external-route-update
exit
-ip multicast rpf-lookup-mode urib-only
+router-id 1.2.3.4
exit
ip table range 2 3
exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/r1/one-exit-zebra.conf b/tests/topotests/mgmt_config/r1/one-exit-zebra.conf
index 0c38459702..c8396fec70 100644
--- a/tests/topotests/mgmt_config/r1/one-exit-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/one-exit-zebra.conf
@@ -1,3 +1,3 @@
allow-external-route-update
exit
-ip multicast rpf-lookup-mode urib-only
+router-id 1.2.3.4
diff --git a/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf b/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf
index 34acb76d92..3a50f6d136 100644
--- a/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf
+++ b/tests/topotests/mgmt_config/r1/one-exit2-zebra.conf
@@ -1,4 +1,4 @@
conf t
allow-external-route-update
exit
-ip multicast rpf-lookup-mode urib-only \ No newline at end of file
+router-id 1.2.3.4 \ No newline at end of file
diff --git a/tests/topotests/mgmt_config/test_config.py b/tests/topotests/mgmt_config/test_config.py
index 1d732223ff..627a564a66 100644
--- a/tests/topotests/mgmt_config/test_config.py
+++ b/tests/topotests/mgmt_config/test_config.py
@@ -153,7 +153,7 @@ def cleanup_config(r1, tempdir, logpath):
yield
r1.cmd_nostatus("vtysh -c 'conf t' -c 'no allow-external-route-update'")
- r1.cmd_nostatus("vtysh -c 'conf t' -c 'no ip multicast rpf-lookup-mode urib-only'")
+ r1.cmd_nostatus("vtysh -c 'conf t' -c 'no router-id 1.2.3.4'")
r1.cmd_nostatus("vtysh -c 'conf t' -c 'no ip table range 2 3'")
logbuf = save_log_snippet(logpath, logbuf, "/dev/null")
@@ -290,9 +290,7 @@ def test_zebra_one_exit_file(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" not in showrun
- ), "zebra second conf present, unexpected"
+ assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected"
def test_zebra_one_exit_redir(r1, confdir, tempdir, logpath):
@@ -307,9 +305,7 @@ def test_zebra_one_exit_redir(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" not in showrun
- ), "zebra second conf present, unexpected"
+ assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected"
def test_zebra_early_exit_file(r1, confdir, tempdir, logpath):
@@ -324,9 +320,7 @@ def test_zebra_early_exit_file(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" not in showrun
- ), "zebra second conf present, unexpected"
+ assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected"
assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected"
@@ -342,9 +336,7 @@ def test_zebra_early_exit_redir(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" not in showrun
- ), "zebra second conf present, unexpected"
+ assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected"
assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected"
@@ -360,9 +352,7 @@ def test_zebra_early_end_file(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" in showrun
- ), "zebra second conf missing"
+ assert "router-id 1.2.3.4" in showrun, "zebra second conf missing"
assert "ip table range 2 3" in showrun, "zebra third missing"
@@ -378,7 +368,5 @@ def test_zebra_early_end_redir(r1, confdir, tempdir, logpath):
showrun = r1.cmd_nostatus("vtysh -c 'show running'")
assert "allow-external-route-update" in showrun, "zebra conf missing"
- assert (
- "ip multicast rpf-lookup-mode urib-only" not in showrun
- ), "zebra second conf present, unexpected"
+ assert "router-id 1.2.3.4" not in showrun, "zebra second conf present, unexpected"
assert "ip table range 2 3" not in showrun, "zebra third conf present, unexpected"
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json
index 948f4e6c23..da2d8e3625 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-default.json
@@ -39,7 +39,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -68,7 +68,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -97,7 +97,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -126,7 +126,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -238,7 +238,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -250,7 +252,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -265,7 +267,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -277,7 +281,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -292,7 +296,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -304,7 +310,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -319,7 +325,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -331,7 +339,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -369,7 +377,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -398,7 +406,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -427,7 +435,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -456,7 +464,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -480,7 +488,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -492,7 +502,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -507,7 +517,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -519,7 +531,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -534,7 +546,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -546,7 +560,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -561,7 +575,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -573,7 +589,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -589,3 +605,4 @@
]
}
}
+
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json
index 30daecf16e..b4abdde465 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-nokey.json
@@ -38,8 +38,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight":1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -67,8 +67,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight":1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -96,8 +96,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight":1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -125,8 +125,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -238,7 +238,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -249,8 +251,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight":1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -265,7 +267,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -276,8 +280,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight":1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -292,7 +296,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -303,8 +309,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -319,7 +325,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -330,8 +338,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -368,8 +376,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -397,8 +405,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -426,8 +434,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -455,8 +463,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -480,7 +488,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -491,8 +501,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -507,7 +517,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -518,8 +530,8 @@
"gateway": "",
"interface": "r1-eth0",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -534,7 +546,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -545,8 +559,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -561,7 +575,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -572,8 +588,8 @@
"gateway": "",
"interface": "r1-eth1",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -623,8 +639,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -652,8 +668,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -681,8 +697,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -710,8 +726,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -823,7 +839,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -834,8 +852,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -850,7 +868,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -861,8 +881,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -877,7 +897,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -888,8 +910,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -904,7 +926,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -915,8 +939,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -953,8 +977,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -982,8 +1006,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1011,8 +1035,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1040,8 +1064,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1065,7 +1089,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1076,8 +1102,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1092,7 +1118,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1103,8 +1131,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1119,7 +1147,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1130,8 +1160,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -1146,7 +1176,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1157,8 +1189,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null],
- "weight": 1
+ "fib": [null],
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json
index cfabd49c45..5d61b9865f 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-red.json
@@ -38,7 +38,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -66,7 +67,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -94,7 +96,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -122,7 +125,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -234,7 +238,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -245,7 +251,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -260,7 +267,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -271,7 +280,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -286,7 +296,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -297,7 +309,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -312,7 +325,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -323,7 +338,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -360,7 +376,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -388,7 +405,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -416,7 +434,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -444,7 +463,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -468,7 +488,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -479,7 +501,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -494,7 +517,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -505,7 +530,8 @@
"gateway": "",
"interface": "r1-eth2",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -520,7 +546,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -531,7 +559,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
@@ -546,7 +575,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -557,7 +588,8 @@
"gateway": "",
"interface": "r1-eth3",
"active": [null],
- "fib": [null]
+ "fib": [null],
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json
index b1124bd7bb..86e67a9e23 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra-ribs.json
@@ -35,7 +35,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -64,7 +64,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -93,7 +93,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -122,7 +122,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -234,7 +234,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -246,7 +248,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -261,7 +263,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -273,7 +277,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -288,7 +292,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -300,7 +306,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -315,7 +321,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -327,7 +335,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -365,7 +373,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -394,7 +402,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -423,7 +431,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -452,7 +460,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -476,7 +484,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -488,7 +498,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -503,7 +513,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -515,7 +527,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -530,7 +542,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -542,7 +556,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -557,7 +571,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -569,7 +585,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json
index 70c8798b31..86e67a9e23 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib-vrf-zebra.json
@@ -234,7 +234,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -261,7 +263,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -288,7 +292,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -315,7 +321,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -476,7 +484,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -503,7 +513,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -530,7 +542,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -557,7 +571,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/oper-results/result-lib.json b/tests/topotests/mgmt_oper/oper-results/result-lib.json
index 0b2a9fa427..b4abdde465 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-lib.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-lib.json
@@ -39,7 +39,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -68,7 +68,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -97,7 +97,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -126,7 +126,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -238,7 +238,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -250,7 +252,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -265,7 +267,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -277,7 +281,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -292,7 +296,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -304,7 +310,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -319,7 +325,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -331,7 +339,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -369,7 +377,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -398,7 +406,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -427,7 +435,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -456,7 +464,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -480,7 +488,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -492,7 +502,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -507,7 +517,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -519,7 +531,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -534,7 +546,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -546,7 +560,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -561,7 +575,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -573,7 +589,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -624,7 +640,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -653,7 +669,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -682,7 +698,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -711,7 +727,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -823,7 +839,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -835,7 +853,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -850,7 +868,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -862,7 +882,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -877,7 +897,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -889,7 +911,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -904,7 +926,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -916,7 +940,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -954,7 +978,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -983,7 +1007,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1012,7 +1036,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1041,7 +1065,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1065,7 +1089,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1077,7 +1103,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1092,7 +1118,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1104,7 +1132,7 @@
"interface": "r1-eth2",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1119,7 +1147,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1131,7 +1161,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -1146,7 +1176,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -1158,7 +1190,7 @@
"interface": "r1-eth3",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json
index 769c1f73a5..e313a158a3 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-ipv4-unicast.json
@@ -35,7 +35,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -64,7 +64,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -93,7 +93,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -122,7 +122,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json
index c740f592f7..86e67a9e23 100644
--- a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json
+++ b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-nokeys.json
@@ -35,7 +35,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -64,7 +64,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -93,7 +93,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -122,7 +122,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -234,7 +234,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -246,7 +248,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -261,7 +263,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -273,7 +277,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -288,7 +292,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -300,7 +306,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -315,7 +321,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -327,7 +335,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -365,7 +373,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -394,7 +402,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -423,7 +431,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -476,7 +484,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -488,7 +498,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -503,7 +513,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -515,7 +527,7 @@
"interface": "r1-eth0",
"active": [null],
"fib": [null],
-                              "weight":1
+ "weight": 1
}
]
}
@@ -530,7 +542,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -542,7 +556,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
@@ -557,7 +571,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -569,7 +585,7 @@
"interface": "r1-eth1",
"active": [null],
"fib": [null],
- "weight":1
+ "weight": 1
}
]
}
diff --git a/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-route-nokey.json b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-route-nokey.json
new file mode 100644
index 0000000000..e313a158a3
--- /dev/null
+++ b/tests/topotests/mgmt_oper/oper-results/result-ribs-rib-route-nokey.json
@@ -0,0 +1,229 @@
+{
+ "frr-vrf:lib": {
+ "vrf": [
+ {
+ "name": "default",
+ "frr-zebra:zebra": {
+ "ribs": {
+ "rib": [
+ {
+ "afi-safi-name": "frr-routing:ipv4-unicast",
+ "table-id": 254,
+ "route": [
+ {
+ "prefix": "0.0.0.0/0"
+ },
+ {
+ "prefix": "1.1.1.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "1.1.1.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.0/24",
+ "route-entry": [
+ {
+ "protocol": "connected",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "2.2.2.1/32",
+ "route-entry": [
+ {
+ "protocol": "local",
+ "distance": 0,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 8,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ifindex",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.0.0.0/8",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "blackhole",
+ "vrf": "rubout",
+ "gateway": "",
+ "interface": " ",
+ "bh-type": "null",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "11.11.11.11/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "1.1.1.2",
+ "interface": "r1-eth0",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "prefix": "12.12.12.12/32",
+ "route-entry": [
+ {
+ "protocol": "static",
+ "distance": 1,
+ "metric": 0,
+ "selected": [null],
+ "installed": [null],
+ "internal-flags": 73,
+ "internal-status": 16,
+ "uptime": "rubout",
+ "nexthop-group": {
+ "id": "rubout",
+ "nexthop": [
+ {
+ "nh-type": "ip4-ifindex",
+ "vrf": "rubout",
+ "gateway": "2.2.2.2",
+ "interface": "r1-eth1",
+ "active": [null],
+ "fib": [null],
+ "weight": 1
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}
+
diff --git a/tests/topotests/mgmt_oper/oper.py b/tests/topotests/mgmt_oper/oper.py
index f54e64ae18..bca452d011 100644
--- a/tests/topotests/mgmt_oper/oper.py
+++ b/tests/topotests/mgmt_oper/oper.py
@@ -77,7 +77,13 @@ def _do_oper_test(tgen, qr, seconds_left=None):
# Don't use this for now.
dd_json_cmp = None
- expected = open(qr[1], encoding="ascii").read()
+ if isinstance(qr[1], str):
+ expected = open(qr[1], encoding="ascii").read()
+ expected_alt = None
+ else:
+ expected = open(qr[1][0], encoding="ascii").read()
+ expected_alt = open(qr[1][1], encoding="ascii").read()
+
output = r1.cmd_nostatus(qcmd.format(qr[0], qr[2] if len(qr) > 2 else ""))
diag = logging.debug if seconds_left else logging.warning
@@ -90,6 +96,7 @@ def _do_oper_test(tgen, qr, seconds_left=None):
try:
ejson = json.loads(expected)
+ ejson_alt = json.loads(expected_alt) if expected_alt is not None else None
except json.decoder.JSONDecodeError as error:
logging.error(
"Error decoding json exp result: %s\noutput:\n%s", error, expected
@@ -99,6 +106,8 @@ def _do_oper_test(tgen, qr, seconds_left=None):
if dd_json_cmp:
cmpout = json_cmp(ojson, ejson, exact_match=True)
+ if cmpout and ejson_alt is not None:
+ cmpout = json_cmp(ojson, ejson_alt, exact_match=True)
if cmpout:
diag(
"-------DIFF---------\n%s\n---------DIFF----------",
@@ -106,6 +115,8 @@ def _do_oper_test(tgen, qr, seconds_left=None):
)
else:
cmpout = tt_json_cmp(ojson, ejson, exact=True)
+ if cmpout and ejson_alt is not None:
+ cmpout = tt_json_cmp(ojson, ejson_alt, exact=True)
if cmpout:
diag(
"-------EXPECT--------\n%s\n------END-EXPECT------",
@@ -118,6 +129,7 @@ def _do_oper_test(tgen, qr, seconds_left=None):
diag("----diff---\n{}".format(cmpout))
diag("Command: {}".format(qcmd.format(qr[0], qr[2] if len(qr) > 2 else "")))
diag("File: {}".format(qr[1]))
+ cmpout = str(cmpout)
return cmpout
@@ -127,7 +139,8 @@ def do_oper_test(tgen, query_results):
step(f"Perform query '{qr[0]}'", reset=reset)
if reset:
reset = False
- _do_oper_test(tgen, qr)
+ ret = _do_oper_test(tgen, qr)
+ assert ret is None, "Unexpected diff: " + str(ret)
def get_ip_networks(super_prefix, count):
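The oper.py change above lets a query's expected-result slot be either a
single filename or a (primary, alternate) pair, trying the alternate only
when the primary comparison fails. A minimal standalone sketch of that
selection logic (names here are illustrative, assuming a json_cmp() that
returns None on a match, as lib.topotest's does):

    # Sketch only: mirrors the primary/alternate expectation fallback
    # added to _do_oper_test() above.
    def load_expected(spec):
        """spec is either a filename or a (primary, alternate) tuple."""
        if isinstance(spec, str):
            return spec, None
        return spec[0], spec[1]

    def compare_with_fallback(ojson, ejson, ejson_alt, json_cmp):
        cmpout = json_cmp(ojson, ejson, exact_match=True)
        if cmpout and ejson_alt is not None:
            # Primary mismatched; accept the alternate (e.g. libyang3 output).
            cmpout = json_cmp(ojson, ejson_alt, exact_match=True)
        return cmpout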
diff --git a/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim-empty-label.json b/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim-empty-label.json
new file mode 100644
index 0000000000..efd7e8c684
--- /dev/null
+++ b/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim-empty-label.json
@@ -0,0 +1,3 @@
+{
+ "frr-zebra:evpn-mh": {}
+}
diff --git a/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim.json b/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim.json
index efd7e8c684..2c63c08510 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-intf-eth0-wd-trim.json
@@ -1,3 +1,2 @@
{
- "frr-zebra:evpn-mh": {}
}
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json
index f85b163bd6..19295870d5 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-default.json
@@ -121,7 +121,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -148,7 +150,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json
index e2cfec9724..f0bde048f2 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-nokey.json
@@ -121,7 +121,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -148,7 +150,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -282,7 +286,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -309,7 +315,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json
index 3567f35a34..8b632bac66 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-red.json
@@ -92,7 +92,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -119,7 +121,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json
index d9ca58d25d..678a80ab97 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra-ribs.json
@@ -117,7 +117,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -144,7 +146,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json
index d9ca58d25d..678a80ab97 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib-vrf-zebra.json
@@ -117,7 +117,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -144,7 +146,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-lib.json b/tests/topotests/mgmt_oper/simple-results/result-lib.json
index e2cfec9724..f0bde048f2 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-lib.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-lib.json
@@ -121,7 +121,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -148,7 +150,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -282,7 +286,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -309,7 +315,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json
index d9ca58d25d..678a80ab97 100644
--- a/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json
+++ b/tests/topotests/mgmt_oper/simple-results/result-ribs-rib-nokeys.json
@@ -117,7 +117,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
@@ -144,7 +146,9 @@
"distance": 0,
"metric": 0,
"selected": [null],
+ "installed": [null],
"internal-flags": 8,
+ "internal-status": 16,
"uptime": "rubout",
"nexthop-group": {
"id": "rubout",
diff --git a/tests/topotests/mgmt_oper/test_oper.py b/tests/topotests/mgmt_oper/test_oper.py
index e4ceabf352..23529bc75e 100644
--- a/tests/topotests/mgmt_oper/test_oper.py
+++ b/tests/topotests/mgmt_oper/test_oper.py
@@ -107,6 +107,7 @@ vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ri
for f in ${resdir}/result-*; do
sed -i -e 's/"uptime": ".*"/"uptime": "rubout"/;s/"id": [0-9][0-9]*/"id": "rubout"/' $f
+ sed -i -e 's/"phy-address": ".*"/"phy-address": "rubout"/' $f
sed -i -e 's/"if-index": [0-9][0-9]*/"if-index": "rubout"/' $f
sed -i -e 's,"vrf": "[0-9]*","vrf": "rubout",' $f
done
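These sed rules normalize fields that vary from run to run before results
are compared. For reference, an equivalent normalization in Python
(illustrative only, not part of the test) would be:

    import re

    def rubout(text):
        # Blank out volatile fields so saved results stay stable.
        text = re.sub(r'"uptime": ".*"', '"uptime": "rubout"', text)
        text = re.sub(r'"id": [0-9][0-9]*', '"id": "rubout"', text)
        text = re.sub(r'"phy-address": ".*"', '"phy-address": "rubout"', text)
        text = re.sub(r'"if-index": [0-9][0-9]*', '"if-index": "rubout"', text)
        text = re.sub(r'"vrf": "[0-9]*"', '"vrf": "rubout"', text)
        return text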
diff --git a/tests/topotests/mgmt_oper/test_simple.py b/tests/topotests/mgmt_oper/test_simple.py
index 2b3d6ff6a5..237f7d57d5 100644
--- a/tests/topotests/mgmt_oper/test_simple.py
+++ b/tests/topotests/mgmt_oper/test_simple.py
@@ -154,7 +154,11 @@ def test_oper_simple(tgen):
),
(
'/frr-interface:lib/interface[name="r1-eth0"]/frr-zebra:zebra/evpn-mh',
- "simple-results/result-intf-eth0-wd-trim.json",
+ (
+ # Output is different between libyang2 and libyang3+
+ "simple-results/result-intf-eth0-wd-trim.json",
+ "simple-results/result-intf-eth0-wd-trim-empty-label.json",
+ ),
"with-config exact with-defaults trim",
),
(
@@ -181,7 +185,7 @@ vtysh -c 'show mgmt get-data /frr-vrf:lib' > ${resdir}/result-lib.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf' > ${resdir}/result-lib-vrf-nokey.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]' > ${resdir}/result-lib-vrf-default.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="red"]' > ${resdir}/result-lib-vrf-red.json
-vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra' > ${resdir}/result-lib-vrf-ebra.json
+vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra' > ${resdir}/result-lib-vrf-zebra.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs' > ${resdir}/result-lib-vrf-zebra-ribs.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib' > ${resdir}/result-ribs-rib-nokeys.json
vtysh -c 'show mgmt get-data /frr-vrf:lib/vrf[name="default"]/frr-zebra:zebra/ribs/rib[afi-safi-name="frr-routing:ipv4-unicast"][table-id="254"]' > ${resdir}/result-ribs-rib-ipv4-unicast.json
diff --git a/tests/topotests/msdp_topo1/r1/pimd.conf b/tests/topotests/msdp_topo1/r1/pimd.conf
index 3c116a003b..1548aed05e 100644
--- a/tests/topotests/msdp_topo1/r1/pimd.conf
+++ b/tests/topotests/msdp_topo1/r1/pimd.conf
@@ -22,5 +22,7 @@ ip pim rp 10.254.254.1
ip pim join-prune-interval 5
!
router pim
+ msdp log neighbor-events
+ msdp log sa-events
msdp peer 192.168.0.2 password 1234
!
diff --git a/tests/topotests/msdp_topo1/test_msdp_topo1.py b/tests/topotests/msdp_topo1/test_msdp_topo1.py
index ff80052d26..5143ef67a5 100755
--- a/tests/topotests/msdp_topo1/test_msdp_topo1.py
+++ b/tests/topotests/msdp_topo1/test_msdp_topo1.py
@@ -17,6 +17,7 @@ import os
import sys
import json
from functools import partial
+import re
import pytest
# Save the Current Working Directory to find configuration files.
@@ -510,6 +511,104 @@ def test_msdp_sa_filter():
assert val is None, "multicast route convergence failure"
+def test_msdp_sa_limit():
+ "Test MSDP SA limiting."
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["r4"].vtysh_cmd(
+ """
+ configure terminal
+ router pim
+ msdp log sa-events
+ msdp peer 192.168.2.1 sa-limit 4
+ msdp peer 192.168.3.1 sa-limit 4
+ """
+ )
+
+ # Flow from r1 -> r4
+ for multicast_address in [
+ "229.1.2.10",
+ "229.1.2.11",
+ "229.1.2.12",
+ "229.1.2.13",
+ "229.1.2.14",
+ ]:
+ app_helper.run("h1", [multicast_address, "h1-eth0"])
+ app_helper.run("h2", ["--send=0.7", multicast_address, "h2-eth0"])
+
+ def test_sa_limit_log():
+ r4_log = tgen.gears["r4"].net.getLog("log", "pimd")
+ return re.search(r"MSDP peer .+ reject SA (.+, .+): SA limit \d+ of 4", r4_log)
+
+ _, val = topotest.run_and_expect(test_sa_limit_log, None, count=30, wait=1)
+ assert val is None, "SA limit check failed"
+
+
+def test_msdp_log_events():
+ "Test that the enabled logs are working as expected."
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1_log = tgen.gears["r1"].net.getLog("log", "pimd")
+
+ # Look up for informational messages that should have been enabled.
+ match = re.search("MSDP peer 192.168.1.2 state changed to established", r1_log)
+ assert match is not None
+
+ match = re.search(r"MSDP SA \(192.168.10.100\,229.1.2.3\) created", r1_log)
+ assert match is not None
+
+
+def test_msdp_shutdown():
+ "Shutdown MSDP sessions between r1, r2, r3, then check the state."
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["r1"].vtysh_cmd(
+ """
+ configure terminal
+ router pim
+ msdp shutdown
+ """
+ )
+
+ r1_expect = {
+ "192.168.0.2": {
+ "state": "inactive",
+ },
+ "192.168.1.2": {
+ "state": "inactive",
+ },
+ }
+ r2_expect = {
+ "192.168.0.1": {
+ "state": "listen",
+ }
+ }
+ r3_expect = {
+ "192.168.1.1": {
+ "state": "listen",
+ }
+ }
+ for router in [("r1", r1_expect), ("r2", r2_expect), ("r3", r3_expect)]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[router[0]],
+ "show ip msdp peer json",
+ router[1],
+ )
+ logger.info("Waiting for {} msdp peer data".format(router[0]))
+ _, val = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert val is None, "multicast route convergence failure"
+
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
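The sa-limit test above keys off a single pimd log message; a minimal
sketch of the check, reusing the regex from the test:

    import re

    # Sketch: true once pimd has rejected an SA because the peer's
    # configured "sa-limit 4" was exceeded.
    def sa_limit_hit(pimd_log):
        return re.search(
            r"MSDP peer .+ reject SA (.+, .+): SA limit \d+ of 4", pimd_log
        ) is not None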
diff --git a/tests/topotests/msdp_topo3/__init__.py b/tests/topotests/msdp_topo3/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/msdp_topo3/__init__.py
diff --git a/tests/topotests/msdp_topo3/r1/frr.conf b/tests/topotests/msdp_topo3/r1/frr.conf
new file mode 100644
index 0000000000..d5b10bf8a1
--- /dev/null
+++ b/tests/topotests/msdp_topo3/r1/frr.conf
@@ -0,0 +1,31 @@
+log commands
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+ ip pim
+!
+interface r1-eth1
+ ip address 192.168.100.1/24
+ ip igmp
+ ip pim passive
+!
+interface lo
+ ip address 10.254.254.1/32
+ ip pim
+ ip pim use-source 10.254.254.1
+!
+router bgp 65100
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.1.2 remote-as 65200
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
+router pim
+ msdp originator-id 10.254.254.1
+ msdp log sa-events
+ msdp peer 192.168.1.2 source 192.168.1.1
+ rp 192.168.1.1
+!
\ No newline at end of file
diff --git a/tests/topotests/msdp_topo3/r2/frr.conf b/tests/topotests/msdp_topo3/r2/frr.conf
new file mode 100644
index 0000000000..245c061874
--- /dev/null
+++ b/tests/topotests/msdp_topo3/r2/frr.conf
@@ -0,0 +1,28 @@
+log commands
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+ ip pim
+!
+interface r2-eth1
+ ip address 192.168.101.1/24
+ ip igmp
+ ip pim passive
+!
+interface lo
+ ip address 10.254.254.2/32
+!
+router bgp 65200
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.1.1 remote-as 65100
+ !
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
+router pim
+ msdp log sa-events
+ msdp peer 192.168.1.1 source 192.168.1.2
+ rp 192.168.1.2
+!
\ No newline at end of file
diff --git a/tests/topotests/msdp_topo3/test_msdp_topo3.py b/tests/topotests/msdp_topo3/test_msdp_topo3.py
new file mode 100644
index 0000000000..9393ae7ffd
--- /dev/null
+++ b/tests/topotests/msdp_topo3/test_msdp_topo3.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_msdp_topo3.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2024 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+test_msdp_topo3.py: Test the FRR PIM MSDP peer.
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import re
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+
+# Required to instantiate the topology builder class.
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+from lib.pim import McastTesterHelper
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
+
+app_helper = McastTesterHelper()
+
+
+def build_topo(tgen):
+ """
+ +----+ +----+ +----+ +----+
+ | h1 | <-> | r1 | <-> | r2 | <-> | h2 |
+ +----+ +----+ +----+ +----+
+
+ -------------------------->
+
+ Multicast traffic SG(192.168.100.100, 229.1.1.1)
+ """
+
+ # Create 2 routers
+ for routern in range(1, 3):
+ tgen.add_router(f"r{routern}")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+    # Create a host connected directly to r1:
+ switch = tgen.add_switch("s2")
+ tgen.add_host("h1", "192.168.100.100/24", "via 192.168.100.1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["h1"])
+
+    # Create a host connected directly to r2:
+ switch = tgen.add_switch("s3")
+ tgen.add_host("h2", "192.168.101.100/24", "via 192.168.101.1")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["h2"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for _, router in router_list.items():
+ router.load_frr_config(os.path.join(CWD, f"{router.name}/frr.conf"))
+
+ # Initialize all routers.
+ tgen.start_router()
+
+ app_helper.init(tgen)
+
+
+def teardown_module():
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ app_helper.cleanup()
+ tgen.stop_topology()
+
+
+def test_bgp_convergence():
+ "Wait for BGP protocol convergence"
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("waiting for protocols to converge")
+
+ def expect_loopback_route(router, iptype, route, proto):
+ "Wait until route is present on RIB for protocol."
+ logger.info("waiting route {} in {}".format(route, router))
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[router],
+ "show {} route json".format(iptype),
+ {route: [{"protocol": proto}]},
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
+ assertmsg = '"{}" convergence failure'.format(router)
+ assert result is None, assertmsg
+
+ # Wait for R1
+ expect_loopback_route("r1", "ip", "10.254.254.2/32", "bgp")
+
+ # Wait for R2
+ expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp")
+
+
+def test_sa_learn():
+ """
+ Test that the learned SA uses the configured originator ID instead
+ of the configured RP.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ MCAST_ADDRESS = "229.1.1.1"
+ app_helper.run("h1", ["--send=0.7", MCAST_ADDRESS, "h1-eth0"])
+ app_helper.run("h2", [MCAST_ADDRESS, "h2-eth0"])
+
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears["r2"],
+ "show ip msdp sa json",
+ {
+ "229.1.1.1": {
+ "192.168.100.100": {
+ "rp": "10.254.254.1",
+ "local": "no",
+ }
+ }
+        },
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=100, wait=1)
+    assert result is None, "r2 SA convergence failure"
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
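The essential assertion in test_sa_learn() is that the SA learned on r2
reports the peer's configured originator ID rather than its PIM RP
address. Stripped of the polling wrapper, the check amounts to this
sketch (field names as in 'show ip msdp sa json'):

    # Sketch: the learned SA must carry the originator-id
    # (10.254.254.1), not the PIM RP address (192.168.1.1).
    sa = json.loads(r2.vtysh_cmd("show ip msdp sa json"))
    entry = sa["229.1.1.1"]["192.168.100.100"]
    assert entry["rp"] == "10.254.254.1"
    assert entry["local"] == "no"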
diff --git a/tests/topotests/ospf_instance_redistribute/test_ospf_instance_redistribute.py b/tests/topotests/ospf_instance_redistribute/test_ospf_instance_redistribute.py
index 6f9a58b195..5e30cbd013 100644
--- a/tests/topotests/ospf_instance_redistribute/test_ospf_instance_redistribute.py
+++ b/tests/topotests/ospf_instance_redistribute/test_ospf_instance_redistribute.py
@@ -82,6 +82,37 @@ def test_install_sharp_instance_routes():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ r1 = tgen.gears["r1"]
+ logger.info("Ensure that connected routes are actually installed")
+ expected = {
+ "192.168.100.0/24": [
+ {
+ "prefix": "192.168.100.0/24",
+ "prefixLen": 24,
+ "protocol": "connected",
+ "vrfName": "default",
+ "selected": True,
+ "destSelected": True,
+ "installed": True,
+ "nexthops": [
+ {
+ "fib": True,
+ "directlyConnected": True,
+ "interfaceName": "lo",
+ "active": True,
+ "weight": 1,
+ }
+ ],
+ }
+ ]
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route connected json", expected
+ )
+
+    _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+    assert result is None, "connected routes not installed on r1"
+
logger.info("Installing sharp routes")
r1 = tgen.gears["r1"]
r1.vtysh_cmd("sharp install route 4.5.6.7 nexthop 192.168.100.2 1")
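A note on the polling idiom used throughout these tests:
topotest.run_and_expect(func, None, count, wait) keeps calling func until
it returns the expected value (None, i.e. router_json_cmp found no
mismatch) or the retries run out. In outline (a sketch of the library's
behavior, not its exact source):

    import time

    def run_and_expect(func, what, count=20, wait=3):
        # Retry until func() returns `what` or attempts are exhausted;
        # return (success, last result) as lib.topotest does.
        result = None
        for _ in range(count):
            result = func()
            if result == what:
                return True, result
            time.sleep(wait)
        return False, result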
diff --git a/tests/topotests/ospf_metric_propagation/h1/frr.conf b/tests/topotests/ospf_metric_propagation/h1/frr.conf
index 1196a192dd..b8d1834e24 100644
--- a/tests/topotests/ospf_metric_propagation/h1/frr.conf
+++ b/tests/topotests/ospf_metric_propagation/h1/frr.conf
@@ -1,10 +1,8 @@
!
hostname h1
-password zebra
-log file /tmp/h1-frr.log
!
ip route 0.0.0.0/0 10.0.91.1
!
interface h1-eth0
ip address 10.0.91.2/24
-!
\ No newline at end of file
+!
diff --git a/tests/topotests/ospf_metric_propagation/h2/frr.conf b/tests/topotests/ospf_metric_propagation/h2/frr.conf
index f951fe6ba1..4377256261 100644
--- a/tests/topotests/ospf_metric_propagation/h2/frr.conf
+++ b/tests/topotests/ospf_metric_propagation/h2/frr.conf
@@ -1,10 +1,8 @@
!
hostname h2
-password zebra
-log file /tmp/h2-frr.log
!
ip route 0.0.0.0/0 10.0.94.4
!
interface h2-eth0
ip address 10.0.94.2/24
-!
\ No newline at end of file
+!
diff --git a/tests/topotests/ospf_metric_propagation/r1/frr.conf b/tests/topotests/ospf_metric_propagation/r1/frr.conf
index 4966e6a9da..082f7df519 100644
--- a/tests/topotests/ospf_metric_propagation/r1/frr.conf
+++ b/tests/topotests/ospf_metric_propagation/r1/frr.conf
@@ -1,8 +1,5 @@
!
hostname r1
-password zebra
-log file /tmp/r1-frr.log
-ip forwarding
!
interface r1-eth0
ip address 10.0.1.1/24
@@ -93,4 +90,4 @@ route-map costplus permit 30
route-map costplus permit 40
set metric-type type-1
set metric +1
- exit
\ No newline at end of file
+ exit
diff --git a/tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json b/tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json
new file mode 100644
index 0000000000..628a556c62
--- /dev/null
+++ b/tests/topotests/ospf_metric_propagation/r1/show_ip_route_static.json
@@ -0,0 +1,50 @@
+{
+ "10.48.48.0/24":[
+ {
+ "prefix":"10.48.48.0/24",
+ "prefixLen":24,
+ "protocol":"ospf",
+ "vrfId":0,
+ "vrfName":"default",
+ "distance":20,
+ "metric":134,
+ "table":254,
+ "nexthops":[
+ {
+ "flags":3,
+ "fib":true,
+ "ip":"10.0.1.2",
+ "afi":"ipv4",
+ "interfaceName":"r1-eth0",
+ "active":true,
+ "weight":1
+ }
+ ]
+ },
+ {
+ "prefix":"10.48.48.0/24",
+ "prefixLen":24,
+ "protocol":"bgp",
+ "vrfId":0,
+ "vrfName":"default",
+ "selected":true,
+ "destSelected":true,
+ "distance":20,
+ "metric":34,
+ "installed":true,
+ "table":254,
+ "nexthops":[
+ {
+ "flags":3,
+ "fib":true,
+ "ip":"10.0.10.5",
+ "afi":"ipv4",
+ "interfaceName":"r1-eth1",
+ "vrf":"blue",
+ "active":true,
+ "weight":1
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tests/topotests/ospf_metric_propagation/r2/frr.conf b/tests/topotests/ospf_metric_propagation/r2/frr.conf
index 0ac5001b1b..e6ced31d87 100644
--- a/tests/topotests/ospf_metric_propagation/r2/frr.conf
+++ b/tests/topotests/ospf_metric_propagation/r2/frr.conf
@@ -1,8 +1,5 @@
!
hostname r2
-password zebra
-log file /tmp/r2-frr.log
-ip forwarding
!
interface r2-eth0
ip address 10.0.1.2/24
diff --git a/tests/topotests/ospf_metric_propagation/r3/frr.conf b/tests/topotests/ospf_metric_propagation/r3/frr.conf
index 0859173f79..f6989057f6 100644
--- a/tests/topotests/ospf_metric_propagation/r3/frr.conf
+++ b/tests/topotests/ospf_metric_propagation/r3/frr.conf
@@ -1,8 +1,5 @@
!
hostname r3
-password zebra
-log file /tmp/r3-frr.log
-ip forwarding
!
interface r3-eth0
ip address 10.0.3.3/24
diff --git a/tests/topotests/ospf_metric_propagation/r4/frr.conf b/tests/topotests/ospf_metric_propagation/r4/frr.conf
index 743da27272..d9832d80b8 100644
--- a/tests/topotests/ospf_metric_propagation/r4/frr.conf
+++ b/tests/topotests/ospf_metric_propagation/r4/frr.conf
@@ -1,9 +1,10 @@
!
hostname r4
-password zebra
-log file /tmp/r4-frr.log
-ip forwarding
!
+vrf green
+ ip route 10.48.48.0/24 10.0.94.2
+exit
+
interface r4-eth0
ip address 10.0.3.4/24
ip ospf cost 100
@@ -62,6 +63,7 @@ router bgp 99 vrf green
address-family ipv4 unicast
redistribute connected
redistribute ospf
+ redistribute static
import vrf route-map rmap
import vrf default
import vrf blue
diff --git a/tests/topotests/ospf_metric_propagation/ra/frr.conf b/tests/topotests/ospf_metric_propagation/ra/frr.conf
index 2434faeabc..5ad819da01 100644
--- a/tests/topotests/ospf_metric_propagation/ra/frr.conf
+++ b/tests/topotests/ospf_metric_propagation/ra/frr.conf
@@ -1,8 +1,5 @@
!
hostname ra
-password zebra
-log file /tmp/ra-frr.log
-ip forwarding
!
interface ra-eth0
ip address 10.0.50.5/24
diff --git a/tests/topotests/ospf_metric_propagation/rb/frr.conf b/tests/topotests/ospf_metric_propagation/rb/frr.conf
index b83532a840..21abefed26 100644
--- a/tests/topotests/ospf_metric_propagation/rb/frr.conf
+++ b/tests/topotests/ospf_metric_propagation/rb/frr.conf
@@ -1,8 +1,5 @@
!
hostname rb
-password zebra
-log file /tmp/rb-frr.log
-ip forwarding
!
interface rb-eth0
ip address 10.0.50.6/24
diff --git a/tests/topotests/ospf_metric_propagation/rc/frr.conf b/tests/topotests/ospf_metric_propagation/rc/frr.conf
index dd8077c394..0e6edd92a8 100644
--- a/tests/topotests/ospf_metric_propagation/rc/frr.conf
+++ b/tests/topotests/ospf_metric_propagation/rc/frr.conf
@@ -1,8 +1,5 @@
!
hostname rc
-password zebra
-log file /tmp/rc-frr.log
-ip forwarding
!
interface rc-eth0
ip address 10.0.70.7/24
diff --git a/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py b/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py
index b97b86bff9..4639a1e26b 100644
--- a/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py
+++ b/tests/topotests/ospf_metric_propagation/test_ospf_metric_propagation.py
@@ -190,6 +190,25 @@ def test_all_links_up():
assert result is None, assertmsg
+def test_static_remote():
+    "Test that the static route configured on R4 is seen at R1"
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ r1 = tgen.gears["r1"]
+ json_file = "{}/r1/show_ip_route_static.json".format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route 10.48.48.2 json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+
+ assertmsg = "r1 JSON output mismatches"
+ assert result is None, assertmsg
+
+
def test_link_1_down():
"Test path R1 -> R2 -> Ra -> Rb -> R4"
tgen = get_topogen()
diff --git a/tests/topotests/ospf_multi_instance/r1/frr.conf b/tests/topotests/ospf_multi_instance/r1/frr.conf
new file mode 100644
index 0000000000..b9752c63ad
--- /dev/null
+++ b/tests/topotests/ospf_multi_instance/r1/frr.conf
@@ -0,0 +1,16 @@
+!
+hostname r1
+!
+interface lo
+ ip address 1.1.1.1/32
+ ip ospf area 0
+!
+interface r1-eth0
+ ip address 10.1.1.1/24
+ ip ospf area 0
+!
+!
+router ospf
+ ospf router-id 1.1.1.1
+ distance 20
+!
diff --git a/tests/topotests/ospf_multi_instance/r2/frr.conf b/tests/topotests/ospf_multi_instance/r2/frr.conf
new file mode 100644
index 0000000000..8501e0edc0
--- /dev/null
+++ b/tests/topotests/ospf_multi_instance/r2/frr.conf
@@ -0,0 +1,37 @@
+!
+hostname r2
+password zebra
+! debug ospf event
+! debug ospf lsa
+! debug ospf default-information
+! debug ospf zebra redistribute
+
+ip forwarding
+!
+interface lo1
+ ip address 2.2.2.1/32
+ ip ospf 1 area 0
+ no shut
+!
+interface lo2
+ ip address 2.2.2.2/32
+ ip ospf 2 area 0
+ no shut
+!
+interface r2-eth0
+ ip address 10.1.1.2/24
+ ip ospf 1 area 0
+!
+interface r2-eth1
+ ip address 10.1.2.2/24
+ ip ospf 2 area 0
+!
+router ospf 1
+ ospf router-id 2.2.2.1
+ distance 20
+!
+router ospf 2
+ ospf router-id 2.2.2.2
+ distance 20
+!
+
diff --git a/tests/topotests/ospf_multi_instance/r3/frr.conf b/tests/topotests/ospf_multi_instance/r3/frr.conf
new file mode 100644
index 0000000000..e6f681a462
--- /dev/null
+++ b/tests/topotests/ospf_multi_instance/r3/frr.conf
@@ -0,0 +1,16 @@
+!
+hostname r3
+!
+interface lo
+ ip address 3.3.3.1/32
+ ip ospf area 0
+!
+interface r3-eth0
+ ip address 10.1.2.3/24
+ ip ospf area 0
+!
+!
+router ospf
+ ospf router-id 3.3.3.1
+ distance 20
+!
diff --git a/tests/topotests/ospf_multi_instance/test_ospf_multi_instance.py b/tests/topotests/ospf_multi_instance/test_ospf_multi_instance.py
new file mode 100644
index 0000000000..de44140c09
--- /dev/null
+++ b/tests/topotests/ospf_multi_instance/test_ospf_multi_instance.py
@@ -0,0 +1,403 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_ospf_multi_instance.py
+#
+# Copyright (c) 2024 LabN Consulting
+# Acee Lindem
+#
+
+import os
+import sys
+from functools import partial
+import pytest
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+from lib.common_config import (
+ step,
+ create_interface_in_kernel,
+)
+
+
+"""
+test_ospf_multi_instance.py: Test OSPF multi-instance default route origination
+"""
+
+TOPOLOGY = """
+
+ +---------+ +--------------------+ +---------+
+ | r1 | | r2 | r2 | | r3 |
+ | | | ospf 1 | ospf 2 | | |
+ | 1.1.1.1 | eth0 eth0| 2.2.2.1 | 2.2.2.2 |eth1 eth0| 3.3.3.1 |
+ | +-------------+ | +-------------+ |
+ | | 10.1.1.0/24 | | | 10.1.2.0/24 | |
+ +---------+ +--------------------+ +---------+
+
+
+"""
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# Required to instantiate the topology builder class.
+
+pytestmark = [pytest.mark.ospfd, pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 3 routers
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r3")
+
+    # Interconnect routers 1 and 2 (0)
+ switch = tgen.add_switch("s1-1-2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+    # Interconnect routers 2 and 3 (1)
+ switch = tgen.add_switch("s2-2-3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+
+ # Add more loopbacks to r2
+ create_interface_in_kernel(
+ tgen, "r2", "lo1", "2.2.2.1", netmask="255.255.255.255", create=True
+ )
+ create_interface_in_kernel(
+ tgen, "r2", "lo2", "2.2.2.2", netmask="255.255.255.255", create=True
+ )
+
+
+def setup_module(mod):
+ logger.info("OSPF Multi-Instance:\n {}".format(TOPOLOGY))
+
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ # Starting Routers
+ router_list = tgen.routers()
+
+ for rname, router in router_list.items():
+ logger.info("Loading router %s" % rname)
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module():
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_multi_instance_default_origination():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip("Skipped because of router(s) failure")
+
+ step("Configure a local default route")
+ r1 = tgen.gears["r1"]
+ r1.vtysh_cmd("conf t\nip route 0.0.0.0/0 Null0")
+
+    step("Verify the R1 configuration and installation of 'ip route 0.0.0.0/0 Null0'")
+    static_default_cfg = (
+        tgen.net["r1"]
+        .cmd('vtysh -c "show running" | grep "^ip route 0.0.0.0/0 Null0"')
+        .rstrip()
+    )
+    assertmsg = "'ip route 0.0.0.0/0 Null0' applied, but not present in configuration"
+    assert static_default_cfg == "ip route 0.0.0.0/0 Null0", assertmsg
+
+ input_dict = {
+ "0.0.0.0/0": [
+ {
+ "prefix": "0.0.0.0/0",
+ "prefixLen": 0,
+ "protocol": "static",
+ "nexthops": [
+ {
+ "blackhole": True,
+ }
+ ],
+ }
+ ]
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route 0.0.0.0/0 json", input_dict
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "0.0.0.0/0 not installed on router r1"
+ assert result is None, assertmsg
+
+ step(
+ "Verify the R1 configuration and advertisement of 'default-information originate'"
+ )
+ r1.vtysh_cmd("conf t\nrouter ospf\n default-information originate")
+
+ input_dict = {
+ "asExternalLinkStates": [
+ {
+ "lsaType": "AS-external-LSA",
+ "linkStateId": "0.0.0.0",
+ "advertisingRouter": "1.1.1.1",
+ "networkMask": 0,
+ "metricType": "E2 (Larger than any link state path)",
+ "metric": 10,
+ "forwardAddress": "0.0.0.0",
+ "externalRouteTag": 0,
+ }
+ ]
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip ospf database json", input_dict
+    )
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+    assertmsg = "AS-External default not advertised by router r1"
+    assert result is None, assertmsg
+
+ r2 = tgen.gears["r2"]
+ step("Verify the OSPF instance 1 installation of default route on router 2")
+ input_dict = {
+ "0.0.0.0/0": [
+ {
+ "prefix": "0.0.0.0/0",
+ "prefixLen": 0,
+ "protocol": "ospf",
+ "instance": 1,
+ "nexthops": [
+ {
+ "ip": "10.1.1.1",
+ "interfaceName": "r2-eth0",
+ }
+ ],
+ }
+ ]
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r2, "show ip route 0.0.0.0/0 json", input_dict
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "0.0.0.0/0 not installed on router r2"
+ assert result is None, assertmsg
+
+    step("Configure OSPF 'default-information originate' on router r2 instance 2")
+ r2.vtysh_cmd("conf t\nrouter ospf 2\n default-information originate")
+
+ step("Verify r2 instance 2 AS-External default origination")
+ input_dict = {
+ "ospfInstance": 2,
+ "routerId": "2.2.2.2",
+ "asExternalLinkStates": [
+ {
+ "lsaType": "AS-external-LSA",
+ "linkStateId": "0.0.0.0",
+ "advertisingRouter": "2.2.2.2",
+ "networkMask": 0,
+ "metricType": "E2 (Larger than any link state path)",
+ "tos": 0,
+ "metric": 10,
+ "forwardAddress": "0.0.0.0",
+ "externalRouteTag": 0,
+ }
+ ],
+ }
+ test_func = partial(
+ topotest.router_json_cmp,
+ r2,
+ "show ip ospf 2 database external json",
+ input_dict,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "AS-External default not originated by router r2 OSPF instance 2"
+ assert result is None, assertmsg
+
+ step("Update the OSPF instance 2 distance so it will be preferred over instance 1")
+ r2.vtysh_cmd("conf t\nrouter ospf 2\n distance 15")
+
+ step("Generate a default route from OSPF on r3")
+ r3 = tgen.gears["r3"]
+ r3.vtysh_cmd("conf t\nrouter ospf\n default-information originate")
+ r3.vtysh_cmd("conf t\nip route 0.0.0.0/0 Null0")
+
+ step("Verify r3 AS-External default origination on r2")
+ input_dict = {
+ "ospfInstance": 2,
+ "routerId": "2.2.2.2",
+ "asExternalLinkStates": [
+ {
+ "lsaType": "AS-external-LSA",
+ "linkStateId": "0.0.0.0",
+ "advertisingRouter": "3.3.3.1",
+ "length": 36,
+ "networkMask": 0,
+ "metricType": "E2 (Larger than any link state path)",
+ "tos": 0,
+ "metric": 10,
+ "forwardAddress": "0.0.0.0",
+ "externalRouteTag": 0,
+ }
+ ],
+ }
+ test_func = partial(
+ topotest.router_json_cmp,
+ r2,
+ "show ip ospf 2 database external json",
+ input_dict,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "AS-External default not originated by router r3 OSPF"
+ assert result is None, assertmsg
+
+ step("Verify r3's default installed by OSPF instance 2 is preferred on r2")
+ input_dict = {
+ "0.0.0.0/0": [
+ {
+ "prefix": "0.0.0.0/0",
+ "prefixLen": 0,
+ "protocol": "ospf",
+ "instance": 2,
+ "distance": 15,
+ "nexthops": [
+ {
+ "ip": "10.1.2.3",
+ "interfaceName": "r2-eth1",
+ }
+ ],
+ }
+ ]
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r2, "show ip route 0.0.0.0/0 json", input_dict
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "0.0.0.0/0 from r3 not installed on router r2"
+ assert result is None, assertmsg
+
+ step(
+ "Verify that r2's OSPF instance 2 AS-External LSA default is flushed due to default from r3"
+ )
+ input_dict = {
+ "ospfInstance": 2,
+ "routerId": "2.2.2.2",
+ "asExternalLinkStates": [
+ {
+ "lsaAge": 3600,
+ "lsaType": "AS-external-LSA",
+ "linkStateId": "0.0.0.0",
+ "advertisingRouter": "2.2.2.2",
+ "networkMask": 0,
+ "metricType": "E2 (Larger than any link state path)",
+ "tos": 0,
+ "metric": 10,
+ "forwardAddress": "0.0.0.0",
+ "externalRouteTag": 0,
+ }
+ ],
+ }
+ test_func = partial(
+ topotest.router_json_cmp,
+ r2,
+ "show ip ospf 2 database external json",
+ input_dict,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "AS-External default not flushed by router r2 OSPF instance 2"
+ assert result is None, assertmsg
+
+ step("Remove r3's default route and verify that its advertisement is flushed")
+ r3.vtysh_cmd("conf t\nno ip route 0.0.0.0/0 Null0")
+ input_dict = {
+ "routerId": "3.3.3.1",
+ "asExternalLinkStates": [
+ {
+ "lsaAge": 3600,
+ "lsaType": "AS-external-LSA",
+ "linkStateId": "0.0.0.0",
+ "advertisingRouter": "3.3.3.1",
+ "networkMask": 0,
+ "metricType": "E2 (Larger than any link state path)",
+ "tos": 0,
+ "metric": 10,
+ "forwardAddress": "0.0.0.0",
+ "externalRouteTag": 0,
+ }
+ ],
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r3, "show ip ospf database external json", input_dict
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "AS-External default not flushed by router r3 OSPF"
+ assert result is None, assertmsg
+
+ step(
+ "Verify that r2's OSPF instance 2 AS-External default is advertised and installed by r3"
+ )
+ input_dict = {
+ "routerId": "3.3.3.1",
+ "asExternalLinkStates": [
+ {
+ "lsaType": "AS-external-LSA",
+ "linkStateId": "0.0.0.0",
+ "advertisingRouter": "2.2.2.2",
+ "networkMask": 0,
+ "metricType": "E2 (Larger than any link state path)",
+ "tos": 0,
+ "metric": 10,
+ "forwardAddress": "0.0.0.0",
+ "externalRouteTag": 0,
+ }
+ ],
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r3, "show ip ospf database external json", input_dict
+ )
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+    assertmsg = "AS-External default not originated by r2 OSPF instance 2"
+ assert result is None, assertmsg
+
+    step("Verify r2's OSPF instance 2 AS-External default is installed on r3")
+ input_dict = {
+ "0.0.0.0/0": [
+ {
+ "prefix": "0.0.0.0/0",
+ "prefixLen": 0,
+ "protocol": "ospf",
+ "distance": 20,
+ "nexthops": [
+ {
+ "ip": "10.1.2.2",
+ "interfaceName": "r3-eth0",
+ }
+ ],
+ }
+ ]
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r3, "show ip route 0.0.0.0/0 json", input_dict
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assertmsg = "0.0.0.0/0 from router r2 not installed on r3"
+ assert result is None, assertmsg
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
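One detail worth noting in the flush checks above: OSPF withdraws an
AS-external LSA by re-flooding it at MaxAge (3600 seconds, per RFC 2328),
so the expected JSON pins "lsaAge": 3600 instead of expecting the LSA to
disappear. A minimal sketch of that distinction:

    MAXAGE = 3600  # RFC 2328 LSMaxAge, in seconds

    def lsa_is_flushed(lsa):
        # A flushed LSA stays visible briefly, but only at MaxAge.
        return lsa.get("lsaAge") == MAXAGE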
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/frr.conf b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/frr.conf
index 995958132c..bcbe2eded6 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/frr.conf
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/frr.conf
@@ -1,7 +1,5 @@
!
hostname r1
-password zebra
-log file /tmp/r1-frr.log
!
interface r1-eth0
ip address 10.0.1.1/24
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
index 131085a47a..e4787be3c9 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt
@@ -1,3 +1,4 @@
+IPv4 unicast VRF default:
O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX
C>* 10.0.1.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX
L>* 10.0.1.1/32 is directly connected, r1-eth0, weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt
index 45ee1071d4..2f893c3d96 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt
@@ -1,4 +1,4 @@
-VRF neno:
+IPv4 unicast VRF neno:
O>* 10.0.3.0/24 [110/20] via 10.0.30.3, r1-eth2, weight 1, XX:XX:XX
B>* 10.0.4.0/24 [110/20] via 10.0.20.2, r1-eth1 (vrf default), weight 1, XX:XX:XX
O 10.0.30.0/24 [110/10] is directly connected, r1-eth2, weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/frr.conf b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/frr.conf
index 29909de646..0d3eb3c8c9 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/frr.conf
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/frr.conf
@@ -1,7 +1,5 @@
!
hostname r2
-password zebra
-log file /tmp/r2-frr.log
!
interface r2-eth0
ip address 10.0.2.2/24
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
index f3724bbb9f..07ec7226fa 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt
@@ -1,3 +1,4 @@
+IPv4 unicast VRF default:
S>* 0.0.0.0/0 [1/0] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX
O>* 10.0.1.0/24 [110/20] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX
O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
index 0f8b12bdfa..f409034b80 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt
@@ -1,4 +1,4 @@
-VRF ray:
+IPv4 unicast VRF ray:
B 10.0.1.0/24 [110/20] via 10.0.20.1, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX
B 10.0.2.0/24 [20/0] is directly connected, r2-eth0 (vrf default) inactive, weight 1, XX:XX:XX
B>* 10.0.3.0/24 [110/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/frr.conf b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/frr.conf
index 35fe22e9f9..1cc2972f01 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/frr.conf
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/frr.conf
@@ -1,7 +1,5 @@
!
hostname r3
-password zebra
-log file /tmp/r3-frr.log
!
interface r3-eth0
ip address 10.0.3.3/24
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt
index db4e268cb0..2af9d2460d 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r3/zebra-vrf-default.txt
@@ -1,3 +1,4 @@
+IPv4 unicast VRF default:
O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r3-eth0, weight 1, XX:XX:XX
L>* 10.0.3.3/32 is directly connected, r3-eth0, weight 1, XX:XX:XX
@@ -6,5 +7,3 @@ O 10.0.30.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX
C>* 10.0.30.0/24 is directly connected, r3-eth1, weight 1, XX:XX:XX
L>* 10.0.30.3/32 is directly connected, r3-eth1, weight 1, XX:XX:XX
O>* 10.0.40.0/24 [110/20] via 10.0.30.1, r3-eth1, weight 1, XX:XX:XX
-
-
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/frr.conf b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/frr.conf
index 721c3d91c3..a82d5b033c 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/frr.conf
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/frr.conf
@@ -1,7 +1,5 @@
!
hostname r4
-password zebra
-log file /tmp/r4-frr.log
!
interface r4-eth0
ip address 10.0.4.4/24
diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt
index 4865708578..013073795b 100644
--- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt
+++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r4/zebra-vrf-default.txt
@@ -1,3 +1,4 @@
+IPv4 unicast VRF default:
O>* 10.0.3.0/24 [110/20] via 10.0.40.2, r4-eth1, weight 1, XX:XX:XX
O 10.0.4.0/24 [110/10] is directly connected, r4-eth0, weight 1, XX:XX:XX
C>* 10.0.4.0/24 is directly connected, r4-eth0, weight 1, XX:XX:XX
@@ -6,4 +7,3 @@ O>* 10.0.30.0/24 [110/20] via 10.0.40.2, r4-eth1, weight 1, XX:XX:XX
O 10.0.40.0/24 [110/10] is directly connected, r4-eth1, weight 1, XX:XX:XX
C>* 10.0.40.0/24 is directly connected, r4-eth1, weight 1, XX:XX:XX
L>* 10.0.40.4/32 is directly connected, r4-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r1/ospfd.conf b/tests/topotests/ospf_netns_vrf/r1/ospfd.conf
index ba13146561..75f38d0058 100644
--- a/tests/topotests/ospf_netns_vrf/r1/ospfd.conf
+++ b/tests/topotests/ospf_netns_vrf/r1/ospfd.conf
@@ -1,7 +1,5 @@
!
hostname r1
-password zebra
-log file /tmp/r1-ospfd.log
!
interface r1-eth0 vrf r1-ospf-cust1
ip ospf hello-interval 1
diff --git a/tests/topotests/ospf_netns_vrf/r1/zebra.conf b/tests/topotests/ospf_netns_vrf/r1/zebra.conf
index 56d7a9764e..1c08f1e263 100644
--- a/tests/topotests/ospf_netns_vrf/r1/zebra.conf
+++ b/tests/topotests/ospf_netns_vrf/r1/zebra.conf
@@ -4,8 +4,6 @@
! debug zebra event
!
hostname r1
-password zebra
-log file /tmp/r1-zebra.log
!
interface r1-eth0 vrf r1-ospf-cust1
ip address 10.0.1.1/24
diff --git a/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt b/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt
index 68fd30d4cc..82cc2d9136 100644
--- a/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt
+++ b/tests/topotests/ospf_netns_vrf/r1/zebraroute.txt
@@ -1,4 +1,4 @@
-VRF r1-ospf-cust1:
+IPv4 unicast VRF r1-ospf-cust1:
O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX
C>* 10.0.1.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX
L>* 10.0.1.1/32 is directly connected, r1-eth0, weight 1, XX:XX:XX
@@ -7,4 +7,3 @@ O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r1-eth1, weight 1, XX:XX:XX
L>* 10.0.3.2/32 is directly connected, r1-eth1, weight 1, XX:XX:XX
O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r1-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt b/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt
index f0bce905b1..d6ad2a2500 100644
--- a/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt
+++ b/tests/topotests/ospf_netns_vrf/r1/zebraroutedown.txt
@@ -1,4 +1,4 @@
-VRF r1-ospf-cust1:
+IPv4 unicast VRF r1-ospf-cust1:
O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX
C>* 10.0.1.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX
L>* 10.0.1.1/32 is directly connected, r1-eth0, weight 1, XX:XX:XX
@@ -6,4 +6,3 @@ O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, weight 1, XX:XX:XX
O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r1-eth1, weight 1, XX:XX:XX
L>* 10.0.3.2/32 is directly connected, r1-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r2/ospfd.conf b/tests/topotests/ospf_netns_vrf/r2/ospfd.conf
index 01b6b1526b..3cd69bb8ff 100644
--- a/tests/topotests/ospf_netns_vrf/r2/ospfd.conf
+++ b/tests/topotests/ospf_netns_vrf/r2/ospfd.conf
@@ -1,7 +1,5 @@
!
hostname r2
-password zebra
-log file /tmp/r2-ospfd.log
!
interface r2-eth0 vrf r2-ospf-cust1
ip ospf hello-interval 1
diff --git a/tests/topotests/ospf_netns_vrf/r2/zebra.conf b/tests/topotests/ospf_netns_vrf/r2/zebra.conf
index 6ff72d1267..f997028c74 100644
--- a/tests/topotests/ospf_netns_vrf/r2/zebra.conf
+++ b/tests/topotests/ospf_netns_vrf/r2/zebra.conf
@@ -1,7 +1,5 @@
!
hostname r2
-password zebra
-log file /tmp/r2-zebra.log
!
interface r2-eth0 vrf r2-ospf-cust1
ip address 10.0.2.1/24
diff --git a/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt b/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt
index 098eceb28b..effcbc4634 100644
--- a/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt
+++ b/tests/topotests/ospf_netns_vrf/r2/zebraroute.txt
@@ -1,4 +1,4 @@
-VRF r2-ospf-cust1:
+IPv4 unicast VRF r2-ospf-cust1:
O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX
O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX
C>* 10.0.2.0/24 is directly connected, r2-eth0, weight 1, XX:XX:XX
@@ -7,4 +7,3 @@ O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r2-eth1, weight 1, XX:XX:XX
L>* 10.0.3.3/32 is directly connected, r2-eth1, weight 1, XX:XX:XX
O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r2-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt b/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt
index a9300f8dfa..7321b184a3 100644
--- a/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt
+++ b/tests/topotests/ospf_netns_vrf/r2/zebraroutedown.txt
@@ -1,4 +1,4 @@
-VRF r2-ospf-cust1:
+IPv4 unicast VRF r2-ospf-cust1:
O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX
O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX
C>* 10.0.2.0/24 is directly connected, r2-eth0, weight 1, XX:XX:XX
@@ -6,4 +6,3 @@ L>* 10.0.2.1/32 is directly connected, r2-eth0, weight 1, XX:XX:XX
O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r2-eth1, weight 1, XX:XX:XX
L>* 10.0.3.3/32 is directly connected, r2-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r3/ospfd.conf b/tests/topotests/ospf_netns_vrf/r3/ospfd.conf
index abfaa5b9ef..4581a609b4 100644
--- a/tests/topotests/ospf_netns_vrf/r3/ospfd.conf
+++ b/tests/topotests/ospf_netns_vrf/r3/ospfd.conf
@@ -1,7 +1,5 @@
!
hostname r3
-password zebra
-log file /tmp/r3-ospfd.log
!
!
interface r3-eth0 vrf r3-ospf-cust1
diff --git a/tests/topotests/ospf_netns_vrf/r3/zebra.conf b/tests/topotests/ospf_netns_vrf/r3/zebra.conf
index 1534150048..4053d94a63 100644
--- a/tests/topotests/ospf_netns_vrf/r3/zebra.conf
+++ b/tests/topotests/ospf_netns_vrf/r3/zebra.conf
@@ -1,7 +1,5 @@
!
hostname r3
-password zebra
-log file /tmp/r3-zebra.log
!
interface r3-eth0 vrf r3-ospf-cust1
ip address 10.0.3.1/24
diff --git a/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt b/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt
index f58beb81a7..3fea04bd19 100644
--- a/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt
+++ b/tests/topotests/ospf_netns_vrf/r3/zebraroute.txt
@@ -1,4 +1,4 @@
-VRF r3-ospf-cust1:
+IPv4 unicast VRF r3-ospf-cust1:
O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r3-eth0, weight 1, XX:XX:XX
O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r3-eth0, weight 1, XX:XX:XX
O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, weight 1, XX:XX:XX
@@ -7,4 +7,3 @@ L>* 10.0.3.1/32 is directly connected, r3-eth0, weight 1, XX:XX:XX
O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX
C>* 10.0.10.0/24 is directly connected, r3-eth1, weight 1, XX:XX:XX
L>* 10.0.10.1/32 is directly connected, r3-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt b/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt
index cfedf8fcb4..3287355ce0 100644
--- a/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt
+++ b/tests/topotests/ospf_netns_vrf/r3/zebraroutedown.txt
@@ -1,5 +1,4 @@
-VRF r3-ospf-cust1:
+IPv4 unicast VRF r3-ospf-cust1:
O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX
C>* 10.0.10.0/24 is directly connected, r3-eth1, weight 1, XX:XX:XX
L>* 10.0.10.1/32 is directly connected, r3-eth1, weight 1, XX:XX:XX
-
diff --git a/tests/topotests/ospf_p2mp/r1/frr-p2mp-non-broadcast.conf b/tests/topotests/ospf_p2mp/r1/frr-p2mp-non-broadcast.conf
index ca84349cdc..fdc75633b1 100644
--- a/tests/topotests/ospf_p2mp/r1/frr-p2mp-non-broadcast.conf
+++ b/tests/topotests/ospf_p2mp/r1/frr-p2mp-non-broadcast.conf
@@ -1,8 +1,5 @@
!
hostname r1
-password zebra
-log file /tmp/r1-frr.log
-ip forwarding
!
interface r1-eth0
ip address 10.1.0.1/24
diff --git a/tests/topotests/ospf_p2mp/r1/frr-p2mp.conf b/tests/topotests/ospf_p2mp/r1/frr-p2mp.conf
index 89f255bb44..350c0de7ba 100644
--- a/tests/topotests/ospf_p2mp/r1/frr-p2mp.conf
+++ b/tests/topotests/ospf_p2mp/r1/frr-p2mp.conf
@@ -1,13 +1,10 @@
!
-!log file ospfd.log debug
! debug ospf event
! debug ospf client
! debug ospf lsa
! debug ospf packet all
hostname r1
-password zebra
-log file /tmp/r1-frr.log
ip forwarding
!
interface r1-eth0
diff --git a/tests/topotests/ospf_p2mp/r2/frr-p2mp-non-broadcast.conf b/tests/topotests/ospf_p2mp/r2/frr-p2mp-non-broadcast.conf
index 6e26897c49..c44e936f9d 100644
--- a/tests/topotests/ospf_p2mp/r2/frr-p2mp-non-broadcast.conf
+++ b/tests/topotests/ospf_p2mp/r2/frr-p2mp-non-broadcast.conf
@@ -1,8 +1,5 @@
!
hostname r2
-password zebra
-log file /tmp/r1-frr.log
-ip forwarding
!
interface r2-eth0
ip address 10.1.0.2/24
diff --git a/tests/topotests/ospf_p2mp/r2/frr-p2mp.conf b/tests/topotests/ospf_p2mp/r2/frr-p2mp.conf
index 429330987e..806914d814 100644
--- a/tests/topotests/ospf_p2mp/r2/frr-p2mp.conf
+++ b/tests/topotests/ospf_p2mp/r2/frr-p2mp.conf
@@ -1,14 +1,10 @@
!
-!log file ospfd.log debug
! debug ospf event
! debug ospf client
! debug ospf lsa
! debug ospf packet all
!
hostname r2
-password zebra
-log file /tmp/r1-frr.log
-ip forwarding
!
interface r2-eth0
ip address 10.1.0.2/24
diff --git a/tests/topotests/ospf_p2mp/r3/frr-p2mp-non-broadcast.conf b/tests/topotests/ospf_p2mp/r3/frr-p2mp-non-broadcast.conf
index a69e0557be..d89269b324 100644
--- a/tests/topotests/ospf_p2mp/r3/frr-p2mp-non-broadcast.conf
+++ b/tests/topotests/ospf_p2mp/r3/frr-p2mp-non-broadcast.conf
@@ -1,8 +1,5 @@
!
hostname r3
-password zebra
-log file /tmp/r1-frr.log
-ip forwarding
!
interface r3-eth0
ip address 10.1.0.3/24
diff --git a/tests/topotests/ospf_p2mp/r3/frr-p2mp.conf b/tests/topotests/ospf_p2mp/r3/frr-p2mp.conf
index eada78450e..343a9d0086 100644
--- a/tests/topotests/ospf_p2mp/r3/frr-p2mp.conf
+++ b/tests/topotests/ospf_p2mp/r3/frr-p2mp.conf
@@ -1,14 +1,10 @@
!
-!log file ospfd.log debug
! debug ospf event
! debug ospf client
! debug ospf lsa
! debug ospf packet all
!
hostname r3
-password zebra
-log file /tmp/r1-frr.log
-ip forwarding
!
interface r3-eth0
ip address 10.1.0.3/24
diff --git a/tests/topotests/ospf_p2mp/r4/frr-p2mp-non-broadcast.conf b/tests/topotests/ospf_p2mp/r4/frr-p2mp-non-broadcast.conf
index 1b8388584b..aa6c80d41e 100644
--- a/tests/topotests/ospf_p2mp/r4/frr-p2mp-non-broadcast.conf
+++ b/tests/topotests/ospf_p2mp/r4/frr-p2mp-non-broadcast.conf
@@ -1,8 +1,5 @@
!
hostname r4
-password zebra
-log file /tmp/r1-frr.log
-ip forwarding
!
interface r4-eth0
ip address 10.1.0.4/24
diff --git a/tests/topotests/ospf_p2mp/r4/frr-p2mp.conf b/tests/topotests/ospf_p2mp/r4/frr-p2mp.conf
index 3146ea0957..a1527f5aa9 100644
--- a/tests/topotests/ospf_p2mp/r4/frr-p2mp.conf
+++ b/tests/topotests/ospf_p2mp/r4/frr-p2mp.conf
@@ -1,14 +1,10 @@
!
-!log file ospfd.log debug
! debug ospf event
! debug ospf client
! debug ospf lsa
! debug ospf packet all
!
hostname r4
-password zebra
-log file /tmp/r1-frr.log
-ip forwarding
!
interface r4-eth0
ip address 10.1.0.4/24
diff --git a/tests/topotests/ospf_prefix_suppression/r1/frr.conf b/tests/topotests/ospf_prefix_suppression/r1/frr.conf
index 437b474153..4b9df834bf 100644
--- a/tests/topotests/ospf_prefix_suppression/r1/frr.conf
+++ b/tests/topotests/ospf_prefix_suppression/r1/frr.conf
@@ -1,7 +1,5 @@
!
hostname r1
-password zebra
-log file /tmp/r1-frr.log
ip forwarding
!
interface r1-eth0
diff --git a/tests/topotests/ospf_prefix_suppression/r2/frr.conf b/tests/topotests/ospf_prefix_suppression/r2/frr.conf
index 68390f15f1..cf4a25a09a 100644
--- a/tests/topotests/ospf_prefix_suppression/r2/frr.conf
+++ b/tests/topotests/ospf_prefix_suppression/r2/frr.conf
@@ -1,7 +1,5 @@
!
hostname r2
-password zebra
-log file /tmp/r1-frr.log
ip forwarding
!
interface r2-eth0
diff --git a/tests/topotests/ospf_prefix_suppression/r3/frr.conf b/tests/topotests/ospf_prefix_suppression/r3/frr.conf
index 984a39d989..dbd1bc375f 100644
--- a/tests/topotests/ospf_prefix_suppression/r3/frr.conf
+++ b/tests/topotests/ospf_prefix_suppression/r3/frr.conf
@@ -1,7 +1,5 @@
!
hostname r3
-password zebra
-log file /tmp/r1-frr.log
ip forwarding
!
interface r3-eth0
diff --git a/tests/topotests/ospfapi/test_ospf_clientapi.py b/tests/topotests/ospfapi/test_ospf_clientapi.py
index 89a34ff9b5..9e00fcf11f 100644
--- a/tests/topotests/ospfapi/test_ospf_clientapi.py
+++ b/tests/topotests/ospfapi/test_ospf_clientapi.py
@@ -218,10 +218,12 @@ def _test_router_id(tgen, testbin):
step("router id: check for modified router id")
r1.vtysh_multicmd("conf t\nrouter ospf\nospf router-id 1.1.1.1")
+ r1.vtysh_multicmd("clear ip ospf process")
_wait_output(p, "SUCCESS: {}".format(waitlist[1]))
step("router id: check for restored router id")
r1.vtysh_multicmd("conf t\nrouter ospf\nospf router-id 1.0.0.0")
+ r1.vtysh_multicmd("clear ip ospf process")
_wait_output(p, "SUCCESS: {}".format(waitlist[2]))
except Exception as error:
logging.error("ERROR: %s", error)
diff --git a/tests/topotests/pim_autorp/r1/frr.conf b/tests/topotests/pim_autorp/r1/frr.conf
index 2fddbc3ae2..fc4e634452 100644
--- a/tests/topotests/pim_autorp/r1/frr.conf
+++ b/tests/topotests/pim_autorp/r1/frr.conf
@@ -1,16 +1,24 @@
!
hostname r1
-password zebra
-log file /tmp/r1-frr.log
-debug pim autorp
+!
+!debug pim autorp
!
interface r1-eth0
- ip address 10.10.76.1/24
+ ip address 10.0.0.1/24
+ ip igmp
+ ip pim
+!
+interface r1-eth1
+ ip address 10.0.1.1/24
ip igmp
ip pim
!
ip forwarding
!
+ip route 10.0.2.0/24 10.0.0.2 50
+ip route 10.0.3.0/24 10.0.0.2 50
+!
router pim
autorp discovery
-! \ No newline at end of file
+ rp 10.0.3.4 224.0.1.0/24
+!
diff --git a/tests/topotests/pim_autorp/r2/frr.conf b/tests/topotests/pim_autorp/r2/frr.conf
index fd3c0cad39..ded462cad1 100644
--- a/tests/topotests/pim_autorp/r2/frr.conf
+++ b/tests/topotests/pim_autorp/r2/frr.conf
@@ -1,16 +1,24 @@
!
hostname r2
-password zebra
-log file /tmp/r2-frr.log
-debug pim autorp
+!
+!debug pim autorp
!
interface r2-eth0
- ip address 10.10.76.2/24
+ ip address 10.0.0.2/24
+ ip igmp
+ ip pim
+!
+interface r2-eth1
+ ip address 10.0.2.2/24
ip igmp
ip pim
!
ip forwarding
!
+ip route 10.0.1.0/24 10.0.0.1 50
+ip route 10.0.3.0/24 10.0.2.4 50
+!
router pim
autorp discovery
-! \ No newline at end of file
+ rp 10.0.3.4 224.0.1.0/24
+!
diff --git a/tests/topotests/pim_autorp/r3/frr.conf b/tests/topotests/pim_autorp/r3/frr.conf
new file mode 100644
index 0000000000..31726f2c01
--- /dev/null
+++ b/tests/topotests/pim_autorp/r3/frr.conf
@@ -0,0 +1,24 @@
+!
+hostname r3
+!
+!debug pim autorp
+!
+interface r3-eth0
+ ip address 10.0.1.3/24
+ ip igmp
+ ip pim
+!
+interface r3-eth1
+ ip address 10.0.3.3/24
+ ip igmp
+ ip pim
+!
+ip forwarding
+!
+ip route 10.0.0.0/24 10.0.1.1 50
+ip route 10.0.2.0/24 10.0.3.4 50
+!
+router pim
+ autorp discovery
+ rp 10.0.3.4 224.0.1.0/24
+!
diff --git a/tests/topotests/pim_autorp/r4/frr.conf b/tests/topotests/pim_autorp/r4/frr.conf
new file mode 100644
index 0000000000..9d37da99aa
--- /dev/null
+++ b/tests/topotests/pim_autorp/r4/frr.conf
@@ -0,0 +1,24 @@
+!
+hostname r4
+!
+!debug pim autorp
+!
+interface r4-eth0
+ ip address 10.0.2.4/24
+ ip igmp
+ ip pim
+!
+interface r4-eth1
+ ip address 10.0.3.4/24
+ ip igmp
+ ip pim
+!
+ip forwarding
+!
+ip route 10.0.0.0/24 10.0.2.2 50
+ip route 10.0.1.0/24 10.0.2.2 50
+!
+router pim
+ autorp discovery
+ rp 10.0.3.4 224.0.1.0/24
+!
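Annotation: all four routers above share the same static RP (10.0.3.4 for 224.0.1.0/24) and enable "autorp discovery"; the static unicast routes give each router an RPF path toward the RP across the square topology. The test below polls rp-info JSON on every router with the same loop over and over; a hedged sketch of that pattern as a hypothetical helper (topotest.router_json_cmp and run_and_expect are both imported in the test):

    from functools import partial

    def expect_rp_info(tgen, rtr, expected):
        # Poll "show ip pim rp-info json" on rtr until the output matches
        # expected, or fail after the default retry budget.
        test_func = partial(
            topotest.router_json_cmp,
            tgen.gears[rtr],
            "show ip pim rp-info json",
            expected,
        )
        _, result = topotest.run_and_expect(test_func, None)
        assert result is None, "{} does not have correct rp-info".format(rtr)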
diff --git a/tests/topotests/pim_autorp/test_pim_autorp.py b/tests/topotests/pim_autorp/test_pim_autorp.py
index ad618af29e..61cf8ebbc5 100644
--- a/tests/topotests/pim_autorp/test_pim_autorp.py
+++ b/tests/topotests/pim_autorp/test_pim_autorp.py
@@ -11,18 +11,14 @@
import os
import sys
import pytest
+import json
from functools import partial
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
-from lib.topogen import Topogen, get_topogen
+from lib.topogen import Topogen, topotest, get_topogen
from lib.topolog import logger
-from lib.pim import (
- scapy_send_autorp_raw_packet,
- verify_pim_rp_info,
- verify_pim_rp_info_is_empty,
-)
+from lib.pim import verify_pim_rp_info
from lib.common_config import step, write_test_header
from time import sleep
@@ -32,13 +28,26 @@ test_pim_autorp.py: Test general PIM AutoRP functionality
"""
TOPOLOGY = """
- Basic AutoRP functionality
-
- +---+---+ +---+---+
- | | 10.10.76.0/24 | |
- + R1 + <------------------> + R2 |
- | | .1 .2 | |
- +---+---+ +---+---+
+ Test PIM AutoRP functionality:
+ AutoRP candidate RP announcements
+ Mapping agent announcement receive and send discovery
+ AutoRP discovery to active RP info
+
+ +---+---+ +---+---+
+ | | 10.0.0.0/24 | |
+ + R1 +----------------------+ R2 |
+ | | .1 .2 | |
+ +---+---+ r1-eth0 r2-eth0 +---+---+
+ .1 | r1-eth1 r2-eth1 | .2
+ | |
+ 10.0.1.0/24 | | 10.0.2.0/24
+ | |
+ .3 | r3-eth0 r4-eth0 | .4
+ +---+---+ r3-eth1 r4-eth1 +---+---+
+ | | .3 .4 | |
+ + R3 +----------------------+ R4 |
+ | | 10.0.3.0/24 | |
+ +---+---+ +---+---+
"""
# Save the Current Working Directory to find configuration files.
@@ -55,11 +64,14 @@ def build_topo(tgen):
# Create routers
tgen.add_router("r1")
tgen.add_router("r2")
+ tgen.add_router("r3")
+ tgen.add_router("r4")
- # Create link between router 1 and 2
- switch = tgen.add_switch("s1-2")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ # Create topology links
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "r1-eth0", "r2-eth0")
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r3"], "r1-eth1", "r3-eth0")
+ tgen.add_link(tgen.gears["r2"], tgen.gears["r4"], "r2-eth1", "r4-eth0")
+ tgen.add_link(tgen.gears["r3"], tgen.gears["r4"], "r3-eth1", "r4-eth1")
def setup_module(mod):
@@ -68,15 +80,6 @@ def setup_module(mod):
tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
- # Router 1 will be the router configured with "fake" autorp configuration, so give it a default route
- # to router 2 so that routing to the RP address is not an issue
- # r1_defrt_setup_cmds = [
- # "ip route add default via 10.10.76.1 dev r1-eth0",
- # ]
- # for cmd in r1_defrt_setup_cmds:
- # tgen.net["r1"].cmd(cmd)
-
- logger.info("Testing PIM AutoRP support")
router_list = tgen.routers()
for rname, router in router_list.items():
logger.info("Loading router %s" % rname)
@@ -95,8 +98,8 @@ def teardown_module(mod):
tgen.stop_topology()
-def test_pim_autorp_discovery_single_rp(request):
- "Test PIM AutoRP Discovery with single RP"
+def test_pim_autorp_init(request):
+ "Test PIM AutoRP startup with only the static RP"
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
@@ -104,84 +107,253 @@ def test_pim_autorp_discovery_single_rp(request):
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- step("Start with no RP configuration")
- result = verify_pim_rp_info_is_empty(tgen, "r1")
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- step("Send AutoRP packet from r1 to r2")
- # 1 RP(s), hold time 5 secs, 10.10.76.1, group(s) 224.0.0.0/4
- data = "01005e00012800127f55cfb1080045c00030700c000008110abe0a0a4c01e000012801f001f0001c798b12010005000000000a0a4c0103010004e0000000"
- scapy_send_autorp_raw_packet(tgen, "r1", "r1-eth0", data)
-
- step("Verify rp-info from AutoRP packet")
- result = verify_pim_rp_info(
- tgen,
- None,
- "r2",
- "224.0.0.0/4",
- "r2-eth0",
- "10.10.76.1",
- "AutoRP",
- False,
- "ipv4",
- True,
+ step("Verify start-up with no extra RP configuration")
+ expected = json.loads(
+ """
+ {
+ "10.0.3.4":[
+ {
+ "rpAddress":"10.0.3.4",
+ "group":"224.0.1.0/24",
+ "source":"Static"
+ }
+ ]
+ }"""
+ )
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim rp-info json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct rp-info".format(rtr)
+
+ step("Verify start-up with AutoRP only discovery enabled")
+ expected = json.loads(
+ """
+ {
+ "discovery":{
+ "enabled": true
+ },
+ "announce": {
+ "enabled":false
+ },
+ "mapping-agent": {
+ "enabled":false
+ }
+ }"""
)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim autorp json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct autorp configuration".format(
+ rtr
+ )
+
+
+def test_pim_autorp_no_mapping_agent_rp(request):
+ "Test PIM AutoRP candidate with no mapping agent"
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
- step("Verify AutoRP configuration times out")
- result = verify_pim_rp_info_is_empty(tgen, "r2")
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ # Start a candidate RP on r2
+ tgen.routers()["r2"].vtysh_cmd(
+ """
+ conf
+ router pim
+ autorp announce 10.0.0.2 224.0.0.0/4
+ autorp announce scope 31 interval 1 holdtime 5
+ """
+ )
-def test_pim_autorp_discovery_multiple_rp(request):
- "Test PIM AutoRP Discovery with multiple RP's"
+ # Without a mapping agent, we should still have no RP
+ step("Verify no RP without mapping agent")
+ expected = json.loads(
+ """
+ {
+ "10.0.3.4":[
+ {
+ "rpAddress":"10.0.3.4",
+ "group":"224.0.1.0/24",
+ "source":"Static"
+ }
+ ]
+ }"""
+ )
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim rp-info json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct rp-info".format(rtr)
+
+ step("Verify candidate RP in AutoRP on R2")
+ expected = json.loads(
+ """
+ {
+ "discovery":{
+ "enabled": true
+ },
+ "announce": {
+ "enabled":true,
+ "scope":31,
+ "interval":1,
+ "holdtime":5,
+ "rpList":[
+ {
+ "rpAddress":"10.0.0.2",
+ "groupRange":"224.0.0.0/4",
+ "prefixList":"-"
+ }
+ ]
+ },
+ "mapping-agent": {
+ "enabled":false
+ }
+ }"""
+ )
+ test_func = partial(
+ topotest.router_json_cmp, tgen.gears["r2"], "show ip pim autorp json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct autorp configuration".format("r2")
+
+
+def test_pim_autorp_discovery_rp(request):
+ "Test PIM AutoRP candidate advertised by mapping agent"
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
if tgen.routers_have_failure():
- pytest.skip("skipped because of router(s) failure")
+ pytest.skip(tgen.errors)
- step("Start with no RP configuration")
- result = verify_pim_rp_info_is_empty(tgen, "r2")
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- step("Send AutoRP packet from r1 to r2")
- # 2 RP(s), hold time 5 secs, 10.10.76.1, group(s) 224.0.0.0/8, 10.10.76.3, group(s) 225.0.0.0/8
- data = "01005e00012800127f55cfb1080045c0003c700c000008110ab20a0a4c01e000012801f001f000283f5712020005000000000a0a4c0103010008e00000000a0a4c0303010008e1000000"
- scapy_send_autorp_raw_packet(tgen, "r1", "r1-eth0", data)
-
- step("Verify rp-info from AutoRP packet")
- result = verify_pim_rp_info(
- tgen,
- None,
- "r2",
- "224.0.0.0/8",
- "r2-eth0",
- "10.10.76.1",
- "AutoRP",
- False,
- "ipv4",
- True,
- )
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
- result = verify_pim_rp_info(
- tgen,
- None,
- "r2",
- "225.0.0.0/8",
- "r2-eth0",
- "10.10.76.3",
- "AutoRP",
- False,
- "ipv4",
- True,
+ # Start the mapping agent on R1
+ tgen.routers()["r1"].vtysh_cmd(
+ """
+ conf
+ router pim
+ autorp send-rp-discovery source interface r1-eth0
+ autorp send-rp-discovery scope 31 interval 1 holdtime 5
+ """
)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ step("Verify rp-info of the only candidate RP")
+ expected = json.loads(
+ """
+ {
+ "10.0.3.4":[
+ {
+ "rpAddress":"10.0.3.4",
+ "group":"224.0.1.0/24",
+ "source":"Static"
+ }
+ ],
+ "10.0.0.2":[
+ {
+ "rpAddress":"10.0.0.2",
+ "group":"224.0.0.0/4",
+ "source":"AutoRP"
+ }
+ ]
+ }"""
+ )
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim rp-info json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct rp-info".format(rtr)
+
+ step("Verify mapping-agent in AutoRP on R1")
+ expected = json.loads(
+ """
+ {
+ "announce": {
+ "enabled":false
+ },
+ "mapping-agent": {
+ "enabled":true,
+ "active":true,
+ "scope":31,
+ "interval":1,
+ "holdtime":5,
+ "source":"interface",
+ "interface":"r1-eth0",
+ "address":"10.0.0.1",
+ "rpList":{
+ "10.0.0.2":{
+ "rpAddress":"10.0.0.2",
+ "groupRanges":[
+ {
+ "negative":false,
+ "prefix":"224.0.0.0/4"
+ }
+ ]
+ }
+ }
+ }
+ }"""
+ )
+ test_func = partial(
+ topotest.router_json_cmp, tgen.gears["r1"], "show ip pim autorp json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct autorp configuration".format("r1")
-def test_pim_autorp_discovery_static(request):
- "Test PIM AutoRP Discovery with Static RP"
+ step("Verify AutoRP discovery RP's")
+ expected = json.loads(
+ """
+ {
+ "discovery":{
+ "enabled": true,
+ "rpList":{
+ "10.0.0.2":{
+ "rpAddress":"10.0.0.2",
+ "holdtime":5,
+ "groupRanges":[
+ {
+ "negative":false,
+ "prefix":"224.0.0.0/4"
+ }
+ ]
+ }
+ }
+ }
+ }"""
+ )
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim autorp json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct autorp configuration".format(
+ rtr
+ )
+
+
+def test_pim_autorp_discovery_multiple_rp_same(request):
+ "Test PIM AutoRP Discovery with multiple RP's for same group prefix"
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
@@ -189,52 +361,94 @@ def test_pim_autorp_discovery_static(request):
if tgen.routers_have_failure():
pytest.skip("skipped because of router(s) failure")
- step("Start with no RP configuration")
- result = verify_pim_rp_info_is_empty(tgen, "r2")
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- step("Add static RP configuration to r2")
- rnode = tgen.routers()["r2"]
- rnode.cmd("vtysh -c 'conf t' -c 'router pim' -c 'rp 10.10.76.3 224.0.0.0/4'")
-
- step("Verify static rp-info from r2")
- result = verify_pim_rp_info(
- tgen,
- None,
- "r2",
- "224.0.0.0/4",
- "r2-eth0",
- "10.10.76.3",
- "Static",
- False,
- "ipv4",
- True,
- )
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- step("Send AutoRP packet from r1 to r2")
- # 1 RP(s), hold time 5 secs, 10.10.76.1, group(s) 224.0.0.0/4
- data = "01005e00012800127f55cfb1080045c00030700c000008110abe0a0a4c01e000012801f001f0001c798b12010005000000000a0a4c0103010004e0000000"
- scapy_send_autorp_raw_packet(tgen, "r1", "r1-eth0", data)
-
- step("Verify rp-info from AutoRP packet")
- result = verify_pim_rp_info(
- tgen,
- None,
- "r2",
- "224.0.0.0/4",
- "r2-eth0",
- "10.10.76.1",
- "AutoRP",
- False,
- "ipv4",
- True,
+ # Start a candidate RP on r3
+ tgen.routers()["r3"].vtysh_cmd(
+ """
+ conf
+ router pim
+ autorp announce 10.0.1.3 224.0.0.0/4
+ autorp announce scope 31 interval 1 holdtime 5
+ """
)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
-def test_pim_autorp_announce_cli(request):
- "Test PIM AutoRP Announcement CLI commands"
+    # The new candidate RP has the same group range but a higher IP, so all
+    # routers should switch to this RP
+ step("Verify rp-info of the candidate RP with the higher IP")
+ expected = json.loads(
+ """
+ {
+ "10.0.3.4":[
+ {
+ "rpAddress":"10.0.3.4",
+ "group":"224.0.1.0/24",
+ "source":"Static"
+ }
+ ],
+ "10.0.1.3":[
+ {
+ "rpAddress":"10.0.1.3",
+ "group":"224.0.0.0/4",
+ "source":"AutoRP"
+ }
+ ]
+ }"""
+ )
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim rp-info json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct rp-info".format(rtr)
+
+ step("Verify AutoRP discovery RP's")
+ expected = json.loads(
+ """
+ {
+ "discovery":{
+ "enabled": true,
+ "rpList":{
+ "10.0.0.2":{
+ "rpAddress":"10.0.0.2",
+ "holdtime":5,
+ "groupRanges":[
+ {
+ "negative":false,
+ "prefix":"224.0.0.0/4"
+ }
+ ]
+ },
+ "10.0.1.3":{
+ "rpAddress":"10.0.1.3",
+ "holdtime":5,
+ "groupRanges":[
+ {
+ "negative":false,
+ "prefix":"224.0.0.0/4"
+ }
+ ]
+ }
+ }
+ }
+ }"""
+ )
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim autorp json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct autorp configuration".format(
+ rtr
+ )
+
+
+def test_pim_autorp_discovery_multiple_rp_different(request):
+ "Test PIM AutoRP Discovery with multiple RP's for different group prefixes"
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
@@ -242,65 +456,218 @@ def test_pim_autorp_announce_cli(request):
if tgen.routers_have_failure():
pytest.skip("skipped because of router(s) failure")
- step("Add AutoRP announcement configuration to r1")
- r1 = tgen.routers()["r1"]
- r1.vtysh_cmd(
+ # Switch R3 candidate to prefix list with different groups
+ step("Change R3 candidate to a prefix list")
+ tgen.routers()["r3"].vtysh_cmd(
"""
conf
+ ip prefix-list MYLIST permit 225.0.0.0/8
+ ip prefix-list MYLIST permit 226.0.0.0/8
router pim
- autorp announce holdtime 90
- autorp announce interval 120
- autorp announce scope 5
- autorp announce 10.2.3.4 225.0.0.0/24
-"""
+ autorp announce 10.0.1.3 group-list MYLIST
+ """
)
- expected = {
- "discoveryEnabled": True,
- "announce": {
- "scope": 5,
- "interval": 120,
- "holdtime": 90,
- "rpList": [
- {"rpAddress": "10.2.3.4", "group": "225.0.0.0/24", "prefixList": ""}
- ],
- },
- }
-
- test_func = partial(
- topotest.router_json_cmp, r1, "show ip pim autorp json", expected
+    # Now that R3 no longer conflicts, we should see both RPs
+ step("Verify rp-info of both candidate RP's")
+ expected = json.loads(
+ """
+ {
+ "10.0.3.4":[
+ {
+ "rpAddress":"10.0.3.4",
+ "group":"224.0.1.0/24",
+ "source":"Static"
+ }
+ ],
+ "10.0.0.2":[
+ {
+ "rpAddress":"10.0.0.2",
+ "group":"224.0.0.0/4",
+ "source":"AutoRP"
+ }
+ ],
+ "10.0.1.3":[
+ {
+ "rpAddress":"10.0.1.3",
+ "prefixList":"__AUTORP_10.0.1.3__",
+ "source":"AutoRP"
+ }
+ ]
+ }"""
+ )
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim rp-info json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct rp-info".format(rtr)
+
+ step("Verify AutoRP discovery RP's")
+ expected = json.loads(
+ """
+ {
+ "discovery":{
+ "enabled": true,
+ "rpList":{
+ "10.0.0.2":{
+ "rpAddress":"10.0.0.2",
+ "holdtime":5,
+ "groupRanges":[
+ {
+ "negative":false,
+ "prefix":"224.0.0.0/4"
+ }
+ ]
+ },
+ "10.0.1.3":{
+ "rpAddress":"10.0.1.3",
+ "holdtime":5,
+ "groupRanges":[
+ {
+ "negative":false,
+ "prefix":"225.0.0.0/8"
+ },
+ {
+ "negative":false,
+ "prefix":"226.0.0.0/8"
+ }
+ ]
+ }
+ }
+ }
+ }"""
)
- _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
- assertmsg = '"{}" JSON output mismatches'.format(r1.name)
- assert result is None, assertmsg
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim autorp json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct autorp configuration".format(
+ rtr
+ )
+
+
+def test_pim_autorp_discovery_neg_prefixes(request):
+ "Test PIM AutoRP Discovery with negative prefixes"
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
- r1.vtysh_cmd(
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Add negative prefixes to the R3 candidate prefix list
+ step("Change R3 candidate prefix list to include overlapping negative prefixes")
+ tgen.routers()["r3"].vtysh_cmd(
"""
conf
- router pim
- autorp announce 10.2.3.4 group-list ListA
-"""
+ ip prefix-list MYLIST deny 225.1.0.0/16
+ ip prefix-list MYLIST deny 226.1.0.0/16
+ """
)
- expected = {
- "discoveryEnabled": True,
- "announce": {
- "scope": 5,
- "interval": 120,
- "holdtime": 90,
- "rpList": [{"rpAddress": "10.2.3.4", "group": "", "prefixList": "ListA"}],
- },
- }
- test_func = partial(
- topotest.router_json_cmp, r1, "show ip pim autorp json", expected
+ step("Verify rp-info stays the same")
+ expected = json.loads(
+ """
+ {
+ "10.0.3.4":[
+ {
+ "rpAddress":"10.0.3.4",
+ "group":"224.0.1.0/24",
+ "source":"Static"
+ }
+ ],
+ "10.0.0.2":[
+ {
+ "rpAddress":"10.0.0.2",
+ "group":"224.0.0.0/4",
+ "source":"AutoRP"
+ }
+ ],
+ "10.0.1.3":[
+ {
+ "rpAddress":"10.0.1.3",
+ "prefixList":"__AUTORP_10.0.1.3__",
+ "source":"AutoRP"
+ }
+ ]
+ }"""
)
- _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
- assertmsg = '"{}" JSON output mismatches'.format(r1.name)
- assert result is None, assertmsg
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim rp-info json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct rp-info".format(rtr)
+
+ step("Verify AutoRP discovery RP's")
+ expected = json.loads(
+ """
+ {
+ "discovery":{
+ "enabled": true,
+ "rpList":{
+ "10.0.0.2":{
+ "rpAddress":"10.0.0.2",
+ "holdtime":5,
+ "groupRanges":[
+ {
+ "negative":false,
+ "prefix":"224.0.0.0/4"
+ }
+ ]
+ },
+ "10.0.1.3":{
+ "rpAddress":"10.0.1.3",
+ "holdtime":5,
+ "groupRanges":[
+ {
+ "negative":false,
+ "prefix":"225.0.0.0/8"
+ },
+ {
+ "negative":false,
+ "prefix":"226.0.0.0/8"
+ },
+ {
+ "negative":true,
+ "prefix":"225.1.0.0/16"
+ },
+ {
+ "negative":true,
+ "prefix":"226.1.0.0/16"
+ }
+ ]
+ }
+ }
+ }
+ }"""
+ )
+ for rtr in ["r1", "r2", "r3", "r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim autorp json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct autorp configuration".format(
+ rtr
+ )
-def test_pim_autorp_announce_group(request):
- "Test PIM AutoRP Announcement with a single group"
+def test_pim_autorp_discovery_static(request):
+ "Test PIM AutoRP Discovery with Static RP"
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
@@ -308,23 +675,60 @@ def test_pim_autorp_announce_group(request):
if tgen.routers_have_failure():
pytest.skip("skipped because of router(s) failure")
- step("Add candidate RP configuration to r1")
- rnode = tgen.routers()["r1"]
- rnode.cmd(
- "vtysh -c 'conf t' -c 'router pim' -c 'send-rp-announce 10.10.76.1 224.0.0.0/4'"
+ # Add in a static RP with a specific range and make sure both are used
+ step("Add static RP configuration to r4")
+ tgen.routers()["r4"].vtysh_cmd(
+ """
+ conf t
+ router pim
+ rp 10.0.2.2 239.0.0.0/24
+ """
)
- step("Verify Announcement sent data")
- # TODO: Verify AutoRP mapping agent receives candidate RP announcement
- # Mapping agent is not yet implemented
- # sleep(10)
- step("Change AutoRP Announcement packet parameters")
- rnode.cmd(
- "vtysh -c 'conf t' -c 'router pim' -c 'send-rp-announce scope 8 interval 10 holdtime 60'"
+
+ step("Verify static rp-info from r4")
+ expected = json.loads(
+ """
+ {
+ "10.0.3.4":[
+ {
+ "rpAddress":"10.0.3.4",
+ "group":"224.0.1.0/24",
+ "source":"Static"
+ }
+ ],
+ "10.0.0.2":[
+ {
+ "rpAddress":"10.0.0.2",
+ "group":"224.0.0.0/4",
+ "source":"AutoRP"
+ }
+ ],
+ "10.0.1.3":[
+ {
+ "rpAddress":"10.0.1.3",
+ "prefixList":"__AUTORP_10.0.1.3__",
+ "source":"AutoRP"
+ }
+ ],
+ "10.0.2.2":[
+ {
+ "rpAddress":"10.0.2.2",
+ "group":"239.0.0.0/24",
+ "source":"Static"
+ }
+ ]
+ }"""
)
- step("Verify Announcement sent data")
- # TODO: Verify AutoRP mapping agent receives updated candidate RP announcement
- # Mapping agent is not yet implemented
- # sleep(10)
+
+ for rtr in ["r4"]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[rtr],
+ "show ip pim rp-info json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None)
+ assert result is None, "{} does not have correct rp-info".format(rtr)
def test_memory_leak():
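Annotation: a note on the behavior these tests exercise: when two candidate RPs announce the same group range, the mapping agent keeps only the candidate with the highest RP address, which is why 10.0.1.3 displaces 10.0.0.2 until r3 switches to disjoint prefix-list ranges. A toy model of that tie-break, stated as an assumption drawn from the test expectations, not from pimd code:

    import ipaddress

    def elect_rp(candidates):
        # AutoRP mapping-agent tie-break for an identical group range:
        # the highest candidate RP address wins (assumed behavior).
        return max(candidates, key=lambda rp: ipaddress.ip_address(rp))

    assert elect_rp(["10.0.0.2", "10.0.1.3"]) == "10.0.1.3"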
diff --git a/tests/topotests/pim_basic/test_pim.py b/tests/topotests/pim_basic/test_pim.py
index ce1abe42bb..74d5406970 100644
--- a/tests/topotests/pim_basic/test_pim.py
+++ b/tests/topotests/pim_basic/test_pim.py
@@ -132,14 +132,14 @@ def test_pim_send_mcast_stream():
# Let's establish a S,G stream from r2 -> r1
CWD = os.path.dirname(os.path.realpath(__file__))
r2.run(
- "{}/mcast-tx.py --ttl 5 --count 40 --interval 2 229.1.1.1 r2-eth0 > /tmp/bar".format(
- CWD
+ "{}/mcast-tx.py --ttl 5 --count 40 --interval 2 229.1.1.1 r2-eth0 > {}/r2/mcast_tx_output".format(
+ CWD, tgen.logdir
)
)
# And from r3 -> r1
r3.run(
- "{}/mcast-tx.py --ttl 5 --count 40 --interval 2 229.1.1.1 r3-eth0 > /tmp/bar".format(
- CWD
+ "{}/mcast-tx.py --ttl 5 --count 40 --interval 2 229.1.1.1 r3-eth0 > {}/r3/mcast_tx_output".format(
+ CWD, tgen.logdir
)
)
diff --git a/tests/topotests/pim_boundary_acl/r1/frr.conf b/tests/topotests/pim_boundary_acl/r1/frr.conf
new file mode 100644
index 0000000000..cc639b304b
--- /dev/null
+++ b/tests/topotests/pim_boundary_acl/r1/frr.conf
@@ -0,0 +1,39 @@
+hostname r1
+!
+!debug pim events
+!debug igmp events
+!debug igmp packets
+!
+ip prefix-list pim-oil-plist seq 10 deny 229.1.1.0/24
+ip prefix-list pim-oil-plist seq 20 permit any
+!
+access-list pim-acl seq 10 deny ip host 10.0.20.2 232.1.1.0 0.0.0.255
+access-list pim-acl seq 20 permit ip any any
+!
+interface r1-eth0
+ ip address 10.0.20.1/24
+ ip igmp
+ ip pim
+!
+interface r1-eth1
+ ip address 10.0.30.1/24
+ ip pim
+!
+interface r1-eth2
+ ip address 10.0.40.1/24
+ ip igmp
+ ip pim
+!
+interface lo
+ ip address 10.254.0.1/32
+ ip pim
+!
+router pim
+ rp 10.254.0.3
+ join-prune-interval 5
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 10.0.30.3 remote-as external
+ neighbor 10.0.30.3 timers 3 10
+ redistribute connected
diff --git a/tests/topotests/pim_boundary_acl/r2/frr.conf b/tests/topotests/pim_boundary_acl/r2/frr.conf
new file mode 100644
index 0000000000..10ace947b2
--- /dev/null
+++ b/tests/topotests/pim_boundary_acl/r2/frr.conf
@@ -0,0 +1,19 @@
+hostname r2
+!
+!debug pim events
+!debug igmp events
+!debug igmp packets
+!
+ip prefix-list pim-oil-plist seq 10 deny 229.1.1.0/24
+ip prefix-list pim-oil-plist seq 20 permit any
+!
+access-list pim-acl seq 10 deny ip host 10.0.20.2 232.1.1.0 0.0.0.255
+access-list pim-acl seq 20 permit ip any any
+!
+interface r2-eth0
+ ip address 10.0.20.2/24
+ ip pim
+!
+interface lo
+ ip address 10.254.0.2/32
+!
diff --git a/tests/topotests/pim_boundary_acl/r3/frr.conf b/tests/topotests/pim_boundary_acl/r3/frr.conf
new file mode 100644
index 0000000000..9720774266
--- /dev/null
+++ b/tests/topotests/pim_boundary_acl/r3/frr.conf
@@ -0,0 +1,13 @@
+hostname r3
+!
+!debug pim events
+!debug igmp events
+!debug igmp packets
+!
+interface r3-eth0
+ ip address 10.0.40.4/24
+ ip pim
+!
+interface lo
+ ip address 10.254.0.4/32
+!
diff --git a/tests/topotests/pim_boundary_acl/rp/frr.conf b/tests/topotests/pim_boundary_acl/rp/frr.conf
new file mode 100644
index 0000000000..f6eed23917
--- /dev/null
+++ b/tests/topotests/pim_boundary_acl/rp/frr.conf
@@ -0,0 +1,22 @@
+hostname rp
+!
+interface rp-eth0
+ ip address 10.0.30.3/24
+ ip pim
+!
+interface lo
+ ip address 10.254.0.3/32
+ ip pim
+!
+router pim
+ rp 10.254.0.3
+ join-prune-interval 5
+ register-accept-list ACCEPT
+!
+ip prefix-list ACCEPT seq 5 permit 10.0.20.0/24 le 32
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 10.0.30.1 remote-as external
+ neighbor 10.0.30.1 timers 3 10
+ redistribute connected \ No newline at end of file
diff --git a/tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py b/tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py
new file mode 100644
index 0000000000..1488e610c8
--- /dev/null
+++ b/tests/topotests/pim_boundary_acl/test_pim_boundary_acl.py
@@ -0,0 +1,523 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_pim_boundary_acl.py
+#
+# Copyright (c) 2024 Architecture Technology Corporation
+# Corey Siltala
+#
+
+"""
+test_pim_boundary_acl.py: Test multicast boundary commands (access-lists and prefix-lists)
+"""
+
+import os
+import sys
+import pytest
+import json
+from functools import partial
+
+pytestmark = [pytest.mark.pimd]
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+ASM_GROUP="229.1.1.1"
+SSM_GROUP="232.1.1.1"
+
+def build_topo(tgen):
+ "Build function"
+
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
+
+ tgen.add_router("rp")
+
+ # rp ------ r1 -------- r2
+ # \
+ # --------- r3
+ # r1 -> .1
+ # r2 -> .2
+ # rp -> .3
+ # r3 -> .4
+ # loopback network is 10.254.0.X/32
+ #
+ # r1 <- sw1 -> r2
+ # r1-eth0 <-> r2-eth0
+ # 10.0.20.0/24
+ sw = tgen.add_switch("sw1")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r2"])
+
+ # r1 <- sw2 -> rp
+ # r1-eth1 <-> rp-eth0
+ # 10.0.30.0/24
+ sw = tgen.add_switch("sw2")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["rp"])
+
+ # r1 <- sw3 -> r3
+ # r1-eth2 <-> r3-eth0
+ # 10.0.40.0/24
+ sw = tgen.add_switch("sw3")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ # For all registered routers, load the zebra configuration file
+ for rname, router in tgen.routers().items():
+ logger.info("Loading router %s" % rname)
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ # After loading the configurations, this function loads configured daemons.
+ tgen.start_router()
+ # tgen.mininet_cli()
+
+
+def teardown_module():
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+def test_pim_rp_setup():
+ "Ensure basic routing has come up and the rp has an outgoing interface"
+    # Ensure rp and r1 establish PIM neighborship and BGP has come up
+ # Finally ensure that the rp has an outgoing interface on r1
+ tgen = get_topogen()
+
+ r1 = tgen.gears["r1"]
+ expected = {
+ "10.254.0.3":[
+ {
+ "outboundInterface":"r1-eth1",
+ "group":"224.0.0.0/4",
+ "source":"Static"
+ }
+ ]
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip pim rp-info json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assertmsg = '"{}" JSON output mismatches'.format(r1.name)
+ assert result is None, assertmsg
+ # tgen.mininet_cli()
+
+
+def test_pim_asm_igmp_join_acl():
+ "Test ASM IGMP joins with prefix-list ACLs"
+ logger.info("Send IGMP joins from r2 to r1 with ACL enabled and disabled")
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r2 = tgen.gears["r2"]
+ r1 = tgen.gears["r1"]
+
+    # Initially, the only IGMP source is the router's own join to the AutoRP Discovery group (224.0.1.40)
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "224.0.1.40":"*",
+ "229.1.1.1":None
+ },
+ "r1-eth2":{
+ "name":"r1-eth2",
+ "224.0.1.40":"*",
+ "229.1.1.1":None
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected no IGMP sources other than for AutoRP Discovery"
+
+ # Send IGMP join from r2, check if r1 has IGMP source
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface {}
+ ip igmp join {}
+ """
+ ).format("r2-eth0", ASM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "229.1.1.1":{
+ "group":"229.1.1.1",
+ "sources":[
+ {
+ "source":"*",
+ "timer":"--:--",
+ "forwarded":False,
+ "uptime":"*"
+ }
+ ]
+ }
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be present but is absent"
+
+ # Test inbound boundary on r1
+ # Enable multicast boundary on r1, toggle IGMP join on r2
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {}
+ """
+ ).format(ASM_GROUP))
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ interface r1-eth0
+ ip multicast boundary oil pim-oil-plist
+ """
+ )
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ ip igmp join {}
+ """
+ ).format(ASM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "229.1.1.1":None
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be absent but is present"
+
+ # Test outbound boundary on r2
+ # Enable multicast boundary on r2, toggle IGMP join (test outbound)
+    # Note: json_cmp treats "*" as a wildcard, but here the source really is "*"
+ expected = {
+ "vrf":"default",
+ "r2-eth0":{
+ "name":"r2-eth0",
+ "groups":[
+ {
+ "source":"*",
+ "group":"229.1.1.1",
+ "primaryAddr":"10.0.20.2",
+ "sockFd":"*",
+ "upTime":"*"
+ }
+ ]
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r2, "show ip igmp join json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP join to be present but is absent"
+
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {}
+ ip multicast boundary oil pim-oil-plist
+ ip igmp join {}
+ """
+ ).format(ASM_GROUP, ASM_GROUP))
+ expected = {
+ "vrf":"default",
+ "r2-eth0":None
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r2, "show ip igmp join json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP join to be absent but is present"
+
+ # Cleanup
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {}
+ no ip multicast boundary oil pim-oil-plist
+ """
+ ).format(ASM_GROUP))
+
+
+def test_pim_ssm_igmp_join_acl():
+ "Test SSM IGMP joins with extended ACLs"
+ logger.info("Send IGMP joins from r2 to r1 with ACL enabled and disabled")
+
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r3 = tgen.gears["r3"]
+ r2 = tgen.gears["r2"]
+ r1 = tgen.gears["r1"]
+
+    # Initially, the only IGMP source is the router's own join to the AutoRP Discovery group (224.0.1.40)
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "224.0.1.40":"*",
+ "229.1.1.1":None,
+ "232.1.1.1":None
+ },
+ "r1-eth2":{
+ "name":"r1-eth2",
+ "224.0.1.40":"*",
+ "229.1.1.1":None,
+ "232.1.1.1":None
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", {}
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected no IGMP sources other than from AutoRP Discovery"
+
+ # Send IGMP join from r2, check if r1 has IGMP source
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "232.1.1.1":{
+ "group":"232.1.1.1",
+ "sources":[
+ {
+ "source":"10.0.20.2",
+ "timer":"*",
+ "forwarded":False,
+ "uptime":"*"
+ }
+ ]
+ }
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be present but is absent"
+
+ # Test inbound boundary on r1
+ # Enable multicast boundary on r1, toggle IGMP join on r2
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP))
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ interface r1-eth0
+ ip multicast boundary pim-acl
+ """
+ )
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "232.1.1.1":None
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be absent but is present"
+
+ # Add lower, more-specific permit rule to access-list
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP))
+ r1.vtysh_cmd((
+ """
+ configure terminal
+ access-list pim-acl seq 5 permit ip host 10.0.20.2 {} 0.0.0.128
+ """
+ ).format(SSM_GROUP))
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "232.1.1.1":{
+ "group":"232.1.1.1",
+ "sources":[
+ {
+ "source":"10.0.20.2",
+ "timer":"*",
+ "forwarded":False,
+ "uptime":"*"
+ }
+ ]
+ }
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be present but is absent"
+
+ # Test outbound boundary on r2
+ # Enable multicast boundary on r2, toggle IGMP join (test outbound)
+ expected = {
+ "vrf":"default",
+ "r2-eth0":{
+ "name":"r2-eth0",
+ "groups":[
+ {
+ "source":"10.0.20.2",
+ "group":"232.1.1.1",
+ "primaryAddr":"10.0.20.2",
+ "sockFd":"*",
+ "upTime":"*"
+ }
+ ]
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r2, "show ip igmp join json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP join to be present but is absent"
+
+ # Enable boundary ACL, check join is absent
+ r2.vtysh_cmd((
+ """
+ configure terminal
+ interface r2-eth0
+ no ip igmp join {} 10.0.20.2
+ ip multicast boundary pim-acl
+ ip igmp join {} 10.0.20.2
+ """
+ ).format(SSM_GROUP, SSM_GROUP))
+ expected = {
+ "vrf":"default",
+ "r2-eth0":None
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r2, "show ip igmp join json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP join to be absent but is present"
+    # Check sources on r1 again; the source should still be absent, because even
+    # though r1 permits it, r2 blocks the join outbound
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "232.1.1.1":None
+ },
+ "r1-eth2":{
+ "name":"r1-eth2",
+ "232.1.1.1":None
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be absent but is present"
+
+    # Send IGMP join from r3 with a different source; it should show up on r1
+    # (r1-eth2 has no multicast boundary configured, so this join is accepted)
+ r3.vtysh_cmd((
+ """
+ configure terminal
+ interface r3-eth0
+ ip igmp join {} 10.0.40.4
+ """
+ ).format(SSM_GROUP))
+ expected = {
+ "r1-eth0":{
+ "name":"r1-eth0",
+ "232.1.1.1":None
+ },
+ "r1-eth2":{
+ "name":"r1-eth2",
+ "232.1.1.1":{
+ "group":"232.1.1.1",
+ "sources":[
+ {
+ "source":"10.0.40.4",
+ "timer":"*",
+ "forwarded":False,
+ "uptime":"*"
+ }
+ ]
+ }
+ }
+ }
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip igmp sources json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "Expected IGMP source to be present but is absent"
+
+    # TODO: PIM join
+    # TODO: PIM-DM forwarding
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
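Annotation: the test above exercises both multicast boundary flavors: "ip multicast boundary oil <prefix-list>" filters ASM (*,G) joins by group, while "ip multicast boundary <access-list>" filters (S,G) state with an extended ACL matching source and group. A condensed sketch of the two forms, restated from commands that appear verbatim in the test (interface and list names taken from the configs above):

    # Prefix-list boundary: blocks (*,G) joins for 229.1.1.0/24 on r1-eth0.
    r1.vtysh_cmd(
        """
        configure terminal
        interface r1-eth0
        ip multicast boundary oil pim-oil-plist
        """
    )
    # Extended-ACL boundary: blocks (S,G) joins from host 10.0.20.2 to
    # 232.1.1.0/24 on the same interface.
    r1.vtysh_cmd(
        """
        configure terminal
        interface r1-eth0
        ip multicast boundary pim-acl
        """
    )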
diff --git a/tests/topotests/pim_cand_rp_bsr/r1/frr.conf b/tests/topotests/pim_cand_rp_bsr/r1/frr.conf
index 899e9c0684..badcb8307b 100644
--- a/tests/topotests/pim_cand_rp_bsr/r1/frr.conf
+++ b/tests/topotests/pim_cand_rp_bsr/r1/frr.conf
@@ -1,7 +1,5 @@
!
hostname r1
-password zebra
-log file /tmp/r1-frr.log
!
!debug pim packet
!debug pim bsm
diff --git a/tests/topotests/pim_cand_rp_bsr/r2/frr.conf b/tests/topotests/pim_cand_rp_bsr/r2/frr.conf
index 85af461d5e..65926688aa 100644
--- a/tests/topotests/pim_cand_rp_bsr/r2/frr.conf
+++ b/tests/topotests/pim_cand_rp_bsr/r2/frr.conf
@@ -1,7 +1,5 @@
!
hostname r2
-password zebra
-log file /tmp/r2-frr.log
!
!debug pim packet
!debug pim bsm
diff --git a/tests/topotests/pim_cand_rp_bsr/r3/frr.conf b/tests/topotests/pim_cand_rp_bsr/r3/frr.conf
index 022c44ea58..eae90c987c 100644
--- a/tests/topotests/pim_cand_rp_bsr/r3/frr.conf
+++ b/tests/topotests/pim_cand_rp_bsr/r3/frr.conf
@@ -1,7 +1,5 @@
!
hostname r3
-password zebra
-log file /tmp/r3-frr.log
!
!debug pim packet
!debug pim bsm
diff --git a/tests/topotests/pim_cand_rp_bsr/r4/frr.conf b/tests/topotests/pim_cand_rp_bsr/r4/frr.conf
index 2d0a035f9a..276e879216 100644
--- a/tests/topotests/pim_cand_rp_bsr/r4/frr.conf
+++ b/tests/topotests/pim_cand_rp_bsr/r4/frr.conf
@@ -1,7 +1,5 @@
!
hostname r4
-password zebra
-log file /tmp/r4-frr.log
!
!
interface lo
diff --git a/tests/topotests/pim_cand_rp_bsr/r5/frr.conf b/tests/topotests/pim_cand_rp_bsr/r5/frr.conf
index 552e51f417..b86c626000 100644
--- a/tests/topotests/pim_cand_rp_bsr/r5/frr.conf
+++ b/tests/topotests/pim_cand_rp_bsr/r5/frr.conf
@@ -1,7 +1,5 @@
!
hostname r5
-password zebra
-log file /tmp/r5-frr.log
!
!
interface r5-eth0
diff --git a/tests/topotests/pim_cand_rp_bsr/r6/frr.conf b/tests/topotests/pim_cand_rp_bsr/r6/frr.conf
index 20955a12c7..1fd3582211 100644
--- a/tests/topotests/pim_cand_rp_bsr/r6/frr.conf
+++ b/tests/topotests/pim_cand_rp_bsr/r6/frr.conf
@@ -1,7 +1,5 @@
!
hostname r6
-password zebra
-log file /tmp/r6-frr.log
!
!
interface r6-eth0
diff --git a/tests/topotests/pim_mrib/__init__.py b/tests/topotests/pim_mrib/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/pim_mrib/__init__.py
diff --git a/tests/topotests/pim_mrib/r1/frr.conf b/tests/topotests/pim_mrib/r1/frr.conf
new file mode 100644
index 0000000000..28cf2b2c46
--- /dev/null
+++ b/tests/topotests/pim_mrib/r1/frr.conf
@@ -0,0 +1,28 @@
+!
+hostname r1
+password zebra
+log file /tmp/r1-frr.log
+!
+!debug pim nht
+!debug pim nht detail
+!debug pim nht rp
+!
+interface r1-eth0
+ ip address 10.0.0.1/24
+ ip igmp
+ ip pim
+!
+interface r1-eth1
+ ip address 10.0.1.1/24
+ ip igmp
+ ip pim
+!
+ip forwarding
+!
+ip route 10.0.2.0/24 10.0.0.2 50
+ip route 10.0.3.0/24 10.0.1.3 50
+!
+router pim
+ rpf-lookup-mode mrib-then-urib
+ rp 10.0.0.1 224.0.0.0/4
+! \ No newline at end of file
diff --git a/tests/topotests/pim_mrib/r2/frr.conf b/tests/topotests/pim_mrib/r2/frr.conf
new file mode 100644
index 0000000000..3e647f6795
--- /dev/null
+++ b/tests/topotests/pim_mrib/r2/frr.conf
@@ -0,0 +1,28 @@
+!
+hostname r2
+password zebra
+log file /tmp/r2-frr.log
+!
+!debug pim nht
+!debug pim nht detail
+!debug pim nht rp
+!
+interface r2-eth0
+ ip address 10.0.0.2/24
+ ip igmp
+ ip pim
+!
+interface r2-eth1
+ ip address 10.0.2.2/24
+ ip igmp
+ ip pim
+!
+ip forwarding
+!
+ip route 10.0.1.0/24 10.0.0.1 50
+ip route 10.0.3.0/24 10.0.2.4 50
+!
+router pim
+ rpf-lookup-mode mrib-then-urib
+ rp 10.0.0.1 224.0.0.0/4
+! \ No newline at end of file
diff --git a/tests/topotests/pim_mrib/r3/frr.conf b/tests/topotests/pim_mrib/r3/frr.conf
new file mode 100644
index 0000000000..9815484d02
--- /dev/null
+++ b/tests/topotests/pim_mrib/r3/frr.conf
@@ -0,0 +1,28 @@
+!
+hostname r3
+password zebra
+log file /tmp/r3-frr.log
+!
+!debug pim nht
+!debug pim nht detail
+!debug pim nht rp
+!
+interface r3-eth0
+ ip address 10.0.1.3/24
+ ip igmp
+ ip pim
+!
+interface r3-eth1
+ ip address 10.0.3.3/24
+ ip igmp
+ ip pim
+!
+ip forwarding
+!
+ip route 10.0.0.0/24 10.0.1.1 50
+ip route 10.0.2.0/24 10.0.3.4 50
+!
+router pim
+ rpf-lookup-mode mrib-then-urib
+ rp 10.0.0.1 224.0.0.0/4
+! \ No newline at end of file
diff --git a/tests/topotests/pim_mrib/r4/frr.conf b/tests/topotests/pim_mrib/r4/frr.conf
new file mode 100644
index 0000000000..8432a7a350
--- /dev/null
+++ b/tests/topotests/pim_mrib/r4/frr.conf
@@ -0,0 +1,29 @@
+!
+hostname r4
+password zebra
+log file /tmp/r4-frr.log
+!
+debug pim nht
+debug pim nht detail
+debug pim nht rp
+debug zebra rib detail
+!
+interface r4-eth0
+ ip address 10.0.2.4/24
+ ip igmp
+ ip pim
+!
+interface r4-eth1
+ ip address 10.0.3.4/24
+ ip igmp
+ ip pim
+!
+ip forwarding
+!
+ip route 10.0.0.0/24 10.0.2.2 50
+ip route 10.0.1.0/24 10.0.3.3 50
+!
+router pim
+ rpf-lookup-mode mrib-then-urib
+ rp 10.0.0.1 224.0.0.0/4
+! \ No newline at end of file
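Annotation: all four pim_mrib routers boot with "rpf-lookup-mode mrib-then-urib", /24 static unicast routes at distance 50, and the RP at 10.0.0.1. The test below then installs a static MRIB route on r4 with a shorter prefix (/16) but a lower distance (25) and flips the lookup mode to show which table wins; restated from the test:

    # Static MRIB override (verbatim from test_pim_mrib_override below):
    # /16 is shorter than the URIB /24, but distance 25 beats distance 50.
    tgen.routers()["r4"].vtysh_cmd(
        """
        conf term
        ip mroute 10.0.0.0/16 10.0.3.3 25
        """
    )
    # mrib-then-urib and lower-distance modes pick the MRIB route (RPF via
    # r4-eth1); longer-prefix mode picks the URIB /24 (RPF via r4-eth0).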
diff --git a/tests/topotests/pim_mrib/test_pim_mrib.py b/tests/topotests/pim_mrib/test_pim_mrib.py
new file mode 100644
index 0000000000..355c503e3b
--- /dev/null
+++ b/tests/topotests/pim_mrib/test_pim_mrib.py
@@ -0,0 +1,328 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_pim_mrib.py
+#
+# Copyright (c) 2024 ATCorp
+# Nathan Bahr
+#
+
+import os
+import sys
+import pytest
+from functools import partial
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, get_topogen
+from lib.topolog import logger
+from lib.pim import (
+ verify_pim_rp_info,
+)
+from lib.common_config import step, write_test_header
+
+"""
+test_pim_mrib.py: Test PIM MRIB overrides and RPF modes
+"""
+
+TOPOLOGY = """
+ Test PIM MRIB overrides and RPF modes
+
+ +---+---+ +---+---+
+ | | 10.0.0.0/24 | |
+ + R1 +----------------------+ R2 |
+ | | .1 .2 | |
+ +---+---+ r1-eth0 r2-eth0 +---+---+
+ .1 | r1-eth1 r2-eth1 | .2
+ | |
+ 10.0.1.0/24 | | 10.0.2.0/24
+ | |
+ .3 | r3-eth0 r4-eth0 | .4
+ +---+---+ r3-eth1 r4-eth1 +---+---+
+ | | .3 .4 | |
+ + R3 +----------------------+ R4 |
+ | | 10.0.3.0/24 | |
+ +---+---+ +---+---+
+"""
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# Required to instantiate the topology builder class.
+pytestmark = [pytest.mark.pimd]
+
+
+def build_topo(tgen):
+ '''Build function'''
+
+ # Create routers
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r3")
+ tgen.add_router("r4")
+
+ # Create topology links
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "r1-eth0", "r2-eth0")
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r3"], "r1-eth1", "r3-eth0")
+ tgen.add_link(tgen.gears["r2"], tgen.gears["r4"], "r2-eth1", "r4-eth0")
+ tgen.add_link(tgen.gears["r3"], tgen.gears["r4"], "r3-eth1", "r4-eth1")
+
+
+def setup_module(mod):
+ logger.info("PIM MRIB/RPF functionality:\n {}".format(TOPOLOGY))
+
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ logger.info("Loading router %s" % rname)
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ '''Teardown the pytest environment'''
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_pim_mrib_init(request):
+ '''Test startup in mrib-then-urib mode with no MRIB route present'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Verify rp-info using default URIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth0",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {}: Failed\n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_override(request):
+ '''Test MRIB override nexthop'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Install an MRIB route with a shorter prefix length but a lower distance.
+ # In mrib-then-urib mode, PIM should switch to this route
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ ip mroute 10.0.0.0/16 10.0.3.3 25
+ '''
+ )
+
+ step("Verify rp-info using MRIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth1",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {}: Failed\n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_prefix_mode(request):
+ '''Test longer prefix lookup mode'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Switch to longer-prefix mode; the lookup should move back to the URIB
+ # route: despite its higher distance, its /24 beats the MRIB /16
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ router pim
+ rpf-lookup-mode longer-prefix
+ '''
+ )
+
+ step("Verify rp-info using URIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth0",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {}: Failed\n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_dist_mode(request):
+ '''Test lower distance lookup mode'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Switch to lower-distance mode; the MRIB route (distance 25) should win again
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ router pim
+ rpf-lookup-mode lower-distance
+ '''
+ )
+
+ step("Verify rp-info using MRIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth1",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {}: Failed\n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_urib_mode(request):
+ '''Test URIB only lookup mode'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Switch to urib-only mode; the lookup should use the URIB route again
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ router pim
+ rpf-lookup-mode urib-only
+ '''
+ )
+
+ step("Verify rp-info using URIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth0",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {}: Failed\n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_mrib_mode(request):
+ '''Test MRIB only lookup mode'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Switch to mrib-only mode; the lookup should use the MRIB route again
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ router pim
+ rpf-lookup-mode mrib-only
+ '''
+ )
+
+ step("Verify rp-info using MRIB nexthop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "r4-eth1",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {}: Failed\n Error: {}".format(tc_name, result)
+
+def test_pim_mrib_mrib_mode_no_route(request):
+ '''Test MRIB only with no route'''
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of router(s) failure")
+
+ # Remove the MRIB route; in mrib-only mode there is no longer any path to the RP
+ tgen.routers()["r4"].vtysh_cmd(
+ '''
+ conf term
+ no ip mroute 10.0.0.0/16 10.0.3.3 25
+ '''
+ )
+
+ step("Verify rp-info with Unknown next hop")
+ result = verify_pim_rp_info(
+ tgen,
+ None,
+ "r4",
+ "224.0.0.0/4",
+ "Unknown",
+ "10.0.0.1",
+ "Static",
+ False,
+ "ipv4",
+ True,
+ )
+ assert result is True, "Testcase {}: Failed\n Error: {}".format(tc_name, result)
+
+def test_memory_leak():
+ '''Run the memory leak test and report results.'''
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
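The five mode tests above differ only in the configured rpf-lookup-mode and the RPF interface they expect. A table-driven sketch of the same checks (a hypothetical helper, not part of the suite; the expectations are copied from the assertions above):

MODE_EXPECTATIONS = [
    ("longer-prefix", "r4-eth0"),   # URIB /24 beats MRIB /16
    ("lower-distance", "r4-eth1"),  # MRIB distance 25 beats URIB distance 50
    ("urib-only", "r4-eth0"),       # only the unicast RIB is consulted
    ("mrib-only", "r4-eth1"),       # only the multicast RIB is consulted
]

def set_mode_and_verify(tgen, mode, expect_if):
    # Reconfigure r4, then re-check rp-info exactly as the tests above do.
    tgen.routers()["r4"].vtysh_cmd(
        "conf term\n router pim\n  rpf-lookup-mode {}".format(mode)
    )
    return verify_pim_rp_info(
        tgen, None, "r4", "224.0.0.0/4", expect_if,
        "10.0.0.1", "Static", False, "ipv4", True,
    )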
diff --git a/tests/topotests/simple_snmp_test/r1/bgpd.conf b/tests/topotests/simple_snmp_test/r1/bgpd.conf
index 00d1e17670..bcdf1c4f7e 100644
--- a/tests/topotests/simple_snmp_test/r1/bgpd.conf
+++ b/tests/topotests/simple_snmp_test/r1/bgpd.conf
@@ -1,4 +1,3 @@
-log file /tmp/bgpd.log debugging
!
router bgp 100
bgp router-id 1.1.1.1
diff --git a/tests/topotests/simple_snmp_test/r1/isisd.conf b/tests/topotests/simple_snmp_test/r1/isisd.conf
index 435abde330..c53d2509e2 100644
--- a/tests/topotests/simple_snmp_test/r1/isisd.conf
+++ b/tests/topotests/simple_snmp_test/r1/isisd.conf
@@ -3,6 +3,8 @@ log stdout debugging
! debug isis route-events
! debug isis events
!
+agentx
+!
interface r1-eth0
ip router isis ISIS1
ipv6 router isis ISIS1
diff --git a/tests/topotests/simple_snmp_test/r1/ospf6d.conf b/tests/topotests/simple_snmp_test/r1/ospf6d.conf
new file mode 100644
index 0000000000..e81151710b
--- /dev/null
+++ b/tests/topotests/simple_snmp_test/r1/ospf6d.conf
@@ -0,0 +1,12 @@
+agentx
+
+int r1-eth0
+ ipv6 ospf6 area 0.0.0.0
+
+int r1-eth1
+ ipv6 ospf6 area 0.0.0.0
+int r1-eth2
+ ipv6 ospf6 area 0.0.0.0
+
+router ospf6
+ redistribute local \ No newline at end of file
diff --git a/tests/topotests/simple_snmp_test/r1/ospfd.conf b/tests/topotests/simple_snmp_test/r1/ospfd.conf
new file mode 100644
index 0000000000..cc0d9e52c2
--- /dev/null
+++ b/tests/topotests/simple_snmp_test/r1/ospfd.conf
@@ -0,0 +1,11 @@
+agentx
+
+int r1-eth0
+ ip ospf area 0.0.0.0
+int r1-eth1
+ ip ospf area 0.0.0.0
+int r1-eth2
+ ip ospf area 0.0.0.0
+
+router ospf
+ redistribute local \ No newline at end of file
diff --git a/tests/topotests/simple_snmp_test/r1/ripd.conf b/tests/topotests/simple_snmp_test/r1/ripd.conf
new file mode 100644
index 0000000000..71cdb058cf
--- /dev/null
+++ b/tests/topotests/simple_snmp_test/r1/ripd.conf
@@ -0,0 +1,8 @@
+!
+!
+router rip
+ network 0.0.0.0/0
+ redistribute local
+!
+agentx
+! \ No newline at end of file
diff --git a/tests/topotests/simple_snmp_test/r1/zebra.conf b/tests/topotests/simple_snmp_test/r1/zebra.conf
index 5281d0055d..6483a661ce 100644
--- a/tests/topotests/simple_snmp_test/r1/zebra.conf
+++ b/tests/topotests/simple_snmp_test/r1/zebra.conf
@@ -1,5 +1,7 @@
log file zebra.log
!
+agentx
+!
interface r1-eth0
ip address 192.168.12.12/24
ipv6 address 2000:1:1:12::12/64
@@ -18,5 +20,4 @@ interface lo
ipv6 address 2000:1:1:1::1/128
!
!
-!
line vty
diff --git a/tests/topotests/simple_snmp_test/test_simple_snmp.py b/tests/topotests/simple_snmp_test/test_simple_snmp.py
index 0387e29274..c74ffcc2db 100755
--- a/tests/topotests/simple_snmp_test/test_simple_snmp.py
+++ b/tests/topotests/simple_snmp_test/test_simple_snmp.py
@@ -24,7 +24,8 @@ sys.path.append(os.path.join(CWD, "../"))
# Import topogen and topotest helpers
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.snmptest import SnmpTester
-
+from time import sleep
+from lib.topolog import logger
pytestmark = [pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.snmp]
@@ -59,10 +60,14 @@ def setup_module(mod):
# For all registered routers, load the zebra configuration file
for rname, router in router_list.items():
router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, "{}/zebra.conf".format(rname)),
+ "-M snmp",
)
router.load_config(
- TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
+ TopoRouter.RD_ISIS,
+ os.path.join(CWD, "{}/isisd.conf".format(rname)),
+ "-M snmp",
)
router.load_config(
TopoRouter.RD_BGP,
@@ -70,6 +75,21 @@ def setup_module(mod):
"-M snmp",
)
router.load_config(
+ TopoRouter.RD_RIP,
+ os.path.join(CWD, "{}/ripd.conf".format(rname)),
+ "-M snmp",
+ )
+ router.load_config(
+ TopoRouter.RD_OSPF,
+ os.path.join(CWD, "{}/ospfd.conf".format(rname)),
+ "-M snmp",
+ )
+ router.load_config(
+ TopoRouter.RD_OSPF6,
+ os.path.join(CWD, "{}/ospf6d.conf".format(rname)),
+ "-M snmp",
+ )
+ router.load_config(
TopoRouter.RD_SNMP,
os.path.join(CWD, "{}/snmpd.conf".format(rname)),
"-Le -Ivacm_conf,usmConf,iquery -V -DAgentX,trap",
@@ -77,6 +97,16 @@ def setup_module(mod):
# After loading the configurations, this function loads configured daemons.
tgen.start_router()
+ # Why this sleep? Running zebra with snmp creates a chicken-and-egg
+ # problem with snmpd: snmpd is started with the ip addresses zebra
+ # installs, and even with the 2 second delay topotest.py applies
+ # before starting snmpd after zebra, snmpd may not be ready to
+ # listen yet. So if we want to test anything in zebra over SNMP we
+ # must wait for the connection to form. There is currently no good
+ # way to probe whether zebra is up and serving snmp at this point,
+ # so a fixed sleep will have to do (a polling alternative is
+ # sketched after this file's diff).
+ sleep(17)
def teardown_module():
@@ -103,6 +133,22 @@ def test_r1_bgp_version():
assert r1_snmp.test_oid_walk("bgpVersion", ["10"])
assert r1_snmp.test_oid_walk("bgpVersion", ["10"], ["0"])
+ assert r1_snmp.test_oid(
+ "IP-FORWARD-MIB::ipForwardDest.192.168.12.0", "192.168.12.0"
+ )
+
+ assert r1_snmp.test_oid("ISIS-MIB::isisSysVersion", "one(1)")
+ # rip is not auto-loading agentx from mgmtd
+ # assert r1_snmp.test_oid("RIPv2-MIB::rip2GlobalQueries", "0")
+
+ assert r1_snmp.test_oid("OSPF-MIB::ospfVersionNumber", "version2(2)")
+ assert r1_snmp.test_oid("OSPFV3-MIB::ospfv3VersionNumber", "version3(3)")
+
+ # Let's just dump everything and make sure we get some additional test
+ # coverage
+ logger.info("Let's walk everything")
+ logger.info(r1_snmp.walk(".1", raw=True))
+
def test_memory_leak():
"Run the memory leak test and report results."
diff --git a/tests/topotests/srv6_static_route/test_srv6_route.py b/tests/topotests/srv6_static_route/test_srv6_route.py
index f23e199d4a..e26775daf7 100755
--- a/tests/topotests/srv6_static_route/test_srv6_route.py
+++ b/tests/topotests/srv6_static_route/test_srv6_route.py
@@ -27,7 +27,7 @@ from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-pytestmark = [pytest.mark.bgpd, pytest.mark.sharpd]
+pytestmark = [pytest.mark.staticd]
def open_json_file(filename):
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf
index 9d28957d99..ccfec19e9b 100644
--- a/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf
+++ b/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf
@@ -1,5 +1,6 @@
!
router bgp 65500
+ timers bgp 3 9
bgp router-id 192.0.2.1
neighbor 192.0.2.3 remote-as 65500
neighbor 192.0.2.3 update-source lo
@@ -7,6 +8,7 @@ router bgp 65500
neighbor 192.0.2.7 ttl-security hops 10
neighbor 192.0.2.7 disable-connected-check
neighbor 192.0.2.7 update-source lo
+ neighbor 192.0.2.7 timers connect 5
!
address-family ipv4 unicast
network 192.0.2.1/32
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf
index 46d2c9a01d..e02e7a4b29 100644
--- a/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf
+++ b/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf
@@ -1,4 +1,5 @@
router bgp 65500
+ timers bgp 3 9
bgp router-id 192.0.2.2
neighbor 192.0.2.1 remote-as 65500
neighbor 192.0.2.1 update-source lo
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf
index 060777e7fe..f2b22d7b38 100644
--- a/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf
+++ b/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf
@@ -1,4 +1,5 @@
router bgp 65500
+ timers bgp 3 9
bgp router-id 192.0.2.3
neighbor 192.0.2.1 remote-as 65500
neighbor 192.0.2.1 update-source lo
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf
index dc052da863..d0f2f468bf 100644
--- a/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf
+++ b/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf
@@ -1,5 +1,6 @@
!
router bgp 65500
+ timers bgp 3 9
bgp router-id 192.0.2.4
neighbor 192.0.2.1 remote-as 65500
neighbor 192.0.2.1 ttl-security hops 10
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf
index 1c73154e27..e2401eb1f9 100644
--- a/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf
+++ b/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf
@@ -1,4 +1,5 @@
router bgp 65500
+ timers bgp 3 9
bgp router-id 192.0.2.5
neighbor 192.0.2.3 remote-as 65500
neighbor 192.0.2.3 update-source lo
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf
index eeda9d9cfa..325124e9f8 100644
--- a/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf
+++ b/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf
@@ -1,10 +1,12 @@
!
router bgp 65500
+ timers bgp 3 9
bgp router-id 192.0.2.7
neighbor 192.0.2.1 remote-as 65500
neighbor 192.0.2.1 ttl-security hops 10
neighbor 192.0.2.1 disable-connected-check
neighbor 192.0.2.1 update-source lo
+ neighbor 192.0.2.1 timers connect 5
neighbor 192.0.2.5 remote-as 65500
neighbor 192.0.2.5 update-source lo
!
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py b/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py
index 984ff3c185..e42070b4d6 100644
--- a/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py
+++ b/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py
@@ -156,16 +156,17 @@ def test_zebra_fec_nexthop_resolution_bgp():
def _check_bgp_session():
r1 = tgen.gears["r1"]
- tgen.gears["r3"].vtysh_cmd("config \n no mpls fec nexthop-resolution \n end")
- tgen.gears["r3"].vtysh_cmd("config \n mpls fec nexthop-resolution \n end")
- tgen.gears["r5"].vtysh_cmd("config \n no mpls fec nexthop-resolution \n end")
- tgen.gears["r5"].vtysh_cmd("config \n mpls fec nexthop-resolution \n end")
output = json.loads(r1.vtysh_cmd("show bgp summary json"))
if output["ipv4Unicast"]["peers"]["192.0.2.7"]["state"] == "Established":
return None
return False
+ tgen.gears["r3"].vtysh_cmd("config \n no mpls fec nexthop-resolution \n end")
+ tgen.gears["r3"].vtysh_cmd("config \n mpls fec nexthop-resolution \n end")
+ tgen.gears["r5"].vtysh_cmd("config \n no mpls fec nexthop-resolution \n end")
+ tgen.gears["r5"].vtysh_cmd("config \n mpls fec nexthop-resolution \n end")
+
test_func1 = functools.partial(_check_bgp_session)
_, result1 = topotest.run_and_expect(test_func1, None, count=60, wait=0.5)
assert result1 is None, "Failed to verify the fec_nexthop_resolution: bgp session"
diff --git a/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_down.json b/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_down.json
new file mode 100644
index 0000000000..50871ae038
--- /dev/null
+++ b/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_down.json
@@ -0,0 +1,20 @@
+{
+ "5.5.6.7/32":[
+ {
+ "prefix":"5.5.6.7/32",
+ "prefixLen":32,
+ "protocol":"kernel",
+ "vrfName":"default",
+ "internalFlags":0,
+ "internalNextHopNum":1,
+ "internalNextHopActiveNum":0,
+ "nexthops":[
+ {
+ "flags":0,
+ "interfaceName":"r1-eth2"
+ }
+ ]
+
+ }
+ ]
+}
diff --git a/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_up.json b/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_up.json
new file mode 100644
index 0000000000..d0ab2fa187
--- /dev/null
+++ b/tests/topotests/zebra_multiple_connected/r1/ip_route_kernel_interface_up.json
@@ -0,0 +1,21 @@
+{
+ "5.5.6.7/32":[
+ {
+ "prefix":"5.5.6.7/32",
+ "prefixLen":32,
+ "protocol":"kernel",
+ "vrfName":"default",
+ "internalFlags":8,
+ "internalNextHopNum":1,
+ "internalNextHopActiveNum":1,
+ "nexthops":[
+ {
+ "flags":3,
+ "fib":true,
+ "interfaceName":"r1-eth2",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py b/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py
index eda8c88706..89bc6cf8e0 100644
--- a/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py
+++ b/tests/topotests/zebra_multiple_connected/test_zebra_multiple_connected.py
@@ -65,6 +65,9 @@ def build_topo(tgen):
switch.add_link(tgen.gears["r2"])
switch.add_link(tgen.gears["r3"])
+ # Create a p2p connection between r1 and r2
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r2"])
+
#####################################################
##
@@ -222,6 +225,50 @@ def test_zebra_kernel_route_blackhole_add():
result, _ = topotest.run_and_expect(test_func, None, count=20, wait=1)
assert result, "Blackhole Route should have not been removed\n{}".format(_)
+def test_zebra_kernel_route_interface_linkdown():
+ "Test that a kernel routes should be affected by interface change"
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r1"]
+ router.run("ip route add 5.5.6.7/32 via 10.0.1.66 dev r1-eth2")
+
+ kernel = "{}/{}/ip_route_kernel_interface_up.json".format(CWD, router.name)
+ expected = json.loads(open(kernel).read())
+
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip route 5.5.6.7/32 json", expected
+ )
+ result, _ = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result, "Kernel Route should be selected:\n{}".format(_)
+
+ # link down
+ router2 = tgen.gears["r2"]
+ router2.run("ip link set dev r2-eth2 down")
+
+ kernel = "{}/{}/ip_route_kernel_interface_down.json".format(CWD, router.name)
+ expected = json.loads(open(kernel).read())
+
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip route 5.5.6.7/32 json", expected
+ )
+ result, _ = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result, "Kernel Route should not be selected:\n{}".format(_)
+
+ # link up
+ router2 = tgen.gears["r2"]
+ router2.run("ip link set dev r2-eth2 up")
+
+ kernel = "{}/{}/ip_route_kernel_interface_up.json".format(CWD, router.name)
+ expected = json.loads(open(kernel).read())
+
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip route 5.5.6.7/32 json", expected
+ )
+ result, _ = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result, "Kernel Route should be selected:\n{}".format(_)
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
diff --git a/tests/topotests/zebra_rib/r1/frr-import.conf b/tests/topotests/zebra_rib/r1/frr-import.conf
index d07433144f..687843be0c 100644
--- a/tests/topotests/zebra_rib/r1/frr-import.conf
+++ b/tests/topotests/zebra_rib/r1/frr-import.conf
@@ -1,7 +1,6 @@
!
hostname r1
password zebra
-log file /tmp/r1-frr.log
!
interface r1-eth0
ip address 10.0.0.1/24
@@ -15,4 +14,4 @@ ip route 10.3.0.0/24 10.10.0.2 table 10
ip route 10.4.0.0/24 10.10.0.2 table 10
!
ip forwarding
-! \ No newline at end of file
+!
diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf
index b3889e8784..be831a1d34 100644
--- a/tools/etc/frr/support_bundle_commands.conf
+++ b/tools/etc/frr/support_bundle_commands.conf
@@ -56,6 +56,7 @@ show evpn next-hops vni all
show evpn rmac vni all
show evpn vni detail
+show bmp
CMD_LIST_END
# Zebra Support Bundle Command List
@@ -74,6 +75,10 @@ show ip fib
show ipv6 fib
show nexthop-group rib
show route-map
+show mpls table
+show mpls fec
+show mpls ldp
+show mpls pseudowires
show memory
show interface vrf all
show vrf
@@ -129,14 +134,25 @@ show ip ospf router-info pce
CMD_LIST_END
# RIP Support Bundle Command List
-# PROC_NAME:rip
-# CMD_LIST_START
-# CMD_LIST_END
+PROC_NAME:rip
+CMD_LIST_START
+show ip rip
+show ip rip status
+CMD_LIST_END
# ISIS Support Bundle Command List
-# PROC_NAME:isis
-# CMD_LIST_START
-# CMD_LIST_END
+PROC_NAME:isis
+CMD_LIST_START
+show isis database detail
+show isis interface detail
+show isis route
+show isis mpls ldp-sync
+show isis mpls-te database detail
+show isis mpls-te interface
+show isis mpls-te router
+show isis neighbor detail
+show isis topology
+CMD_LIST_END
# BFD Support Bundle Command List
# PROC_NAME:bfd
@@ -151,26 +167,51 @@ CMD_LIST_END
# PIM Support Bundle Command List
PROC_NAME:pim
CMD_LIST_START
-show ip multicast
-show ip pim interface
-show ip pim interface traffic
-show ip pim nexthop
-show ip pim neighbor
-show ip pim bsr
-show ip pim bsrp-info
-show ip pim bsm-database
-show ip pim rp-info
-show ip igmp groups
-show ip igmp interface
-show ip igmp join
+show ip igmp vrf all groups detail
+show ip igmp vrf all interface detail
+show ip igmp vrf all join
+show ip igmp vrf all join-group
+show ip igmp vrf all proxy
show ip igmp sources
+show ip igmp vrf all static-group
show ip igmp statistics
-show ip pim upstream
-show ip mroute
-show ip pim join
-show ip pim state
+
+show ip mroute vrf all
+show ip multicast vrf all
+
+show ip msdp vrf all mesh-group
+show ip msdp vrf all peer
+show ip msdp vrf all sa detail
+
+show ip pim vrf all autorp
+show ip pim bsm-database
+show ip pim bsr
+show ip pim bsr candidate-bsr
+show ip pim bsr candidate-rp
+show ip pim bsr candidate-rp-database
+show ip pim bsr groups
+show ip pim bsr rp-info
+show ip pim channel
+show ip pim group-type
+show ip pim vrf all interface detail
+show ip pim interface traffic
+show ip pim vrf all join
+show ip pim jp-agg
+show ip pim local-membership
+show ip pim mlag summary
+show ip pim mlag vrf all interface
+show ip pim vrf all mlag upstream
+show ip pim vrf all neighbor detail
+show ip pim nexthop
+show ip pim vrf all rp-info
+show ip pim vrf all rpf
+show ip pim secondary
+show ip pim vrf all state
show ip pim statistics
-show ip pim rpf
+show ip pim vrf all upstream
+show ip pim upstream-join-desired
+show ip pim upstream-rpf
+show ip pim vxlan-groups
CMD_LIST_END
# OSPFv3 Support Bundle Command List
@@ -216,32 +257,37 @@ CMD_LIST_END
#PIMv6 Support Bundle Command List
PROC_NAME:pim6
CMD_LIST_START
+show ipv6 mld vrf all interface detail
+show ipv6 mld vrf all statistics
+show ipv6 mld vrf all joins detail
+show ipv6 mld vrf all groups
+
+show ipv6 mroute vrf all
+show ipv6 multicast vrf all
+
+show ipv6 pim bsm-database
+show ipv6 pim bsr
+show ipv6 pim bsr candidate-bsr
+show ipv6 pim bsr candidate-rp
+show ipv6 pim bsr candidate-rp-database
+show ipv6 pim bsr groups
+show ipv6 pim bsr rp-info
show ipv6 pim channel
-show ipv6 pim interface
+show ipv6 pim vrf all interface detail
show ipv6 pim interface traffic
-show ipv6 pim join
-show ipv6 jp-agg
-show ipv6 pim nexthop
-show ipv6 pim nexthop-lookup
-show ipv6 pim neighbor
+show ipv6 pim vrf all join
+show ipv6 pim jp-agg
show ipv6 pim local-membership
-show ipv6 pim rp-info
-show ipv6 pim rpf
+show ipv6 pim nexthop
+show ipv6 pim vrf all neighbor detail
+show ipv6 pim vrf all rp-info
+show ipv6 pim vrf all rpf
show ipv6 pim secondary
-show ipv6 pim state
+show ipv6 pim vrf all state
show ipv6 pim statistics
-show ipv6 pim upstream
+show ipv6 pim vrf all upstream
show ipv6 pim upstream-join-desired
show ipv6 pim upstream-rpf
-show ipv6 mld interface
-show ipv6 mld statistics
-show ipv6 mld joins
-show ipv6 mld groups
-show ipv6 multicast
-show ipv6 mroute
-show ipv6 pim bsr
-show ipv6 pim bsrp-info
-show ipv6 pim bsm-databases
CMD_LIST_END
#MGMT Support Bundle Command List
diff --git a/tools/frr-reload.py b/tools/frr-reload.py
index 08a1f1e07e..dba50b3c53 100755
--- a/tools/frr-reload.py
+++ b/tools/frr-reload.py
@@ -255,7 +255,11 @@ ctx_keywords = {
},
"router rip": {},
"router ripng": {},
- "router isis ": {},
+ "router isis ": {
+ "segment-routing srv6": {
+ "node-msd": {},
+ },
+ },
"router openfabric ": {},
"router ospf": {},
"router ospf6": {},
@@ -266,7 +270,7 @@ ctx_keywords = {
"mpls ldp": {"address-family ": {"interface ": {}}},
"l2vpn ": {"member pseudowire ": {}},
"key chain ": {"key ": {}},
- "vrf ": {},
+ "vrf ": {"rpki": {}},
"interface ": {"link-params": {}},
"pseudowire ": {},
"segment-routing": {
@@ -275,7 +279,11 @@ ctx_keywords = {
"policy ": {"candidate-path ": {}},
"pcep": {"pcc": {}, "pce ": {}, "pce-config ": {}},
},
- "srv6": {"locators": {"locator ": {}}},
+ "srv6": {
+ "locators": {"locator ": {}},
+ "encapsulation": {},
+ "formats": {"format": {}},
+ },
},
"nexthop-group ": {},
"route-map ": {},
diff --git a/tools/frr_babeltrace.py b/tools/frr_babeltrace.py
index 9832568b37..7ace5d64b6 100755
--- a/tools/frr_babeltrace.py
+++ b/tools/frr_babeltrace.py
@@ -18,6 +18,7 @@ import sys
import babeltrace
+import socket
+
########################### common parsers - start ############################
def print_ip_addr(field_val):
"""
@@ -48,24 +49,28 @@ def print_mac(field_val):
"""
return ":".join("%02x" % fb for fb in field_val)
+
def print_net_ipv4_addr(field_val):
"""
pretty print ctf_integer_network ipv4
"""
return str(ipaddress.IPv4Address(field_val))
+
def print_esi(field_val):
"""
pretty print ethernet segment id, esi_t
"""
return ":".join("%02x" % fb for fb in field_val)
+
def get_field_list(event):
"""
only fetch fields added via the TP, skip metadata etc.
"""
return event.field_list_with_scope(babeltrace.CTFScope.EVENT_FIELDS)
+
def parse_event(event, field_parsers):
"""
Wild card event parser; doesn't make things any prettier
@@ -79,8 +84,31 @@ def parse_event(event, field_parsers):
else:
field_info[field] = event.get(field)
print(event.name, field_info)
+
+
+def print_family_str(field_val):
+ """
+ pretty print kernel family to string
+ """
+ if field_val == socket.AF_INET:
+ cmd_str = "ipv4"
+ elif field_val == socket.AF_INET6:
+ cmd_str = "ipv6"
+ elif field_val == socket.AF_BRIDGE:
+ cmd_str = "bridge"
+ elif field_val == 128: # RTNL_FAMILY_IPMR:
+ cmd_str = "ipv4MR"
+ elif field_val == 129: # RTNL_FAMILY_IP6MR:
+ cmd_str = "ipv6MR"
+ else:
+ cmd_str = "Invalid family"
+
+ return cmd_str
+
+
############################ common parsers - end #############################
+
############################ evpn parsers - start #############################
def parse_frr_bgp_evpn_mac_ip_zsend(event):
"""
@@ -92,13 +120,16 @@ def parse_frr_bgp_evpn_mac_ip_zsend(event):
ctf_integer_network_hex(unsigned int, vtep, vtep.s_addr)
ctf_array(unsigned char, esi, esi, sizeof(esi_t))
"""
- field_parsers = {"ip": print_ip_addr,
- "mac": print_mac,
- "esi": print_esi,
- "vtep": print_net_ipv4_addr}
+ field_parsers = {
+ "ip": print_ip_addr,
+ "mac": print_mac,
+ "esi": print_esi,
+ "vtep": print_net_ipv4_addr,
+ }
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_bum_vtep_zsend(event):
"""
bgp evpn bum-vtep parser; raw format -
@@ -110,6 +141,7 @@ def parse_frr_bgp_evpn_bum_vtep_zsend(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_mh_nh_rmac_send(event):
"""
bgp evpn nh-rmac parser; raw format -
@@ -119,17 +151,18 @@ def parse_frr_bgp_evpn_mh_nh_rmac_send(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_mh_local_es_add_zrecv(event):
"""
bgp evpn local-es parser; raw format -
ctf_array(unsigned char, esi, esi, sizeof(esi_t))
ctf_integer_network_hex(unsigned int, vtep, vtep.s_addr)
"""
- field_parsers = {"esi": print_esi,
- "vtep": print_net_ipv4_addr}
+ field_parsers = {"esi": print_esi, "vtep": print_net_ipv4_addr}
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_mh_local_es_del_zrecv(event):
"""
bgp evpn local-es parser; raw format -
@@ -139,6 +172,7 @@ def parse_frr_bgp_evpn_mh_local_es_del_zrecv(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_mh_local_es_evi_add_zrecv(event):
"""
bgp evpn local-es-evi parser; raw format -
@@ -148,6 +182,7 @@ def parse_frr_bgp_evpn_mh_local_es_evi_add_zrecv(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_mh_local_es_evi_del_zrecv(event):
"""
bgp evpn local-es-evi parser; raw format -
@@ -157,6 +192,7 @@ def parse_frr_bgp_evpn_mh_local_es_evi_del_zrecv(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_mh_es_evi_vtep_add(event):
"""
bgp evpn remote ead evi remote vtep add; raw format -
@@ -167,6 +203,7 @@ def parse_frr_bgp_evpn_mh_es_evi_vtep_add(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_mh_es_evi_vtep_del(event):
"""
bgp evpn remote ead evi remote vtep del; raw format -
@@ -177,6 +214,7 @@ def parse_frr_bgp_evpn_mh_es_evi_vtep_del(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_mh_local_ead_es_evi_route_upd(event):
"""
bgp evpn local ead evi vtep; raw format -
@@ -187,6 +225,7 @@ def parse_frr_bgp_evpn_mh_local_ead_es_evi_route_upd(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_mh_local_ead_es_evi_route_del(event):
"""
bgp evpn local ead evi vtep del; raw format -
@@ -197,6 +236,7 @@ def parse_frr_bgp_evpn_mh_local_ead_es_evi_route_del(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_local_vni_add_zrecv(event):
"""
bgp evpn local-vni parser; raw format -
@@ -208,6 +248,7 @@ def parse_frr_bgp_evpn_local_vni_add_zrecv(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_local_l3vni_add_zrecv(event):
"""
bgp evpn local-l3vni parser; raw format -
@@ -215,12 +256,15 @@ def parse_frr_bgp_evpn_local_l3vni_add_zrecv(event):
ctf_array(unsigned char, svi_rmac, svi_rmac, sizeof(struct ethaddr))
ctf_array(unsigned char, vrr_rmac, vrr_rmac, sizeof(struct ethaddr))
"""
- field_parsers = {"vtep": print_net_ipv4_addr,
- "svi_rmac": print_mac,
- "vrr_rmac": print_mac}
+ field_parsers = {
+ "vtep": print_net_ipv4_addr,
+ "svi_rmac": print_mac,
+ "vrr_rmac": print_mac,
+ }
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_local_macip_add_zrecv(event):
"""
bgp evpn local-mac-ip parser; raw format -
@@ -234,6 +278,7 @@ def parse_frr_bgp_evpn_local_macip_add_zrecv(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_local_macip_del_zrecv(event):
"""
bgp evpn local-mac-ip del parser; raw format -
@@ -245,16 +290,20 @@ def parse_frr_bgp_evpn_local_macip_del_zrecv(event):
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_advertise_type5(event):
"""
local originated type-5 route
"""
- field_parsers = {"ip": print_ip_addr,
- "rmac": print_mac,
- "vtep": print_net_ipv4_addr}
+ field_parsers = {
+ "ip": print_ip_addr,
+ "rmac": print_mac,
+ "vtep": print_net_ipv4_addr,
+ }
parse_event(event, field_parsers)
+
def parse_frr_bgp_evpn_withdraw_type5(event):
"""
local originated type-5 route withdraw
@@ -263,8 +312,10 @@ def parse_frr_bgp_evpn_withdraw_type5(event):
parse_event(event, field_parsers)
+
############################ evpn parsers - end *#############################
+
def main():
"""
FRR lttng trace output parser; babel trace plugin
@@ -319,5 +370,6 @@ def main():
else:
parse_event(event, {})
+
if __name__ == "__main__":
main()
diff --git a/tools/gen_northbound_callbacks.c b/tools/gen_northbound_callbacks.c
index 046dc9e99e..516743acab 100644
--- a/tools/gen_northbound_callbacks.c
+++ b/tools/gen_northbound_callbacks.c
@@ -11,6 +11,7 @@
#include <unistd.h>
+#include "darr.h"
#include "yang.h"
#include "northbound.h"
@@ -19,7 +20,7 @@ static bool static_cbs;
static void __attribute__((noreturn)) usage(int status)
{
extern const char *__progname;
- fprintf(stderr, "usage: %s [-h] [-s] [-p path] MODULE\n", __progname);
+ fprintf(stderr, "usage: %s [-h] [-s] [-p path]* MODULE\n", __progname);
exit(status);
}
@@ -408,7 +409,8 @@ static int generate_nb_nodes(const struct lysc_node *snode, void *arg)
int main(int argc, char *argv[])
{
- const char *search_path = NULL;
+ char **search_paths = NULL;
+ char **iter = NULL;
struct yang_module *module;
char module_name_underscores[64];
struct stat st;
@@ -433,7 +435,7 @@ int main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
- search_path = optarg;
+ *darr_append(search_paths) = darr_strdup(optarg);
break;
case 's':
static_cbs = true;
@@ -450,8 +452,11 @@ int main(int argc, char *argv[])
yang_init(false, true, false);
- if (search_path)
- ly_ctx_set_searchdir(ly_native_ctx, search_path);
+ darr_foreach_p (search_paths, iter) {
+ ly_ctx_set_searchdir(ly_native_ctx, *iter);
+ darr_free(*iter);
+ }
+ darr_free(search_paths);
/* Load all FRR native models to ensure all augmentations are loaded. */
yang_module_load_all();
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index 5a54c60c6b..c460dea70c 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -39,6 +39,7 @@
#include "frrstr.h"
#include "json.h"
#include "ferr.h"
+#include "sockopt.h"
DEFINE_MTYPE_STATIC(MVTYSH, VTYSH_CMD, "Vtysh cmd copy");
@@ -4690,9 +4691,8 @@ static int vtysh_connect(struct vtysh_client *vclient)
* Increasing the RECEIVE socket buffer size so that the socket can hold
* the data received from the other process.
*/
- ret = setsockopt(sock, SOL_SOCKET, SO_RCVBUF, (char *)&rcvbufsize,
- sizeof(rcvbufsize));
- if (ret < 0) {
+ ret = setsockopt_so_recvbuf(sock, rcvbufsize);
+ if (ret <= 0) {
#ifdef DEBUG
fprintf(stderr, "Cannot set socket %d rcv buffer size, %s\n",
sock, safe_strerror(errno));
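setsockopt_so_recvbuf() wraps the raw setsockopt call removed above; the <= 0 check suggests it returns the buffer size actually obtained rather than 0 on success (an assumption here, not verified against lib/sockopt.c). The underlying knob is plain SO_RCVBUF, e.g. in Python:

import socket

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Request a 1 MiB receive buffer; the kernel is free to clamp the value.
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1 << 20)
print(s.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))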
diff --git a/watchfrr/watchfrr.c b/watchfrr/watchfrr.c
index acc612c0a8..611a7872d0 100644
--- a/watchfrr/watchfrr.c
+++ b/watchfrr/watchfrr.c
@@ -44,7 +44,7 @@
#define DEFAULT_PERIOD 5
#define DEFAULT_TIMEOUT 90
-#define DEFAULT_RESTART_TIMEOUT 20
+#define DEFAULT_RESTART_TIMEOUT 90
#define DEFAULT_LOGLEVEL LOG_INFO
#define DEFAULT_MIN_RESTART 60
#define DEFAULT_MAX_RESTART 600
diff --git a/yang/frr-pim-rp.yang b/yang/frr-pim-rp.yang
index cbc6e87b80..5558b0888d 100644
--- a/yang/frr-pim-rp.yang
+++ b/yang/frr-pim-rp.yang
@@ -21,6 +21,10 @@ module frr-pim-rp {
prefix frr-route-types;
}
+ import frr-interface {
+ prefix "frr-interface";
+ }
+
organization
"FRRouting";
@@ -178,7 +182,7 @@ module frr-pim-rp {
leaf announce-interval {
type uint16;
description
- "The time between sending C-RP announcement packets.";
+ "The time between sending C-RP announcement packets (seconds).";
}
leaf announce-holdtime {
@@ -216,6 +220,57 @@ module frr-pim-rp {
}
}
} // candidate-rp-list
+
+ container mapping-agent {
+ leaf send-rp-discovery {
+ type boolean;
+ default false;
+ description
+ "Make this router an AutoRP mapping agent";
+ }
+
+ leaf discovery-scope {
+ type uint8;
+ default 31;
+ description
+ "The TTL of the discovery packet";
+ }
+
+ leaf discovery-interval {
+ type uint16 {
+ range "1 .. 65535";
+ }
+ default 60;
+ description
+ "The time between sending discovery advertisements (seconds)";
+ }
+
+ leaf discovery-holdtime {
+ type uint16 {
+ range "0 .. 65535";
+ }
+ default 180;
+ description
+ "The hold time in seconds advertised in the discovery packet.";
+ }
+
+ choice source-address-or-interface {
+ description "Source address to use for mapping agent operation";
+ default if-loopback;
+ leaf address {
+ type inet:ip-address;
+ }
+ leaf interface {
+ type frr-interface:interface-ref;
+ }
+ leaf if-loopback {
+ type empty;
+ }
+ leaf if-any {
+ type empty;
+ }
+ }
+ } // mapping-agent
} // auto-rp
} // auto-rp-container
diff --git a/yang/frr-pim.yang b/yang/frr-pim.yang
index 6a6c52185d..8dadf4fd7c 100644
--- a/yang/frr-pim.yang
+++ b/yang/frr-pim.yang
@@ -78,6 +78,51 @@ module frr-pim {
type string;
}
+ typedef access-list-ref {
+ type string;
+ }
+
+ /*
+ * Multicast RPF mode configurable type
+ */
+
+ typedef mcast-rpf-lookup-mode {
+ type enumeration {
+ enum "none" {
+ value 0;
+ description
+ "No mode set.";
+ }
+ enum "mrib-only" {
+ value 1;
+ description
+ "Lookup in unicast RIB only.";
+ }
+ enum "urib-only" {
+ value 2;
+ description
+ "Lookup in multicast RIB only.";
+ }
+ enum "mrib-then-urib" {
+ value 3;
+ description
+ "Try multicast RIB first, fall back to unicast RIB.";
+ }
+ enum "lower-distance" {
+ value 4;
+ description
+ "Lookup both unicast and mcast, use entry with lower distance.";
+ }
+ enum "longer-prefix" {
+ value 5;
+ description
+ "Lookup both unicast and mcast, use entry with longer prefix.";
+ }
+ }
+ description
+ "Multicast RPF lookup behavior";
+ }
+
/*
* Groupings
*/
@@ -157,20 +202,27 @@ module frr-pim {
description
"A grouping defining per address family pim global attributes";
+ leaf mcast-rpf-lookup {
+ type mcast-rpf-lookup-mode;
+ default "none";
+ description
+ "Multicast RPF lookup behavior.";
+ }
+
leaf ecmp {
type boolean;
default "false";
description
"Enable PIM ECMP.";
}
-
+
leaf ecmp-rebalance {
type boolean;
default "false";
description
"Enable PIM ECMP Rebalance.";
}
-
+
leaf keep-alive-timer {
type uint16 {
range "1..max";
@@ -179,7 +231,7 @@ module frr-pim {
description
"Keep alive Timer in seconds.";
}
-
+
leaf rp-keep-alive-timer {
type uint16 {
range "1..max";
@@ -250,6 +302,35 @@ module frr-pim {
container msdp {
description "Global MSDP configuration.";
uses msdp-timers;
+
+ leaf log-neighbor-events {
+ type boolean;
+ default false;
+ description
+ "Log all MSDP neighbor related events.";
+ }
+
+ leaf log-sa-events {
+ type boolean;
+ default false;
+ description
+ "Log all MSDP SA related events.";
+ }
+
+ leaf originator-id {
+ type inet:ip-address;
+ description
+ "Configure the RP address for the SAs.
+
+ By default the local system RP address will be used.";
+ }
+
+ leaf shutdown {
+ type boolean;
+ default false;
+ description
+ "Shutdown MSDP functionality.";
+ }
}
list msdp-mesh-groups {
@@ -316,6 +397,12 @@ module frr-pim {
}
uses msdp-authentication;
+
+ leaf sa-limit {
+ type uint32;
+ description
+ "Peer SA maximum limit.";
+ }
}
container mlag {
@@ -486,7 +573,13 @@ module frr-pim {
leaf multicast-boundary-oil {
type plist-ref;
description
- "Prefix-List to define multicast boundary";
+ "Prefix-List to define multicast boundary by group";
+ }
+
+ leaf multicast-boundary-acl {
+ type access-list-ref;
+ description
+ "Access-list to define multicast boundary by source and group";
}
list mroute {
diff --git a/yang/frr-zebra.yang b/yang/frr-zebra.yang
index f97a4cc129..a3c066c56c 100644
--- a/yang/frr-zebra.yang
+++ b/yang/frr-zebra.yang
@@ -157,47 +157,6 @@ module frr-zebra {
"Zebra interface type gre.";
}
- /*
- * Multicast RPF mode configurable type
- */
-
- typedef mcast-rpf-lookup-mode {
- type enumeration {
- enum "none" {
- value 0;
- description
- "No mode set.";
- }
- enum "mrib-only" {
- value 1;
- description
- "Lookup in unicast RIB only.";
- }
- enum "urib-only" {
- value 2;
- description
- "Lookup in multicast RIB only.";
- }
- enum "mrib-then-urib" {
- value 3;
- description
- "Try multicast RIB first, fall back to unicast RIB.";
- }
- enum "lower-distance" {
- value 4;
- description
- "Lookup both unicast and mcast, use entry with lower distance.";
- }
- enum "longer-prefix" {
- value 5;
- description
- "Lookup both unicast and mcast, use entry with longer prefix.";
- }
- }
- description
- "Multicast RPF lookup behavior";
- }
-
// End of ip6-route
/*
* VxLAN Network Identifier type
@@ -2883,12 +2842,6 @@ module frr-zebra {
container zebra {
description
"Data model for the Zebra daemon.";
- leaf mcast-rpf-lookup {
- type frr-zebra:mcast-rpf-lookup-mode;
- default "mrib-then-urib";
- description
- "Multicast RPF lookup behavior.";
- }
leaf ip-forwarding {
type boolean;
description
diff --git a/zebra/dpdk/zebra_dplane_dpdk.c b/zebra/dpdk/zebra_dplane_dpdk.c
index ae1a3743ce..0a898c1923 100644
--- a/zebra/dpdk/zebra_dplane_dpdk.c
+++ b/zebra/dpdk/zebra_dplane_dpdk.c
@@ -330,14 +330,11 @@ static void zd_dpdk_rule_update(struct zebra_dplane_ctx *ctx)
op = dplane_ctx_get_op(ctx);
- switch (op) {
- case DPLANE_OP_RULE_ADD:
+ if (op == DPLANE_OP_RULE_ADD) {
atomic_fetch_add_explicit(&dpdk_stat->rule_adds, 1,
memory_order_relaxed);
zd_dpdk_rule_add(ctx);
- break;
-
- case DPLANE_OP_RULE_UPDATE:
+ } else if (op == DPLANE_OP_RULE_UPDATE) {
/* delete old rule and install new one */
atomic_fetch_add_explicit(&dpdk_stat->rule_adds, 1,
memory_order_relaxed);
@@ -346,62 +343,12 @@ static void zd_dpdk_rule_update(struct zebra_dplane_ctx *ctx)
zd_dpdk_rule_del(ctx, dplane_ctx_rule_get_ifname(ctx),
in_ifindex, dp_flow_ptr);
zd_dpdk_rule_add(ctx);
- break;
-
- case DPLANE_OP_RULE_DELETE:
+ } else if (op == DPLANE_OP_RULE_DELETE) {
atomic_fetch_add_explicit(&dpdk_stat->rule_dels, 1,
memory_order_relaxed);
in_ifindex = dplane_ctx_get_ifindex(ctx);
dp_flow_ptr = dplane_ctx_rule_get_dp_flow_ptr(ctx);
- zd_dpdk_rule_del(ctx, dplane_ctx_rule_get_ifname(ctx),
- in_ifindex, dp_flow_ptr);
- break;
-
- case DPLANE_OP_NONE:
- case DPLANE_OP_ROUTE_INSTALL:
- case DPLANE_OP_ROUTE_UPDATE:
- case DPLANE_OP_ROUTE_DELETE:
- case DPLANE_OP_ROUTE_NOTIFY:
- case DPLANE_OP_NH_INSTALL:
- case DPLANE_OP_NH_UPDATE:
- case DPLANE_OP_NH_DELETE:
- case DPLANE_OP_LSP_INSTALL:
- case DPLANE_OP_LSP_UPDATE:
- case DPLANE_OP_LSP_DELETE:
- case DPLANE_OP_LSP_NOTIFY:
- case DPLANE_OP_PW_INSTALL:
- case DPLANE_OP_PW_UNINSTALL:
- case DPLANE_OP_SYS_ROUTE_ADD:
- case DPLANE_OP_SYS_ROUTE_DELETE:
- case DPLANE_OP_ADDR_INSTALL:
- case DPLANE_OP_ADDR_UNINSTALL:
- case DPLANE_OP_MAC_INSTALL:
- case DPLANE_OP_MAC_DELETE:
- case DPLANE_OP_NEIGH_INSTALL:
- case DPLANE_OP_NEIGH_UPDATE:
- case DPLANE_OP_NEIGH_DELETE:
- case DPLANE_OP_VTEP_ADD:
- case DPLANE_OP_VTEP_DELETE:
- case DPLANE_OP_NEIGH_DISCOVER:
- case DPLANE_OP_BR_PORT_UPDATE:
- case DPLANE_OP_IPTABLE_ADD:
- case DPLANE_OP_IPTABLE_DELETE:
- case DPLANE_OP_IPSET_ADD:
- case DPLANE_OP_IPSET_DELETE:
- case DPLANE_OP_IPSET_ENTRY_ADD:
- case DPLANE_OP_IPSET_ENTRY_DELETE:
- case DPLANE_OP_NEIGH_IP_INSTALL:
- case DPLANE_OP_NEIGH_IP_DELETE:
- case DPLANE_OP_NEIGH_TABLE_UPDATE:
- case DPLANE_OP_GRE_SET:
- case DPLANE_OP_INTF_ADDR_ADD:
- case DPLANE_OP_INTF_ADDR_DEL:
- case DPLANE_OP_INTF_NETCONFIG:
- case DPLANE_OP_INTF_INSTALL:
- case DPLANE_OP_INTF_UPDATE:
- case DPLANE_OP_INTF_DELETE:
- case DPLANE_OP_VLAN_INSTALL:
- break;
+ zd_dpdk_rule_del(ctx, dplane_ctx_rule_get_ifname(ctx), in_ifindex, dp_flow_ptr);
}
}
@@ -410,62 +357,13 @@ static void zd_dpdk_rule_update(struct zebra_dplane_ctx *ctx)
*/
static void zd_dpdk_process_update(struct zebra_dplane_ctx *ctx)
{
- switch (dplane_ctx_get_op(ctx)) {
+ enum dplane_op_e op;
- case DPLANE_OP_RULE_ADD:
- case DPLANE_OP_RULE_UPDATE:
- case DPLANE_OP_RULE_DELETE:
+ op = dplane_ctx_get_op(ctx);
+ if (op == DPLANE_OP_RULE_ADD || op == DPLANE_OP_RULE_UPDATE || op == DPLANE_OP_RULE_DELETE)
zd_dpdk_rule_update(ctx);
- break;
- case DPLANE_OP_NONE:
- case DPLANE_OP_ROUTE_INSTALL:
- case DPLANE_OP_ROUTE_UPDATE:
- case DPLANE_OP_ROUTE_DELETE:
- case DPLANE_OP_ROUTE_NOTIFY:
- case DPLANE_OP_NH_INSTALL:
- case DPLANE_OP_NH_UPDATE:
- case DPLANE_OP_NH_DELETE:
- case DPLANE_OP_LSP_INSTALL:
- case DPLANE_OP_LSP_UPDATE:
- case DPLANE_OP_LSP_DELETE:
- case DPLANE_OP_LSP_NOTIFY:
- case DPLANE_OP_PW_INSTALL:
- case DPLANE_OP_PW_UNINSTALL:
- case DPLANE_OP_SYS_ROUTE_ADD:
- case DPLANE_OP_SYS_ROUTE_DELETE:
- case DPLANE_OP_ADDR_INSTALL:
- case DPLANE_OP_ADDR_UNINSTALL:
- case DPLANE_OP_MAC_INSTALL:
- case DPLANE_OP_MAC_DELETE:
- case DPLANE_OP_NEIGH_INSTALL:
- case DPLANE_OP_NEIGH_UPDATE:
- case DPLANE_OP_NEIGH_DELETE:
- case DPLANE_OP_VTEP_ADD:
- case DPLANE_OP_VTEP_DELETE:
- case DPLANE_OP_NEIGH_DISCOVER:
- case DPLANE_OP_BR_PORT_UPDATE:
- case DPLANE_OP_IPTABLE_ADD:
- case DPLANE_OP_IPTABLE_DELETE:
- case DPLANE_OP_IPSET_ADD:
- case DPLANE_OP_IPSET_DELETE:
- case DPLANE_OP_IPSET_ENTRY_ADD:
- case DPLANE_OP_IPSET_ENTRY_DELETE:
- case DPLANE_OP_NEIGH_IP_INSTALL:
- case DPLANE_OP_NEIGH_IP_DELETE:
- case DPLANE_OP_NEIGH_TABLE_UPDATE:
- case DPLANE_OP_GRE_SET:
- case DPLANE_OP_INTF_ADDR_ADD:
- case DPLANE_OP_INTF_ADDR_DEL:
- case DPLANE_OP_INTF_NETCONFIG:
- case DPLANE_OP_INTF_INSTALL:
- case DPLANE_OP_INTF_UPDATE:
- case DPLANE_OP_INTF_DELETE:
- case DPLANE_OP_VLAN_INSTALL:
- atomic_fetch_add_explicit(&dpdk_stat->ignored_updates, 1,
- memory_order_relaxed);
-
- break;
- }
+ else
+ atomic_fetch_add_explicit(&dpdk_stat->ignored_updates, 1, memory_order_relaxed);
}
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index e6b4af3674..3ec1c9d657 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -68,6 +68,8 @@
static const char *prov_name = "dplane_fpm_nl";
+static atomic_bool fpm_cleaning_up;
+
struct fpm_nl_ctx {
/* data plane connection. */
int socket;
@@ -524,6 +526,16 @@ static void fpm_connect(struct event *t);
static void fpm_reconnect(struct fpm_nl_ctx *fnc)
{
+ bool cleaning_p = false;
+
+ /* This is being called in the FPM pthread: ensure we don't deadlock
+ * with similar code that may be run in the main pthread.
+ */
+ if (!atomic_compare_exchange_strong_explicit(
+ &fpm_cleaning_up, &cleaning_p, true, memory_order_seq_cst,
+ memory_order_seq_cst))
+ return;
+
/* Cancel all zebra threads first. */
event_cancel_async(zrouter.master, &fnc->t_lspreset, NULL);
event_cancel_async(zrouter.master, &fnc->t_lspwalk, NULL);
@@ -551,6 +563,12 @@ static void fpm_reconnect(struct fpm_nl_ctx *fnc)
EVENT_OFF(fnc->t_read);
EVENT_OFF(fnc->t_write);
+ /* Reset the barrier value */
+ cleaning_p = true;
+ atomic_compare_exchange_strong_explicit(
+ &fpm_cleaning_up, &cleaning_p, false, memory_order_seq_cst,
+ memory_order_seq_cst);
+
/* FPM is disabled, don't attempt to connect. */
if (fnc->disabled)
return;
@@ -1624,6 +1642,16 @@ static int fpm_nl_start(struct zebra_dplane_provider *prov)
static int fpm_nl_finish_early(struct fpm_nl_ctx *fnc)
{
+ bool cleaning_p = false;
+
+ /* This is being called in the main pthread: ensure we don't deadlock
+ * with similar code that may be run in the FPM pthread.
+ */
+ if (!atomic_compare_exchange_strong_explicit(
+ &fpm_cleaning_up, &cleaning_p, true, memory_order_seq_cst,
+ memory_order_seq_cst))
+ return 0;
+
/* Disable all events and close socket. */
EVENT_OFF(fnc->t_lspreset);
EVENT_OFF(fnc->t_lspwalk);
@@ -1644,6 +1672,12 @@ static int fpm_nl_finish_early(struct fpm_nl_ctx *fnc)
fnc->socket = -1;
}
+ /* Reset the barrier value */
+ cleaning_p = true;
+ atomic_compare_exchange_strong_explicit(
+ &fpm_cleaning_up, &cleaning_p, false, memory_order_seq_cst,
+ memory_order_seq_cst);
+
return 0;
}
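Both guards above express the same pattern: whichever pthread first flips fpm_cleaning_up from false to true performs the cleanup, and a concurrent caller bails out instead of deadlocking; the flag is CAS'd back to false once done. The same shape as a try-lock, in an illustrative Python sketch (not FRR code):

import threading

_cleanup_guard = threading.Lock()

def guarded_cleanup(do_cleanup):
    # First caller wins; concurrent callers return immediately,
    # mirroring the compare-exchange barrier in the C code above.
    if not _cleanup_guard.acquire(blocking=False):
        return
    try:
        do_cleanup()
    finally:
        _cleanup_guard.release()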
diff --git a/zebra/interface.c b/zebra/interface.c
index 86de5dbae6..1c86a6a5c7 100644
--- a/zebra/interface.c
+++ b/zebra/interface.c
@@ -47,6 +47,20 @@ DEFINE_MTYPE_STATIC(ZEBRA, ZIF_DESC, "Intf desc");
static void if_down_del_nbr_connected(struct interface *ifp);
+static const char *if_zebra_data_state(uint8_t state)
+{
+ switch (state) {
+ case IF_ZEBRA_DATA_UNSPEC:
+ return "Not specified by CLI";
+ case IF_ZEBRA_DATA_ON:
+ return "Enabled by CLI";
+ case IF_ZEBRA_DATA_OFF:
+ return "Disabled by CLI";
+ }
+
+ return "STATE IS WRONG DEV ESCAPE";
+}
+
static void if_zebra_speed_update(struct event *thread)
{
struct interface *ifp = EVENT_ARG(thread);
@@ -366,6 +380,7 @@ int if_subnet_delete(struct interface *ifp, struct connected *ifc)
return 0;
}
+#ifndef HAVE_NETLINK
/* if_flags_mangle: A place for hacks that require mangling
* or tweaking the interface flags.
*
@@ -417,6 +432,7 @@ void if_flags_update(struct interface *ifp, uint64_t newflags)
if_up(ifp, true);
}
}
+#endif
/* Wake up configured address if it is not in current kernel
address. */
@@ -956,6 +972,8 @@ void if_up(struct interface *ifp, bool install_connected)
event_ignore_late_timer(zif->speed_update);
if_addr_wakeup(ifp);
+
+ rib_update_handle_vrf_all(RIB_UPDATE_KERNEL, ZEBRA_ROUTE_KERNEL);
}
/* Interface goes down. We have to manage different behavior of based
@@ -2627,8 +2645,8 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp)
vty_out(vty, "mtu6 %d ", ifp->mtu6);
vty_out(vty, "\n flags: %s\n", if_flag_dump(ifp->flags));
- if (zebra_if->mpls)
- vty_out(vty, " MPLS enabled\n");
+ vty_out(vty, " MPLS %s %s\n", zebra_if->mpls ? "enabled" : "",
+ if_zebra_data_state(zebra_if->mpls_config));
if (zebra_if->linkdown)
vty_out(vty, " Ignore all v4 routes with linkdown\n");
@@ -2640,6 +2658,10 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp)
if (zebra_if->v6mcast_on)
vty_out(vty, " v6 Multicast forwarding is on\n");
+ vty_out(vty, " Multicast config is %s\n", if_zebra_data_state(zebra_if->multicast));
+
+ vty_out(vty, " Shutdown config is %s\n", if_zebra_data_state(zebra_if->shutdown));
+
/* Hardware address. */
vty_out(vty, " Type: %s\n", if_link_type_str(ifp->ll_type));
if (ifp->hw_addr_len != 0) {
@@ -2988,10 +3010,14 @@ static void if_dump_vty_json(struct vty *vty, struct interface *ifp,
json_object_boolean_add(json_if, "mplsEnabled", zebra_if->mpls);
json_object_boolean_add(json_if, "linkDown", zebra_if->linkdown);
json_object_boolean_add(json_if, "linkDownV6", zebra_if->linkdownv6);
- json_object_boolean_add(json_if, "mcForwardingV4",
- zebra_if->v4mcast_on);
- json_object_boolean_add(json_if, "mcForwardingV6",
- zebra_if->v6mcast_on);
+ json_object_boolean_add(json_if, "mcForwardingV4", zebra_if->v4mcast_on);
+ json_object_boolean_add(json_if, "mcForwardingV6", zebra_if->v6mcast_on);
+
+ json_object_string_add(json_if, "multicastConfig", if_zebra_data_state(zebra_if->multicast));
+
+ json_object_string_add(json_if, "shutdownConfig", if_zebra_data_state(zebra_if->shutdown));
+
+ json_object_string_add(json_if, "mplsConfig", if_zebra_data_state(zebra_if->mpls_config));
if (ifp->ifindex == IFINDEX_INTERNAL) {
json_object_boolean_add(json_if, "pseudoInterface", true);
diff --git a/zebra/ioctl.c b/zebra/ioctl.c
index a35784cd36..47ce7c943d 100644
--- a/zebra/ioctl.c
+++ b/zebra/ioctl.c
@@ -390,6 +390,7 @@ int if_unset_prefix_ctx(const struct zebra_dplane_ctx *ctx)
#endif /* HAVE_STRUCT_IFALIASREQ */
#endif /* HAVE_NETLINK */
+#ifndef HAVE_NETLINK
/* get interface flags */
void if_get_flags(struct interface *ifp)
{
@@ -485,6 +486,7 @@ void if_get_flags(struct interface *ifp)
out:
if_flags_update(ifp, (ifreqflags.ifr_flags & 0x0000ffff));
}
+#endif
/* Set interface flags */
int if_set_flags(struct interface *ifp, uint64_t flags)
diff --git a/zebra/main.c b/zebra/main.c
index 138a955bc3..4546d14770 100644
--- a/zebra/main.c
+++ b/zebra/main.c
@@ -467,6 +467,9 @@ int main(int argc, char **argv)
zebra_if_init();
zebra_debug_init();
+ /* Open Zebra API server socket */
+ zserv_open(zserv_path);
+
/*
* Initialize NS( and implicitly the VRF module), and make kernel
* routing socket. */
diff --git a/zebra/rib.h b/zebra/rib.h
index 5fedb07335..8484fe1291 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -402,11 +402,7 @@ extern void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
bool fromkernel);
extern struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id,
- const union g_addr *addr,
- struct route_node **rn_out);
-extern struct route_entry *rib_match_multicast(afi_t afi, vrf_id_t vrf_id,
- union g_addr *gaddr,
- struct route_node **rn_out);
+ const union g_addr *addr, struct route_node **rn_out);
extern void rib_update(enum rib_update_event event);
extern void rib_update_table(struct route_table *table,
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index ab07ef8d21..492fe52889 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -3683,6 +3683,13 @@ netlink_vxlan_flood_update_ctx(const struct zebra_dplane_ctx *ctx, int cmd,
if (dplane_ctx_get_type(ctx) != 0)
proto = zebra2proto(dplane_ctx_get_type(ctx));
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("Tx %s family %s IF %s(%u) VNI %u MAC %pEA VTEP %pIA vid %u",
+ nl_msg_type_to_str(cmd), nl_family_to_str(PF_BRIDGE),
+ dplane_ctx_get_ifname(ctx), dplane_ctx_get_ifindex(ctx),
+ dplane_ctx_neigh_get_vni(ctx), &dst_mac,
+ dplane_ctx_neigh_get_ipaddr(ctx), dplane_ctx_mac_get_vlan(ctx));
+
return netlink_neigh_update_msg_encode(
ctx, cmd, (const void *)&dst_mac, ETH_ALEN,
dplane_ctx_neigh_get_ipaddr(ctx), false, PF_BRIDGE, 0, NTF_SELF,
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 10acee9be4..ab55998af0 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -282,7 +282,7 @@ int zsend_interface_address(int cmd, struct zserv *client,
{
int blen;
struct prefix *p;
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, cmd, ifp->vrf->vrf_id);
stream_putl(s, ifp->ifindex);
@@ -323,7 +323,7 @@ static int zsend_interface_nbr_address(int cmd, struct zserv *client,
struct nbr_connected *ifc)
{
int blen;
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
struct prefix *p;
zclient_create_header(s, cmd, ifp->vrf->vrf_id);
@@ -640,10 +640,15 @@ int zsend_redistribute_route(int cmd, struct zserv *client,
* (Otherwise we would need to implement sending NHT updates for the result of
* this "URIB-MRIB-combined" table, but we only decide that here on the fly,
* so it'd be rather complex to do NHT for.)
+ *
+ * 9/19/24 NEB I've updated this API to include the SAFI in the lookup
+ * request and response. This allows PIM to do a synchronous lookup of the
+ * correct table alongside NHT.
+ * This also makes this a more generic synchronous lookup, no longer
+ * specifically tied to the mrib.
*/
-static int zsend_nexthop_lookup_mrib(struct zserv *client, struct ipaddr *addr,
- struct route_entry *re,
- struct zebra_vrf *zvrf)
+static int zsend_nexthop_lookup(struct zserv *client, struct ipaddr *addr, struct route_entry *re,
+ struct route_node *rn, struct zebra_vrf *zvrf, safi_t safi)
{
struct stream *s;
unsigned long nump;
@@ -651,18 +656,20 @@ static int zsend_nexthop_lookup_mrib(struct zserv *client, struct ipaddr *addr,
struct nexthop *nexthop;
/* Get output stream. */
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
stream_reset(s);
/* Fill in result. */
- zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP_MRIB, zvrf_id(zvrf));
+ zclient_create_header(s, ZEBRA_NEXTHOP_LOOKUP, zvrf_id(zvrf));
stream_put_ipaddr(s, addr);
- if (re) {
+ if (re && rn) {
struct nexthop_group *nhg;
stream_putc(s, re->distance);
stream_putl(s, re->metric);
+ stream_putw(s, rn->p.prefixlen);
+
num = 0;
/* remember position for nexthop_num */
nump = stream_get_endp(s);
@@ -679,6 +686,7 @@ static int zsend_nexthop_lookup_mrib(struct zserv *client, struct ipaddr *addr,
} else {
stream_putc(s, 0); /* distance */
stream_putl(s, 0); /* metric */
+ stream_putw(s, 0); /* prefix len */
stream_putw(s, 0); /* nexthop_num */
}
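Per the stream calls above, the fixed part of the reply body after the echoed address is distance (u8), metric (u32), the newly added prefix length (u16), and nexthop_num (u16), all in network byte order. A decoding sketch of just that slice (the ZAPI header and address framing are elided, since their encoding is not shown here):

import struct

def parse_lookup_fixed_part(buf):
    # u8 distance, u32 metric, u16 prefixlen, u16 nexthop_num,
    # matching the putc/putl/putw sequence above.
    return struct.unpack("!BIHH", buf[:9])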
@@ -706,7 +714,7 @@ int zsend_nhg_notify(uint16_t type, uint16_t instance, uint32_t session_id,
zlog_debug("%s: type %d, id %d, note %s",
__func__, type, id, zapi_nhg_notify_owner2str(note));
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
stream_reset(s);
zclient_create_header(s, ZEBRA_NHG_NOTIFY_OWNER, VRF_DEFAULT);
@@ -835,7 +843,7 @@ void zsend_rule_notify_owner(const struct zebra_dplane_ctx *ctx,
if (!client)
return;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_RULE_NOTIFY_OWNER,
dplane_ctx_rule_get_vrfid(ctx));
@@ -889,7 +897,7 @@ void zsend_iptable_notify_owner(const struct zebra_dplane_ctx *ctx,
if (!client)
return;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, cmd, VRF_DEFAULT);
stream_putw(s, note);
@@ -923,7 +931,7 @@ void zsend_ipset_notify_owner(const struct zebra_dplane_ctx *ctx,
if (!client)
return;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, cmd, VRF_DEFAULT);
stream_putw(s, note);
@@ -959,7 +967,7 @@ void zsend_ipset_entry_notify_owner(const struct zebra_dplane_ctx *ctx,
if (!client)
return;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, cmd, VRF_DEFAULT);
stream_putw(s, note);
@@ -1049,13 +1057,12 @@ int zsend_router_id_update(struct zserv *client, afi_t afi, struct prefix *p,
vrf_id_t vrf_id)
{
int blen;
- struct stream *s;
/* Check this client need interface information. */
if (!vrf_bitmap_check(&client->ridinfo[afi], vrf_id))
return 0;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
/* Message type. */
zclient_create_header(s, ZEBRA_ROUTER_ID_UPDATE, vrf_id);
@@ -1077,7 +1084,7 @@ int zsend_router_id_update(struct zserv *client, afi_t afi, struct prefix *p,
*/
int zsend_pw_update(struct zserv *client, struct zebra_pw *pw)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_PW_STATUS_UPDATE, pw->vrf_id);
stream_write(s, pw->ifname, IFNAMSIZ);
@@ -1094,7 +1101,7 @@ int zsend_pw_update(struct zserv *client, struct zebra_pw *pw)
int zsend_assign_label_chunk_response(struct zserv *client, vrf_id_t vrf_id,
struct label_manager_chunk *lmc)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_GET_LABEL_CHUNK, vrf_id);
/* proto */
@@ -1120,7 +1127,7 @@ int zsend_assign_label_chunk_response(struct zserv *client, vrf_id_t vrf_id,
int zsend_label_manager_connect_response(struct zserv *client, vrf_id_t vrf_id,
unsigned short result)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_LABEL_MANAGER_CONNECT, vrf_id);
@@ -1144,7 +1151,7 @@ static int zsend_assign_table_chunk_response(struct zserv *client,
vrf_id_t vrf_id,
struct table_manager_chunk *tmc)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_GET_TABLE_CHUNK, vrf_id);
@@ -1164,7 +1171,7 @@ static int zsend_table_manager_connect_response(struct zserv *client,
vrf_id_t vrf_id,
uint16_t result)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_TABLE_MANAGER_CONNECT, vrf_id);
@@ -2316,33 +2323,37 @@ static void zread_route_del(ZAPI_HANDLER_ARGS)
}
}
-/* MRIB Nexthop lookup for IPv4. */
-static void zread_nexthop_lookup_mrib(ZAPI_HANDLER_ARGS)
+/* Synchronous nexthop lookup. */
+static void zread_nexthop_lookup(ZAPI_HANDLER_ARGS)
{
struct ipaddr addr;
struct route_entry *re = NULL;
+ struct route_node *rn = NULL;
union g_addr gaddr;
+ afi_t afi = AFI_IP;
+ safi_t safi = SAFI_UNICAST;
STREAM_GET_IPADDR(msg, &addr);
+ STREAM_GETC(msg, safi);
switch (addr.ipa_type) {
case IPADDR_V4:
gaddr.ipv4 = addr.ipaddr_v4;
- re = rib_match_multicast(AFI_IP, zvrf_id(zvrf), &gaddr, NULL);
+ afi = AFI_IP;
break;
case IPADDR_V6:
gaddr.ipv6 = addr.ipaddr_v6;
- re = rib_match_multicast(AFI_IP6, zvrf_id(zvrf), &gaddr, NULL);
+ afi = AFI_IP6;
break;
case IPADDR_NONE:
/* ??? */
goto stream_failure;
}
- zsend_nexthop_lookup_mrib(client, &addr, re, zvrf);
+ re = rib_match(afi, safi, zvrf_id(zvrf), &gaddr, &rn);
stream_failure:
- return;
+ zsend_nexthop_lookup(client, &addr, re, rn, zvrf, safi);
}
/* Register zebra server router-id information. Send current router-id */
@@ -2406,7 +2417,7 @@ stream_failure:
static void zsend_capabilities(struct zserv *client, struct zebra_vrf *zvrf)
{
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_CAPABILITIES, zvrf->vrf->vrf_id);
stream_putl(s, vrf_get_backend());
@@ -3990,8 +4001,7 @@ static inline void zebra_gre_source_set(ZAPI_HANDLER_ARGS)
static void zsend_error_msg(struct zserv *client, enum zebra_error_types error,
struct zmsghdr *bad_hdr)
{
-
- struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ struct stream *s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_ERROR, bad_hdr->vrf_id);
@@ -4029,7 +4039,7 @@ void (*const zserv_handlers[])(ZAPI_HANDLER_ARGS) = {
[ZEBRA_REDISTRIBUTE_DELETE] = zebra_redistribute_delete,
[ZEBRA_REDISTRIBUTE_DEFAULT_ADD] = zebra_redistribute_default_add,
[ZEBRA_REDISTRIBUTE_DEFAULT_DELETE] = zebra_redistribute_default_delete,
- [ZEBRA_NEXTHOP_LOOKUP_MRIB] = zread_nexthop_lookup_mrib,
+ [ZEBRA_NEXTHOP_LOOKUP] = zread_nexthop_lookup,
[ZEBRA_HELLO] = zread_hello,
[ZEBRA_NEXTHOP_REGISTER] = zread_rnh_register,
[ZEBRA_NEXTHOP_UNREGISTER] = zread_rnh_unregister,
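The hunks above turn the MRIB-specific lookup into a generic synchronous one: the request now carries one extra octet (the SAFI) after the address, and the reply additionally carries the matched prefix length. A minimal client-side decode sketch, based only on the field order visible in zsend_nexthop_lookup() and zread_nexthop_lookup() above; the helper name demo_decode_lookup and the surrounding scaffolding are illustrative, not FRR API:

/* Illustrative only: decode a ZEBRA_NEXTHOP_LOOKUP reply in the field
 * order used by zsend_nexthop_lookup() above. Assumes the FRR source
 * tree (lib/stream.h, lib/ipaddr.h). */
#include "lib/stream.h"
#include "lib/ipaddr.h"

static int demo_decode_lookup(struct stream *s)
{
	struct ipaddr addr;
	uint8_t distance;
	uint32_t metric;
	uint16_t prefixlen, nexthop_num;

	STREAM_GET_IPADDR(s, &addr); /* queried address, echoed back */
	STREAM_GETC(s, distance);    /* 0 if no route matched */
	STREAM_GETL(s, metric);
	STREAM_GETW(s, prefixlen);   /* new field: matched prefix length */
	STREAM_GETW(s, nexthop_num); /* 0 if no route matched */

	/* Nexthop decoding omitted; see the encode loop in
	 * zsend_nexthop_lookup() for the layout. */
	return nexthop_num;

stream_failure:
	return -1;
}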
diff --git a/zebra/zebra_cli.c b/zebra/zebra_cli.c
index 6ee0fdbb8d..ca53eb2eb3 100644
--- a/zebra/zebra_cli.c
+++ b/zebra/zebra_cli.c
@@ -2252,6 +2252,9 @@ static void lib_vrf_mpls_fec_nexthop_resolution_cli_write(
}
}
+#if CONFDATE > 20251207
+CPP_NOTICE("Remove no-op netns command")
+#endif
DEFPY_YANG (vrf_netns,
vrf_netns_cmd,
"[no] netns ![NAME$netns_name]",
diff --git a/zebra/zebra_evpn_mac.c b/zebra/zebra_evpn_mac.c
index 0d53591336..3fd84b5257 100644
--- a/zebra/zebra_evpn_mac.c
+++ b/zebra/zebra_evpn_mac.c
@@ -1152,6 +1152,7 @@ int zebra_evpn_mac_del(struct zebra_evpn *zevpn, struct zebra_mac *mac)
listcount(mac->neigh_list));
SET_FLAG(mac->flags, ZEBRA_MAC_AUTO);
+ mac->rem_seq = 0;
return 0;
}
@@ -1322,6 +1323,7 @@ int zebra_evpn_mac_send_del_to_client(vni_t vni, const struct ethaddr *macaddr,
uint32_t flags, bool force)
{
int state = ZEBRA_NEIGH_ACTIVE;
+ struct zebra_vrf *zvrf;
if (!force) {
if (CHECK_FLAG(flags, ZEBRA_MAC_LOCAL_INACTIVE) &&
@@ -1329,12 +1331,14 @@ int zebra_evpn_mac_send_del_to_client(vni_t vni, const struct ethaddr *macaddr,
/* the host was not advertised - nothing to delete */
return 0;
- /* MAC is LOCAL and DUP_DETECTED, this local mobility event
- * is not known to bgpd. Upon receiving local delete
- * ask bgp to reinstall the best route (remote entry).
+ /* The duplicate-detect action is freeze and the local
+ * MAC has been detected as a duplicate; this local
+ * mobility event is not known to bgpd.
+ * Upon receiving the local delete, ask bgp to reinstall
+ * the best route (the remote entry).
*/
- if (CHECK_FLAG(flags, ZEBRA_MAC_LOCAL) &&
- CHECK_FLAG(flags, ZEBRA_MAC_DUPLICATE))
+ zvrf = zebra_vrf_get_evpn();
+ if (zvrf && zvrf->dad_freeze && CHECK_FLAG(flags, ZEBRA_MAC_DUPLICATE))
state = ZEBRA_NEIGH_INACTIVE;
}
@@ -2411,6 +2415,7 @@ int zebra_evpn_del_local_mac(struct zebra_evpn *zevpn, struct zebra_mac *mac,
UNSET_FLAG(mac->flags, ZEBRA_MAC_ALL_LOCAL_FLAGS);
UNSET_FLAG(mac->flags, ZEBRA_MAC_STICKY);
SET_FLAG(mac->flags, ZEBRA_MAC_AUTO);
+ mac->rem_seq = 0;
}
return 0;
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index 9549af5f14..3325532ca9 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -37,7 +37,6 @@
DEFINE_MTYPE_STATIC(ZEBRA, LSP, "MPLS LSP object");
DEFINE_MTYPE_STATIC(ZEBRA, FEC, "MPLS FEC object");
DEFINE_MTYPE_STATIC(ZEBRA, NHLFE, "MPLS nexthop object");
-DEFINE_MTYPE_STATIC(ZEBRA, NH_LABEL, "Nexthop label");
bool mpls_enabled;
bool mpls_pw_reach_strict; /* Strict reachability checking */
@@ -467,7 +466,7 @@ static int fec_send(struct zebra_fec *fec, struct zserv *client)
rn = fec->rn;
/* Get output stream. */
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_FEC_UPDATE, VRF_DEFAULT);
@@ -1453,31 +1452,7 @@ static int nhlfe_del(struct zebra_nhlfe *nhlfe)
static void nhlfe_out_label_update(struct zebra_nhlfe *nhlfe,
struct mpls_label_stack *nh_label)
{
- struct mpls_label_stack *nh_label_tmp;
- int i;
-
- /* Enforce limit on label stack size */
- if (nh_label->num_labels > MPLS_MAX_LABELS)
- nh_label->num_labels = MPLS_MAX_LABELS;
-
- /* Resize the array to accommodate the new label stack */
- if (nh_label->num_labels > nhlfe->nexthop->nh_label->num_labels) {
- nh_label_tmp = XREALLOC(MTYPE_NH_LABEL, nhlfe->nexthop->nh_label,
- sizeof(struct mpls_label_stack) +
- nh_label->num_labels *
- sizeof(mpls_label_t));
- if (nh_label_tmp) {
- nhlfe->nexthop->nh_label = nh_label_tmp;
- nhlfe->nexthop->nh_label->num_labels =
- nh_label->num_labels;
- } else
- nh_label->num_labels =
- nhlfe->nexthop->nh_label->num_labels;
- }
-
- /* Copy the label stack into the array */
- for (i = 0; i < nh_label->num_labels; i++)
- nhlfe->nexthop->nh_label->label[i] = nh_label->label[i];
+ nexthop_change_labels(nhlfe->nexthop, nh_label);
}
static int mpls_lsp_uninstall_all(struct hash *lsp_table, struct zebra_lsp *lsp,
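The replacement above delegates label-stack updates to a shared helper in lib/nexthop.c (note the matching lib/nexthop.c and lib/nexthop.h entries in the diffstat). A rough usage sketch, assuming only the call shape visible here, nexthop_change_labels(nexthop, stack), plus the mpls_label_stack layout shown in the removed code; the allocation details and MTYPE_TMP usage are just for illustration:

/* Illustrative only: swap the label stack on a nexthop via the new
 * shared helper. struct mpls_label_stack (num_labels + flexible
 * label[] array) matches the code removed above. */
#include "lib/nexthop.h"
#include "lib/mpls.h"
#include "lib/memory.h"

static void demo_set_two_labels(struct nexthop *nh)
{
	struct mpls_label_stack *stack;

	stack = XCALLOC(MTYPE_TMP, sizeof(*stack) + 2 * sizeof(mpls_label_t));
	stack->num_labels = 2;
	stack->label[0] = 16001;
	stack->label[1] = 16002;

	/* The helper resizes/copies nh->nh_label internally, replacing
	 * the open-coded XREALLOC loop that used to live in
	 * nhlfe_out_label_update(). */
	nexthop_change_labels(nh, stack);

	XFREE(MTYPE_TMP, stack);
}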
diff --git a/zebra/zebra_mroute.c b/zebra/zebra_mroute.c
index 881b681c2f..86e25469ba 100644
--- a/zebra/zebra_mroute.c
+++ b/zebra/zebra_mroute.c
@@ -61,7 +61,7 @@ void zebra_ipmr_route_stats(ZAPI_HANDLER_ARGS)
suc = kernel_get_ipmr_sg_stats(zvrf, &mroute);
stream_failure:
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
stream_reset(s);
diff --git a/zebra/zebra_nb.c b/zebra/zebra_nb.c
index 0a7ed5db41..6b41993a95 100644
--- a/zebra/zebra_nb.c
+++ b/zebra/zebra_nb.c
@@ -26,12 +26,6 @@ const struct frr_yang_module_info frr_zebra_info = {
.features = features,
.nodes = {
{
- .xpath = "/frr-zebra:zebra/mcast-rpf-lookup",
- .cbs = {
- .modify = zebra_mcast_rpf_lookup_modify,
- }
- },
- {
.xpath = "/frr-zebra:zebra/ip-forwarding",
.cbs = {
.modify = zebra_ip_forwarding_modify,
diff --git a/zebra/zebra_nb_config.c b/zebra/zebra_nb_config.c
index 9a3579574a..d99010547f 100644
--- a/zebra/zebra_nb_config.c
+++ b/zebra/zebra_nb_config.c
@@ -31,23 +31,6 @@
#include "zebra/table_manager.h"
/*
- * XPath: /frr-zebra:zebra/mcast-rpf-lookup
- */
-int zebra_mcast_rpf_lookup_modify(struct nb_cb_modify_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- /* TODO: implement me. */
- break;
- }
-
- return NB_OK;
-}
-
-/*
* XPath: /frr-zebra:zebra/ip-forwarding
*/
int zebra_ip_forwarding_modify(struct nb_cb_modify_args *args)
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index 1519246c17..a32fc2bb14 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -1056,6 +1056,7 @@ static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh, struct nh_g
static void zebra_nhg_set_valid(struct nhg_hash_entry *nhe, bool valid)
{
struct nhg_connected *rb_node_dep;
+ bool dependent_valid = valid;
if (valid)
SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID);
@@ -1071,6 +1072,7 @@ static void zebra_nhg_set_valid(struct nhg_hash_entry *nhe, bool valid)
/* Update validity of nexthops depending on it */
frr_each (nhg_connected_tree, &nhe->nhg_dependents, rb_node_dep) {
+ dependent_valid = valid;
if (!valid) {
/*
* Grab the first nexthop from the depending nexthop group
@@ -1080,16 +1082,22 @@ static void zebra_nhg_set_valid(struct nhg_hash_entry *nhe, bool valid)
struct nexthop *nexthop = rb_node_dep->nhe->nhg.nexthop;
while (nexthop) {
- if (nexthop_same(nexthop, nhe->nhg.nexthop))
- break;
-
+ if (nexthop_same(nexthop, nhe->nhg.nexthop)) {
+ /* Invalid Nexthop */
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ } else {
+ /*
+ * If other nexthops in the nexthop
+ * group are valid, then we can continue
+ * to treat this nexthop group as valid.
+ */
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+ dependent_valid = true;
+ }
nexthop = nexthop->next;
}
-
- if (nexthop)
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
}
- zebra_nhg_set_valid(rb_node_dep->nhe, valid);
+ zebra_nhg_set_valid(rb_node_dep->nhe, dependent_valid);
}
}
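The reworked loop above changes dependent-group invalidation from "stop at the first matching nexthop" to "stay valid while any other member is still active": matching nexthops lose NEXTHOP_FLAG_ACTIVE, and the group's validity becomes the OR of the remaining members' active bits. A condensed restatement of that rule, with a hypothetical helper name, not FRR API:

/* Hypothetical restatement of the rule implemented above: a dependent
 * group stays valid as long as any member other than the invalidated
 * nexthop is still active. */
#include "lib/nexthop.h"

static bool demo_group_still_valid(const struct nexthop *members,
				   const struct nexthop *invalidated)
{
	for (const struct nexthop *nh = members; nh; nh = nh->next) {
		if (nexthop_same(nh, invalidated))
			continue; /* this one is being invalidated */
		if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE))
			return true; /* another member still works */
	}
	return false;
}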
@@ -2648,7 +2656,7 @@ static unsigned nexthop_active_check(struct route_node *rn,
ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
- if (ifp && ifp->vrf->vrf_id == vrf_id && if_is_up(ifp)) {
+ if (ifp && ifp->vrf->vrf_id == vrf_id && if_is_up(ifp) && if_is_operative(ifp)) {
SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
goto skip_check;
}
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index b2543ca0e8..0226c355c8 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -503,7 +503,7 @@ struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id,
/* Lookup table. */
table = zebra_vrf_table(afi, safi, vrf_id);
if (!table)
- return 0;
+ return NULL;
memset(&p, 0, sizeof(p));
p.family = afi;
@@ -552,65 +552,6 @@ struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id,
return NULL;
}
-struct route_entry *rib_match_multicast(afi_t afi, vrf_id_t vrf_id,
- union g_addr *gaddr,
- struct route_node **rn_out)
-{
- struct route_entry *re = NULL, *mre = NULL, *ure = NULL;
- struct route_node *m_rn = NULL, *u_rn = NULL;
-
- switch (zrouter.ipv4_multicast_mode) {
- case MCAST_MRIB_ONLY:
- return rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, rn_out);
- case MCAST_URIB_ONLY:
- return rib_match(afi, SAFI_UNICAST, vrf_id, gaddr, rn_out);
- case MCAST_NO_CONFIG:
- case MCAST_MIX_MRIB_FIRST:
- re = mre = rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, &m_rn);
- if (!mre)
- re = ure = rib_match(afi, SAFI_UNICAST, vrf_id, gaddr,
- &u_rn);
- break;
- case MCAST_MIX_DISTANCE:
- mre = rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, &m_rn);
- ure = rib_match(afi, SAFI_UNICAST, vrf_id, gaddr, &u_rn);
- if (mre && ure)
- re = ure->distance < mre->distance ? ure : mre;
- else if (mre)
- re = mre;
- else if (ure)
- re = ure;
- break;
- case MCAST_MIX_PFXLEN:
- mre = rib_match(afi, SAFI_MULTICAST, vrf_id, gaddr, &m_rn);
- ure = rib_match(afi, SAFI_UNICAST, vrf_id, gaddr, &u_rn);
- if (mre && ure)
- re = u_rn->p.prefixlen > m_rn->p.prefixlen ? ure : mre;
- else if (mre)
- re = mre;
- else if (ure)
- re = ure;
- break;
- }
-
- if (rn_out)
- *rn_out = (re == mre) ? m_rn : u_rn;
-
- if (IS_ZEBRA_DEBUG_RIB) {
- char buf[BUFSIZ];
- inet_ntop(afi == AFI_IP ? AF_INET : AF_INET6, gaddr, buf,
- BUFSIZ);
-
- zlog_debug("%s: %s: %pRN vrf: %s(%u) found %s, using %s",
- __func__, buf, (re == mre) ? m_rn : u_rn,
- vrf_id_to_name(vrf_id), vrf_id,
- mre ? (ure ? "MRIB+URIB" : "MRIB")
- : ure ? "URIB" : "nothing",
- re == ure ? "URIB" : re == mre ? "MRIB" : "none");
- }
- return re;
-}
-
/*
* Is this RIB labeled-unicast? It must be of type BGP and all paths
* (nexthops) must have a label.
@@ -1480,7 +1421,7 @@ static void rib_process(struct route_node *rn)
rib_process_update_fib(zvrf, rn, old_fib, new_fib);
else if (new_fib)
rib_process_add_fib(zvrf, rn, new_fib);
- else if (old_fib)
+ else if (old_fib && !RIB_SYSTEM_ROUTE(old_fib))
rib_process_del_fib(zvrf, rn, old_fib);
/* Remove all RE entries queued for removal */
@@ -2838,6 +2779,8 @@ static void process_subq_early_route_add(struct zebra_early_route *ere)
if (!ere->startup && (re->flags & ZEBRA_FLAG_SELFROUTE) &&
zrouter.asic_offloaded) {
+ struct route_entry *entry;
+
if (!same) {
if (IS_ZEBRA_DEBUG_RIB)
zlog_debug(
@@ -2854,6 +2797,25 @@ static void process_subq_early_route_add(struct zebra_early_route *ere)
early_route_memory_free(ere);
return;
}
+
+ RNODE_FOREACH_RE (rn, entry) {
+ if (CHECK_FLAG(entry->status, ROUTE_ENTRY_REMOVED))
+ continue;
+
+ if (entry->type != ere->re->type)
+ continue;
+
+ /*
+ * If we have an entry that is changed but
+ * unprocessed and is not a self route, then
+ * we should just drop this new self route.
+ */
+ if (CHECK_FLAG(entry->status, ROUTE_ENTRY_CHANGED) &&
+ !(entry->flags & ZEBRA_FLAG_SELFROUTE)) {
+ early_route_memory_free(ere);
+ return;
+ }
+ }
}
/* Set default distance by route type. */
@@ -3746,10 +3708,8 @@ static struct meta_queue *meta_queue_new(void)
new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct meta_queue));
- for (i = 0; i < MQ_SIZE; i++) {
+ for (i = 0; i < MQ_SIZE; i++)
new->subq[i] = list_new();
- assert(new->subq[i]);
- }
return new;
}
@@ -3935,12 +3895,7 @@ void meta_queue_free(struct meta_queue *mq, struct zebra_vrf *zvrf)
/* initialise zebra rib work queue */
static void rib_queue_init(void)
{
- if (!(zrouter.ribq = work_queue_new(zrouter.master,
- "route_node processing"))) {
- flog_err(EC_ZEBRA_WQ_NONEXISTENT,
- "%s: could not initialise work queue!", __func__);
- return;
- }
+ zrouter.ribq = work_queue_new(zrouter.master, "route_node processing");
/* fill in the work queue spec */
zrouter.ribq->spec.workfunc = &meta_queue_process;
@@ -3950,11 +3905,8 @@ static void rib_queue_init(void)
zrouter.ribq->spec.hold = ZEBRA_RIB_PROCESS_HOLD_TIME;
zrouter.ribq->spec.retry = ZEBRA_RIB_PROCESS_RETRY_TIME;
- if (!(zrouter.mq = meta_queue_new())) {
- flog_err(EC_ZEBRA_WQ_NONEXISTENT,
- "%s: could not initialise meta queue!", __func__);
- return;
- }
+ zrouter.mq = meta_queue_new();
+
return;
}
@@ -4406,9 +4358,7 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
if (ng) {
nhe.nhg.nexthop = ng->nexthop;
- if (re->type == ZEBRA_ROUTE_CONNECT ||
- re->type == ZEBRA_ROUTE_LOCAL ||
- re->type == ZEBRA_ROUTE_KERNEL)
+ if (RIB_SYSTEM_ROUTE(re))
SET_FLAG(nhe.flags, NEXTHOP_GROUP_INITIAL_DELAY_INSTALL);
} else if (re->nhe_id > 0)
nhe.id = re->nhe_id;
diff --git a/zebra/zebra_router.c b/zebra/zebra_router.c
index 4022c1a26f..ae2910af41 100644
--- a/zebra/zebra_router.c
+++ b/zebra/zebra_router.c
@@ -23,7 +23,6 @@ DEFINE_MTYPE_STATIC(ZEBRA, ZEBRA_RT_TABLE, "Zebra VRF table");
struct zebra_router zrouter = {
.multipath_num = MULTIPATH_NUM,
- .ipv4_multicast_mode = MCAST_NO_CONFIG,
};
static inline int
@@ -221,19 +220,6 @@ uint32_t zebra_router_get_next_sequence(void)
memory_order_relaxed);
}
-void multicast_mode_ipv4_set(enum multicast_mode mode)
-{
- if (IS_ZEBRA_DEBUG_RIB)
- zlog_debug("%s: multicast lookup mode set (%d)", __func__,
- mode);
- zrouter.ipv4_multicast_mode = mode;
-}
-
-enum multicast_mode multicast_mode_ipv4_get(void)
-{
- return zrouter.ipv4_multicast_mode;
-}
-
void zebra_router_terminate(void)
{
struct zebra_router_table *zrt, *tmp;
diff --git a/zebra/zebra_router.h b/zebra/zebra_router.h
index a637c3214e..28c4cf0790 100644
--- a/zebra/zebra_router.h
+++ b/zebra/zebra_router.h
@@ -34,17 +34,6 @@ RB_HEAD(zebra_router_table_head, zebra_router_table);
RB_PROTOTYPE(zebra_router_table_head, zebra_router_table,
zebra_router_table_entry, zebra_router_table_entry_compare)
-/* RPF lookup behaviour */
-enum multicast_mode {
- MCAST_NO_CONFIG = 0, /* MIX_MRIB_FIRST, but no show in config write */
- MCAST_MRIB_ONLY, /* MRIB only */
- MCAST_URIB_ONLY, /* URIB only */
- MCAST_MIX_MRIB_FIRST, /* MRIB, if nothing at all then URIB */
- MCAST_MIX_DISTANCE, /* MRIB & URIB, lower distance wins */
- MCAST_MIX_PFXLEN, /* MRIB & URIB, longer prefix wins */
- /* on equal value, MRIB wins for last 2 */
-};
-
/* An interface can be error-disabled if a protocol (such as EVPN or
* VRRP) detects a problem with keeping it operationally-up.
* If any of the protodown bits are set protodown-on is programmed
@@ -187,9 +176,6 @@ struct zebra_router {
uint32_t multipath_num;
- /* RPF Lookup behavior */
- enum multicast_mode ipv4_multicast_mode;
-
/*
* zebra start time and time of sweeping RIB of old routes
*/
@@ -287,10 +273,6 @@ static inline struct zebra_vrf *zebra_vrf_get_evpn(void)
: zebra_vrf_lookup_by_id(VRF_DEFAULT);
}
-extern void multicast_mode_ipv4_set(enum multicast_mode mode);
-
-extern enum multicast_mode multicast_mode_ipv4_get(void);
-
extern bool zebra_router_notify_on_ack(void);
static inline void zebra_router_set_supports_nhgs(bool support)
diff --git a/zebra/zebra_srv6_vty.c b/zebra/zebra_srv6_vty.c
index 5a80524149..6867b1bbb6 100644
--- a/zebra/zebra_srv6_vty.c
+++ b/zebra/zebra_srv6_vty.c
@@ -338,10 +338,6 @@ DEFUN_NOSH (srv6_locator,
}
locator = srv6_locator_alloc(argv[1]->arg);
- if (!locator) {
- vty_out(vty, "%% Alloc failed\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
locator->status_up = true;
VTY_PUSH_CONTEXT(SRV6_LOC_NODE, locator);
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index b65097e725..582d15627c 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -81,126 +81,14 @@ static void show_nexthop_detail_helper(struct vty *vty,
const struct nexthop *nexthop,
bool is_backup);
-static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table);
+static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table, afi_t afi,
+ safi_t safi);
static void show_ip_route_nht_dump(struct vty *vty,
const struct nexthop *nexthop,
const struct route_node *rn,
const struct route_entry *re,
unsigned int num);
-DEFUN (ip_multicast_mode,
- ip_multicast_mode_cmd,
- "ip multicast rpf-lookup-mode <urib-only|mrib-only|mrib-then-urib|lower-distance|longer-prefix>",
- IP_STR
- "Multicast options\n"
- "RPF lookup behavior\n"
- "Lookup in unicast RIB only\n"
- "Lookup in multicast RIB only\n"
- "Try multicast RIB first, fall back to unicast RIB\n"
- "Lookup both, use entry with lower distance\n"
- "Lookup both, use entry with longer prefix\n")
-{
- char *mode = argv[3]->text;
-
- if (strmatch(mode, "urib-only"))
- multicast_mode_ipv4_set(MCAST_URIB_ONLY);
- else if (strmatch(mode, "mrib-only"))
- multicast_mode_ipv4_set(MCAST_MRIB_ONLY);
- else if (strmatch(mode, "mrib-then-urib"))
- multicast_mode_ipv4_set(MCAST_MIX_MRIB_FIRST);
- else if (strmatch(mode, "lower-distance"))
- multicast_mode_ipv4_set(MCAST_MIX_DISTANCE);
- else if (strmatch(mode, "longer-prefix"))
- multicast_mode_ipv4_set(MCAST_MIX_PFXLEN);
- else {
- vty_out(vty, "Invalid mode specified\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- return CMD_SUCCESS;
-}
-
-DEFUN (no_ip_multicast_mode,
- no_ip_multicast_mode_cmd,
- "no ip multicast rpf-lookup-mode [<urib-only|mrib-only|mrib-then-urib|lower-distance|longer-prefix>]",
- NO_STR
- IP_STR
- "Multicast options\n"
- "RPF lookup behavior\n"
- "Lookup in unicast RIB only\n"
- "Lookup in multicast RIB only\n"
- "Try multicast RIB first, fall back to unicast RIB\n"
- "Lookup both, use entry with lower distance\n"
- "Lookup both, use entry with longer prefix\n")
-{
- multicast_mode_ipv4_set(MCAST_NO_CONFIG);
- return CMD_SUCCESS;
-}
-
-
-DEFPY (show_ip_rpf,
- show_ip_rpf_cmd,
- "show [ip$ip|ipv6$ipv6] rpf [json]",
- SHOW_STR
- IP_STR
- IPV6_STR
- "Display RPF information for multicast source\n"
- JSON_STR)
-{
- bool uj = use_json(argc, argv);
- struct route_show_ctx ctx = {
- .multi = false,
- };
-
- return do_show_ip_route(vty, VRF_DEFAULT_NAME, ip ? AFI_IP : AFI_IP6,
- SAFI_MULTICAST, false, uj, 0, NULL, false, 0, 0,
- 0, false, &ctx);
-}
-
-DEFPY (show_ip_rpf_addr,
- show_ip_rpf_addr_cmd,
- "show ip rpf A.B.C.D$address",
- SHOW_STR
- IP_STR
- "Display RPF information for multicast source\n"
- "IP multicast source address (e.g. 10.0.0.0)\n")
-{
- struct route_node *rn;
- struct route_entry *re;
-
- re = rib_match_multicast(AFI_IP, VRF_DEFAULT, (union g_addr *)&address,
- &rn);
-
- if (re)
- vty_show_ip_route_detail(vty, rn, 1, false, false);
- else
- vty_out(vty, "%% No match for RPF lookup\n");
-
- return CMD_SUCCESS;
-}
-
-DEFPY (show_ipv6_rpf_addr,
- show_ipv6_rpf_addr_cmd,
- "show ipv6 rpf X:X::X:X$address",
- SHOW_STR
- IPV6_STR
- "Display RPF information for multicast source\n"
- "IPv6 multicast source address\n")
-{
- struct route_node *rn;
- struct route_entry *re;
-
- re = rib_match_multicast(AFI_IP6, VRF_DEFAULT, (union g_addr *)&address,
- &rn);
-
- if (re)
- vty_show_ip_route_detail(vty, rn, 1, false, false);
- else
- vty_out(vty, "%% No match for RPF lookup\n");
-
- return CMD_SUCCESS;
-}
-
static char re_status_output_char(const struct route_entry *re,
const struct nexthop *nhop,
bool is_fib)
@@ -858,35 +746,36 @@ static void vty_show_ip_route_detail_json(struct vty *vty,
vty_json(vty, json);
}
-static void zebra_vty_display_vrf_header(struct vty *vty, struct zebra_vrf *zvrf, uint32_t tableid)
+static void zebra_vty_display_vrf_header(struct vty *vty, struct zebra_vrf *zvrf, uint32_t tableid,
+ afi_t afi, safi_t safi)
{
if (!tableid)
- vty_out(vty, "VRF %s:\n", zvrf_name(zvrf));
+ vty_out(vty, "%s %s VRF %s:\n", afi2str(afi), safi2str(safi), zvrf_name(zvrf));
else {
if (vrf_is_backend_netns())
- vty_out(vty, "VRF %s table %u:\n", zvrf_name(zvrf), tableid);
+ vty_out(vty, "%s %s VRF %s table %u:\n", afi2str(afi), safi2str(safi),
+ zvrf_name(zvrf), tableid);
else {
vrf_id_t vrf = zebra_vrf_lookup_by_table(tableid, zvrf->zns->ns_id);
if (vrf == VRF_DEFAULT && tableid != RT_TABLE_ID_MAIN)
- vty_out(vty, "table %u:\n", tableid);
+ vty_out(vty, "%s %s table %u:\n", afi2str(afi), safi2str(safi),
+ tableid);
else {
struct zebra_vrf *zvrf2 = zebra_vrf_lookup_by_id(vrf);
- vty_out(vty, "VRF %s table %u:\n", zvrf_name(zvrf2), tableid);
+ vty_out(vty, "%s %s VRF %s table %u:\n", afi2str(afi),
+ safi2str(safi), zvrf_name(zvrf2), tableid);
}
}
}
}
-static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf,
- struct route_table *table, afi_t afi,
- bool use_fib, route_tag_t tag,
- const struct prefix *longer_prefix_p,
- bool supernets_only, int type,
- unsigned short ospf_instance_id, bool use_json,
- uint32_t tableid, bool show_ng,
- struct route_show_ctx *ctx)
+static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf, struct route_table *table,
+ afi_t afi, safi_t safi, bool use_fib, route_tag_t tag,
+ const struct prefix *longer_prefix_p, bool supernets_only,
+ int type, unsigned short ospf_instance_id, bool use_json,
+ uint32_t tableid, bool show_ng, struct route_show_ctx *ctx)
{
struct route_node *rn;
struct route_entry *re;
@@ -958,9 +847,7 @@ static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf,
}
if (ctx->multi && ctx->header_done)
vty_out(vty, "\n");
- if (ctx->multi || zvrf_id(zvrf) != VRF_DEFAULT || tableid)
- zebra_vty_display_vrf_header(vty, zvrf, tableid);
-
+ zebra_vty_display_vrf_header(vty, zvrf, tableid, afi, safi);
ctx->header_done = true;
first = 0;
}
@@ -982,12 +869,10 @@ static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf,
vty_json_close(vty, first_json);
}
-static void do_show_ip_route_all(struct vty *vty, struct zebra_vrf *zvrf,
- afi_t afi, bool use_fib, bool use_json,
- route_tag_t tag,
- const struct prefix *longer_prefix_p,
- bool supernets_only, int type,
- unsigned short ospf_instance_id, bool show_ng,
+static void do_show_ip_route_all(struct vty *vty, struct zebra_vrf *zvrf, afi_t afi, safi_t safi,
+ bool use_fib, bool use_json, route_tag_t tag,
+ const struct prefix *longer_prefix_p, bool supernets_only,
+ int type, unsigned short ospf_instance_id, bool show_ng,
struct route_show_ctx *ctx)
{
struct zebra_router_table *zrt;
@@ -999,13 +884,11 @@ static void do_show_ip_route_all(struct vty *vty, struct zebra_vrf *zvrf,
if (zvrf != info->zvrf)
continue;
- if (zrt->afi != afi ||
- zrt->safi != SAFI_UNICAST)
+ if (zrt->afi != afi || zrt->safi != safi)
continue;
- do_show_ip_route(vty, zvrf_name(zvrf), afi, SAFI_UNICAST,
- use_fib, use_json, tag, longer_prefix_p,
- supernets_only, type, ospf_instance_id,
+ do_show_ip_route(vty, zvrf_name(zvrf), afi, safi, use_fib, use_json, tag,
+ longer_prefix_p, supernets_only, type, ospf_instance_id,
zrt->tableid, show_ng, ctx);
}
}
@@ -1038,7 +921,7 @@ static int do_show_ip_route(struct vty *vty, const char *vrf_name, afi_t afi,
}
if (tableid)
- table = zebra_router_find_table(zvrf, tableid, afi, SAFI_UNICAST);
+ table = zebra_router_find_table(zvrf, tableid, afi, safi);
else
table = zebra_vrf_table(afi, safi, zvrf_id(zvrf));
if (!table) {
@@ -1047,9 +930,9 @@ static int do_show_ip_route(struct vty *vty, const char *vrf_name, afi_t afi,
return CMD_SUCCESS;
}
- do_show_route_helper(vty, zvrf, table, afi, use_fib, tag,
- longer_prefix_p, supernets_only, type,
- ospf_instance_id, use_json, tableid, show_ng, ctx);
+ do_show_route_helper(vty, zvrf, table, afi, safi, use_fib, tag, longer_prefix_p,
+ supernets_only, type, ospf_instance_id, use_json, tableid, show_ng,
+ ctx);
return CMD_SUCCESS;
}
@@ -1702,27 +1585,35 @@ DEFPY_HIDDEN(rnh_hide_backups, rnh_hide_backups_cmd,
DEFPY (show_route,
show_route_cmd,
"show\
- <\
- ip$ipv4 <fib$fib|route> [table <(1-4294967295)$table|all$table_all>]\
- [vrf <NAME$vrf_name|all$vrf_all>]\
- [{\
- tag (1-4294967295)\
- |A.B.C.D/M$prefix longer-prefixes\
- |supernets-only$supernets_only\
- }]\
- [<\
- " FRR_IP_REDIST_STR_ZEBRA "$type_str\
- |ospf$type_str (1-65535)$ospf_instance_id\
- >]\
- |ipv6$ipv6 <fib$fib|route> [table <(1-4294967295)$table|all$table_all>]\
- [vrf <NAME$vrf_name|all$vrf_all>]\
- [{\
- tag (1-4294967295)\
- |X:X::X:X/M$prefix longer-prefixes\
- }]\
- [" FRR_IP6_REDIST_STR_ZEBRA "$type_str]\
- >\
- [<json$json|nexthop-group$ng>]",
+ <\
+ ip$ipv4 <fib$fib|route>\
+ [{\
+ table <(1-4294967295)$table|all$table_all>\
+ |mrib$mrib\
+ |vrf <NAME$vrf_name|all$vrf_all>\
+ }]\
+ [{\
+ tag (1-4294967295)\
+ |A.B.C.D/M$prefix longer-prefixes\
+ |supernets-only$supernets_only\
+ }]\
+ [<\
+ " FRR_IP_REDIST_STR_ZEBRA "$type_str\
+ |ospf$type_str (1-65535)$ospf_instance_id\
+ >]\
+ |ipv6$ipv6 <fib$fib|route>\
+ [{\
+ table <(1-4294967295)$table|all$table_all>\
+ |mrib$mrib\
+ |vrf <NAME$vrf_name|all$vrf_all>\
+ }]\
+ [{\
+ tag (1-4294967295)\
+ |X:X::X:X/M$prefix longer-prefixes\
+ }]\
+ [" FRR_IP6_REDIST_STR_ZEBRA "$type_str]\
+ >\
+ [<json$json|nexthop-group$ng>]",
SHOW_STR
IP_STR
"IP forwarding table\n"
@@ -1730,6 +1621,7 @@ DEFPY (show_route,
"Table to display\n"
"The table number to display\n"
"All tables\n"
+ "Multicast SAFI table\n"
VRF_FULL_CMD_HELP_STR
"Show only routes with tag\n"
"Tag value\n"
@@ -1745,6 +1637,7 @@ DEFPY (show_route,
"Table to display\n"
"The table number to display\n"
"All tables\n"
+ "Multicast SAFI table\n"
VRF_FULL_CMD_HELP_STR
"Show only routes with tag\n"
"Tag value\n"
@@ -1755,6 +1648,7 @@ DEFPY (show_route,
"Nexthop Group Information\n")
{
afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
+ safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST;
bool first_vrf_json = true;
struct vrf *vrf;
int type = 0;
@@ -1784,26 +1678,19 @@ DEFPY (show_route,
if (vrf_all) {
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if ((zvrf = vrf->info) == NULL
- || (zvrf->table[afi][SAFI_UNICAST] == NULL))
+ if ((zvrf = vrf->info) == NULL || (zvrf->table[afi][safi] == NULL))
continue;
if (json)
vty_json_key(vty, zvrf_name(zvrf),
&first_vrf_json);
if (table_all)
- do_show_ip_route_all(vty, zvrf, afi, !!fib,
- !!json, tag,
- prefix_str ? prefix : NULL,
- !!supernets_only, type,
- ospf_instance_id, !!ng,
- &ctx);
+ do_show_ip_route_all(vty, zvrf, afi, safi, !!fib, !!json, tag,
+ prefix_str ? prefix : NULL, !!supernets_only,
+ type, ospf_instance_id, !!ng, &ctx);
else
- do_show_ip_route(vty, zvrf_name(zvrf), afi,
- SAFI_UNICAST, !!fib, !!json,
- tag, prefix_str ? prefix : NULL,
- !!supernets_only, type,
- ospf_instance_id, table, !!ng,
- &ctx);
+ do_show_ip_route(vty, zvrf_name(zvrf), afi, safi, !!fib, !!json,
+ tag, prefix_str ? prefix : NULL, !!supernets_only,
+ type, ospf_instance_id, table, !!ng, &ctx);
}
if (json)
vty_json_close(vty, first_vrf_json);
@@ -1821,21 +1708,27 @@ DEFPY (show_route,
return CMD_SUCCESS;
if (table_all)
- do_show_ip_route_all(vty, zvrf, afi, !!fib, !!json, tag,
- prefix_str ? prefix : NULL,
- !!supernets_only, type,
+ do_show_ip_route_all(vty, zvrf, afi, safi, !!fib, !!json, tag,
+ prefix_str ? prefix : NULL, !!supernets_only, type,
ospf_instance_id, !!ng, &ctx);
else
- do_show_ip_route(vty, vrf->name, afi, SAFI_UNICAST,
- !!fib, !!json, tag,
- prefix_str ? prefix : NULL,
- !!supernets_only, type,
+ do_show_ip_route(vty, vrf->name, afi, safi, !!fib, !!json, tag,
+ prefix_str ? prefix : NULL, !!supernets_only, type,
ospf_instance_id, table, !!ng, &ctx);
}
return CMD_SUCCESS;
}
+ALIAS_DEPRECATED (show_route,
+ show_ip_rpf_cmd,
+ "show <ip$ipv4|ipv6$ipv6> rpf$mrib [json$json]",
+ SHOW_STR
+ IP_STR
+ IPV6_STR
+ "Display RPF information for multicast source\n"
+ JSON_STR);
+
ALIAS_HIDDEN (show_route,
show_ro_cmd,
"show <ip$ipv4|ipv6$ipv6> ro",
@@ -1849,28 +1742,38 @@ DEFPY (show_route_detail,
show_route_detail_cmd,
"show\
<\
- ip$ipv4 <fib$fib|route> [vrf <NAME$vrf_name|all$vrf_all>]\
- <\
- A.B.C.D$address\
- |A.B.C.D/M$prefix\
- >\
- |ipv6$ipv6 <fib$fib|route> [vrf <NAME$vrf_name|all$vrf_all>]\
- <\
- X:X::X:X$address\
- |X:X::X:X/M$prefix\
- >\
- >\
- [json$json] [nexthop-group$ng]",
+ ip$ipv4 <fib$fib|route>\
+ [{\
+ mrib$mrib\
+ |vrf <NAME$vrf_name|all$vrf_all>\
+ }]\
+ <\
+ A.B.C.D$address\
+ |A.B.C.D/M$prefix\
+ >\
+ |ipv6$ipv6 <fib$fib|route>\
+ [{\
+ mrib$mrib\
+ |vrf <NAME$vrf_name|all$vrf_all>\
+ }]\
+ <\
+ X:X::X:X$address\
+ |X:X::X:X/M$prefix\
+ >\
+ >\
+ [json$json] [nexthop-group$ng]",
SHOW_STR
IP_STR
"IP forwarding table\n"
"IP routing table\n"
+ "Multicast SAFI table\n"
VRF_FULL_CMD_HELP_STR
"Network in the IP routing table to display\n"
"IP prefix <network>/<length>, e.g., 35.0.0.0/8\n"
IP6_STR
"IPv6 forwarding table\n"
"IPv6 routing table\n"
+ "Multicast SAFI table\n"
VRF_FULL_CMD_HELP_STR
"IPv6 Address\n"
"IPv6 prefix\n"
@@ -1878,6 +1781,7 @@ DEFPY (show_route_detail,
"Nexthop Group Information\n")
{
afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
+ safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST;
struct route_table *table;
struct prefix p;
struct route_node *rn;
@@ -1898,8 +1802,7 @@ DEFPY (show_route_detail,
struct zebra_vrf *zvrf;
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if ((zvrf = vrf->info) == NULL
- || (table = zvrf->table[afi][SAFI_UNICAST]) == NULL)
+ if ((zvrf = vrf->info) == NULL || (table = zvrf->table[afi][safi]) == NULL)
continue;
rn = route_node_match(table, &p);
@@ -1920,7 +1823,7 @@ DEFPY (show_route_detail,
if (json)
vty_show_ip_route_detail_json(vty, rn, use_fib);
else
- vty_show_ip_route_detail(vty, rn, 0, use_fib,
+ vty_show_ip_route_detail(vty, rn, (safi == SAFI_MULTICAST), use_fib,
show_ng);
route_unlock_node(rn);
@@ -1945,7 +1848,7 @@ DEFPY (show_route_detail,
if (vrf_name)
VRF_GET_ID(vrf_id, vrf_name, false);
- table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id);
+ table = zebra_vrf_table(afi, safi, vrf_id);
if (!table)
return CMD_SUCCESS;
@@ -1973,7 +1876,8 @@ DEFPY (show_route_detail,
if (json)
vty_show_ip_route_detail_json(vty, rn, use_fib);
else
- vty_show_ip_route_detail(vty, rn, 0, use_fib, show_ng);
+ vty_show_ip_route_detail(vty, rn, (safi == SAFI_MULTICAST), use_fib,
+ show_ng);
route_unlock_node(rn);
}
@@ -1983,12 +1887,13 @@ DEFPY (show_route_detail,
DEFPY (show_route_summary,
show_route_summary_cmd,
- "show <ip$ipv4|ipv6$ipv6> route [vrf <NAME$vrf_name|all$vrf_all>] \
+ "show <ip$ipv4|ipv6$ipv6> route [{mrib$mrib|vrf <NAME$vrf_name|all$vrf_all>}] \
summary [table (1-4294967295)$table_id] [prefix$prefix] [json]",
SHOW_STR
IP_STR
IP6_STR
"IP routing table\n"
+ "Multicast SAFI table\n"
VRF_FULL_CMD_HELP_STR
"Summary of all routes\n"
"Table to display summary for\n"
@@ -1997,6 +1902,7 @@ DEFPY (show_route_summary,
JSON_STR)
{
afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
+ safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST;
struct route_table *table;
bool uj = use_json(argc, argv);
json_object *vrf_json = NULL;
@@ -2013,12 +1919,11 @@ DEFPY (show_route_summary,
continue;
if (table_id == 0)
- table = zebra_vrf_table(afi, SAFI_UNICAST,
- zvrf->vrf->vrf_id);
+ table = zebra_vrf_table(afi, safi, zvrf->vrf->vrf_id);
else
- table = zebra_vrf_lookup_table_with_table_id(
- afi, SAFI_UNICAST, zvrf->vrf->vrf_id,
- table_id);
+ table = zebra_vrf_lookup_table_with_table_id(afi, safi,
+ zvrf->vrf->vrf_id,
+ table_id);
if (!table)
continue;
@@ -2040,10 +1945,9 @@ DEFPY (show_route_summary,
VRF_GET_ID(vrf_id, vrf_name, false);
if (table_id == 0)
- table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id);
+ table = zebra_vrf_table(afi, safi, vrf_id);
else
- table = zebra_vrf_lookup_table_with_table_id(
- afi, SAFI_UNICAST, vrf_id, table_id);
+ table = zebra_vrf_lookup_table_with_table_id(afi, safi, vrf_id, table_id);
if (!table)
return CMD_SUCCESS;
@@ -2056,50 +1960,49 @@ DEFPY (show_route_summary,
return CMD_SUCCESS;
}
-DEFUN_HIDDEN (show_route_zebra_dump,
+DEFPY_HIDDEN (show_route_zebra_dump,
show_route_zebra_dump_cmd,
- "show <ip|ipv6> zebra route dump [vrf VRFNAME]",
+ "show <ip$ipv4|ipv6$ipv6> zebra route dump [{mrib$mrib|vrf <NAME$vrf_name|all$vrf_all>}]",
SHOW_STR
IP_STR
IP6_STR
"Zebra daemon\n"
"Routing table\n"
"All information\n"
- VRF_CMD_HELP_STR)
+ "Multicast SAFI table\n"
+ VRF_FULL_CMD_HELP_STR)
{
- afi_t afi = AFI_IP;
+ afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
+ safi_t safi = mrib ? SAFI_MULTICAST : SAFI_UNICAST;
struct route_table *table;
- const char *vrf_name = NULL;
- int idx = 0;
- afi = strmatch(argv[1]->text, "ipv6") ? AFI_IP6 : AFI_IP;
-
- if (argv_find(argv, argc, "vrf", &idx))
- vrf_name = argv[++idx]->arg;
-
- if (!vrf_name) {
+ if (vrf_all) {
struct vrf *vrf;
struct zebra_vrf *zvrf;
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
zvrf = vrf->info;
- if ((zvrf == NULL)
- || (zvrf->table[afi][SAFI_UNICAST] == NULL))
+ if (zvrf == NULL)
continue;
- table = zvrf->table[afi][SAFI_UNICAST];
- show_ip_route_dump_vty(vty, table);
+ table = zebra_vrf_table(afi, safi, zvrf->vrf->vrf_id);
+ if (!table)
+ continue;
+
+ show_ip_route_dump_vty(vty, table, afi, safi);
}
} else {
vrf_id_t vrf_id = VRF_DEFAULT;
- VRF_GET_ID(vrf_id, vrf_name, true);
+ if (vrf_name)
+ VRF_GET_ID(vrf_id, vrf_name, false);
+
+ table = zebra_vrf_table(afi, safi, vrf_id);
- table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id);
if (!table)
return CMD_SUCCESS;
- show_ip_route_dump_vty(vty, table);
+ show_ip_route_dump_vty(vty, table, afi, safi);
}
return CMD_SUCCESS;
@@ -2193,7 +2096,8 @@ static void show_ip_route_nht_dump(struct vty *vty,
}
}
-static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table)
+static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table, afi_t afi,
+ safi_t safi)
{
struct route_node *rn;
struct route_entry *re;
@@ -2205,7 +2109,7 @@ static void show_ip_route_dump_vty(struct vty *vty, struct route_table *table)
struct nexthop *nexthop = NULL;
int nexthop_num = 0;
- vty_out(vty, "\nIPv4/IPv6 Routing table dump\n");
+ vty_out(vty, "\n%s %s Routing table dump\n", afi2str(afi), safi2str(safi));
vty_out(vty, "----------------------------\n");
for (rn = route_top(table); rn; rn = route_next(rn)) {
@@ -3757,22 +3661,6 @@ static int config_write_protocol(struct vty *vty)
vty_out(vty, "zebra zapi-packets %u\n",
zrouter.packets_to_process);
- enum multicast_mode ipv4_multicast_mode = multicast_mode_ipv4_get();
-
- if (ipv4_multicast_mode != MCAST_NO_CONFIG)
- vty_out(vty, "ip multicast rpf-lookup-mode %s\n",
- ipv4_multicast_mode == MCAST_URIB_ONLY
- ? "urib-only"
- : ipv4_multicast_mode == MCAST_MRIB_ONLY
- ? "mrib-only"
- : ipv4_multicast_mode
- == MCAST_MIX_MRIB_FIRST
- ? "mrib-then-urib"
- : ipv4_multicast_mode
- == MCAST_MIX_DISTANCE
- ? "lower-distance"
- : "longer-prefix");
-
/* Include dataplane info */
dplane_config_write_helper(vty);
@@ -4356,9 +4244,6 @@ void zebra_vty_init(void)
install_element(CONFIG_NODE, &allow_external_route_update_cmd);
install_element(CONFIG_NODE, &no_allow_external_route_update_cmd);
- install_element(CONFIG_NODE, &ip_multicast_mode_cmd);
- install_element(CONFIG_NODE, &no_ip_multicast_mode_cmd);
-
install_element(CONFIG_NODE, &zebra_nexthop_group_keep_cmd);
install_element(CONFIG_NODE, &ip_zebra_import_table_distance_cmd);
install_element(CONFIG_NODE, &no_ip_zebra_import_table_cmd);
@@ -4376,15 +4261,12 @@ void zebra_vty_init(void)
install_element(VIEW_NODE, &show_vrf_cmd);
install_element(VIEW_NODE, &show_vrf_vni_cmd);
install_element(VIEW_NODE, &show_route_cmd);
+ install_element(VIEW_NODE, &show_ip_rpf_cmd);
install_element(VIEW_NODE, &show_ro_cmd);
install_element(VIEW_NODE, &show_route_detail_cmd);
install_element(VIEW_NODE, &show_route_summary_cmd);
install_element(VIEW_NODE, &show_ip_nht_cmd);
- install_element(VIEW_NODE, &show_ip_rpf_cmd);
- install_element(VIEW_NODE, &show_ip_rpf_addr_cmd);
- install_element(VIEW_NODE, &show_ipv6_rpf_addr_cmd);
-
install_element(CONFIG_NODE, &rnh_hide_backups_cmd);
install_element(VIEW_NODE, &show_frr_cmd);
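Taken together, the zebra_vty.c changes fold the RPF lookups into the regular route show commands via a new mrib keyword that selects SAFI_MULTICAST, keeping "show ip rpf" only as a deprecated alias; the per-address form is now spelled through show_route_detail_cmd. An illustrative vtysh session; the header line follows the afi2str()/safi2str() format in zebra_vty_display_vrf_header() above, and the addresses and exact output shape are illustrative:

vtysh# show ip route mrib
IPv4 multicast VRF default:
...

vtysh# show ip route mrib 192.0.2.10     (replaces "show ip rpf 192.0.2.10")
vtysh# show ip rpf                       (kept as a deprecated alias)
vtysh# show ip route mrib summary        (mrib also works with summary/dump)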
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index 9e2c138e21..c60eeab946 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -2203,7 +2203,7 @@ static int zl3vni_send_add_to_client(struct zebra_l3vni *zl3vni)
is_anycast_mac = false;
}
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
/* The message is used for both vni add and/or update like
* vrr mac is added for l3vni SVI.
@@ -2246,7 +2246,7 @@ static int zl3vni_send_del_to_client(struct zebra_l3vni *zl3vni)
if (!client)
return 0;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, ZEBRA_L3VNI_DEL, zl3vni_vrf_id(zl3vni));
stream_putl(s, zl3vni->vni);
@@ -4403,6 +4403,7 @@ static int zebra_vxlan_check_del_local_mac(struct interface *ifp,
UNSET_FLAG(mac->flags, ZEBRA_MAC_ALL_LOCAL_FLAGS);
UNSET_FLAG(mac->flags, ZEBRA_MAC_STICKY);
SET_FLAG(mac->flags, ZEBRA_MAC_AUTO);
+ mac->rem_seq = 0;
}
return 0;
@@ -4755,8 +4756,13 @@ void zebra_vxlan_remote_vtep_add(vrf_id_t vrf_id, vni_t vni,
zif = ifp->info;
/* If down or not mapped to a bridge, we're done. */
- if (!if_is_operative(ifp) || !zif->brslave_info.br_if)
+ if (!if_is_operative(ifp) || !zif->brslave_info.br_if) {
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("%s VNI %u VTEP %pI4 ifp %s oper %u br_if %u skipping update",
+ __func__, zevpn->vni, &vtep_ip, ifp->name, if_is_operative(ifp),
+ !zif->brslave_info.br_if);
return;
+ }
zvtep = zebra_evpn_vtep_find(zevpn, &vtep_ip);
if (zvtep) {
@@ -5855,7 +5861,7 @@ static int zebra_vxlan_sg_send(struct zebra_vrf *zvrf,
if (!CHECK_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG))
return 0;
- s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+ s = stream_new(ZEBRA_SMALL_PACKET_SIZE);
zclient_create_header(s, cmd, VRF_DEFAULT);
stream_putl(s, IPV4_MAX_BYTELEN);
diff --git a/zebra/zebra_vxlan_if.c b/zebra/zebra_vxlan_if.c
index 17ab05c1f3..ea0be2f644 100644
--- a/zebra/zebra_vxlan_if.c
+++ b/zebra/zebra_vxlan_if.c
@@ -1032,7 +1032,13 @@ int zebra_vxlan_if_vni_up(struct interface *ifp, struct zebra_vxlan_vni *vnip)
/* If part of a bridge, inform BGP about this VNI. */
/* Also, read and populate local MACs and neighbors. */
if (zif->brslave_info.br_if) {
- zebra_evpn_send_add_to_client(zevpn);
+ if (if_is_operative(zevpn->vxlan_if)) {
+ zebra_evpn_send_add_to_client(zevpn);
+ } else {
+ if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug("%s VNI %u vxlan_if %s oper down skipping vni up to client",
+ __func__, zevpn->vni, zevpn->vxlan_if->name);
+ }
zebra_evpn_read_mac_neigh(zevpn, ifp);
}
}
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 07e3996643..7ef3582329 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -57,6 +57,7 @@ extern struct zebra_privs_t zserv_privs;
/* The listener socket for clients connecting to us */
static int zsock;
+static bool started_p;
/* The lock that protects access to zapi client objects */
static pthread_mutex_t client_mutex;
@@ -183,10 +184,9 @@ void zserv_log_message(const char *errmsg, struct stream *msg,
*/
static void zserv_client_fail(struct zserv *client)
{
- flog_warn(
- EC_ZEBRA_CLIENT_IO_ERROR,
- "Client '%s' (session id %d) encountered an error and is shutting down.",
- zebra_route_string(client->proto), client->session_id);
+ flog_warn(EC_ZEBRA_CLIENT_IO_ERROR,
+ "Client %d '%s' (session id %d) encountered an error and is shutting down.",
+ client->sock, zebra_route_string(client->proto), client->session_id);
atomic_store_explicit(&client->pthread->running, false,
memory_order_relaxed);
@@ -467,8 +467,8 @@ static void zserv_read(struct event *thread)
}
if (IS_ZEBRA_DEBUG_PACKET)
- zlog_debug("Read %d packets from client: %s. Current ibuf fifo count: %zu. Conf P2p %d",
- p2p_avail - p2p, zebra_route_string(client->proto),
+ zlog_debug("Read %d packets from client: %s(%d). Current ibuf fifo count: %zu. Conf P2p %d",
+ p2p_avail - p2p, zebra_route_string(client->proto), client->sock,
client_ibuf_fifo_cnt, p2p_orig);
/* Reschedule ourselves since we have space in ibuf_fifo */
@@ -929,9 +929,16 @@ void zserv_close(void)
/* Free client list's mutex */
pthread_mutex_destroy(&client_mutex);
+
+ started_p = false;
}
-void zserv_start(char *path)
+
+/*
+ * Open zebra's ZAPI listener socket. This is done early during startup,
+ * before zebra is ready to listen and accept client connections.
+ */
+void zserv_open(const char *path)
{
int ret;
mode_t old_mask;
@@ -973,6 +980,26 @@ void zserv_start(char *path)
path, safe_strerror(errno));
close(zsock);
zsock = -1;
+ }
+
+ umask(old_mask);
+}
+
+/*
+ * Start listening for ZAPI client connections.
+ */
+void zserv_start(const char *path)
+{
+ int ret;
+
+ /* This may be called more than once during startup - potentially once
+ * per netns - but only do this work once.
+ */
+ if (started_p)
+ return;
+
+ if (zsock <= 0) {
+ flog_err_sys(EC_LIB_SOCKET, "Zserv socket open failed");
return;
}
@@ -986,7 +1013,7 @@ void zserv_start(char *path)
return;
}
- umask(old_mask);
+ started_p = true;
zserv_event(NULL, ZSERV_ACCEPT);
}
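The split above separates socket creation from accepting connections: zserv_open() can bind the ZAPI socket early in startup, and zserv_start() begins accepting only once zebra is ready, guarded by started_p so repeated calls (e.g. once per netns) are harmless. A minimal sketch of the intended call order; only zserv_open()/zserv_start() and their semantics come from this diff, the surrounding function is scaffolding:

/* Illustrative startup ordering for the new split API. */
#include "zebra/zserv.h"

static void demo_zebra_startup(const char *zapi_path)
{
	zserv_open(zapi_path);  /* early: create and bind the socket */

	/* ... interface/VRF/netns initialization happens here ... */

	zserv_start(zapi_path); /* late: begin accepting clients */
	zserv_start(zapi_path); /* no-op: started_p guards re-entry */
}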
diff --git a/zebra/zserv.h b/zebra/zserv.h
index 87d2b4adbf..ce47ef19fa 100644
--- a/zebra/zserv.h
+++ b/zebra/zserv.h
@@ -256,15 +256,24 @@ extern void zserv_init(void);
extern void zserv_close(void);
/*
+ * Open Zebra API server socket.
+ *
+ * Create and open the server socket.
+ *
+ * path
+ * where to place the Unix domain socket
+ */
+extern void zserv_open(const char *path);
+
+/*
* Start Zebra API server.
*
- * Allocates resources, creates the server socket and begins listening on the
- * socket.
+ * Allocates resources and begins listening on the server socket.
*
* path
* where to place the Unix domain socket
*/
-extern void zserv_start(char *path);
+extern void zserv_start(const char *path);
/*
* Send a message to a connected Zebra API client.